-
Notifications
You must be signed in to change notification settings - Fork 3
/
podi_collectcells.py
executable file
·5752 lines (4762 loc) · 241 KB
/
podi_collectcells.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env python3
#
# Copyright 2012-2013 Ralf Kotulla
# kotulla@uwm.edu
#
# This file is part of the ODI QuickReduce pipeline package.
#
# If you find this program or parts thereof please make sure to
# cite it appropriately (please contact the author for the most
# up-to-date reference to use). Also if you find any problems
# or have suggestions on how to improve the code or its
# functionality please let me know. Comments and questions are
# always welcome.
#
# The code is made publicly available. Feel free to share the link
# with whoever might be interested. However, I do ask you to not
# publish additional copies on your own website or other sources.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
"""
Function
--------------------------------------------------------------------------------
**podi_collectcells** is the main script of the pODI QuickReduce pipeline. As
such, it handles all reduction steps, combining all cells from all available
OTAs into a single FITS-file. If invoked with the respective command line flags,
it also performs the astrometric and photometric calibration.
How to run podi_collectcells
--------------------------------------------------------------------------------
``podi_collectcells input output.fits (flags)``
Input here can be either
- a directory containing all OTA fits files. In this case, the directory has to
contain the basename of the files, for example ``o20130101T123456.0/``.
- the filename of a single OTA FITS file. It is then assumed that all other OTA
files reside in the same directory
Available flags
--------------------------------------------------------------------------------
* **-cals=/dir/to/cals**
Specifies the directory that holds the calibration data. -cals is processed
first, but you can change the path to individual calibration frames with any
of the next options.
* **-bias=/dir/to/bias**
Overwrites the -cals path to specify a different directory for the bias frames
* **-dark=/dir/to/darks**
Same for darks
* **-flat=/dir/to/flats**
  Same for flats
* **-bpm=/dir/to/bpms**
Same as above. Alternatively, you can specify "auto" (without the " ) as
directory. In that case the script looks for the bad pixel masks in the
directory of the script.
* **-pupilghost=/dir**
same as above for the pupil ghost file
* **-fringe=/dir**
same as above for the fringing templates
* **-fringevectors**
Fringe correction needs special "vector" frames marking dark and bright
regions. These are stored in a set of ds9 region files, with one file for each
detector, and this option allows to specify the directory holding all these
files.
* **-gain**
Apply a gain correction to all cells. If combined with the -nonlinearity
option, the relative gain values from the non-linearity are used, otherwise it
uses the constants defined in each cell header.
Note that for correct reduction, this flag needs to be applied consistently,
in particular when creating the bias, dark, and flat-field frames.
* **-persistency=/dir**
same as above. However, since persistency files are both read (from the
earlier exposures) AND written (to keep track of saturated stars in the
current exposure) make sure you have permission to write into the specified
directory as well as sufficient disk-space.
* **-wcs=some/file.fits**
Give the filename of the canned WCS solution that specifies the detector
distortion. See scamp2minifits on a way to create the necessary input file.
* **-fixwcs**
  Activates the automatic WCS correction routine. If enabled, collectcells runs
a source detection (podi_findstars), obtains a catalog of reference star
positions (podi_ipprefcat) and corrects telescope pointing errors (using
functions from podi_fixwcs).
* **-simplewcs**
Modifies the WCS solution that remains in the fits-frame. With this option,
the RA-ZPX solution is changed into a simple RA-TAN system without any
distortion factors. Mostly for debugging and testing.
* **-dither**
* **-target**
Now defunct
* **-prep4sex**
Changes the value of undefined pixels (those in the cell-gaps and the detector
edges) from the default NaN (Not a Number) value to -1e31, a value exceeding
the SExtractor value for bad pixels, telling SExtractor to ignore them
* **-singleota=XX**
Limits the output to only a specific OTA. For example, to only extract OTA42,
give -singleota=42.
* **-noclobber**
Skip the data reduction if the output file already exists
* **-wcsoffset=d_ra,d_dec**
Apply WCS offset in degrees to WCS solution before files are fed to
SourceExtractor. Use this option if the telescope pointing is WAY off.
* **-centralonly**
Only collect cells from the central 3x3 array, and ignore all data from the
outlying OTAs
* **-bgmode**
Disable the initial welcome splash screen
* **-photcalib**
Enable the photometric calibration of the output frames.
* **-nonlinearity=/some/dir/nonlinearity_coefficients.fits**
Activate the non-linearity correction and use the specified file as source
for all correction coefficients.
* **-plotformat=format1,format2**
Generate the diagnostic plots in the specified formats. All formats supported
by matplotlib are available - these are typically png, jpg, ps, pdf, svg
* **-nootalevelplots**
By default, all diagnostic plots are created for the full focalplane and for
each OTA individually. Adding this option disables the OTA level, restricting
diagnostic plots to only the focalplane views.
* **-qasubdirs**
Creates a directory structure parallel to the output file and sorts the
diagnostic plots into (sub-)directories. The default directory structure is:
output.fits
QA/
wcs1.png
wcs1/
OTA00.png
OTA22.png
seeing.png
seeing/
OTA00.png
OTA22.png
* **-qasubdirname**
  Allows specifying the name of the "QA" directory as shown above.
* **-noqaplots**
Disable the creation of the QA plots.
* **-addfitskey=KEY,value**
Add a user-defined fits header entry to the primary header of the output file.
KEY needs to be fits-conform, i.e. 8 characters or less without spaces or
special characters. value is stored as a string in the output file. If value
contains spaces, the value needs to be quoted (eg. -addfitskey=WIYN,"greatest
of the universe")
* **-nonsidereal=a,b,c**
a: proper motion of the object (dRA*cos(dec)) given in arcseconds per hour.
b: proper motion dDec in arcsec / hour
c: Reference. This can either be a MJD directly, or a FITS file containing the
MJD-OBS header keyword which is then taken to be the reference MJD.
* **-fitradialZP**
Fit a linear radial profile to the photometric zeropoint data. The center is
assumed to be roughly the center of OTA 3,3 as per Daniel Harbeck's demo-
script. Fit results (RADZP_P0, RADZP_P1) and errors (RADZP_E0, RADZP_E1) are
stored in the primary header of the output file. If enabled, this options also
generates another diagnostic plot showing the uncorrected photometric ZP as
function of radius before and after the best-fit has been taken out.
Methods
--------------------------------------------------------------------------------
"""
from __future__ import print_function
import sys
import os
import signal
import astropy.io.fits as pyfits
import numpy
import scipy
import scipy.optimize
import ephem
import traceback
#import psutil
import datetime
import warnings
import queue
import threading
import multiprocessing
import multiprocessing.reduction
import ctypes
import time
import logging
import itertools
import bottleneck
import errno
import psutil
from astropy.io.fits.verify import VerifyWarning
warnings.simplefilter('ignore', category=VerifyWarning)
from podi_plotting import *
#
# Un-comment this block to get warnings with tracebacks for easier locating
#
# import warnings
# #warnings.simplefilter("error")
# warnings.simplefilter("ignore", RuntimeWarning)
# import traceback
# import warnings
# import sys
# def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
# print"\n"*3
# traceback.print_stack()
# log = file if hasattr(file,'write') else sys.stderr
# log.write(warnings.formatwarning(message, category, filename, lineno, line))
# print "\n"*3
# warnings.showwarning = warn_with_traceback
gain_correct_frames = False
from podi_definitions import *
from podi_commandline import *
from wiyn_filters import *
#import podi_findstars
import podi_search_ipprefcat
import podi_fixwcs
import podi_fixwcs_rotation
import podi_sitesetup as sitesetup
import podi_crosstalk
import podi_persistency
import podi_asyncfitswrite
import podi_fitskybackground
import podi_matchcatalogs
import podi_matchpupilghost
import podi_fringing
import podi_photcalib
import podi_nonlinearity
import podi_logging
import podi_cosmicrays
import podi_illumcorr
import podi_almanac
import podi_focalplanelayout
import podi_guidestars
import podi_shifthistory
import podi_photflat
import psf_quality
import podi_readfitscat
import podi_associations
import podi_calibrations
from podi_calibrations import check_filename_directory
from podi_reductionlog import *
from version import record_pipeline_versioning
from astLib import astWCS
from sharedmemory import SharedMemory
fix_cpu_count = False

if (sitesetup.number_cpus == "auto"):
    # Auto-detect how many CPUs are available for parallel processing,
    # capped at sitesetup.max_cpu_count when that limit is configured.
    # Start from a single-CPU fallback so number_cpus is always defined:
    # the original bare "except: pass" could leave the name unbound and
    # trigger a NameError the first time number_cpus was used.
    number_cpus = 1
    try:
        number_cpus = multiprocessing.cpu_count()
        if (number_cpus > sitesetup.max_cpu_count and sitesetup.max_cpu_count > 1):
            number_cpus = sitesetup.max_cpu_count
    except NotImplementedError:
        # multiprocessing.cpu_count() raises NotImplementedError on
        # platforms where the CPU count cannot be determined; stick
        # with the single-CPU fallback in that case.
        pass
else:
    # An explicit CPU count was configured in podi_sitesetup
    number_cpus = sitesetup.number_cpus
def read_techdata(techdata_hdulist, ota_x, ota_y, wm_cellx, wm_celly):
    """
    Read the per-cell gain and readnoise values from a TECHDATA HDU list.

    Parameters
    ----------
    techdata_hdulist : HDUList (or any mapping of extension name -> HDU)
        Opened TECHDATA file containing GAIN, READNOISE and READNOISE_E
        image extensions, each holding one value per cell for the full
        focal plane.
    ota_x, ota_y : int
        Position of the OTA on the focal plane (e.g. 3, 3 for OTA33).
    wm_cellx, wm_celly : int
        Cell coordinates (0..7) within the OTA.

    Returns
    -------
    (gain, readnoise, readnoise_e) : tuple
        The per-cell values, or (None, None, None) if the file is
        corrupted or the requested extension/cell cannot be found.
    """
    logger = logging.getLogger("ReadTechData")

    gain, readnoise, readnoise_e = None, None, None

    # Convert OTA + cell coordinates into indices of the focal-plane-wide
    # tech-data arrays; the cell y-axis is flipped (7 - wm_celly).
    cellidx_x = ota_x * 8 + wm_cellx
    cellidx_y = ota_y * 8 + (7 - wm_celly)

    try:
        gain = techdata_hdulist['GAIN'].data[cellidx_y, cellidx_x]
        readnoise = techdata_hdulist['READNOISE'].data[cellidx_y, cellidx_x]
        readnoise_e = techdata_hdulist['READNOISE_E'].data[cellidx_y, cellidx_x]
    except (KeyError, IndexError, TypeError, AttributeError):
        # Narrowed from a bare except: a missing extension, an
        # out-of-range cell index, or a missing/malformed data array
        # all indicate a broken TECHDATA file.
        logger.error("TECHDATA file is likely corrupted!")

    return gain, readnoise, readnoise_e
def apply_wcs_distortion(filename, hdu, binning, reduction_log=None):
    """
    Copy the WCS distortion model from a canned WCS calibration file into
    the header of the given image extension.

    The model's CRPIX/CD values are scaled to the image binning, and its
    CRVAL values are treated as offsets added to the pointing already in
    the header. CTYPE keywords are rewritten from TAN to TPV for IRAF
    compatibility.

    Parameters
    ----------
    filename : string
        Filename of the FITS file holding the WCS distortion model, with
        one extension per OTA (matched via EXTNAME).
    hdu : ImageHDU
        Extension whose header is updated in place.
    binning : int
        Binning factor of the image data.
    reduction_log : ReductionLog, optional
        If given, the 'wcs_dist' step is recorded as attempted / failed /
        partially failed / successful.

    Returns
    -------
    bool
        True if the model was applied (possibly only partially), False if
        the model file or the matching extension could not be read.
    """
    logger = logging.getLogger("ApplyWCSmodel")

    if (reduction_log is not None):
        reduction_log.attempt('wcs_dist')

    try:
        wcs = pyfits.open(filename)
    except Exception:
        if (reduction_log is not None):
            reduction_log.fail('wcs_dist')
        logger.error("Could not open WCS distortion model (%s)" % (filename))
        return False

    extname = hdu.header['EXTNAME']
    try:
        wcs_header = wcs[extname].header
    except KeyError:
        if (reduction_log is not None):
            reduction_log.fail('wcs_dist')
        logger.warning("Could not find distortion model for %s" % (extname))
        wcs.close()
        return False

    try:
        # Scale reference pixel and CD matrix to the image binning
        for hdr_name in ('CRPIX1', 'CRPIX2'):
            wcs_header[hdr_name] /= binning
        for hdr_name in ('CD1_1', 'CD1_2', 'CD2_1', 'CD2_2'):
            wcs_header[hdr_name] *= binning

        # Copy all model keywords except the pointing (CRVAL1/2), which
        # is applied as an offset to the existing pointing below.
        cards = wcs_header.cards
        for (keyword, value, comment) in cards:
            if (keyword not in ['CRVAL1', 'CRVAL2']):
                hdu.header[keyword] = (value, comment)

        # Interpret the model CRVALs as offsets; fold the RA offset into
        # the -180..+180 range and de-project it by cos(declination).
        d_crval1 = wcs_header['CRVAL1']
        if (d_crval1 > 180):
            d_crval1 -= 360
        hdu.header['CRVAL2'] += wcs_header['CRVAL2']
        hdu.header["CRVAL1"] += d_crval1 / math.cos(math.radians(hdu.header['CRVAL2']))

        # Make sure to write RAs that are positive and within 0..360 deg
        if (hdu.header["CRVAL1"] < 0):
            hdu.header['CRVAL1'] -= math.floor(hdu.header['CRVAL1'] / 360.) * 360.
        elif (hdu.header['CRVAL1'] > 360.):
            hdu.header['CRVAL1'] = math.fmod(hdu.header['CRVAL1'], 360.0)

        if ('RADESYS' in hdu.header):
            hdu.header['RADESYS'] = 'ICRS'

        # Rewrite TAN to TPV to make IRAF happy :-(
        for hdr_name in ['CTYPE1', 'CTYPE2']:
            if (hdr_name in hdu.header):
                hdu.header[hdr_name] = hdu.header[hdr_name].replace("TAN", "TPV")
    except Exception:
        logger.critical("something went wrong while applying the WCS model")
        if (reduction_log is not None):
            reduction_log.partial_fail('wcs_dist')
        podi_logging.log_exception()
    else:
        # BUGFIX: record success only when no exception occurred. The
        # original fell through unconditionally, overwriting the
        # partial_fail entry with success.
        if (reduction_log is not None):
            reduction_log.success('wcs_dist')
    finally:
        # BUGFIX: the model file was never closed before
        wcs.close()
    return True
def collect_reduce_ota(filename,
verbose=False,
options=None):
"""
collect_reduce_ota does work relating to the initial instrument
detrending **for a single OTA** from cross-talk correction, overscan-
subtraction, bias & dark correction and flat-fielding. It also derives a
sky-background level and creates a source catalog using SourceExtractor.
Parameters
----------
filename : string
Filename of the raw OTA FITS file to be reduced
verbose : Bool
Output more detailed progress information during execution. Mostly made
for debugging and error tracking.
options : struct/dictionary
This dictionary contains all configuration parameters necessary for the
reduction, such as flat-field directories and specified reduction flags
read from the command-line.
Returns
-------
data_products : dictionary
The returned dictionary contains all information generated during the
reduction, such as the reduced ImageHDU, the source catalog,
data about background levels, etc.
"""
reduction_log = ReductionLog()
data_products = {
"hdu": None,
"header": None,
"wcsdata": None,
"sky-samples": None,
"sky": None,
"tech-header": None,
"fringe_scaling": None,
"fringe-template": None,
"source-cat": None,
'pupilghost-scaling': None,
'pupilghost-template': None,
'reduction_files_used': None,
'reduction_log': reduction_log,
'psf': None,
}
if (not os.path.isfile(filename)):
stdout_write("Couldn't find file %s ..." % (filename))
else:
# Create an fits extension to hold the output
hdu = pyfits.ImageHDU()
# log_svn_version(hdu.header)
# Set the inherit keyword so that the headers removed from each
# extension are instead inherited from the primary
hdu.header["INHERIT"] = (True, "Inherit headers from PrimaryHDU")
# also create a tech-header to keep track of values used in this frame
tech_header = pyfits.Header()
# Keep track of what input files we used
reduction_files_used = {}
try:
hdulist = pyfits.open(filename, memmap=False)
except:
# This happed with corrupt files
return data_products
# Check if we can find all 1+64 extensions
if (len(hdulist) < 65):
# Something is wrong with this file
return data_products
reduction_files_used['raw'] = filename
# Get the binning factor
binning = get_binning(hdulist[1].header)
# Get the image output size (depends on binning)
size_x, size_y = get_collected_image_dimensions(binning)
#print size_x, size_y
obsid = hdulist[0].header["OBSID"]
ota = int(hdulist[0].header['FPPOS'][2:])
ota_c_x, ota_c_y = int(math.floor(ota/10)), int(math.fmod(ota,10))
# Save the fppos as name for this extension
ota_name = "OTA%02d" % ota
extname = "OTA%02d.SCI" % ota
hdu.name = extname
hdu.header['OTA'] = (ota, "OTA designation")
# podi_logging.podi_getlogger("%s - %s" % (obsid, extname), options['log_setup'])
logger = logging.getLogger("%s - %s" % (obsid, extname))
fpl = podi_focalplanelayout.FocalPlaneLayout(hdulist[0])
logger.debug("Focalplane Layout: %s" % (fpl.layout))
# Now copy the headers from the original file into the new one
firstkey = None
cards = hdulist[0].header.cards
for (keyword, value, comment) in cards:
firstkey = keyword if firstkey is None else firstkey
hdu.header[keyword] = (value, comment)
add_fits_header_title(hdu.header, "Instrument telemetry from raw data", firstkey)
#
# Add default headers here
#
hdu.header['SOFTBIN'] = 0
if ('RADESYS' in hdu.header):
del hdu.header['RADESYS']
add_fits_header_title(hdu.header, "Pipeline added/modified metadata", 'SOFTBIN')
# Also read the MJD for this frame. This will be needed later for the correction
mjd = hdu.header['MJD-OBS']
#
# in the old data, some header EXTNAMES are duplicated, so we can't look
# up extensions by name in this case. Create a dictionary with EXTNAMES
# and extension IDs to work around this problem.
#
extname2id = {}
for i in range(1, len(hdulist)):
_extname = hdulist[i].header['EXTNAME']
extname2id[_extname.lower()] = i
logger.debug("Setting up extension lookup directory:\n"+str(extname2id))
#
# Perform cross-talk correction, using coefficients found in the
# podi_crosstalk package.
#
# Procedure:
# Correct all frames for the crosstalk, then write them back into the
# original OTA position so we can perform the overscan subtraction etc.
#
logger.info("Starting work on OTA %02d of %s ..." % (ota, obsid))
mastercals = podi_calibrations.ODICalibrations(
cmdline_options=options,
hdulist=hdulist)
# Now go through each of the 8 lines
if (not mastercals.apply_crosstalk()):
reduction_log.not_selected('crosstalk')
elif (mastercals.crosstalk(mjd, ota) is None):
logger.warning("Cross-talk correction requested, but failed!")
reduction_log.fail('crosstalk')
else:
xtalk_file = mastercals.crosstalk(mjd, ota)
logger.debug("Starting crosstalk correction (%s)" % (extname))
reduction_files_used['crosstalk'] = xtalk_file
outcome = podi_crosstalk.apply_crosstalk_correction(
hdulist,
xtalk_file = xtalk_file,
fpl = fpl,
extname2id = extname2id,
options = options,
reduction_log = reduction_log
)
logger.debug("Done with crosstalk correction")
#
# Allocate memory for the merged frame, and set all pixels by default to NaN.
# Valid pixels will subsequently be overwritten with real numbers
#
merged = numpy.ones(shape=(size_x, size_y), dtype=numpy.float32)
merged[:,:] = options['indef_pixelvalue']
#
# Get some information for the OTA
#
fppos = hdulist[0].header['FPPOS']
try:
filter_name = hdulist[0].header['FILTER']
except KeyError:
filter_name = 'UNKNOWN'
try:
exposure_time = hdulist[0].header['EXPTIME']
except KeyError:
exposure_time = 0
# nonlin_data = None
# if (options['nonlinearity-set'] or options['gain_method'] == "relative"):
# nonlinearity_file = options['nonlinearity']
# if (options['nonlinearity'] == None or
# options['nonlinearity'] == "" or
# not os.path.isfile(nonlinearity_file)):
# nonlinearity_file = podi_calibrations.find_nonlinearity_coefficient_file(mjd, options)
# if (options['verbose']):
# print "Using non-linearity coefficients from",nonlinearity_file
# logger.debug("Using non-linearity coefficients from file %s" % (nonlinearity_file))
# nonlin_data = podi_nonlinearity.load_nonlinearity_correction_table(nonlinearity_file, ota)
# if (options['nonlinearity-set']):
# reduction_files_used['nonlinearity'] = nonlinearity_file
nonlin_data = None
if (not mastercals.apply_nonlinearity()):
reduction_log.not_selected('nonlinearity')
elif (mastercals.nonlinearity(mjd) is None):
reduction_log.fail('nonlinearity')
else:
reduction_log.attempt('nonlinearity')
nonlinearity_file = mastercals.nonlinearity(mjd=mjd)
logger.debug("Using non-linearity coefficients from file %s" % (nonlinearity_file))
nonlin_data = podi_nonlinearity.load_nonlinearity_correction_table(nonlinearity_file, ota)
if (nonlin_data is not None):
reduction_files_used['nonlinearity'] = nonlinearity_file
reduction_log.success('nonlinearity')
else:
logger.warning("Unable to load non-linearity corrections (%s)" % (nonlinearity_file))
reduction_log.fail('nonlinearity')
relative_gains = None
if (mastercals.apply_relative_gain()):
# In this mode, we also require a non-linearity correction file to be loaded
if (nonlin_data is not None):
# all done, we already loaded the file as part of the non-linearity correction startup
relative_gains = nonlin_data
pass
else:
relative_gains_file = mastercals.nonlinearity(mjd=mjd)
if (relative_gains_file is not None):
relative_gains = podi_nonlinearity.load_nonlinearity_correction_table(relative_gains_file, ota)
#
# Search, first in the flat-field, then in the bias-frame for a
# TECHDATA extension that holds GAINS and readout noises for each cell.
#
techdata = None
techhdulist = None
if (options['techdata'] is not None):
techfile = None
# Set the filename for the TECHDATA extension based on the user-option
if (options['techdata'] == "from_flat" and options['flat_dir'] is not None):
for ft in sitesetup.flat_order:
techfile = check_filename_directory(options['flat_dir'],
"%s_%s_bin%d.fits" % (ft, filter_name, binning))
if (os.path.isfile(techfile)):
break
elif (options['techdata'] == "from_bias" and options['bias_dir'] is not None):
techfile = check_filename_directory(options['bias_dir'], "bias_bin%s.fits" % (binning))
else:
for ft in sitesetup.flat_order:
techfile = check_filename_directory(options['techdata'],
"techdata_%s_%s_bin%d.fits" % (ft, filter_name, binning))
if (os.path.isfile(techfile)):
break
# Check if the specified file exists and read the data if possible
if (os.path.isfile(techfile)):
logger.debug("Reading techdata from file %s" % (techfile))
techhdulist = pyfits.open(techfile)
reduction_files_used['techdata'] = techfile
else:
techfile = "%s/techdata.fits" % (sitesetup.exec_dir)
if (os.path.isfile(techfile)):
logger.debug("Reading techdata from file %s" % (techfile))
techhdulist = pyfits.open(techfile)
reduction_files_used['techdata'] = techfile
else:
logger.debug("Was looking for techfile %s but couldn't find it" % (techfile))
all_gains = numpy.ones(shape=(8,8)) * -99
all_readnoise = numpy.ones(shape=(8,8)) * -99
all_readnoise_electrons = numpy.ones(shape=(8,8)) * -99
#
# Handle the trimcell option
#
hdu.header['TRIMCELL'] = (-1, "trim cell edges (-1: no)")
if (options['trimcell'] is None):
reduction_log.not_selected('trimcell')
else:
reduction_log.success('trimcell')
hdu.header['TRIMCELL'] = options['trimcell']
reduction_log.attempt('overscan')
for wm_cellx, wm_celly in itertools.product(range(8),repeat=2):
#if (not options['bgmode']):
# stdout_write("\r%s: OTA %02d, cell %s ..." % (obsid, ota, hdulist[cell].header['EXTNAME']))
cellname = "xy%d%d" % (wm_cellx, wm_celly)
cell = extname2id[cellname]
#
# Special case for cell 0,7 (the one in the bottom left corner):
# Copy the CRPIX values into the merged image header
#
if (hdulist[cell].header['EXTNAME'].lower() == "xy07"):
# print "Setting CRPIXs", hdulist[cell].header['CRPIX1'], hdulist[cell].header['CRPIX2']
hdu.header["CRPIX1"] = (hdulist[cell].header['CRPIX1'], "Ref. pixel RA")
hdu.header["CRPIX2"] = (hdulist[cell].header['CRPIX2'], "Ref. pixel DEC")
# Check if this is one of the broken cells
cellmode_id = get_cellmode(hdulist[0].header, hdulist[cell].header, fpl)
if (not cellmode_id == 0):
# This means it either broken (id=-1) or in video-mode (id=1)
if (options['keep_cells'] == False):
# no keep_cells option
continue
elif (options['keep_cells'] is None):
# keep all cells
pass
else:
cell_id = "%02d.%1d%1d" % (ota, wm_cellx, wm_celly)
if (cell_id in options['keep_cells']):
pass
else:
continue
# logger.debug("ota %02d, cell %d,%d: gain=%f, ron=%f, ron(e-)=%f" % (
# ota, wm_cellx, wm_celly, gain, readnoise, readnoise_electrons))
# all_gains[wm_cellx, wm_celly] = gain
# all_readnoise[wm_cellx, wm_celly] = readnoise
# all_readnoise_electrons[wm_cellx, wm_celly] = readnoise_electrons
#
# Now extract just the data section.
# Values are hard-coded as some of the DATASEC keywords are wrong
#
datasec = extract_datasec_from_cell(
hdulist[cell].data, binning,
trimcell=options['trimcell'])
# print "datasec.shape=",datasec.shape
# Now overscan subtract and insert into large frame
overscan_region = extract_biassec_from_cell(hdulist[cell].data, binning)
#extract_region(hdulist[cell].data, '[500:530,1:494]')
overscan_level = numpy.median(overscan_region)
datasec -= overscan_level
# logger.debug("cell %s: gain=%.2f overscan=%6.1f" % (hdulist[cell].header['EXTNAME'], gain, overscan_level))
if (options['nonlinearity-set']):
nonlin_correction = podi_nonlinearity.compute_cell_nonlinearity_correction(
datasec, wm_cellx, wm_celly, nonlin_data)
datasec += nonlin_correction
#
# Insert the reduced data-section of this cell into the large OTA frame
#
bx, tx, by, ty = cell2ota__get_target_region(
wm_cellx, wm_celly, binning,
trimcell=options['trimcell'])
merged[by:ty,bx:tx] = datasec
#
# Now that the cell is pre-reduced (merged and overscan-subtracted), assemble
# the tech-header by copying the information from the input techdata
# to the output techdata
#
if (techhdulist is not None):
gain, readnoise, readnoise_e = read_techdata(
techhdulist, ota_c_x, ota_c_y, wm_cellx, wm_celly)
if (gain is not None and readnoise is not None):
logger.debug("Using tech-data for gain & readnoise: %f %f" % (gain, readnoise))
else:
logger.debug("No gain/readnoise data available")
# Also set the gain and readnoise values to be used later for the gain correction
all_gains[wm_cellx, wm_celly] = \
gain if gain is not None else \
hdulist[cell].header['GAIN'] if 'GAIN' in hdulist[cell].header else backup_gain
all_readnoise[wm_cellx, wm_celly] = \
readnoise if gain is not None else backup_readnoise
all_readnoise_electrons[wm_cellx, wm_celly] = \
readnoise_e if readnoise_e is not None else backup_readnoise_electrons
else:
all_gains[wm_cellx, wm_celly] = \
hdulist[cell].header['GAIN'] if 'GAIN' in hdulist[cell].header else backup_gain
all_readnoise[wm_cellx, wm_celly] = backup_readnoise
all_readnoise_electrons[wm_cellx, wm_celly] = backup_readnoise_electrons
# work on next cell
reduction_log.success('overscan')
logger.debug("Collected all cells for OTA %02d of %s" % (ota, obsid))
#
# At this point we have a 4x4 Kpixel array with all cells merged
#
# If we are to do some bias subtraction:
if (not mastercals.apply_bias()):
reduction_log.not_selected('bias')
elif (mastercals.bias() is None):
reduction_log.fail('bias')
else:
bias_filename = mastercals.bias()
bias = pyfits.open(bias_filename)
reduction_files_used['bias'] = bias_filename
# Search for the bias data for the current OTA
for bias_ext in bias[1:]:
if (not is_image_extension(bias_ext)):
continue
fppos_bias = bias_ext.header['FPPOS']
if (fppos_bias == fppos):
# This is the one
try:
if (not options['keep_cells'] == False):
bias_ext.data[numpy.isnan(bias_ext.data)] = 0.
merged -= bias_ext.data
logger.debug("Subtracting bias: %s" % (bias_filename))
except:
logger.warning("Unable to subtract bias, dimensions don't match (data: %s, bias: %s)" % (
str(merged.shape), str(bias_ext.data.shape)))
reduction_log.fail('bias')
pass
break
bias.close()
hdu.header.add_history("CC-BIAS: %s" % (os.path.abspath(bias_filename)))
del bias
reduction_log.success('bias')
#
# Do some dark subtraction:
# Add at some point: use different darks for all detectors switched on
# to minimize the integration glow in guide-OTAs
#
if (not mastercals.apply_dark()):
reduction_log.not_selected('dark')
elif (mastercals.dark() is None):
reduction_log.fail('dark')
else:
# For now assume all detectors are switched on
dark_filename = mastercals.dark()
dark = pyfits.open(dark_filename)
reduction_files_used['dark'] = dark_filename
if ('EXPMEAS' in dark[0].header):
darktime = dark[0].header['EXPMEAS']
elif ('EXPTIME' in dark[0].header):
darktime = dark[0].header['EXPTIME']
else:
darktime = 1.
# Search for the flatfield data for the current OTA
for dark_ext in dark[1:]:
if (not is_image_extension(dark_ext)):
continue
fppos_dark = dark_ext.header['FPPOS']
if (fppos_dark == fppos):
# This is the one
dark_scaling = exposure_time / darktime
try:
if (not options['keep_cells'] == False):
dark_ext.data[numpy.isnan(dark_ext.data)] = 0.
merged -= (dark_ext.data * dark_scaling)
logger.debug("Subtracting dark: %s (scaling=%.2f)" % (dark_filename, dark_scaling))
except:
logger.warning("Unable to subtract dark, dimensions don't match (data: %s, dark: %s)" % (
str(merged.shape), str(dark_ext.data.shape)))
reduction_log.fail('dark')
pass
break
dark.close()
hdu.header.add_history("CC-DARK: %s" % (os.path.abspath(dark_filename)))
del dark
reduction_log.success('dark')
# By default, mark the frame as not affected by the pupilghost. This
# might be overwritten if the flat-field has PG specifications.
hdu.header['PGAFCTD'] = False
# If the third parameter points to a directory with flat-fields
hdu.header['GAIN'] = 1.3
gain_from_flatfield = None
pupilghost_center_x = pupilghost_center_y = None
if (not mastercals.apply_flat()):
reduction_log.not_selected('flat')
elif (mastercals.flat(sitesetup.flat_order) is None):
reduction_log.fail('flat')
else:
flatfield_filename = mastercals.flat(sitesetup.flat_order)
flatfield = pyfits.open(flatfield_filename)
reduction_files_used['flat'] = flatfield_filename
# Search for the flatfield data for the current OTA
for ff_ext in flatfield[1:]:
if (not is_image_extension(ff_ext)):
continue
fppos_flatfield = ff_ext.header['FPPOS']
if (fppos_flatfield == fppos):
# This is the one
try:
if (not options['keep_cells'] == False):
ff_ext.data[numpy.isnan(ff_ext.data)] = 1.
merged /= ff_ext.data
logger.debug("Dividing by flatfield: %s" % (flatfield_filename))
except:
logger.warning("Unable to apply flat-field, dimensions don't match (data: %s, flat: %s)" % (
str(merged.shape), str(ff_ext.data.shape)))
reduction_log.partial_fail('flat')
pass
# If normalizing with the flat-field, overwrite the gain
# keyword with the average gain value of the flatfield.
ff_gain = flatfield[0].header['GAIN'] \
if ('GAIN' in flatfield[0].header and flatfield[0].header['GAIN'] > 0) else -1.
gain_from_flatfield = ff_gain
logger.debug("Checking if extension has PGAFCTD keyword: %s" % (str('PGAFCTD' in ff_ext.header)))
if ('PGAFCTD' in ff_ext.header):
logger.debug("Value of PGAFCTD header keyword: %s" % (str(ff_ext.header['PGAFCTD'])))
if ('PGAFCTD' in ff_ext.header and ff_ext.header['PGAFCTD']):
# Mark this extension as having a pupilghost problem.
hdu.header['PGAFCTD'] = True
# Also copy the center position of the pupilghost
# If this is not stored in the flat-field, assume some
# standard coordinates
logger.debug("Found an extension affected by pupilghost: %s" % (extname))
for pgheader in (
'PGCENTER', 'PGCNTR_X', 'PGCNTR_Y',
'PGTMPL_X', 'PGTMPL_Y',
'PGREG_X1', 'PGREG_X2', 'PGREG_Y1', 'PGREG_Y2',
'PGEFCTVX', 'PGEFCTVY',
'PGSCALNG',
'PGROTANG',
):
if (pgheader in ff_ext.header):
hdu.header[pgheader] = (ff_ext.header[pgheader], ff_ext.header.comments[pgheader])
pupilghost_center_x = ff_ext.header['PGCNTR_X'] if 'PGCNTR_X' in ff_ext.header else numpy.NaN
pupilghost_center_y = ff_ext.header['PGCNTR_Y'] if 'PGCNTR_Y' in ff_ext.header else numpy.NaN
# if ('PGCNTR_X' in ff_ext.header):
# pupilghost_center_x = ff_ext.header['PGCNTR_X']
# hdu.header['PGCNTR_X'] = (pupilghost_center_x, "pupil ghost center position X")
# if ('PGCNTR_Y' in ff_ext.header):
# pupilghost_center_y = ff_ext.header['PGCNTR_Y']
# hdu.header['PGCNTR_Y'] = (pupilghost_center_y, "pupil ghost center position Y")
break
flatfield.close()
hdu.header.add_history("CC-FLAT: %s" % (os.path.abspath(flatfield_filename)))
del flatfield
reduction_log.success('flat')
#
# Apply illumination correction, if requested
#
if (options['illumcorr_dir'] is None):
reduction_log.not_selected('illumcorr')
else:
illumcorr_filename = podi_illumcorr.get_illumination_filename(
options['illumcorr_dir'], filter_name, binning)
if (not os.path.isfile(illumcorr_filename)):
reduction_log.fail('illumcorr')
else:
illumcorr = pyfits.open(illumcorr_filename)
reduction_files_used['illumination'] = illumcorr_filename
# Search for the flatfield data for the current OTA
for ff_ext in illumcorr:
if (not is_image_extension(ff_ext)):