diff options
 wcsutil/headerlet.py | 35 +++++++++++++++++++++++------------
 wcsutil/wcscorr.py   |  6 +++---
 2 files changed, 28 insertions(+), 13 deletions(-)
diff --git a/wcsutil/headerlet.py b/wcsutil/headerlet.py index c580d1f..e3804ea 100644 --- a/wcsutil/headerlet.py +++ b/wcsutil/headerlet.py @@ -427,11 +427,12 @@ class Headerlet(pyfits.HDUList): # TODO: in the pyfits refactoring branch if will be easier to # test whether an HDUList contains a certain extension HDU # without relying on try/except - wcscorr = fobj['WCSCORR'] + wcscorr_table = fobj['WCSCORR'] except KeyError: # The WCSCORR table needs to be created wcscorr.init_wcscorr(fobj) + orig_hlt_hdu = None if createheaderlet: # Create a headerlet for the original WCS data in the file, # create an HDU from the original headerlet, and append it to @@ -439,7 +440,6 @@ class Headerlet(pyfits.HDUList): hdrname = fobj[0].header['ROOTNAME'] + '_orig' orig_hlt = createHeaderlet(fobj, hdrname) orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt) - fobj.append(orig_hlt_hdu) self._delDestWCS(fobj) updateRefFiles(self[0].header.ascard, fobj[0].header.ascard) @@ -476,13 +476,24 @@ class Headerlet(pyfits.HDUList): fobj.append(self[('WCSDVARR', idx)].copy()) for idx in range(1, numd2im + 1): fobj.append(self[('D2IMARR', idx)].copy()) + + # Update the WCSCORR table with new rows from the headerlet's WCSs + wcscorr.update_wcscorr(fobj, self, 'SIPWCS') + + # Append the original headerlet + if createheaderlet and orig_hlt_hdu: + fobj.append(orig_hlt_hdu) + + # Finally, append an HDU for this headerlet + fobj.append(HeaderletHDU.fromheaderlet(self)) + if close_dest: fobj.close() else: logger.critical("Observation %s cannot be updated with headerlet " - "%s" % (dest, self.hdrname)) + "%s" % (fobj.filename(), self.hdrname)) print "Observation %s cannot be updated with headerlet %s" \ - % (dest, self.hdrname) + % (fobj.filename(), self.hdrname) def hverify(self): @@ -818,10 +829,12 @@ class HeaderletHDU(pyfits.core._NonstandardExtHDU): # Monkey-patch pyfits to add minimal support for HeaderletHDUs # TODO: Get rid of this ASAP!!! 
(it won't be necessary with the pyfits # refactoring branch) -__old_updateHDUtype = pyfits.Header._updateHDUtype -def __updateHDUtype(self): - if HeaderletHDU.match_header(self): - self._hdutype = HeaderletHDU - else: - __old_updateHDUtype(self) -pyfits.Header._updateHDUtype = __updateHDUtype +if not hasattr(pyfits.Header._updateHDUtype, '_patched'): + __old_updateHDUtype = pyfits.Header._updateHDUtype + def __updateHDUtype(self): + if HeaderletHDU.match_header(self): + self._hdutype = HeaderletHDU + else: + __old_updateHDUtype(self) + __updateHDUtype._patched = True + pyfits.Header._updateHDUtype = __updateHDUtype diff --git a/wcsutil/wcscorr.py b/wcsutil/wcscorr.py index 5a592c3..ccc1fb7 100644 --- a/wcsutil/wcscorr.py +++ b/wcsutil/wcscorr.py @@ -238,6 +238,7 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None): # extension version; if this should not be assumed then this can be # modified... wcs_keys = altwcs.wcskeys(source[(extname, 1)].header) + wcs_keys = filter(None, wcs_keys) wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header() wcs_keywords = wcshdr.keys() @@ -290,9 +291,10 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None): if key in new_table.data.names: new_table.data.field(key)[idx] = wcshdr[key + wcs_key] + prihdr = source[0].header for key in DEFAULT_PRI_KEYS: - if key in new_table.data.names: - new_table.data.field(key)[idx] = source[0].header[key] + if key in new_table.data.names and prihdr.has_key(key): + new_table.data.field(key)[idx] = prihdr[key] # If idx was never incremented, no rows were added, so there's nothing else # to do... |