Diffstat (limited to 'wcsutil')
-rw-r--r--  wcsutil/__init__.py      35
-rw-r--r--  wcsutil/altwcs.py       451
-rw-r--r--  wcsutil/convertwcs.py   118
-rw-r--r--  wcsutil/getinput.py      62
-rw-r--r--  wcsutil/headerlet.py    898
-rw-r--r--  wcsutil/hstwcs.py       451
-rw-r--r--  wcsutil/instruments.py  321
-rw-r--r--  wcsutil/mappings.py      29
-rw-r--r--  wcsutil/mosaic.py       183
-rw-r--r--  wcsutil/wcscorr.py      458
10 files changed, 0 insertions, 3006 deletions
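
For orientation, the modules deleted below supplied the HSTWCS class and the alternate-WCS helpers (archiveWCS, restoreWCS, deleteWCS) of the old stwcs package. The following is a minimal usage sketch reconstructed only from the docstrings in the removed files; it assumes the Python 2 era stwcs/pyfits stack of this commit, and the file name and the wcskey/wcsname values are the illustrative placeholders used in (or chosen in the spirit of) those docstrings, not part of this change:

    import pyfits
    from stwcs import wcsutil

    # Build an HSTWCS object from an open HDUList and an extension number
    fobj = pyfits.open('j9irw4b1q_flt.fits')
    w = wcsutil.HSTWCS(fobj, ext=1)

    # Or from a qualified file name, optionally selecting the original 'O' WCS
    w2 = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=2, wcskey='O')

    # Archive the primary WCS of a SCI extension as an alternate WCS
    # (key 'T' and name 'TEST' are arbitrary example values)
    wcsutil.archiveWCS('j9irw4b1q_flt.fits', ext=[('sci', 1)],
                       wcskey='T', wcsname='TEST')
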
| diff --git a/wcsutil/__init__.py b/wcsutil/__init__.py deleted file mode 100644 index 9c12aa2..0000000 --- a/wcsutil/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import division # confidence high - -from altwcs import * -from hstwcs import HSTWCS - -__docformat__ = 'restructuredtext' - - -def help(): -    print 'How to create an HSTWCS object:\n\n' -    print """ \ -    1. Using a pyfits HDUList object and an extension number \n -    Example:\n - -    fobj = pyfits.open('some_file.fits')\n -    w = wcsutil.HSTWCS(fobj, 3)\n\n - -    2. Create an HSTWCS object using a qualified file name. \n -    Example:\n -    w = wcsutil.HSTWCS('j9irw4b1q_flt.fits[sci,1]')\n\n - -    3. Create an HSTWCS object using a file name and an extension number. \n -    Example:\n -    w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=2)\n\n -     -    4. Create an HSTWCS object from WCS with key 'O'.\n -    Example:\n - -    w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=2, wcskey='O')\n\n -     -    5. Create a template HSTWCS object for a DEFAULT object.\n -    Example:\n -    w = wcsutil.HSTWCS(instrument='DEFAULT')\n\n -    """ - diff --git a/wcsutil/altwcs.py b/wcsutil/altwcs.py deleted file mode 100644 index d250b52..0000000 --- a/wcsutil/altwcs.py +++ /dev/null @@ -1,451 +0,0 @@ -from __future__ import division # confidence high -import os -import string - -import numpy as np -import pywcs -import pyfits - -altwcskw = ['WCSAXES', 'CRVAL', 'CRPIX', 'PC', 'CDELT', 'CD', 'CTYPE', 'CUNIT', -            'PV', 'PS'] - -# file operations -def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", clobber=False): -    """ -    Copy the primary WCS to the hader as an alternate WCS -    with wcskey and name WCSNAME. It loops over all extensions in 'ext' - -    Parameters -    ---------- -    fname:  string or pyfits.HDUList -            a file name or a file object -    ext:    an int, a tuple, a python list of integers or a python list of tuples (e.g.('sci',1)) -            fits extensions to work with -    wcskey: string "A"-"Z" or " " -            if " ": get next available key if wcsname is also " " or try -            to get a key from WCSNAME value -    wcsname: string -             Name of alternate WCS description -    clobber: boolean -             if Ture - overwrites a WCS with the same key - -    See Also -    -------- -    wcsutils.restoreWCS: Copy an alternate WCS to the primary WCS - -    """ - -    if isinstance(fname, str): -        f = pyfits.open(fname, mode='update') -    else: -        f = fname - -    if not parpasscheck(f, ext, wcskey, wcsname): -        closefobj(fname, f) -        return - -    if isinstance(ext, int) or isinstance(ext, tuple): -        ext = [ext] -     -    wcsext = 0 -    eindx = 0 -    for e in f: -        if 'extname' in e.header and 'sci' in e.header['extname'].lower(): -            wcsext = eindx -            break -        eindx += 1 - -    if wcskey == " ": -        # try getting the key from WCSNAME -        if not wcsname.strip(): -            wkey = next_wcskey(f[wcsext].header) -        else: -            wkey = getKeyFromName(f[wcsext].header, wcsname) -    else: -        if wcskey not in available_wcskeys(f[wcsext].header): -            # reuse wcsname -            if not wcsname.strip(): -                wcsname = f[wcsext].header["WCSNAME"+wcskey] -                wname = wcsname -                wkey = wcskey -            else: -                wkey = wcskey -                wname = wcsname -        else: -            wkey = wcskey -            wname = 
wcsname - -    for e in ext: -        w = pywcs.WCS(f[e].header, fobj=f) -        hwcs = w.to_header() -        wcsnamekey = 'WCSNAME' + wkey -        f[e].header.update(key=wcsnamekey, value=wcsname) -        if w.wcs.has_cd(): -            pc2cd(hwcs) -        for k in hwcs.keys(): -            key = k[:7] + wkey -            f[e].header.update(key=key, value=hwcs[k]) -        #norient = np.rad2deg(np.arctan2(hwcs['CD1_2'],hwcs['CD2_2'])) -        #okey = 'ORIENT%s' % wkey -        #f[e].header.update(key=okey, value=norient) -    closefobj(fname, f) - -def restoreWCS(f, ext, wcskey=" ", wcsname=" ", clobber=False): -    """ -    Copy a WCS with key "WCSKEY" to a primary WCS - -    Reads in a WCS defined with wcskey and saves it as the primary WCS. -    If clobber is False, writes out new files whose names are the original -    names with an attached 3 character string  _'WCSKEY'_. -    Otherwise overwrites the files. Goes sequentially through the list of extensions -    The WCS is restored from the 'SCI' extension but the primary WCS of all -    extensions with the same EXTVER are updated. - - -    Parameters -    ---------- -    f:       string or pyfits.HDUList object -             a file name or a file object -    ext:     an int, a tuple, a python list of integers or a python list -             of tuples (e.g.('sci',1)) -             fits extensions to work with -    wcskey:  a charater -             "A"-"Z" - Used for one of 26 alternate WCS definitions. -             or " " - find a key from WCSNAMe value -    wcsname: string (optional) -             if given and wcskey is " ", will try to restore by WCSNAME value -    clobber: boolean -             A flag to define if the original files should be overwritten - -    See Also -    -------- -    wcsutil.archiveWCS - copy the primary WCS as an alternate WCS - -    """ -    if isinstance(f, str): -        if clobber: -            fobj = pyfits.open(f, mode='update') -        else: -            fobj = pyfits.open(f) -    else: -        fobj = f - -    if not parpasscheck(fobj, ext, wcskey, wcsname): -        closefobj(f, fobj) -        return - -    if isinstance(ext, int) or isinstance(ext, tuple): -        ext = [ext] - -    if not clobber: -        name = (fobj.filename().split('.fits')[0] + '_%s_' + '.fits') %wcskey -    else: -        name = fobj.filename() - -    if wcskey == " ": -        if wcsname.strip(): -            wkey = getKeyFromName(fobj[1].header, wcsname) -            if not wkey: -                print 'Could not get a key from wcsname %s .' 
% wcsname -                closefobj(f, fobj) -                return -    else: -        if wcskey not in wcskeys(fobj[1].header): -            print "Could not find alternate WCS with key %s in this file" % wcskey -            closefobj(f, fobj) -            return -        wkey = wcskey - -    for e in ext: -        try: -            extname = fobj[e].header['EXTNAME'].lower() -        except KeyError: -            continue -        #Restore always from a 'SCI' extension but write it out to 'ERR' and 'DQ' -        if extname == 'sci': -            sciver = fobj[e].header['extver'] -            try: -                nwcs = pywcs.WCS(fobj[e].header, fobj=fobj, key=wkey) -            except KeyError: -                print 'restoreWCS: Could not read WCS with key %s in file %s,  \ -                extension %d' % (wkey, fobj.filename(), e) -                closefobj(f, fobj) -                return #raise -            hwcs = nwcs.to_header() - -            if nwcs.wcs.has_cd(): -                pc2cd(hwcs, key=wkey) -            for k in hwcs.keys(): -                key = k[:-1] -                if key in fobj[e].header.keys(): -                    fobj[e].header.update(key=key, value = hwcs[k]) -                else: -                    continue -            if wcskey == 'O' and fobj[e].header.has_key('TDDALPHA'): -                fobj[e].header['TDDALPHA'] = 0.0 -                fobj[e].header['TDDBETA'] = 0.0 -            if fobj[e].header.has_key('ORIENTAT'): -                norient = np.rad2deg(np.arctan2(hwcs['CD1_2'+'%s' %wkey],hwcs['CD2_2'+'%s' %wkey])) -                fobj[e].header.update(key='ORIENTAT', value=norient) -        elif extname in ['err', 'dq', 'sdq', 'time', 'samp']: -            cextver = fobj[e].header['extver'] -            if cextver == sciver: -                for k in hwcs.keys(): -                    key = k[:-1] -                    fobj[e].header.update(key=key, value = hwcs[k]) -                if fobj[e].header.has_key('ORIENTAT'): -                    norient = np.rad2deg(np.arctan2(hwcs['CD1_2'+'%s' %wkey],hwcs['CD2_2'+'%s' %wkey])) -                    fobj[e].header.update(key='ORIENTAT', value=norient) -        else: -            continue - -    if not clobber: -        fobj.writeto(name) -    closefobj(f, fobj) - -def deleteWCS(fname, ext, wcskey=" ", wcsname=" "): -    """ -    Delete an alternate WCS defined with wcskey. -    If wcskey is " " try to get a key from WCSNAME. - -    Parameters -    ---------- -    fname:   sting or a pyfits.HDUList object -    ext:     an int, a tuple, a python list of integers or a python list of tuples (e.g.('sci',1)) -             fits extensions to work with -    wcskey:  one of 'A'-'Z' or " " -    wcsname: string -             Name of alternate WCS description -    """ -    if isinstance(fname, str): -        fobj = pyfits.open(fname, mode='update') -    else: -        fobj = fname - -    if not parpasscheck(fobj, ext, wcskey, wcsname): -        closefobj(fname, fobj) -        return - -    if isinstance(ext, int) or isinstance(ext, tuple): -        ext = [ext] - -    # Do not allow deleting the original WCS. -    if wcskey == 'O': -        print "Wcskey 'O' is reserved for the original WCS and should not be deleted." 
-        closefobj(fname, fobj) -        return - -    if wcskey == " ": -        # try getting the key from WCSNAME -        if wcsname == " ": -            print "Could not get a valid key from header" -            closefobj(fname, fobj) -            return -        else: -            wkey = getKeyFromName(fobj[1].header, wcsname) -            if not wkey: -                print 'Could not get a key: wcsname "%s" not found in header.' % wcsname -                closefobj(fname, fobj) -                return -    else: -        if wcskey not in wcskeys(fobj[1].header): -            print "Could not find alternate WCS with key %s in this file" % wcskey -            closefobj(fname, fobj) -            return -        wkey = wcskey - -    prexts = [] -    for i in ext: -        hdr = fobj[i].header -        try: -            w = pywcs.WCS(hdr, fobj, key=wkey) -        except KeyError: -            continue -        hwcs = w.to_header() -        if w.wcs.has_cd(): -            pc2cd(hwcs, key=wkey) -        for k in hwcs.keys(): -            del hdr[k] -            #del hdr['ORIENT'+wkey] -        prexts.append(i) -    if prexts != []: -        print 'Deleted all instances of WCS with key %s in extensions' % wkey, prexts -    else: -        print "Did not find WCS with key %s in any of the extensions" % wkey -    closefobj(fname, fobj) - -#header operations -def wcskeys(header): -    """ -    Returns a list of characters used in the header for alternate -    WCS description with WCSNAME keyword - -    Parameters -    ---------- -    hdr: pyfits.Header -    """ -    assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input" -    names = header["WCSNAME*"] -    return [key.split('WCSNAME')[1].upper() for key in names.keys()] - -def wcsnames(header): -    """ -    Returns a dictionary of wcskey: WCSNAME pairs - -    Parameters -    ---------- -    header: pyfits.Header -    """ -    names = header["WCSNAME*"] -    d = {} -    for c in names: -        d[c.key[-1]] = c.value -    return d - -def available_wcskeys(header): -    """ -    Returns a list of characters which are not used in the header -    with WCSNAME keyword. Any of them can be used to save a new -    WCS. - -    Parameters -    ---------- -    header: pyfits.Header -    """ -    assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input" -    all_keys = list(string.ascii_uppercase) -    used_keys = wcskeys(header) -    try: -        used_keys.remove("") -    except ValueError: -        pass -    [all_keys.remove(key) for key in used_keys] -    return all_keys - -def next_wcskey(header): -    """ -    Returns next available character to be used for an alternate WCS - -    Parameters -    ---------- -    header: pyfits.Header -    """ -    assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input" -    allkeys = available_wcskeys(header) -    if allkeys != []: -        return allkeys[0] -    else: -        return None - -def getKeyFromName(header, wcsname): -    """ -    If WCSNAME is found in header, return its key, else return -    None. This is used to update an alternate WCS -    repeatedly and not generate new keys every time. 
- -    Parameters -    ---------- -    header:  pyfits.Header -    wcsname: str -             Value of WCSNAME -    """ -    wkey = None -    names = wcsnames(header) -    for item in names.items(): -        if item[1].lower() == wcsname.lower(): -            wkey = item[0] -            break -    return wkey - -def pc2cd(hdr, key=' '): -    """ -    Convert a CD PC matrix to a CD matrix. - -    WCSLIB (and PyWCS) recognizes CD keywords as input -    but converts them and works internally with the PC matrix. -    to_header() returns the PC matrix even if the i nput was a -    CD matrix. To keep input and output consistent we check -    for has_cd and convert the PC back to CD. - -    Parameters -    ---------- -    hdr: pyfits.Header - -    """ -    for c in ['1_1', '1_2', '2_1', '2_2']: -        try: -            val = hdr['PC'+c+'%s' % key] -            del hdr['PC'+c+ '%s' % key] -        except KeyError: -            if c=='1_1' or c == '2_2': -                val = 1. -            else: -                val = 0. -        hdr.update(key='CD'+c+'%s' %key, value=val) -    return hdr - -def parpasscheck(fobj, ext, wcskey, wcsname, clobber=True): - -    if not isinstance(fobj,pyfits.HDUList): -        print "First parameter must be a fits file object or a file name." -        return False -    try: -        assert (fobj.fileinfo(0)['filemode'] == 'update') -    except AssertionError: -        print "First parameter must be a file name or a file object opened in 'update' mode." -        return False - -    if not isinstance(ext, int) and not isinstance(ext, tuple) \ -       and not isinstance(ext, list): -        print "Ext must be integer, tuple, a list of int extension numbers, \ -        or a list of tuples representing a fits extension, for example ('sci', 1)." -        return False - -    if len(wcskey) != 1: -        print 'Parameter wcskey must be a character - one of "A"-"Z" or " "' -        return False - -    wcsext = 0 -    eindx = 0 -    for e in fobj: -        if 'extname' in e.header and 'sci' in e.header['extname'].lower(): -            wcsext = eindx -            break -        eindx += 1 -         - -    if wcskey == " ": -        # try getting the key from WCSNAME -        """ -        if wcsname == " " or wcsname == "": -            #wkey = next_wcskey(f[1].header) -            #if not wkey: -            #    print "Could not get a valid key from header" -            return False -        """ -        if wcsname.strip(): -            wkey = getKeyFromName(fobj[wcsext].header, wcsname) -            if wkey and not clobber: -                print 'Wcsname %s is already used.' % wcsname -                print 'Use "wcsutil.next_wcskey" to obtain a valid wcskey' -                print 'or use "clobber=True" to overwrite the values.' -                return False -    else: -        if wcskey not in available_wcskeys(fobj[wcsext].header): -            if clobber==False: -                print 'Wcskey %s is already used.' % wcskey -                print 'Use "wcsutil.next_wcskey" to obtain a valid wcskey' -                print 'or use "clobber=True" to overwrite the values.' -                return False - - -    return True - -def closefobj(fname, f): -    """ -    Functions in this module accept as input a file name or a file object. -    If the input was a file name (string) we close the object. If the user -    passed a file object we leave it to the user to close it. 
-    """ -    if isinstance(fname, str): -        f.close() diff --git a/wcsutil/convertwcs.py b/wcsutil/convertwcs.py deleted file mode 100644 index 2a53d57..0000000 --- a/wcsutil/convertwcs.py +++ /dev/null @@ -1,118 +0,0 @@ -import pyfits -try: -    import stwcs -    from stwcs import wcsutil -except: -    stwcs = None - -from pytools import fileutil - -OPUS_WCSKEYS = ['OCRVAL1','OCRVAL2','OCRPIX1','OCRPIX2', -                'OCD1_1','OCD1_2','OCD2_1','OCD2_2', -                'OCTYPE1','OCTYPE2'] - - -def archive_prefix_OPUS_WCS(fobj,extname='SCI'): -    """ Identifies WCS keywords which were generated by OPUS and archived -        using a prefix of 'O' for all 'SCI' extensions in the file - -        Parameters -        ---------- -        fobj: string or pyfits.HDUList -            Filename or pyfits object of a file - -    """ -    if stwcs is None: -        print '=====================' -        print 'The STWCS package is needed to convert an old-style OPUS WCS to an alternate WCS' -        print '=====================' -        raise ImportError - - -    closefits = False -    if isinstance(fobj,str): -        # A filename was provided as input -        fobj = pyfits.open(fobj,mode='update') -        closefits=True - -    # Define the header -    ext = ('sci',1) -    hdr = fobj[ext].header - -    numextn = fileutil.countExtn(fobj) -    extlist = [] -    for e in xrange(1,numextn+1): -        extlist.append(('sci',e)) - -    # Insure that the 'O' alternate WCS is present -    if 'O' not in wcsutil.wcskeys(hdr): -        # if not, archive the Primary WCS as the default OPUS WCS -        wcsutil.archiveWCS(fobj,extlist, wcskey='O', wcsname='OPUS') - -    # find out how many SCI extensions are in the image -    numextn = fileutil.countExtn(fobj,extname=extname) -    if numextn == 0: -        extname = 'PRIMARY' - -    # create HSTWCS object from PRIMARY WCS -    wcsobj = wcsutil.HSTWCS(fobj,ext=ext,wcskey='O') -    # get list of WCS keywords -    wcskeys = wcsobj.wcs2header().keys() - -    # For each SCI extension... -    for e in xrange(1,numextn+1): -        # Now, look for any WCS keywords with a prefix of 'O' -        for key in wcskeys: -            okey = 'O'+key[:7] -            hdr = fobj[(extname,e)].header -            if hdr.has_key(okey): -                # Update alternate WCS keyword with prefix-O OPUS keyword value -                hdr[key] = hdr[okey] - -    if closefits: -        fobj.close() - -def create_prefix_OPUS_WCS(fobj,extname='SCI'): -    """ Creates alternate WCS with a prefix of 'O' for OPUS generated WCS values -        to work with old MultiDrizzle. - -        Parameters -        ---------- -        fobj: string or pyfits.HDUList -            Filename or pyfits object of a file - -        Raises -        ------ -        IOError: -            if input PyFITS object was not opened in 'update' mode - -    """ -    # List of O-prefix keywords to create -    owcskeys = OPUS_WCSKEYS - -    closefits = False -    if isinstance(fobj,str): -        # A filename was provided as input -        fobj = pyfits.open(fobj,mode='update') -        closefits=True -    else: -        # check to make sure this FITS obj has been opened in update mode -        if fobj.fileinfo(0)['filemode'] != 'update': -            print 'File not opened with "mode=update". Quitting...' 
-            raise IOError - -    # check for existance of O-prefix WCS -    if not fobj['sci',1].header.has_key(owcskeys[0]): - -        # find out how many SCI extensions are in the image -        numextn = fileutil.countExtn(fobj,extname=extname) -        if numextn == 0: -            extname = '' -        for extn in xrange(1,numextn+1): -            hdr = fobj[(extname,extn)].header -            for okey in owcskeys: -                hdr.update(okey,hdr[okey[1:]+'O']) - -    # Close FITS image if we had to open it... -    if closefits: -        fobj.close() diff --git a/wcsutil/getinput.py b/wcsutil/getinput.py deleted file mode 100644 index a2d9781..0000000 --- a/wcsutil/getinput.py +++ /dev/null @@ -1,62 +0,0 @@ -import pyfits -from pytools import irafglob, fileutil, parseinput - -def parseSingleInput(f=None, ext=None): -    if isinstance(f, str): -        # create an HSTWCS object from a filename -        if ext != None: -            filename = f -            if isinstance(ext,tuple): -                if ext[0] == '': -                    extnum = ext[1] # handle ext=('',1) -                else: -                    extnum = ext -            else: -                extnum = int(ext) -        elif ext == None: -            filename, ext = fileutil.parseFilename(f) -            ext = fileutil.parseExtn(ext) -            if ext[0] == '': -                extnum = int(ext[1]) #handle ext=('',extnum) -            else: -                extnum = ext -        phdu = pyfits.open(filename) -        hdr0 = pyfits.getheader(filename) -        try: -            ehdr = pyfits.getheader(filename, ext=extnum) -        except (IndexError,KeyError): -            print 'Unable to get extension.', extnum -            raise - -    elif isinstance(f, pyfits.HDUList): -        phdu = f -        if ext == None: -            extnum = 0 -        else: -            extnum = ext -        ehdr = f[extnum].header -        hdr0 = f[0].header -        filename = hdr0.get('FILENAME', "") -         -    else: -        raise ValueError('Input must be a file name string or a pyfits file object') - -    return filename, hdr0, ehdr, phdu - - -def parseMultipleInput(input): -    if isinstance(input, str): -        if input[0] == '@': -            # input is an @ file -            filelist = irafglob.irafglob(input) -        else: -            try: -                filelist, output = parseinput.parseinput(input) -            except IOError: raise -    elif isinstance(input, list): -        if isinstance(input[0], wcsutil.HSTWCS): -            # a list of HSTWCS objects -            return input -        else: -            filelist = input[:] -    return filelist
\ No newline at end of file diff --git a/wcsutil/headerlet.py b/wcsutil/headerlet.py deleted file mode 100644 index a30f06b..0000000 --- a/wcsutil/headerlet.py +++ /dev/null @@ -1,898 +0,0 @@ -from __future__ import division -import logging -import os -import tarfile -import tempfile -import time -import warnings -from cStringIO import StringIO - -import numpy as np -import pyfits - -import altwcs -import wcscorr -from hstwcs import HSTWCS -from mappings import basic_wcs -from pytools.fileutil import countExtn - -module_logger = logging.getLogger('headerlet') - -import atexit -atexit.register(logging.shutdown) - -def setLogger(logger, level, mode='w'): -    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -    log_filename = 'headerlet.log' -    fh = logging.FileHandler(log_filename, mode=mode) -    fh.setLevel(logging.DEBUG) -    fh.setFormatter(formatter) -    logger.addHandler(fh) -    logger.setLevel(level) -     -def isWCSIdentical(scifile, file2, verbose=False): -    """ -    Compares the WCS solution of 2 files. - -    Parameters -    ---------- -    scifile: file1 -    file2: file2 -    verbose: False or a python logging level -             (one of 'INFO', 'DEBUG' logging levels) -             (an integer representing a logging level) - -    Notes -    ----- -    These can be 2 science observations or 2 headerlets -    or a science observation and a headerlet. The two files -    have the same WCS solution if the following are the same: - -    - rootname/destim -    - primary WCS -    - SIP coefficients -    - NPOL distortion -    - D2IM correction -    - Velocity aberation - -    """ -    if verbose: -        setLogger(module_logger, verbose) -    else: -        module_logger.setLevel(100) - -    module_logger.info("Starting isWCSIdentical: %s" % time.asctime()) - -    result = True -    numsci1 = max(countExtn(scifile), countExtn(scifile, 'SIPWCS')) -    numsci2 = max(countExtn(file2), countExtn(file2, 'SIPWCS')) - -    if numsci1 == 0 or numsci2 == 0 or numsci1 != numsci2: -        module_logger.info("Number of SCI and SIPWCS extensions do not match.") -        result = False - -    if getRootname(scifile) != getRootname(file2): -        module_logger.info('Rootnames do not match.') -        result = False -    try: -        extname1 = pyfits.getval(scifile, 'EXTNAME', ext=('SCI', 1)) -    except KeyError: -        extname1 = 'SIPWCS' -    try: -        extname2 = pyfits.getval(file2, 'EXTNAME', ext=('SCI', 1)) -    except KeyError: -        extname2 = 'SIPWCS' - -    for i in range(1, numsci1 + 1): -        w1 = HSTWCS(scifile, ext=(extname1, i)) -        w2 = HSTWCS(file2, ext=(extname2, i)) -        if not np.allclose(w1.wcs.crval, w2.wcs.crval, rtol=1e-7) or \ -        not np.allclose(w1.wcs.crpix, w2.wcs.crpix, rtol=1e-7)  or \ -        not np.allclose(w1.wcs.cd, w2.wcs.cd, rtol=1e-7) or \ -        not (np.array(w1.wcs.ctype) == np.array(w2.wcs.ctype)).all(): -            module_logger.info('Primary WCSs do not match') -            result = False -        if w1.sip or w2.sip: -            if (w2.sip and not w1.sip) or (w1.sip and not w2.sip) or \ -               not np.allclose(w1.sip.a, w2.sip.a, rtol=1e-7) or \ -               not np.allclose(w1.sip.b, w2.sip.b, rtol=1e-7): -                module_logger.info('SIP coefficients do not match') -                result = False -        if w1.cpdis1 or w2.cpdis1: -            if w1.cpdis1 and not w2.cpdis1 or \ -                w2.cpdis1 and not w1.cpdis1 or \ -                not 
np.allclose(w1.cpdis1.data, w2.cpdis1.data): -                module_logger.info('NPOL distortions do not match') -                result = False -        if w1.cpdis2 or w2.cpdis2: -            if w1.cpdis2 and not w2.cpdis2 or \ -                w2.cpdis2 and not w1.cpdis2 or \ -                not np.allclose(w1.cpdis2.data, w2.cpdis2.data): -                module_logger.info('NPOL distortions do not match') -                result = False -        if w1.det2im1 or w2.det2im1: -            if w1.det2im1 and not w2.det2im1 or \ -                w2.det2im1 and not w1.det2im1 or\ -                not np.allclose(w1.det2im1.data, w2.det2im1.data): -                module_logger.info('Det2Im corrections do not match') -                result =  False -        if w1.det2im2 or w2.det2im2: -            if w1.det2im2 and not w2.det2im2 or \ -                w2.det2im2 and not w1.det2im2 or\ -                not np.allclose(w1.det2im2.data, w2.det2im2.data): -                module_logger.info('Det2Im corrections do not match') -                result = False -        if w1.vafactor != w2.vafactor: -            module_logger.info('VA factors do not match') -            result = False - -    return result - - -# TODO: It would be logical for this to be part of the Headerlet class, perhaps -# as a classmethod -def createHeaderlet(fname, hdrname, destim=None, output=None, verbose=False, logmode='w'): -    """ -    Create a headerlet from a science observation - -    Parameters -    ---------- -    fname: string -           Name of file with science observation -    hdrname: string -           Name for the headerlet, stored in the primary header of the headerlet -    destim: string -           Destination image, stored in the primary header of the headerlet. -           If None ROOTNAME is used of the science observation is used. -           ROOTNAME has precedence, destim is used for observations without -           ROOTNAME in the primary header -    output: string -           Save the headerlet to the given filename. -    verbose: False or a python logging level -             (one of 'INFO', 'DEBUG' logging levels) -             (an integer representing a logging level) -    logmode: 'w', 'a' -            used internally for controling access to the log file -    """ - -    if verbose: -        setLogger(module_logger, verbose, mode=logmode) -    else: -        module_logger.setLevel(100) - -    module_logger.info("Starting createHeaderlet: %s" % time.asctime()) -    phdukw = {'IDCTAB': True, -            'NPOLFILE': True, -            'D2IMFILE': True} -    if not isinstance(fname, pyfits.HDUList): -        fobj = pyfits.open(fname) -        close_file = True -    else: -        fobj = fname -        close_file = False -    if destim is None: -        try: -            destim = fobj[0].header['ROOTNAME'] -        except KeyError: -            module_logger.exception('Required keyword "DESTIM" not found') -            print 'Please provide a value for the DESTIM keyword' -            raise -    if hdrname is None: -        module_logger.critical("Required keyword 'HDRNAME' not given") -        raise ValueError("Please provide a name for the headerlet, HDRNAME is " -                         "a required parameter.") -         -    # get the version of STWCS used to create the WCS of the science file. 
-    try: -        upwcsver = fobj[0].header.ascard['STWCSVER'] -    except KeyError: -        upwcsver = pyfits.Card("STWCSVER", " ", -                               "Version of STWCS used to update the WCS") -     -    # get all keys for alternate WCSs -    altkeys = altwcs.wcskeys(fobj[('SCI', 1)].header) -     -    if 'O' in altkeys: -        altkeys.remove('O') -     -    numsci = countExtn(fname, 'SCI') -    module_logger.debug("Number of 'SCI' extensions in file %s is %s" -                 % (fname, numsci)) -    hdul = pyfits.HDUList() -    phdu = pyfits.PrimaryHDU() -    phdu.header.update('DESTIM', destim, -                       comment='Destination observation root name') -    phdu.header.update('HDRNAME', hdrname, comment='Headerlet name') -    fmt="%Y-%m-%dT%H:%M:%S" -    phdu.header.update('DATE', time.strftime(fmt), -                       comment='Date FITS file was generated') -    phdu.header.ascard.append(upwcsver) - -    refs = updateRefFiles(fobj[0].header.ascard, phdu.header.ascard, verbose=verbose) -    phdukw.update(refs) -    phdu.header.update(key='VAFACTOR', -                       value=fobj[('SCI',1)].header.get('VAFACTOR', 1.)) -    hdul.append(phdu) - -    for e in range(1, numsci + 1): -        hwcs = HSTWCS(fname, ext=('SCI', e)) -        h = hwcs.wcs2header(sip2hdr=True).ascard -        for ak in altkeys: -            awcs = HSTWCS(fname,ext=('SCI', e), wcskey=ak) -            h.extend(awcs.wcs2header(idc2hdr=False).ascard) -        h.append(pyfits.Card(key='VAFACTOR', value=hwcs.vafactor,  -                             comment='Velocity aberration plate scale factor')) -        h.insert(0, pyfits.Card(key='EXTNAME', value='SIPWCS', -                                comment='Extension name')) -        h.insert(1, pyfits.Card(key='EXTVER', value=e, -                                comment='Extension version')) - -        fhdr = fobj[('SCI', e)].header.ascard -        if phdukw['NPOLFILE']: -            cpdis = fhdr['CPDIS*...'] -            for c in range(1, len(cpdis) + 1): -                h.append(cpdis[c - 1]) -                dp = fhdr['DP%s.*...' 
% c] -                h.extend(dp) - -                try: -                    h.append(fhdr['CPERROR%s' % c]) -                except KeyError: -                    pass - -            try: -                h.append(fhdr['NPOLEXT']) -            except KeyError: -                pass - -        if phdukw['D2IMFILE']: -            try: -                h.append(fhdr['D2IMEXT']) -            except KeyError: -                pass - -            try: -                h.append(fhdr['AXISCORR']) -            except KeyError: -                module_logger.exception("'D2IMFILE' kw exists but keyword 'AXISCORR' was not found in " -                                 "%s['SCI',%d]" % (fname, e)) -                raise - -            try: -                h.append(fhdr['D2IMERR']) -            except KeyError: -                h.append(pyfits.Card(key='DPERROR', value=0, -                                     comment='Maximum error of D2IMARR')) - -        hdu = pyfits.ImageHDU(header=pyfits.Header(h)) -        # temporary fix for pyfits ticket # 48 -        hdu._extver = e -        hdul.append(hdu) -    numwdvarr = countExtn(fname, 'WCSDVARR') -    numd2im = countExtn(fname, 'D2IMARR') -    for w in range(1, numwdvarr + 1): -        hdu = fobj[('WCSDVARR', w)].copy() -        # temporary fix for pyfits ticket # 48 -        hdu._extver = w -        hdul.append(hdu) -    for d in range(1, numd2im + 1): -        hdu = fobj[('D2IMARR', d)].copy() -        # temporary fix for pyfits ticket # 48 -        hdu._extver = d -        hdul.append(hdu) -    if output is not None: -        # write the headerlet to a file -        if not output.endswith('_hdr.fits'): -            output = output + '_hdr.fits' -        hdul.writeto(output, clobber=True) -    if close_file: -        fobj.close() -    return Headerlet(hdul,verbose=verbose, logmode='a') - -def applyHeaderlet(hdrfile, destfile, createheaderlet=True, hdrname=None, -                   verbose=False): -    """ -    Apply headerlet 'hdrfile' to a science observation 'destfile' - -    Parameters -    ---------- -    hdrfile: string -             Headerlet file -    destfile: string -             File name of science observation whose WCS solution will be updated -    createheaderlet: boolean -            True (default): before updating, create a headerlet with the -            WCS old solution. -    hdrname: string or None (default) -            will be the value of the HDRNAME keyword in the headerlet generated -            for the old WCS solution.  If not specified, a sensible default -            will be used.  
Not required if createheaderlet is False -    verbose: False or a python logging level -             (one of 'INFO', 'DEBUG' logging levels) -             (an integer representing a logging level) -    """ -    if verbose: -        setLogger(module_logger, verbose) -    else: -        module_logger.setLevel(100) -    module_logger.info("Starting applyHeaderlet: %s" % time.asctime()) -    hlet = Headerlet(hdrfile, verbose=verbose, logmode='a') -    hlet.apply(destfile, createheaderlet=createheaderlet, hdrname=hdrname) - -def updateRefFiles(source, dest, verbose=False): -    """ -    Update the reference files name in the primary header of 'dest' -    using values from 'source' - -    Parameters -    ---------- -    source: pyfits.Header.ascardlist -    dest:   pyfits.Header.ascardlist -    """ -    module_logger.info("Updating reference files") -    phdukw = {'IDCTAB': True, -            'NPOLFILE': True, -            'D2IMFILE': True} - -    try: -        wind = dest.index_of('HISTORY') -    except KeyError: -        wind = len(dest) -    for key in phdukw.keys(): -        try: -            value = source[key] -            dest.insert(wind, value) -        except KeyError: -            # TODO: I don't understand what the point of this is.  Is it meant -            # for logging purposes?  Right now it isn't used. -            phdukw[key] = False -    return phdukw - -def getRootname(fname): -    """ -    returns the value of ROOTNAME or DESTIM -    """ - -    try: -        rootname = pyfits.getval(fname, 'ROOTNAME') -    except KeyError: -        rootname = pyfits.getval(fname, 'DESTIM') -    return rootname - -def mapFitsExt2HDUListInd(fname, extname): -    """ -    Map FITS extensions with 'EXTNAME' to HDUList indexes. -    """ - -    if not isinstance(fname, pyfits.HDUList): -        f = pyfits.open(fname) -        close_file = True -    else: -        f = fname -        close_file = False -    d = {} -    for hdu in f: -        # TODO: Replace calls to header.has_key() with `in header` once -        # pyfits refactoring branch is in production use -        if hdu.header.has_key('EXTNAME') and hdu.header['EXTNAME'] == extname: -            extver = hdu.header['EXTVER'] -            d[(extname, extver)] = f.index_of((extname, extver)) -    if close_file: -        f.close() -    return d - - -class Headerlet(pyfits.HDUList): -    """ -    A Headerlet class -    Ref: http://stsdas.stsci.edu/stsci_python_sphinxdocs/stwcs/headerlet_def.html -    """ - -    def __init__(self, fobj, wcskeys=[], mode='copyonwrite', verbose=False, logmode='w'): -        """ -        Parameters -        ---------- -        fobj:  string -                Name of headerlet file, file-like object, a list of HDU -                instances, or an HDUList instance -        wcskeys: python list -                a list of wcskeys to be included in the headerlet -                created from the old WCS solution before the -                science file is updated. If empty: all alternate (if any) -                WCSs are copied to the headerlet. 
-        mode: string, optional -                Mode with which to open the given file object -        """ -        self.verbose = verbose -        self.hdr_logger = logging.getLogger('headerlet.Headerlet') -        if self.verbose: -                setLogger(self.hdr_logger, self.verbose, mode=logmode) -        else: -            self.hdr_logger.setLevel(100) - -        self.hdr_logger.info("Creating a Headerlet object from wcskeys %s" % wcskeys) -        self.wcskeys = wcskeys -        if not isinstance(fobj, list): -            fobj = pyfits.open(fobj, mode=mode) - -        super(Headerlet, self).__init__(fobj) -        self.fname = self.filename() -        self.hdrname = self[0].header["HDRNAME"] -        self.stwcsver = self[0].header.get("STWCSVER", "") -        self.destim = self[0].header["DESTIM"] -        self.idctab = self[0].header.get("IDCTAB", "") -        self.npolfile = self[0].header.get("NPOLFILE", "") -        self.d2imfile = self[0].header.get("D2IMFILE", "") -        self.vafactor = self[1].header.get("VAFACTOR", 1) #None instead of 1? -        self.d2imerr = 0 -        self.axiscorr = 1 -         -    def apply(self, dest, createheaderlet=True, hdrname=None, attach=True, createsummary=True): -        """ -        Apply this headerlet to a file. -         -        Parameters -        ---------- -        dest:    string -                 Name of file to which to apply the WCS in the headerlet -        hdrname: string -                 A unique name for the headerlet -        createheaderlet: boolean -                 A flag which indicates if a headerlet should be created  -                 from the old WCS and attached to the science file (default: True) -        attach:  boolean, default: True -                 By default the headerlet being applied will be attached  -                 as an extension to the science file. Set 'attach' to False -                 to disable this. -        createsummary: boolean, default: True -                 Set this to False to disable creating and updating of wcscorr table. -                 This is used primarily for testing. 
-        """ -        self.hverify() -        if self.verify_dest(dest): -            if not isinstance(dest, pyfits.HDUList): -                fobj = pyfits.open(dest, mode='update') -                close_dest = True -            else: -                fobj = dest -                close_dest = False - -            # Create the WCSCORR HDU/table from the existing WCS keywords if -            # necessary -            if createsummary: -                try: -                    # TODO: in the pyfits refactoring branch if will be easier to -                    # test whether an HDUList contains a certain extension HDU -                    # without relying on try/except -                    wcscorr_table = fobj['WCSCORR'] -                except KeyError: -                    # The WCSCORR table needs to be created -                    wcscorr.init_wcscorr(fobj) - -            orig_hlt_hdu = None -            numhlt = countExtn(fobj, 'HDRLET') -            if createheaderlet: -                # Create a headerlet for the original WCS data in the file, -                # create an HDU from the original headerlet, and append it to -                # the file -                if not hdrname: -                    hdrname = fobj[0].header['ROOTNAME'] + '_orig' -                orig_hlt = createHeaderlet(fobj, hdrname, verbose=self.verbose, logmode='a') -                orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt) -                orig_hlt_hdu.update_ext_version(numhlt + 1) -                numhlt += 1 - -            self._delDestWCS(fobj) -            refs = updateRefFiles(self[0].header.ascard, fobj[0].header.ascard, verbose=self.verbose) -            numsip = countExtn(self, 'SIPWCS') -            for idx in range(1, numsip + 1): -                fhdr = fobj[('SCI', idx)].header.ascard -                siphdr = self[('SIPWCS', idx)].header.ascard -                # a minimal attempt to get the position of the WCS keywords group -                # in the header by looking for the PA_APER kw. -                # at least make sure the WCS kw are written befir the HISTORY kw -                # if everything fails, append the kw to the header -                try: -                    wind = fhdr.index_of('PA_APER') -                except KeyError: -                    try: -                        wind = fhdr.index_of('HISTORY') -                    except KeyError: -                        wind = len(fhdr) -                self.hdr_logger.debug("Inserting WCS keywords at index %s" % wind) -                # TODO: Drop .keys() when refactored pyfits comes into use -                for k in siphdr.keys(): -                    if k not in ['XTENSION', 'BITPIX', 'NAXIS', 'PCOUNT', -                                 'GCOUNT','EXTNAME', 'EXTVER', 'ORIGIN', -                                 'INHERIT', 'DATE', 'IRAF-TLM']: -                        fhdr.insert(wind, siphdr[k]) -                    else: -                        pass - -            #! Always attach these extensions last. Otherwise their headers may -            # get updated with the other WCS kw. 
-            numwdvar = countExtn(self, 'WCSDVARR') -            numd2im = countExtn(self, 'D2IMARR') -            for idx in range(1, numwdvar + 1): -                fobj.append(self[('WCSDVARR', idx)].copy()) -            for idx in range(1, numd2im + 1): -                fobj.append(self[('D2IMARR', idx)].copy()) - -            # Update the WCSCORR table with new rows from the headerlet's WCSs -            if createsummary: -                wcscorr.update_wcscorr(fobj, self, 'SIPWCS') - -            # Append the original headerlet -            if createheaderlet and orig_hlt_hdu: -                fobj.append(orig_hlt_hdu) -             -            if attach: -                # Finally, append an HDU for this headerlet -                new_hlt = HeaderletHDU.fromheaderlet(self) -                new_hlt.update_ext_version(numhlt + 1) -                fobj.append(new_hlt) - -            if close_dest: -                fobj.close() -        else: -            self.hdr_logger.critical("Observation %s cannot be updated with headerlet " -                            "%s" % (fobj.filename(), self.hdrname)) -            print "Observation %s cannot be updated with headerlet %s" \ -                  % (fobj.filename(), self.hdrname) - - -    def hverify(self): -        self.verify() -        assert(self[0].header.has_key('DESTIM') and -               self[0].header['DESTIM'].strip()) -        assert(self[0].header.has_key('HDRNAME') and -               self[0].header['HDRNAME'].strip()) -        assert(self[0].header.has_key('STWCSVER')) - - -    def verify_dest(self, dest): -        """ -        verifies that the headerlet can be applied to the observation - -        DESTIM in the primary header of the headerlet must match ROOTNAME -        of the science file (or the name of the destination file) -        """ - -        try: -            if not isinstance(dest, pyfits.HDUList): -                droot = pyfits.getval(dest, 'ROOTNAME') -            else: -                droot = dest[0].header['ROOTNAME'] -        except KeyError: -            self.hdr_logger.debug("Keyword 'ROOTNAME' not found in destination file") -            droot = dest.split('.fits')[0] -        if droot == self.destim: -            self.hdr_logger.debug("verify_destim() returned True") -            return True -        else: -            self.hdr_logger.debug("verify_destim() returned False") -            return False - -    def tofile(self, fname, destim=None, hdrname=None, clobber=False): -        if not destim or not hdrname: -            self.hverify() -        self.writeto(fname, clobber=clobber) - -    def _delDestWCS(self, dest): -        """ -        Delete the WCS of a science file -        """ - -        self.hdr_logger.info("Deleting all WCSs of file %s" % dest.filename()) -        numext = len(dest) - -        for idx in range(numext): -            # Only delete WCS from extensions which may have WCS keywords -            if dest[idx].__dict__.has_key('_xtn') and "IMAGE" in dest[idx]._xtn: -                self._removeD2IM(dest[idx]) -                self._removeSIP(dest[idx]) -                self._removeLUT(dest[idx]) -                self._removePrimaryWCS(dest[idx]) -                self._removeIDCCoeffs(dest[idx]) -                try: -                    del dest[idx].header.ascard['VAFACTOR'] -                except KeyError: -                    pass -                 -        self._removeRefFiles(dest[0]) -        self._removeAltWCS(dest, ext=range(numext)) -        numwdvarr = countExtn(dest, 
'WCSDVARR') -        numd2im = countExtn(dest, 'D2IMARR') -        for idx in range(1, numwdvarr + 1): -            del dest[('WCSDVARR', idx)] -        for idx in range(1, numd2im + 1): -            del dest[('D2IMARR', idx)] -     -    def _removeRefFiles(self, phdu): -        """ -        phdu: Primary HDU -        """ -        refkw = ['IDCTAB', 'NPOLFILE', 'D2IMFILE'] -        for kw in refkw: -            try: -                del phdu.header.ascard[kw] -            except KeyError: -                pass -         -    def _removeSIP(self, ext): -        """ -        Remove the SIP distortion of a FITS extension -        """ - -        self.hdr_logger.debug("Removing SIP distortion from (%s, %s)" -                     % (ext.name, ext._extver)) -        for prefix in ['A', 'B', 'AP', 'BP']: -            try: -                order = ext.header[prefix + '_ORDER'] -                del ext.header[prefix + '_ORDER'] -            except KeyError: -                continue -            for i in range(order + 1): -                for j in range(order + 1): -                    key = prefix + '_%d_%d' % (i, j) -                    try: -                        del ext.header[key] -                    except KeyError: -                        pass -        try: -            del ext.header['IDCTAB'] -        except KeyError: -            pass - -    def _removeLUT(self, ext): -        """ -        Remove the Lookup Table distortion of a FITS extension -        """ - -        self.hdr_logger.debug("Removing LUT distortion from (%s, %s)" -                     % (ext.name, ext._extver)) -        try: -            cpdis = ext.header['CPDIS*'] -        except KeyError: -            return -        try: -            for c in range(1, len(cpdis) + 1): -                del ext.header['DP%s.*...' % c] -                del ext.header[cpdis[c - 1].key] -            del ext.header['CPERR*'] -            del ext.header['NPOLFILE'] -            del ext.header['NPOLEXT'] -        except KeyError: -            pass - -    def _removeD2IM(self, ext): -        """ -        Remove the Detector to Image correction of a FITS extension -        """ - -        self.hdr_logger.debug("Removing D2IM correction from (%s, %s)" -                     % (ext.name, ext._extver)) -        d2imkeys = ['D2IMFILE', 'AXISCORR', 'D2IMEXT', 'D2IMERR'] -        for k in d2imkeys: -            try: -                del ext.header[k] -            except KeyError: -                pass - -    def _removeAltWCS(self, dest, ext): -        """ -        Remove Alternate WCSs of a FITS extension. -        A WCS with wcskey 'O' is never deleted. 
-        """ -        dkeys = altwcs.wcskeys(dest[('SCI', 1)].header) -        self.hdr_logger.debug("Removing alternate WCSs with keys %s from %s" -                     % (dkeys, dest.filename())) -        for k in dkeys: -            altwcs.deleteWCS(dest, ext=ext, wcskey=k) - -    def _removePrimaryWCS(self, ext): -        """ -        Remove the primary WCS of a FITS extension -        """ - -        self.hdr_logger.debug("Removing Primary WCS from (%s, %s)" -                     % (ext.name, ext._extver)) -        naxis = ext.header.ascard['NAXIS'].value -        for key in basic_wcs: -            for i in range(1, naxis + 1): -                try: -                    del ext.header.ascard[key + str(i)] -                except KeyError: -                    pass -        try: -            del ext.header.ascard['WCSAXES'] -        except KeyError: -            pass - -    def _removeIDCCoeffs(self, ext): -        """ -        Remove IDC coefficients of a FITS extension -        """ - -        self.hdr_logger.debug("Removing IDC coefficient from (%s, %s)" -                     % (ext.name, ext._extver)) -        coeffs = ['OCX10', 'OCX11', 'OCY10', 'OCY11', 'IDCSCALE'] -        for k in coeffs: -            try: -                del ext.header.ascard[k] -            except KeyError: -                pass - - -class HeaderletHDU(pyfits.core._NonstandardExtHDU): -    """ -    A non-standard extension HDU for encapsulating Headerlets in a file.  These -    HDUs have an extension type of HDRLET and their EXTNAME is derived from the -    Headerlet's HDRNAME. - -    The data itself is a tar file containing a single file, which is the -    Headerlet file itself.  The file name is derived from the HDRNAME keyword, -    and should be in the form `<HDRNAME>_hdr.fits`.  If the COMPRESS keyword -    evaluates to `True`, the tar file is compressed with gzip compression. - -    The Headerlet contained in the HDU's data can be accessed by the -    `headerlet` attribute. -    """ - -    _xtn = _extension = 'HDRLET' - -    def __init__(self, data=None, header=None): -        super(HeaderletHDU, self).__init__(data=data, header=header) -        # TODO: This can be removed after the next pyfits release, but for now -        # the _ExtensionHDU base class sets self._xtn = '' in its __init__(). -        self._xtn = self._extension -        # For some reason _NonstandardExtHDU.__init__ sets self.name = None, -        # even if it's already been set by the EXTNAME keyword in -        # _ExtensionHDU.__init__() -_-; -        if header and header.has_key('EXTNAME') and not self.name: -            self.name = header['EXTNAME'] -        # self._extver, if set, is still preserved.  From -        # _ExtensionHDU.__init__()...totally inconsistent. 
- -    def __getattr__(self, attr): -        if attr == 'data': -            size = self.size() -            self._file.seek(self._datLoc) -            self.__dict__[attr] = self._file.read(size) -        elif attr == 'headerlet': -            self._file.seek(self._datLoc) -            s = StringIO() -            # Read the data into a StringIO--reading directly from the file -            # won't work (at least for gzipped files) due to problems deep -            # within the gzip module that make it difficult to read gzip files -            # embedded in another file -            s.write(self._file.read(self.size())) -            s.seek(0) -            if self._header['COMPRESS']: -                mode = 'r:gz' -            else: -                mode = 'r' -            t = tarfile.open(mode=mode, fileobj=s) -            members = t.getmembers() -            if not len(members): -                raise ValueError('The Headerlet contents are missing.') -            elif len(members) > 1: -                warnings.warn('More than one file is contained in this ' -                              'only the headerlet file should be present.') -            hlt_name = self._header['HDRNAME'] + '_hdr.fits' -            try: -                hlt_info = t.getmember(hlt_name) -            except KeyError: -                warnings.warn('The file %s was missing from the HDU data.  ' -                              'Assuming that the first file in the data is ' -                              'headerlet file.' % hlt_name) -                hlt_info = members[0] -            hlt_file = t.extractfile(hlt_info) -            # hlt_file is a file-like object -            hlt = Headerlet(hlt_file, mode='readonly') -            self.__dict__[attr] = hlt -        else: -            return pyfits.core._ValidHDU.__getattr__(self, attr) -        try: -            return self.__dict__[attr] -        except KeyError: -            raise AttributeError(attr) - -    @classmethod -    def fromheaderlet(cls, headerlet, compress=False): -        """ -        Creates a new HeaderletHDU from a given Headerlet object. - -        Parameters -        ---------- -        headerlet : Headerlet -            A valid Headerlet object. -        compress : bool, optional -            Gzip compress the headerlet data. -        """ - -        phdu = headerlet[0] -        phduhdr = phdu.header -        hlt_filename = phdu.header['HDRNAME'] + '_hdr.fits' - -        # TODO: As it stands there's no good way to write out an HDUList in -        # memory, since it automatically closes the given file-like object when -        # it's done writing.  I'd argue that if passed an open file handler it -        # should not close it, but for now we'll have to write to a temp file. 
-        fd, name = tempfile.mkstemp() -        try: -            f = os.fdopen(fd, 'rb+') -            headerlet.writeto(f) -            # The tar file itself we'll write in memory, as it should be -            # relatively small -            if compress: -                mode = 'w:gz' -            else: -                mode = 'w' -            s = StringIO() -            t = tarfile.open(mode=mode, fileobj=s) -            t.add(name, arcname=hlt_filename) -            t.close() -        finally: -            os.remove(name) - -        cards = [ -            pyfits.Card('XTENSION', cls._extension, 'Headerlet extension'), -            pyfits.Card('BITPIX',    8, 'array data type'), -            pyfits.Card('NAXIS',     1, 'number of array dimensions'), -            pyfits.Card('NAXIS1',    len(s.getvalue()), 'Axis length'), -            pyfits.Card('PCOUNT',    0, 'number of parameters'), -            pyfits.Card('GCOUNT',    1, 'number of groups'), -            pyfits.Card('EXTNAME', cls._extension, -                        'name of the headerlet extension'), -            phdu.header.ascard['HDRNAME'], -            phdu.header.ascard['DATE'], -            pyfits.Card('SIPNAME', headerlet['SIPWCS', 1].header['WCSNAMEA'], -                        'SIP distortion model name'), -            phdu.header.ascard['NPOLFILE'], -            phdu.header.ascard['D2IMFILE'], -            pyfits.Card('COMPRESS', compress, 'Uses gzip compression') -        ] - -        header = pyfits.Header(pyfits.CardList(cards)) - -        hdu = cls(data=pyfits.DELAYED, header=header) -        hdu._file = s -        hdu._datLoc = 0 -        return hdu - -    @classmethod -    def match_header(cls, header): -        """ -        This is a class method used in the pyfits refactoring branch to -        recognize whether or not this class should be used for instantiating -        an HDU object based on values in the header. - -        It is included here for forward-compatibility. -        """ - -        card = header.ascard[0] -        if card.key != 'XTENSION': -            return False -        xtension = card.value.rstrip() -        return xtension == cls._extension - -    # TODO: Add header verification - -    def _summary(self): -        # TODO: Perhaps make this more descriptive... -        return '%-10s  %-12s  %4d' % (self.name, self.__class__.__name__, -                                      len(self._header.ascard)) - -# Monkey-patch pyfits to add minimal support for HeaderletHDUs -# TODO: Get rid of this ASAP!!! 
(it won't be necessary with the pyfits -# refactoring branch) -if not hasattr(pyfits.Header._updateHDUtype, '_patched'): -    __old_updateHDUtype = pyfits.Header._updateHDUtype -    def __updateHDUtype(self): -        if HeaderletHDU.match_header(self): -            self._hdutype = HeaderletHDU -        else: -            __old_updateHDUtype(self) -    __updateHDUtype._patched = True -    pyfits.Header._updateHDUtype = __updateHDUtype diff --git a/wcsutil/hstwcs.py b/wcsutil/hstwcs.py deleted file mode 100644 index 26dad5d..0000000 --- a/wcsutil/hstwcs.py +++ /dev/null @@ -1,451 +0,0 @@ -from __future__ import division # confidence high - -import os.path -from pywcs import WCS -import pyfits -import instruments -from stwcs.distortion import models, coeff_converter -import altwcs -import numpy as np -from pytools import fileutil -from pytools.fileutil import DEGTORAD, RADTODEG - -import getinput -import mappings -from mappings import inst_mappings, ins_spec_kw -from mappings import basic_wcs - - -__docformat__ = 'restructuredtext' - -class HSTWCS(WCS): - -    def __init__(self, fobj=None, ext=None, minerr=0.0, wcskey=" "): -        """ -        Create a WCS object based on the instrument. - -        In addition to basic WCS keywords this class provides -        instrument specific information needed in distortion computation. - -        Parameters -        ---------- -        fobj: string or PyFITS HDUList object or None -                a file name, e.g j9irw4b1q_flt.fits -                a fully qualified filename[EXTNAME,EXTNUM], e.g. j9irw4b1q_flt.fits[sci,1] -                a pyfits file object, e.g pyfits.open('j9irw4b1q_flt.fits'), in which case the -                user is responsible for closing the file object. -        ext:  int or None -                extension number -                if ext==None, it is assumed the data is in the primary hdu -        minerr: float -                minimum value a distortion correction must have in order to be applied. -                If CPERRja, CQERRja are smaller than minerr, the corersponding -                distortion is not applied. -        wcskey: str -                A one character A-Z or " " used to retrieve and define an -                alternate WCS description. -        """ - -        self.inst_kw = ins_spec_kw -        self.minerr = minerr -        self.wcskey = wcskey - -        if fobj is not None: -            filename, hdr0, ehdr, phdu = getinput.parseSingleInput(f=fobj, -                                                                   ext=ext) -            self.filename = filename -            instrument_name = hdr0.get('INSTRUME', 'DEFAULT') -            if instrument_name in ['IRAF/ARTDATA','',' ','N/A']: -                self.instrument = 'DEFAULT' -            else: -                self.instrument = instrument_name -            WCS.__init__(self, ehdr, fobj=phdu, minerr=self.minerr, -                         key=self.wcskey) -            # If input was a pyfits HDUList object, it's the user's -            # responsibility to close it, otherwise, it's closed here. 
-            if not isinstance(fobj, pyfits.HDUList): -                phdu.close() -            self.setInstrSpecKw(hdr0, ehdr) -            self.readIDCCoeffs(ehdr) -            extname = ehdr.get('EXTNAME', '') -            extnum = ehdr.get('EXTVER', None) -            self.extname = (extname, extnum) -        else: -            # create a default HSTWCS object -            self.instrument = 'DEFAULT' -            WCS.__init__(self, minerr=self.minerr, key=self.wcskey) -            self.pc2cd() -            self.setInstrSpecKw() -        self.setPscale() -        self.setOrient() - -    def readIDCCoeffs(self, header): -        """ -        Reads in first order IDCTAB coefficients if present in the header -        """ -        coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale'] -        for c in coeffs: -            self.__setattr__(c, header.get(c, None)) - -    def setInstrSpecKw(self, prim_hdr=None, ext_hdr=None): -        """ -        Populate the instrument specific attributes: - -        These can be in different headers but each instrument class has knowledge -        of where to look for them. - -        Parameters -        ---------- -        prim_hdr: pyfits.Header -                  primary header -        ext_hdr:  pyfits.Header -                  extension header - -        """ -        if self.instrument in inst_mappings.keys(): -            inst_kl = inst_mappings[self.instrument] -            inst_kl = instruments.__dict__[inst_kl] -            insobj = inst_kl(prim_hdr, ext_hdr) - -            for key in self.inst_kw: -                try: -                    self.__setattr__(key, insobj.__getattribute__(key)) -                except AttributeError: -                    # Some of the instrument's attributes are recorded in the primary header and -                    # were already set, (e.g. 'DETECTOR'), the code below is a check for that case. -                    if not self.__getattribute__(key): -                        raise -                    else: -                        pass - -        else: -            raise KeyError, "Unsupported instrument - %s" %self.instrument - -    def setPscale(self): -        """ -        Calculates the plate scale from the CD matrix -        """ -        try: -            cd11 = self.wcs.cd[0][0] -            cd21 = self.wcs.cd[1][0] -            self.pscale = np.sqrt(np.power(cd11,2)+np.power(cd21,2)) * 3600. -        except AttributeError: -            print "This file has a PC matrix. You may want to convert it \n \ -            to a CD matrix, if reasonable, by running pc2.cd() method.\n \ -            The plate scale can be set then by calling setPscale() method.\n" -            self.pscale = None - -    def setOrient(self): -        """ -        Computes ORIENTAT from the CD matrix -        """ -        try: -            cd12 = self.wcs.cd[0][1] -            cd22 = self.wcs.cd[1][1] -            self.orientat = RADTODEG(np.arctan2(cd12,cd22)) -        except AttributeError: -            print "This file has a PC matrix. 
You may want to convert it \n \ -            to a CD matrix, if reasonable, by running pc2.cd() method.\n \ -            The orientation can be set then by calling setOrient() method.\n" -            self.pscale = None - -    def updatePscale(self, scale): -        """ -        Updates the CD matrix with a new plate scale -        """ -        self.wcs.cd = self.wcs.cd/self.pscale*scale -        self.setPscale() - -    def readModel(self, update=False, header=None): -        """ -        Reads distortion model from IDCTAB. - -        If IDCTAB is not found ('N/A', "", or not found on disk), then -        if SIP coefficients and first order IDCTAB coefficients are present -        in the header, restore the idcmodel from the header. -        If not - assign None to self.idcmodel. - -        Parameters -        ---------- -        header: pyfits.Header -                fits extension header -        update: boolean (False) -                if True - record the following IDCTAB quantities as header keywords: -                CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF, -                IDCV2REF, IDCV3REF -        """ -        if self.idctab in [None, '', ' ','N/A']: -            #Keyword idctab is not present in header - check for sip coefficients -            if header is not None and header.has_key('IDCSCALE'): -                self._readModelFromHeader(header) -            else: -                print "Distortion model is not available: IDCTAB=None\n" -                self.idcmodel = None -        elif not os.path.exists(fileutil.osfn(self.idctab)): -            if header is not None and header.has_key('IDCSCALE'): -                self._readModelFromHeader(header) -            else: -                print 'Distortion model is not available: IDCTAB file %s not found\n' % self.idctab -                self.idcmodel = None -        else: -            self.readModelFromIDCTAB(header=header, update=update) - -    def _readModelFromHeader(self, header): -        # Recreate idc model from SIP coefficients and header kw -        print 'Restoring IDC model from SIP coefficients\n' -        model = models.GeometryModel() -        cx, cy = coeff_converter.sip2idc(self) -        model.cx = cx -        model.cy = cy -        model.name = "sip" -        model.norder = header['A_ORDER'] - -        refpix = {} -        refpix['XREF'] = header['IDCXREF'] -        refpix['YREF'] = header['IDCYREF'] -        refpix['PSCALE'] = header['IDCSCALE'] -        refpix['V2REF'] = header['IDCV2REF'] -        refpix['V3REF'] = header['IDCV3REF'] -        refpix['THETA'] = header['IDCTHETA'] -        model.refpix = refpix - -        self.idcmodel = model - - -    def readModelFromIDCTAB(self, header=None, update=False): -        """ -        Read distortion model from idc table. 
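# The plate scale and ORIENTAT above come straight from the CD matrix; a
# self-contained sketch of the same arithmetic (the matrix values are made up):

import numpy as np

cd = np.array([[-1.4e-5, 2.0e-6],
               [ 2.1e-6, 1.4e-5]])                       # CD matrix in degrees/pixel
pscale = np.sqrt(cd[0, 0]**2 + cd[1, 0]**2) * 3600.      # arcsec/pixel, as in setPscale()
orientat = np.rad2deg(np.arctan2(cd[0, 1], cd[1, 1]))    # degrees, as in setOrient()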
- -        Parameters -        ---------- -        header: pyfits.Header -                fits extension header -        update: boolean (False) -                if True - save teh following as header keywords: -                CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF, -                IDCV2REF, IDCV3REF - -        """ -        if self.date_obs == None: -            print 'date_obs not available\n' -            self.idcmodel = None -            return -        if self.filter1 == None and self.filter2 == None: -            'No filter information available\n' -            self.idcmodel = None -            return - -        self.idcmodel = models.IDCModel(self.idctab, -                    chip=self.chip, direction='forward', date=self.date_obs, -                    filter1=self.filter1, filter2=self.filter2, -                    offtab=self.offtab, binned=self.binned) - -        if update: -            if header==None: -                print 'Update header with IDC model kw requested but header was not provided\n.' -            else: -                self._updatehdr(header) - -    def wcs2header(self, sip2hdr=False, idc2hdr=True): -        """ -        Create a pyfits.Header object from WCS keywords. - -        If the original header had a CD matrix, return a CD matrix, -        otherwise return a PC matrix. - -        Parameters -        ---------- -        sip2hdr: boolean -                 If True - include SIP coefficients -        """ -        h = self.to_header() -        if self.wcs.has_cd(): -            h = altwcs.pc2cd(h, key=self.wcskey) - -        if idc2hdr: -            for card in self._idc2hdr(): -                h.update(card.key,value=card.value,comment=card.comment) -        try: -            del h.ascard['RESTFRQ'] -            del h.ascard['RESTWAV'] -        except KeyError: pass - -        if sip2hdr and self.sip: -            for card in self._sip2hdr('a'): -                h.update(card.key,value=card.value,comment=card.comment) -            for card in self._sip2hdr('b'): -                h.update(card.key,value=card.value,comment=card.comment) - -            try: -                ap = self.sip.ap -            except AssertionError: -                ap = None -            try: -                bp = self.sip.bp -            except AssertionError: -                bp = None - -            if ap: -                for card in self._sip2hdr('ap'): -                    h.update(card.key,value=card.value,comment=card.comment) -            if bp: -                for card in self._sip2hdr('bp'): -                    h.update(card.key,value=card.value,comment=card.comment) -        return h - - -    def _sip2hdr(self, k): -        """ -        Get a set of SIP coefficients in the form of an array -        and turn them into a pyfits.Cardlist. 
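# A usage sketch for wcs2header() above (file name hypothetical): export the
# active WCS of a SCI extension, including the SIP coefficients, to a new header.

from stwcs import wcsutil

w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=1)
hdr = w.wcs2header(sip2hdr=True)          # pyfits.Header with WCS, IDC and SIP cards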
-        k - one of 'a', 'b', 'ap', 'bp' -        """ - -        cards = pyfits.CardList() -        korder = self.sip.__getattribute__(k+'_order') -        cards.append(pyfits.Card(key=k.upper()+'_ORDER', value=korder)) -        coeffs = self.sip.__getattribute__(k) -        ind = coeffs.nonzero() -        for i in range(len(ind[0])): -            card = pyfits.Card(key=k.upper()+'_'+str(ind[0][i])+'_'+str(ind[1][i]), -                               value=coeffs[ind[0][i], ind[1][i]]) -            cards.append(card) -        return cards - -    def _idc2hdr(self): -        # save some of the idc coefficients -        coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale'] -        cards = pyfits.CardList() -        for c in coeffs: -            try: -                val = self.__getattribute__(c) -            except AttributeError: -                continue -            cards.append(pyfits.Card(key=c, value=val)) -        return cards - -    def pc2cd(self): -        self.wcs.cd = self.wcs.pc.copy() - -    def all_sky2pix(self,ra,dec,origin): -        """ -        Performs full inverse transformation using iterative solution -        on full forward transformation with complete distortion model. - -        NOTES -        ----- -        We now need to find the position we want by iterative -        improvement of an initial guess - the centre of the chip - -        The method is to derive an "effective CD matrix" and use that -        to apply a correction until we are close enough (as defined by -        the ERR variable) - -        Code from the drizzle task TRANBACK (dither$drizzle/tranback.f) -        defined the algorithm for this implementation - -        """ -        from stwcs.distortion import utils - -        # Define some output arrays -        xout = np.zeros(len(ra),dtype=np.float64) -        yout = np.zeros(len(ra),dtype=np.float64) -        # ... and internal arrays -        x = np.zeros(3,dtype=np.float64) -        y = np.zeros(3,dtype=np.float64) - -        # define delta for comparison -        err = 0.0001 - -        # Use linear WCS as frame in which to perform fit -        # rather than on the sky -        undistort = True -        if self.sip is None: -            # Only apply distortion if distortion coeffs are present. 
-            undistort = False -        wcslin = utils.output_wcs([self],undistort=undistort) - -        # We can only transform 1 position at a time -        for r,d,n in zip(ra,dec,xrange(len(ra))): -            # First guess for output -            x[0],y[0] = self.wcs_sky2pix(r,d,origin) -            # also convert RA,Dec into undistorted linear pixel positions -            lx,ly = wcslin.wcs_sky2pix(r,d,origin) - -            # Loop around until we are close enough (max 20 iterations) -            ev_old = None -            for i in xrange(20): -                x[1] = x[0] + 1.0 -                y[1] = y[0] -                x[2] = x[0] -                y[2] = y[0] + 1.0 -                # Perform full transformation on pixel position -                rao,deco = self.all_pix2sky(x,y,origin) -                # convert RA,Dec into linear pixel positions for fitting -                xo,yo = wcslin.wcs_sky2pix(rao,deco,origin) - -                # Compute deltas between output and initial guess as -                # an affine transform then invert that transformation -                dxymat = np.array([[xo[1]-xo[0],yo[1]-yo[0]], -                          [xo[2]-xo[0],yo[2]-yo[0]]],dtype=np.float64) - -                invmat = np.linalg.inv(dxymat) -                # compute error in output position -                dx = lx - xo[0] -                dy = ly - yo[0] - -                # record the old position -                xold = x[0] -                yold = y[0] - -                # Update the initial guess position using the transform -                x[0] = xold + dx*dxymat[0][0] + dy*dxymat[1][0] -                y[0] = yold + dx*dxymat[0][1] + dy*dxymat[1][1] - -                # Work out the error vector length -                ev = np.sqrt((x[0] - xold)**2 + (y[0] - yold)**2) - -                # initialize record of previous error measurement during 1st iteration -                if ev_old is None: -                    ev_old = ev - -                # Check to see whether we have reached the limit or -                # the new error is greater than error from previous iteration -                if ev < err or (np.abs(ev) > np.abs(ev_old)): -                    break -                # remember error measurement from previous iteration -                ev_old = ev - -            xout[n] = x[0] -            yout[n] = y[0] - -        return xout,yout - -    def _updatehdr(self, ext_hdr): -        #kw2add : OCX10, OCX11, OCY10, OCY11 -        # record the model in the header for use by pydrizzle -        ext_hdr.update('OCX10', self.idcmodel.cx[1,0]) -        ext_hdr.update('OCX11', self.idcmodel.cx[1,1]) -        ext_hdr.update('OCY10', self.idcmodel.cy[1,0]) -        ext_hdr.update('OCY11', self.idcmodel.cy[1,1]) -        ext_hdr.update('IDCSCALE', self.idcmodel.refpix['PSCALE']) -        ext_hdr.update('IDCTHETA', self.idcmodel.refpix['THETA']) -        ext_hdr.update('IDCXREF', self.idcmodel.refpix['XREF']) -        ext_hdr.update('IDCYREF', self.idcmodel.refpix['YREF']) -        ext_hdr.update('IDCV2REF', self.idcmodel.refpix['V2REF']) -        ext_hdr.update('IDCV3REF', self.idcmodel.refpix['V3REF']) - -    def printwcs(self): -        """ -        Print the basic WCS keywords. 
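# A short sketch of the iterative inverse described in all_sky2pix() above
# (file name and coordinates hypothetical); ra and dec are equal-length arrays
# and origin follows the usual 0- or 1-based pixel convention.

import numpy as np
from stwcs import wcsutil

w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=1)
ra = np.array([5.6305, 5.6342])
dec = np.array([-72.0520, -72.0561])
x, y = w.all_sky2pix(ra, dec, 1)     # pixel positions through the full distortion model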
-        """ -        print 'WCS Keywords\n' -        print 'CD_11  CD_12: %r %r' % (self.wcs.cd[0,0],  self.wcs.cd[0,1]) -        print 'CD_21  CD_22: %r %r' % (self.wcs.cd[1,0],  self.wcs.cd[1,1]) -        print 'CRVAL    : %r %r' % (self.wcs.crval[0], self.wcs.crval[1]) -        print 'CRPIX    : %r %r' % (self.wcs.crpix[0], self.wcs.crpix[1]) -        print 'NAXIS    : %d %d' % (self.naxis1, self.naxis2) -        print 'Plate Scale : %r' % self.pscale -        print 'ORIENTAT : %r' % self.orientat - - diff --git a/wcsutil/instruments.py b/wcsutil/instruments.py deleted file mode 100644 index 997bdc8..0000000 --- a/wcsutil/instruments.py +++ /dev/null @@ -1,321 +0,0 @@ -from __future__ import division # confidence high - -import pyfits -from mappings import ins_spec_kw -     -class InstrWCS(object): -    """ -    A base class for instrument specific keyword definition. -    It prvides a default implementation (modeled by ACS) for -    all set_kw methods. -    """ -    def __init__(self, hdr0=None, hdr=None): -        self.exthdr = hdr    -        self.primhdr = hdr0 -        self.set_ins_spec_kw() -         -    def set_ins_spec_kw(self): -        """ -        This method MUST call all set_kw methods. -        There should be a set_kw method for all kw listed in  -        mappings.ins_spec_kw. TypeError handles the case when  -        fobj='DEFAULT'. -        """ -        self.set_idctab() -        self.set_offtab() -        self.set_date_obs() -        self.set_ra_targ() -        self.set_dec_targ() -        self.set_pav3() -        self.set_detector() -        self.set_filter1() -        self.set_filter2() -        self.set_vafactor() -        self.set_naxis1() -        self.set_naxis2() -        self.set_ltv1() -        self.set_ltv2() -        self.set_binned() -        self.set_chip() -        self.set_parity() -         -    def set_idctab(self): -        try: -            self.idctab = self.primhdr['IDCTAB'] -        except (KeyError, TypeError): -            self.idctab = None -             -    def set_offtab(self): -        try: -            self.offtab = self.primhdr['OFFTAB'] -        except (KeyError, TypeError): -            self.offtab = None - -    def set_date_obs(self): -        try: -            self.date_obs = self.primhdr['DATE-OBS'] -        except (KeyError, TypeError): -            self.date_obs = None -             -    def set_ra_targ(self): -        try: -            self.ra_targ = self.primhdr['RA-TARG'] -        except (KeyError, TypeError): -            self.ra_targ = None -     -    def set_dec_targ(self): -        try: -            self.dec_targ = self.primhdr['DEC-TARG'] -        except (KeyError, TypeError): -            self.dec_targ = None - -    def set_pav3(self): -        try: -            self.pav3 = self.primhdr['PA_V3'] -        except (KeyError, TypeError): -            self.pav3 = None -             -    def set_filter1(self): -        try: -            self.filter1 = self.primhdr['FILTER1'] -        except (KeyError, TypeError): -            self.filter1 = None -             -    def set_filter2(self): -        try: -            self.filter2 = self.primhdr['FILTER2'] -        except (KeyError, TypeError): -            self.filter2 = None -             -    def set_vafactor(self):  -        try: -            self.vafactor = self.exthdr['VAFACTOR'] -        except (KeyError, TypeError): -            self.vafactor = 1 -             -    def set_naxis1(self): -        try: -            self.naxis1 = self.exthdr['naxis1'] -        except 
(KeyError, TypeError): -            try: -                self.naxis1 = self.exthdr['npix1'] -            except (KeyError, TypeError): -                self.naxis1 = None -                 -    def set_naxis2(self): -        try: -            self.naxis2 = self.exthdr['naxis2'] -        except (KeyError, TypeError): -            try: -                self.naxis2 = self.exthdr['npix2'] -            except (KeyError, TypeError): -                self.naxis2 = None -                 -    def set_ltv1(self): -        try: -            self.ltv1 = self.exthdr['LTV1'] -        except (KeyError, TypeError): -            self.ltv1 = 0.0 -         -    def set_ltv2(self): -        try: -            self.ltv2 = self.exthdr['LTV2'] -        except (KeyError, TypeError): -            self.ltv2 = 0.0 -             -    def set_binned(self): -        try: -            self.binned = self.exthdr['BINAXIS1'] -        except (KeyError, TypeError): -            self.binned = 1 -         -    def set_chip(self): -        try: -            self.chip = self.exthdr['CCDCHIP'] -        except (KeyError, TypeError): -            self.chip = 1 -                   -    def set_parity(self): -        self.parity = [[1.0,0.0],[0.0,-1.0]] -     -    def set_detector(self): -        # each instrument has a different kw for detector and it can be  -        # in a different header, so this is to be handled by the instrument classes  -        self.detector = 'DEFAULT' -         -class ACSWCS(InstrWCS): -    """ -    get instrument specific kw     -    """ -     -    def __init__(self, hdr0, hdr): -        self.primhdr = hdr0 -        self.exthdr = hdr -        InstrWCS.__init__(self,hdr0, hdr) -        self.set_ins_spec_kw() -         -    def set_detector(self): -        try: -            self.detector = self.primhdr['DETECTOR']   -        except KeyError: -            print 'ERROR: Detector kw not found.\n' -            raise -         -    def set_parity(self): -        parity = {'WFC':[[1.0,0.0],[0.0,-1.0]], -                'HRC':[[-1.0,0.0],[0.0,1.0]], -                'SBC':[[-1.0,0.0],[0.0,1.0]]} -         -        if self.detector not in parity.keys(): -            parity = InstrWCS.set_parity(self) -        else: -            self.parity = parity[self.detector] -     -         -class WFPC2WCS(InstrWCS):    - - -    def __init__(self, hdr0, hdr): -        self.primhdr = hdr0 -        self.exthdr = hdr -        InstrWCS.__init__(self,hdr0, hdr) -        self.set_ins_spec_kw() -     -    def set_filter1(self): -        self.filter1 = self.primhdr.get('FILTNAM1', None) -        if self.filter1 == " " or self.filter1 == None: -            self.filter1 = 'CLEAR1' - -    def set_filter2(self): -        self.filter2 = self.primhdr.get('FILTNAM2', None) -        if self.filter2 == " " or self.filter2 == None: -            self.filter2 = 'CLEAR2' -             -     -    def set_binned(self): -        mode = self.primhdr.get('MODE', 1) -        if mode == 'FULL': -            self.binned = 1 -        elif mode == 'AREA': -            self.binned = 2 - -    def set_chip(self): -        self.chip = self.exthdr.get('DETECTOR', 1) -     -    def set_parity(self): -        self.parity = [[-1.0,0.],[0.,1.0]] -         -    def set_detector(self): -        try: -            self.detector = self.exthdr['DETECTOR'] -        except KeyError: -            print 'ERROR: Detector kw not found.\n' -            raise -     - -class WFC3WCS(InstrWCS): -    """ -    Create a WFC3 detector specific class -    """ -     -    def 
__init__(self, hdr0, hdr): -        self.primhdr = hdr0 -        self.exthdr = hdr -        InstrWCS.__init__(self,hdr0, hdr) -        self.set_ins_spec_kw() -     -    def set_detector(self): -        try: -            self.detector = self.primhdr['DETECTOR']   -        except KeyError: -            print 'ERROR: Detector kw not found.\n' -            raise -         -    def set_filter1(self): -        self.filter1 = self.primhdr.get('FILTER', None) -        if self.filter1 == " " or self.filter1 == None: -            self.filter1 = 'CLEAR' -     -    def set_filter2(self): -        #Nicmos idc tables do not allow 2 filters. -        self.filter2 = 'CLEAR' -         -    def set_parity(self): -        parity = {'UVIS':[[-1.0,0.0],[0.0,1.0]],  -          'IR':[[-1.0,0.0],[0.0,1.0]]} -         -        if self.detector not in parity.keys(): -            parity = InstrWCS.set_parity(self) -        else: -            self.parity = parity[self.detector] -             -class NICMOSWCS(InstrWCS): -    """ -    Create a NICMOS specific class -    """ -     -    def __init__(self, hdr0, hdr): -        self.primhdr = hdr0 -        self.exthdr = hdr -        InstrWCS.__init__(self,hdr0, hdr) -        self.set_ins_spec_kw() -     -    def set_parity(self): -        self.parity = [[-1.0,0.],[0.,1.0]] -         -    def set_filter1(self): -        self.filter1 = self.primhdr.get('FILTER', None) -        if self.filter1 == " " or self.filter1 == None: -            self.filter1 = 'CLEAR' - -    def set_filter2(self): -        #Nicmos idc tables do not allow 2 filters. -        self.filter2 = 'CLEAR' -         -    def set_chip(self): -        self.chip = self.detector -             -    def set_detector(self): -        try: -            self.detector = self.primhdr['CAMERA']   -        except KeyError: -            print 'ERROR: Detector kw not found.\n' -            raise -     -class STISWCS(InstrWCS): -    """ -    A STIS specific class -    """ -     -    def __init__(self, hdr0, hdr): -        self.primhdr = hdr0 -        self.exthdr = hdr -        InstrWCS.__init__(self,hdr0, hdr) -        self.set_ins_spec_kw() -         -    def set_parity(self): -        self.parity = [[-1.0,0.],[0.,1.0]] -         -    def set_filter1(self): -        self.filter1 = self.exthdr.get('OPT_ELEM', None) -        if self.filter1 == " " or self.filter1 == None: -            self.filter1 = 'CLEAR1' - -    def set_filter2(self): -        self.filter2 = self.exthdr.get('FILTER', None) -        if self.filter2 == " " or self.filter2 == None: -            self.filter2 = 'CLEAR2' -             -    def set_detector(self): -        try: -            self.detector = self.primhdr['DETECTOR']   -        except KeyError: -            print 'ERROR: Detector kw not found.\n' -            raise -         -    def set_date_obs(self): -        try: -            self.date_obs = self.exthdr['DATE-OBS'] -        except (KeyError, TypeError): -            self.date_obs = None -    
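# The instrument classes above all follow one pattern: store the two headers, let
# InstrWCS supply defaults, and override only the set_* methods that differ. A
# hypothetical new instrument class (illustrative only, not part of the module)
# would look like the sketch below; it would also need an entry in
# mappings.inst_mappings so that HSTWCS.setInstrSpecKw() can find it.

from stwcs.wcsutil.instruments import InstrWCS   # not needed if defined inside instruments.py

class NEWINSTWCS(InstrWCS):
    """Illustrative sketch of an instrument-specific keyword class."""

    def __init__(self, hdr0, hdr):
        self.primhdr = hdr0
        self.exthdr = hdr
        InstrWCS.__init__(self, hdr0, hdr)
        self.set_ins_spec_kw()

    def set_detector(self):
        # this hypothetical instrument records the detector in the primary header
        self.detector = self.primhdr.get('DETECTOR', 'DEFAULT')

    def set_parity(self):
        self.parity = [[-1.0, 0.0], [0.0, 1.0]]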
\ No newline at end of file diff --git a/wcsutil/mappings.py b/wcsutil/mappings.py deleted file mode 100644 index 24038bf..0000000 --- a/wcsutil/mappings.py +++ /dev/null @@ -1,29 +0,0 @@ -from __future__ import division # confidence high - -# This dictionary maps an instrument into an instrument class -# The instrument class handles instrument specific keywords - -inst_mappings={'WFPC2': 'WFPC2WCS', -                'ACS': 'ACSWCS', -                'NICMOS': 'NICMOSWCS', -                'STIS': 'STISWCS', -                'WFC3': 'WFC3WCS', -                'DEFAULT': 'InstrWCS' -                } - - -# A list of instrument specific keywords -# Every instrument class must have methods which define each of these -# as class attributes. -ins_spec_kw = [ 'idctab', 'offtab', 'date_obs', 'ra_targ', 'dec_targ', 'pav3', \ -                'detector', 'ltv1', 'ltv2', 'parity', 'binned','vafactor', \ -                'chip', 'naxis1', 'naxis2', 'filter1', 'filter2'] - -# A list of keywords defined in the primary header. -# The HSTWCS class sets this as attributes  -prim_hdr_kw = ['detector', 'offtab', 'idctab', 'date-obs',  -              'pa_v3', 'ra_targ', 'dec_targ'] - -# These are the keywords which are archived before MakeWCS is run -basic_wcs = ['CD1_', 'CD2_', 'CRVAL', 'CTYPE', 'CRPIX', 'CTYPE', 'CDELT', 'CUNIT'] -             diff --git a/wcsutil/mosaic.py b/wcsutil/mosaic.py deleted file mode 100644 index d4ee660..0000000 --- a/wcsutil/mosaic.py +++ /dev/null @@ -1,183 +0,0 @@ -from __future__ import division -import numpy as np -from matplotlib import pyplot as plt -import pyfits -import string - -from pytools import parseinput, irafglob -from stwcs.distortion import utils -from stwcs import updatewcs, wcsutil -from stwcs.wcsutil import altwcs - -def vmosaic(fnames, outwcs=None, ref_wcs=None, ext=None, extname=None, undistort=True, wkey='V', wname='VirtualMosaic', plot=False, clobber=False): -    """ -    Create a virtual mosaic using the WCS of the input images. - -    Parameters -    ---------- -    fnames: a string or a list -              a string or a list of filenames, or a list of wcsutil.HSTWCS objects -    outwcs: an HSTWCS object -             if given, represents the output tangent plane -             if None, the output WCS is calculated from the input observations. -    ref_wcs: an HSTWCS object -             if output wcs is not given, this will be used as a reference for the -             calculation of the output WCS. If ref_wcs is None and outwcs is None, -             then the first observation in th einput list is used as reference. -    ext:    an int, a tuple or a list -              an int - represents a FITS extension, e.g. 0 is the primary HDU -              a tuple - uses the notation (extname, extver), e.g. ('sci',1) -              Can be a list of integers or tuples representing FITS extensions -    extname: string -              the value of the EXTNAME keyword for the extensions to be used in the mosaic -    undistort: boolean (default: True) -               undistort (or not) the output WCS -    wkey:   string -              default: 'V' -              one character A-Z to be used to record the virtual mosaic WCS as -              an alternate WCS in the headers of the input files. 
-    wname:  string -              default: 'VirtualMosaic -              a string to be used as a WCSNAME value for the alternate WCS representign -              the virtual mosaic -    plot:   boolean -              if True and matplotlib is installed will make a plot of the tangent plane -              and the location of the input observations. -    clobber: boolean -              This covers the case when an alternate WCS with the requested key -              already exists in the header of the input files. -              if clobber is True, it will be overwritten -              if False, it will compute the new one but will not write it to the headers. - -    Notes -    ----- -    The algorithm is: -    1. If output WCS is not given it is calculated from the input WCSes. -       The first image is used as a reference, if no reference is given. -       This represents the virtual mosaic WCS. -    2. For each input observation/chip, an HSTWCS object is created -       and its footprint on the sky is calculated (using only the four corners). -    3. For each input observation the footprint is projected on the output -       tangent plane and the virtual WCS is recorded in the header. -    """ -    wcsobjects = readWCS(fnames, ext, extname) -    if outwcs != None: -        outwcs = outwcs.deepcopy() -    else: -        if ref_wcs != None: -            outwcs = utils.output_wcs(wcsobjects, ref_wcs=ref_wcs, undistort=undistort) -        else: -            outwcs = utils.output_wcs(wcsobjects, undistort=undistort) -    if plot: -        outc=np.array([[0.,0], [outwcs.naxis1,0], -                             [outwcs.naxis1, outwcs.naxis2], -                             [0,outwcs.naxis2], [0,0]]) -        plt.plot(outc[:,0], outc[:,1]) -    for wobj in wcsobjects: -        outcorners = outwcs.wcs_sky2pix(wobj.calcFootprint(),1) -        if plot: -            plt.plot(outcorners[:,0], outcorners[:,1]) -        objwcs = outwcs.deepcopy() -        objwcs.wcs.crpix = objwcs.wcs.crpix - (outcorners[0]) -        updatehdr(wobj.filename, objwcs,wkey=wkey, wcsname=wname, ext=wobj.extname, clobber=clobber) -    return outwcs - -def updatehdr(fname, wcsobj, wkey, wcsname, ext=1, clobber=False): -    hdr = pyfits.getheader(fname, ext=ext) -    all_keys = list(string.ascii_uppercase) -    if wkey.upper() not in all_keys: -        raise KeyError, "wkey must be one character: A-Z" -    if wkey not in altwcs.available_wcskeys(hdr): -        if not clobber: -            raise ValueError, "wkey %s is already in use. Use clobber=True to overwrite it or specify a different key." 
%wkey -        else: -            altwcs.deleteWCS(fname, ext=ext, wcskey='V') -    f = pyfits.open(fname, mode='update') - -    hwcs = wcs2header(wcsobj) -    wcsnamekey = 'WCSNAME' + wkey -    f[ext].header.update(key=wcsnamekey, value=wcsname) -    for k in hwcs.keys(): -        f[ext].header.update(key=k[:7]+wkey, value=hwcs[k]) - -    f.close() - -def wcs2header(wcsobj): - -    h = wcsobj.to_header() - -    if wcsobj.wcs.has_cd(): -        altwcs.pc2cd(h) -    h.update('CTYPE1', 'RA---TAN') -    h.update('CTYPE2', 'DEC--TAN') -    norient = np.rad2deg(np.arctan2(h['CD1_2'],h['CD2_2'])) -    #okey = 'ORIENT%s' % wkey -    okey = 'ORIENT' -    h.update(key=okey, value=norient) -    return h - -def readWCS(input, exts=None, extname=None): -    if isinstance(input, str): -        if input[0] == '@': -            # input is an @ file -            filelist = irafglob.irafglob(input) -        else: -            try: -                filelist, output = parseinput.parseinput(input) -            except IOError: raise -    elif isinstance(input, list): -        if isinstance(input[0], wcsutil.HSTWCS): -            # a list of HSTWCS objects -            return input -        else: -            filelist = input[:] -    wcso = [] -    fomited = [] -    # figure out which FITS extension(s) to use -    if exts == None and extname == None: -        #Assume it's simple FITS and the data is in the primary HDU -        for f in filelist: -            try: -                wcso = wcsutil.HSTWCS(f) -            except AttributeError: -                fomited.append(f) -                continue -    elif exts != None and validateExt(exts): -        exts = [exts] -        for f in filelist: -            try: -                wcso.extend([wcsutil.HSTWCS(f, ext=e) for e in exts]) -            except KeyError: -                fomited.append(f) -                continue -    elif extname != None: -        for f in filelist: -            fobj = pyfits.open(f) -            for i in range(len(fobj)): -                try: -                    ename = fobj[i].header['EXTNAME'] -                except KeyError: -                    continue -                if ename.lower() == extname.lower(): -                    wcso.append(wcsutil.HSTWCS(f,ext=i)) -                else: -                    continue -            fobj.close() -    if fomited != []: -        print "These files were skipped:" -        for f in fomited: -            print f -    return wcso - - -def validateExt(ext): -    if not isinstance(ext, int) and not isinstance(ext, tuple) \ -       and not isinstance(ext, list): -        print "Ext must be integer, tuple, a list of int extension numbers, \ -        or a list of tuples representing a fits extension, for example ('sci', 1)." 
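# A usage sketch for vmosaic() above (input file names hypothetical; the module
# path is assumed to follow the stwcs.wcsutil layout used elsewhere in this
# package): compute a common tangent-plane WCS for a set of exposures and record
# it as alternate WCS 'V' in every SCI extension of the inputs.

from stwcs.wcsutil import mosaic

fnames = ['j9irw1aaq_flt.fits', 'j9irw2bbq_flt.fits']     # hypothetical inputs
outwcs = mosaic.vmosaic(fnames, extname='sci', wkey='V',
                        wname='VirtualMosaic', undistort=True)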
-        return False -    else: -        return True - - - diff --git a/wcsutil/wcscorr.py b/wcsutil/wcscorr.py deleted file mode 100644 index bab0964..0000000 --- a/wcsutil/wcscorr.py +++ /dev/null @@ -1,458 +0,0 @@ -import os,copy -import pyfits -import numpy as np - -from pytools import fileutil -import stwcs -from stwcs.wcsutil import altwcs -import convertwcs - - -DEFAULT_WCS_KEYS = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2', -                    'CD1_1','CD1_2','CD2_1','CD2_2', -                    'CTYPE1','CTYPE2'] -DEFAULT_PRI_KEYS = ['PA_V3'] -### -### WCSEXT table related keyword archive functions -### -def init_wcscorr(input, force=False): -    """ -    This function will initialize the WCSCORR table if it is not already present, -    and look for WCS keywords with a prefix of 'O' as the original OPUS -    generated WCS as the initial row for the table or use the current WCS -    keywords as initial row if no 'O' prefix keywords are found. - -    This function will NOT overwrite any rows already present. - -    This function works on all SCI extensions at one time. -    """ - -    # TODO: Create some sort of decorator or (for Python2.5) context for -    # opening a FITS file and closing it when done, if necessary -    if not isinstance(input, pyfits.HDUList): -        # input must be a filename, so open as PyFITS object -        fimg = pyfits.open(input, mode='update') -        need_to_close = True -    else: -        fimg = input -        need_to_close = False - -    # Do not try to generate a WCSCORR table for a simple FITS file -    if len(fimg) == 1: -        return  -     -    # Verify that a WCSCORR extension does not already exist... -    for extn in fimg: -        if extn.header.has_key('extname') and \ -           extn.header['extname'] == 'WCSCORR': -            if not force: -                return -            else: -                del fimg['WCSCORR'] -    # define the primary columns of the WCSEXT table with initial rows for each -    # SCI extension for the original OPUS solution -    numsci = fileutil.countExtn(fimg) - -    # create new table with more rows than needed initially to make it easier to -    # add new rows later -    wcsext = create_wcscorr(numrows=numsci, padding=numsci * 4) -    # Assign the correct EXTNAME value to this table extension -    wcsext.header.update('TROWS', numsci * 2, -                         comment='Number of updated rows in table') -    wcsext.header.update('EXTNAME', 'WCSCORR', -                         comment='Table with WCS Update history') -    wcsext.header.update('EXTVER', 1) - -    used_wcskeys = None -    wcs1 = stwcs.wcsutil.HSTWCS(fimg,ext=('SCI',1)) -    idc2header = True -    if wcs1.idcscale is None: -        idc2header = False -    wcs_keywords = wcs1.wcs2header(idc2hdr=idc2header).keys() - -    # Now copy original OPUS values into table -    for extver in xrange(1, numsci + 1): -        rowind = find_wcscorr_row(wcsext.data, -                                  {'WCS_ID': 'OPUS', 'EXTVER': extver, -                                   'WCS_key':'O'}) -        # There should only EVER be a single row for each extension with OPUS values -        rownum = np.where(rowind)[0][0] -        #print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver - -        hdr = fimg['SCI', extver].header -        # define set of WCS keywords which need to be managed and copied to the table -        if used_wcskeys is None: -            used_wcskeys = altwcs.wcskeys(hdr) -        # Check to see whether or not there 
is an OPUS alternate WCS present, -        # if so, get its values directly, otherwise, archive the PRIMARY WCS -        # as the OPUS values in the WCSCORR table -        if 'O' not in used_wcskeys: -            altwcs.archiveWCS(fimg,('SCI',extver),wcskey='O', wcsname='OPUS') -        wkey = 'O' - -        wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey) -        wcshdr = wcs.wcs2header(idc2hdr=idc2header) - -        for key in DEFAULT_PRI_KEYS: -            prihdr_keys = [] -            if not hdr.has_key(key): -                prihdr_keys.append(key) - -        if wcsext.data.field('CRVAL1')[rownum] != 0: -            # If we find values for these keywords already in the table, do not -            # overwrite them again -            print 'WCS keywords already updated...' -            break -        for key in wcs_keywords: -            if key in wcsext.data.names: -                wcsext.data.field(key)[rownum] = wcshdr[(key+wkey)[:8]] -        # Now get any keywords from PRIMARY header needed for WCS updates -        for key in prihdr_keys: -            wcsext.data.field(key)[rownum] = fimg[0].header[key] - -    # Now that we have archived the OPUS alternate WCS, remove it from the list -    # of used_wcskeys -    if 'O' in used_wcskeys: -        used_wcskeys.remove('O') - -    # Now copy remaining alternate WCSs into table -    # TODO: Much of this appears to be redundant with update_wcscorr; consider -    # merging them... -    for uwkey in used_wcskeys: -        if wkey == ' ': -            break -        for extver in xrange(1, numsci + 1): -            hdr = fimg['SCI', extver].header -            wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), -                                       wcskey=uwkey) -            wcshdr = wcs.wcs2header() -            if 'WCSNAME' + uwkey not in wcshdr: -                idctab = fileutil.osfn(fimg[0].header['idctab']) -                idcname = os.path.split(idctab)[-1] -                idcname = idcname[:idcname.find('_')] -                wcsid = 'IDC_' + idcname -            else: -                wcsid = wcshdr['WCSNAME' + uwkey] - -            # identify next empty row -            rowind = find_wcscorr_row(wcsext.data, selections={'wcs_id': ''}) -            rows = np.where(rowind) -            if len(rows[0]) > 0: -                rownum = np.where(rowind)[0][0] -            else: -                print 'No available rows found for updating. 
' -            #print 'Archiving current WCS row number ',rownum,' in WCSCORR table for SCI,',extver - -            # Update selection columns for this row with relevant values -            wcsext.data.field('WCS_ID')[rownum] = wcsid -            wcsext.data.field('EXTVER')[rownum] = extver -            wcsext.data.field('WCS_key')[rownum] = uwkey - -            # Look for standard WCS keyword values -            for key in wcs_keywords: -                if key in wcsext.data.names: -                    wcsext.data.field(key)[rownum] = wcshdr[key + uwkey] -            # Now get any keywords from PRIMARY header needed for WCS updates -            for key in prihdr_keys: -                wcsext.data.field(key)[rownum] = fimg[0].header[key] - -    # Append this table to the image FITS file -    fimg.append(wcsext) -    # force an update now -    # set the verify flag to 'warn' so that it will always succeed, but still -    # tell the user if PyFITS detects any problems with the file as a whole -    fimg.flush('warn') - -    if need_to_close: -        fimg.close() - - -def find_wcscorr_row(wcstab, selections): -    """ -    Return an array of indices from the table (NOT HDU) 'wcstab' that matches the -    selections specified by the user. - -    The row selection criteria must be specified as a dictionary with -    column name as key and value(s) representing the valid desired row values. -    For example, {'wcs_id':'OPUS','extver':2}. -    """ - -    mask = None -    for i in selections: -        icol = wcstab.field(i) -        if isinstance(icol,np.chararray): icol = icol.rstrip() -        bmask = (icol == selections[i]) -        if mask is None: -            mask = bmask.copy() -        else: -            mask = np.logical_and(mask,bmask) -        del bmask -    return mask - - -def archive_wcs_file(image, wcs_id=None): -    """ -    Update WCSCORR table with rows for each SCI extension to record the -    newly updated WCS keyword values. -    """ - -    if not isinstance(image, pyfits.HDUList): -        fimg = pyfits.open(image, mode='update') -        close_image = True -    else: -        fimg = image -        close_image = False - -    update_wcscorr(fimg, wcs_id=wcs_id) - -    if close_image: -        fimg.close() - - -def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None): -    """ -    Update WCSCORR table with a new row or rows for this extension header. It -    copies the current set of WCS keywords as a new row of the table based on -    keyed WCSs as per Paper I Multiple WCS standard). - -    Parameters -    ---------- -    dest : HDUList -        The HDU list whose WCSCORR table should be appended to (the WCSCORR HDU -        must already exist) -    source : HDUList, optional -        The HDU list containing the extension from which to extract the WCS -        keywords to add to the WCSCORR table.  If None, the dest is also used -        as the source. -    extname : str, optional -        The extension name from which to take new WCS keywords.  If there are -        multiple extensions with that name, rows are added for each extension -        version. -    wcs_id : str, optional -        The name of the WCS to add, as in the WCSNAMEa keyword.  If -        unspecified, all the WCSs in the specified extensions are added. -    """ - -    if source is None: -        source = dest - -    numext = fileutil.countExtn(source, extname) -    if numext == 0: -        raise ValueError('No %s extensions found in the source HDU list.' 
-                         % extname) - -    # Current implementation assumes the same WCS keywords are in each -    # extension version; if this should not be assumed then this can be -    # modified... -    wcs_keys = altwcs.wcskeys(source[(extname, 1)].header) -    wcs_keys = filter(None, wcs_keys) -    wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header() -    wcs_keywords = wcshdr.keys() - -    if 'O' in wcs_keys: -        wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS - -    # If we're looking for a particular wcs_id, test ahead of time that it's -    # actually present in the specified extension headers -    if wcs_id: -        wcs_key = '' -        for wcs_key in wcs_keys: -            wcsname = source[(extname, 1)].header['WCSNAME' + wcs_key] -            if wcs_id == wcsname: -                break -        else: -            raise ValueError('A WCS with name %s was not found in the %s ' -                             'extension headers in the source HDU list.' -                             % (wcs_id, extname)) -        wcs_keys = [wcs_key] # We're only interested in this one - -    # create new table for hdr and populate it with the newly updated values -    new_table = create_wcscorr(numrows=0, padding=len(wcs_keys)*numext) -    old_table = dest['WCSCORR'] - -    idx = -1 -    for wcs_key in wcs_keys: -        for extver in range(1, numext + 1): -            extn = (extname, extver) -            hdr = source[extn].header -            wcsname = hdr['WCSNAME' + wcs_key] -            selection = {'WCS_ID': wcsname, 'EXTVER': extver, -                         'WCS_key': wcs_key} - -            # Ensure that an entry for this WCS is not already in the dest -            # table; if so just skip it -            rowind = find_wcscorr_row(old_table.data, selection) -            if np.any(rowind): -                continue - -            idx += 1 - -            wcs = stwcs.wcsutil.HSTWCS(source, ext=extn, wcskey=wcs_key) -            wcshdr = wcs.wcs2header() - -            # Update selection column values -            for key, val in selection.iteritems(): -                new_table.data.field(key)[idx] = val - -            for key in wcs_keywords: -                if key in new_table.data.names: -                    new_table.data.field(key)[idx] = wcshdr[key + wcs_key] - -            prihdr = source[0].header -            for key in DEFAULT_PRI_KEYS: -                if key in new_table.data.names and prihdr.has_key(key): -                    new_table.data.field(key)[idx] = prihdr[key] - -    # If idx was never incremented, no rows were added, so there's nothing else -    # to do... 
-    if idx < 0: -        return - -    # Now, we need to merge this into the existing table -    rowind = find_wcscorr_row(old_table.data, {'wcs_id':''}) -    old_nrows = np.where(rowind)[0][0] -    new_nrows = new_table.data.shape[0] - -    # check to see if there is room for the new row -    if (old_nrows + new_nrows) > old_table.data.shape[0]-1: -        pad_rows = 2 * new_nrows -        # if not, create a new table with 'pad_rows' new empty rows -        upd_table = pyfits.new_table(old_table.columns,header=old_table.header, -                                     nrows=old_table.data.shape[0]+pad_rows) -    else: -        upd_table = old_table -        pad_rows = 0 -    # Now, add -    for name in old_table.columns.names: -        # reset the default values to ones specific to the row definitions -        for i in range(pad_rows): -            upd_table.data.field(name)[old_nrows+i] = old_table.data.field(name)[-1] -        # Now populate with values from new table -        upd_table.data.field(name)[old_nrows:old_nrows + new_nrows] = \ -                new_table.data.field(name) -    upd_table.header.update('TROWS', old_nrows + new_nrows) -     -    # replace old extension with newly updated table extension -    dest['WCSCORR'] = upd_table - - -def restore_file_from_wcscorr(image, id='OPUS', wcskey=''): -    """ Copies the values of the WCS from the WCSCORR based on ID specified by user. -    The default will be to restore the original OPUS-derived values to the Primary WCS. -    If wcskey is specified, the WCS with that key will be updated instead. -    """ - -    if not isinstance(image, pyfits.HDUList): -        fimg = pyfits.open(image, mode='update') -        close_image = True -    else: -        fimg = image -        close_image = False -    numsci = fileutil.countExtn(fimg) -    wcs_table = fimg['WCSCORR'] -    orig_rows = (wcs_table.data.field('WCS_ID') == 'OPUS') -    # create an HSTWCS object to figure out what WCS keywords need to be updated -    wcsobj = stwcs.wcsutil.HSTWCS(fimg,ext=('sci',1)) -    wcshdr = wcsobj.wcs2header() -    for extn in range(1,numsci+1): -        # find corresponding row from table -        ext_rows = (wcs_table.data.field('EXTVER') == extn) -        erow = np.where(np.logical_and(ext_rows,orig_rows))[0][0] -        for key in wcshdr: -            if key in wcs_table.data.names: # insure that keyword is column in table -                tkey = key - -                if 'orient' in key.lower(): -                    key = 'ORIENT' -                if wcskey == '': -                    skey = key -                else: -                    skey = key[:7]+wcskey -                fimg['sci',extn].header.update(skey,wcs_table.data.field(tkey)[erow]) -        for key in DEFAULT_PRI_KEYS: -            if key in wcs_table.data.names: -                if wcskey == '': -                    pkey = key -                else: -                    pkey = key[:7]+wcskey -                fimg[0].header.update(pkey,wcs_table.data.field(key)[erow]) - -    # close the image now that the update has been completed. -    if close_image: -        fimg.close() - - -def create_wcscorr(descrip=False, numrows=1, padding=0): -    """ -    Return the basic definitions for a WCSCORR table. -    The dtype definitions for the string columns are set to the maximum allowed so -    that all new elements will have the same max size which will be automatically -    truncated to this limit upon updating (if needed). 
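# A short sketch of the intended call sequence for init_wcscorr() and
# update_wcscorr() above (file name hypothetical; module path assumed to be
# stwcs.wcsutil.wcscorr): create the WCSCORR table once, then archive every
# keyed WCS as new rows.

import pyfits
from stwcs.wcsutil import wcscorr

f = pyfits.open('j9irw4b1q_flt.fits', mode='update')
wcscorr.init_wcscorr(f)        # adds a WCSCORR extension if one is not present
wcscorr.update_wcscorr(f)      # appends one row per keyed WCS per SCI extension
f.close()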
- -    The table is initialized with rows corresponding to the OPUS solution -    for all the 'SCI' extensions. -    """ - -    trows = numrows + padding -    # define initialized arrays as placeholders for column data -    # TODO: I'm certain there's an easier way to do this... for example, simply -    # define the column names and formats, then create an empty array using -    # them as a dtype, then create the new table from that array. -    def_float64_zeros = np.array([0.0] * trows, dtype=np.float64) -    def_float64_ones = def_float64_zeros + 1.0 -    def_float_col = {'format': 'D', 'array': def_float64_zeros.copy()} -    def_float1_col = {'format': 'D', 'array':def_float64_ones.copy()} -    def_str40_col = {'format': '40A', -                     'array': np.array([''] * trows, dtype='S40')} -    def_str24_col = {'format': '24A', -                     'array': np.array([''] * trows, dtype='S24')} -    def_int32_col = {'format': 'J', -                     'array': np.array([0]*trows,dtype=np.int32)} - -    # If more columns are needed, simply add their definitions to this list -    col_names = [('CRVAL1',   def_float_col), ('CRVAL2',    def_float_col), -                 ('CRPIX1',   def_float_col), ('CRPIX2',    def_float_col), -                 ('CD1_1',    def_float_col), ('CD1_2',     def_float_col), -                 ('CD2_1',    def_float_col), ('CD2_2',     def_float_col), -                 ('CTYPE1',   def_str24_col), ('CTYPE2',    def_str24_col), -                 ('ORIENTAT', def_float_col), ('PA_V3',     def_float_col), -                 ('Delta_RA', def_float_col), ('Delta_Dec', def_float_col), -                 ('RMS_RA',   def_float_col), ('RMS_Dec',   def_float_col), -                 ('Delta_Orientat', def_float_col), -                 ('Delta_Scale', def_float1_col), -                 ('NMatch',   def_int32_col), ('Catalog',   def_str40_col)] - -    # Define selector columns -    id_col = pyfits.Column(name='WCS_ID', format='40A', -                           array=np.array(['OPUS'] * numrows + [''] * padding, -                                          dtype='S24')) -    extver_col = pyfits.Column(name='EXTVER', format='I', -                               array=np.array(range(1, numrows + 1), -                                              dtype=np.int16)) -    wcskey_col = pyfits.Column(name='WCS_key', format='A', -                               array=np.array(['O'] * numrows + [''] * padding, -                                              dtype='S')) -    # create list of remaining columns to be added to table -    col_list = [id_col, extver_col, wcskey_col] # start with selector columns -     -    for c in col_names: -        cdef = copy.deepcopy(c[1]) -        col_list.append(pyfits.Column(name=c[0], format=cdef['format'], -                        array=cdef['array'])) - -    if descrip: -        col_list.append( -            pyfits.Column(name='Descrip', format='128A', -                          array=np.array( -                              ['Original WCS computed by OPUS'] * numrows, -                              dtype='S128'))) - -    # Now create the new table from the column definitions -    newtab = pyfits.new_table(pyfits.ColDefs(col_list), nrows=trows) -    # The fact that setting .name is necessary should be considered a bug in -    # pyfits. -    # TODO: Make sure this is fixed in pyfits, then remove this -    newtab.name = 'WCSCORR' - -    return newtab - | 
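# A minimal sketch of create_wcscorr() and find_wcscorr_row() in isolation
# (module path assumed to be stwcs.wcsutil.wcscorr): build a small WCSCORR table
# and locate its OPUS row for SCI,1.

import numpy as np
from stwcs.wcsutil import wcscorr

tab = wcscorr.create_wcscorr(numrows=2, padding=4)   # 2 OPUS rows plus 4 blank rows
mask = wcscorr.find_wcscorr_row(tab.data, {'WCS_ID': 'OPUS', 'EXTVER': 1})
rownum = np.where(mask)[0][0]                        # index of the SCI,1 OPUS row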
