summaryrefslogtreecommitdiff
path: root/stwcs/wcsutil
diff options
context:
space:
mode:
Diffstat (limited to 'stwcs/wcsutil')
-rw-r--r--stwcs/wcsutil/__init__.py2
-rw-r--r--stwcs/wcsutil/altwcs.py91
-rw-r--r--stwcs/wcsutil/convertwcs.py53
-rw-r--r--stwcs/wcsutil/getinput.py26
-rw-r--r--stwcs/wcsutil/headerlet.py336
-rw-r--r--stwcs/wcsutil/hstwcs.py205
-rw-r--r--stwcs/wcsutil/instruments.py52
-rw-r--r--stwcs/wcsutil/mappings.py32
-rw-r--r--stwcs/wcsutil/mosaic.py57
-rw-r--r--stwcs/wcsutil/wcscorr.py146
-rw-r--r--stwcs/wcsutil/wcsdiff.py53
11 files changed, 540 insertions, 513 deletions
diff --git a/stwcs/wcsutil/__init__.py b/stwcs/wcsutil/__init__.py
index 65280be..d9d21c5 100644
--- a/stwcs/wcsutil/__init__.py
+++ b/stwcs/wcsutil/__init__.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import, print_function # confidence high
+from __future__ import absolute_import, print_function
from .altwcs import *
from .hstwcs import HSTWCS
diff --git a/stwcs/wcsutil/altwcs.py b/stwcs/wcsutil/altwcs.py
index 5dc7dd6..64cdc87 100644
--- a/stwcs/wcsutil/altwcs.py
+++ b/stwcs/wcsutil/altwcs.py
@@ -1,7 +1,6 @@
-from __future__ import division, print_function # confidence high
-import os
-import string
+from __future__ import absolute_import, division, print_function
+import string
import numpy as np
from astropy import wcs as pywcs
from astropy.io import fits
@@ -9,9 +8,11 @@ from stsci.tools import fileutil as fu
altwcskw = ['WCSAXES', 'CRVAL', 'CRPIX', 'PC', 'CDELT', 'CD', 'CTYPE', 'CUNIT',
'PV', 'PS']
-altwcskw_extra = ['LATPOLE','LONPOLE','RESTWAV','RESTFRQ']
+altwcskw_extra = ['LATPOLE', 'LONPOLE', 'RESTWAV', 'RESTFRQ']
# file operations
+
+
def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
"""
Copy the primary WCS to the header as an alternate WCS
@@ -77,7 +78,7 @@ def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
closefobj(fname, f)
raise KeyError("Wcskey %s is aready used. \
Run archiveWCS() with reusekey=True to overwrite this alternate WCS. \
- Alternatively choose another wcskey with altwcs.available_wcskeys()." %wcskey)
+ Alternatively choose another wcskey with altwcs.available_wcskeys()." % wcskey)
elif wcskey == " ":
# wcsname exists, overwrite it if reuse is True or get the next key
if wcsname.strip() in wcsnames(f[wcsext].header).values():
@@ -89,13 +90,13 @@ def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
wkey = next_wcskey(f[wcsext].header)
elif wkey is None:
closefobj(fname, f)
- raise KeyError("Could not get a valid wcskey from wcsname %s" %wcsname)
+ raise KeyError("Could not get a valid wcskey from wcsname %s" % wcsname)
else:
closefobj(fname, f)
raise KeyError("Wcsname %s is aready used. \
Run archiveWCS() with reusekey=True to overwrite this alternate WCS. \
Alternatively choose another wcskey with altwcs.available_wcskeys() or\
- choose another wcsname." %wcsname)
+ choose another wcsname." % wcsname)
else:
wkey = next_wcskey(f[wcsext].header)
if wcsname.strip():
@@ -103,7 +104,7 @@ def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
else:
# determine which WCSNAME needs to be replicated in archived WCS
wnames = wcsnames(f[wcsext].header)
- if 'O' in wnames: del wnames['O'] # we don't want OPUS/original
+ if 'O' in wnames: del wnames['O'] # we don't want OPUS/original
if len(wnames) > 0:
if ' ' in wnames:
wname = wnames[' ']
@@ -139,15 +140,16 @@ def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", reusekey=False):
f[e].header[wcsnamekey] = wname
try:
- old_wcsname=hwcs.pop('WCSNAME')
+ old_wcsname = hwcs.pop('WCSNAME')
except:
pass
for k in hwcs.keys():
- key = k[:7] + wkey
+ key = k[: 7] + wkey
f[e].header[key] = hwcs[k]
closefobj(fname, f)
+
def restore_from_to(f, fromext=None, toext=None, wcskey=" ", wcsname=" "):
"""
Copy an alternate WCS from one extension as a primary WCS of another extension
@@ -188,14 +190,14 @@ def restore_from_to(f, fromext=None, toext=None, wcskey=" ", wcsname=" "):
raise ValueError("Input parameters problem")
# Interpret input 'ext' value to get list of extensions to process
- #ext = _buildExtlist(fobj, ext)
+ # ext = _buildExtlist(fobj, ext)
if isinstance(toext, str):
toext = [toext]
# the case of an HDUList object in memory without an associated file
- #if fobj.filename() is not None:
+ # if fobj.filename() is not None:
# name = fobj.filename()
simplefits = fu.isFits(fobj)[1] is 'simple'
@@ -221,14 +223,15 @@ def restore_from_to(f, fromext=None, toext=None, wcskey=" ", wcsname=" "):
if not countext:
raise KeyError("File does not have extension with extname %s", fromext)
else:
- for i in range(1, countext+1):
+ for i in range(1, countext + 1):
for toe in toext:
_restore(fobj, fromextnum=i, fromextnam=fromext, toextnum=i, toextnam=toe, ukey=wkey)
if fobj.filename() is not None:
- #fobj.writeto(name)
+ # fobj.writeto(name)
closefobj(f, fobj)
+
def restoreWCS(f, ext, wcskey=" ", wcsname=" "):
"""
Copy a WCS with key "WCSKEY" to the primary WCS
@@ -272,9 +275,6 @@ def restoreWCS(f, ext, wcskey=" ", wcsname=" "):
# the case of an HDUList object in memory without an associated file
- #if fobj.filename() is not None:
- # name = fobj.filename()
-
simplefits = fu.isFits(fobj)[1] is 'simple'
if simplefits:
wcskeyext = 0
@@ -297,6 +297,7 @@ def restoreWCS(f, ext, wcskey=" ", wcsname=" "):
if fobj.filename() is not None:
closefobj(f, fobj)
+
def deleteWCS(fname, ext, wcskey=" ", wcsname=" "):
"""
Delete an alternate WCS defined with wcskey.
@@ -351,7 +352,7 @@ def deleteWCS(fname, ext, wcskey=" ", wcsname=" "):
prexts = []
for i in ext:
hdr = fobj[i].header
- hwcs = readAltWCS(fobj,i,wcskey=wkey)
+ hwcs = readAltWCS(fobj, i, wcskey=wkey)
if hwcs is None:
continue
for k in hwcs[::-1]:
@@ -363,6 +364,7 @@ def deleteWCS(fname, ext, wcskey=" ", wcsname=" "):
print("Did not find WCS with key %s in any of the extensions" % wkey)
closefobj(fname, fobj)
+
def _buildExtlist(fobj, ext):
"""
Utility function to interpret the provided value of 'ext' and return a list
@@ -378,8 +380,8 @@ def _buildExtlist(fobj, ext):
If a string is provided, it should specify the EXTNAME of extensions
with WCSs to be archived
"""
- if not isinstance(ext,list):
- if isinstance(ext,str):
+ if not isinstance(ext, list):
+ if isinstance(ext, str):
extstr = ext
ext = []
for extn in range(1, len(fobj)):
@@ -391,6 +393,7 @@ def _buildExtlist(fobj, ext):
raise KeyError("Valid extensions in 'ext' parameter need to be specified.")
return ext
+
def _restore(fobj, ukey, fromextnum,
toextnum=None, fromextnam=None, toextnam=None, verbose=True):
"""
@@ -415,7 +418,7 @@ def _restore(fobj, ukey, fromextnum,
if toextnam:
toextension = (toextnam, toextnum)
else:
- toextension =toextnum
+ toextension = toextnum
else:
toextension = fromextension
@@ -445,34 +448,38 @@ def _restore(fobj, ukey, fromextnum,
fobj[toextension].header['TDDALPHA'] = 0.0
fobj[toextension].header['TDDBETA'] = 0.0
if 'ORIENTAT' in fobj[toextension].header:
- norient = np.rad2deg(np.arctan2(hwcs['CD1_2'+'%s' %ukey], hwcs['CD2_2'+'%s' %ukey]))
+ norient = np.rad2deg(np.arctan2(hwcs['CD1_2' + '%s' % ukey], hwcs['CD2_2' + '%s' % ukey]))
fobj[toextension].header['ORIENTAT'] = norient
# Reset 2014 TDD keywords prior to computing new values (if any are computed)
- for kw in ['TDD_CYA','TDD_CYB','TDD_CXA','TDD_CXB']:
+ for kw in ['TDD_CYA', 'TDD_CYB', 'TDD_CXA', 'TDD_CXB']:
if kw in fobj[toextension].header:
fobj[toextension].header[kw] = 0.0
-#header operations
+
+# header operations
+
+
def _check_headerpars(fobj, ext):
if not isinstance(fobj, fits.Header) and not isinstance(fobj, fits.HDUList) \
- and not isinstance(fobj, str):
+ and not isinstance(fobj, str):
raise ValueError("Expected a file name, a file object or a header\n")
if not isinstance(fobj, fits.Header):
- #raise ValueError("Expected a valid ext parameter when input is a file")
if not isinstance(ext, int) and not isinstance(ext, tuple):
raise ValueError("Expected ext to be a number or a tuple, e.g. ('SCI', 1)\n")
+
def _getheader(fobj, ext):
if isinstance(fobj, str):
- hdr = fits.getheader(fobj,ext)
+ hdr = fits.getheader(fobj, ext)
elif isinstance(fobj, fits.Header):
hdr = fobj
else:
hdr = fobj[ext].header
return hdr
-def readAltWCS(fobj, ext, wcskey=' ',verbose=False):
+
+def readAltWCS(fobj, ext, wcskey=' ', verbose=False):
"""
Reads in alternate primary WCS from specified extension.
@@ -495,12 +502,12 @@ def readAltWCS(fobj, ext, wcskey=' ',verbose=False):
if isinstance(fobj, str):
fobj = fits.open(fobj)
- hdr = _getheader(fobj,ext)
+ hdr = _getheader(fobj, ext)
try:
nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey)
except KeyError:
if verbose:
- print('readAltWCS: Could not read WCS with key %s' %wcskey)
+ print('readAltWCS: Could not read WCS with key %s' % wcskey)
print(' Skipping %s[%s]' % (fobj.filename(), str(ext)))
return None
hwcs = nwcs.to_header()
@@ -510,7 +517,8 @@ def readAltWCS(fobj, ext, wcskey=' ',verbose=False):
return hwcs
-def convertAltWCS(fobj,ext,oldkey=" ",newkey=' '):
+
+def convertAltWCS(fobj, ext, oldkey=" ", newkey=' '):
"""
Translates the alternate/primary WCS with one key to an alternate/primary WCS with
another key.
@@ -534,7 +542,7 @@ def convertAltWCS(fobj,ext,oldkey=" ",newkey=' '):
hdr: `astropy.io.fits.Header`
header object with keywords renamed from oldkey to newkey
"""
- hdr = readAltWCS(fobj,ext,wcskey=oldkey)
+ hdr = readAltWCS(fobj, ext, wcskey=oldkey)
if hdr is None:
return None
# Converting WCS to new key
@@ -543,10 +551,11 @@ def convertAltWCS(fobj,ext,oldkey=" ",newkey=' '):
cname = card
else:
cname = card.rstrip(oldkey)
- hdr.rename_key(card,cname+newkey,force=True)
+ hdr.rename_key(card, cname + newkey, force=True)
return hdr
+
def wcskeys(fobj, ext=None):
"""
Returns a list of characters used in the header for alternate
@@ -565,11 +574,12 @@ def wcskeys(fobj, ext=None):
names = hdr["WCSNAME*"]
d = []
for key in names:
- wkey = key.replace('WCSNAME','')
+ wkey = key.replace('WCSNAME', '')
if wkey == '': wkey = ' '
d.append(wkey)
return d
+
def wcsnames(fobj, ext=None):
"""
Returns a dictionary of wcskey: WCSNAME pairs
@@ -588,11 +598,12 @@ def wcsnames(fobj, ext=None):
names = hdr["WCSNAME*"]
d = {}
for keyword, value in names.items():
- wkey = keyword.replace('WCSNAME','')
+ wkey = keyword.replace('WCSNAME', '')
if wkey == '': wkey = ' '
d[wkey] = value
return d
+
def available_wcskeys(fobj, ext=None):
"""
Returns a list of characters which are not used in the header
@@ -618,6 +629,7 @@ def available_wcskeys(fobj, ext=None):
[all_keys.remove(key) for key in used_keys]
return all_keys
+
def next_wcskey(fobj, ext=None):
"""
Returns next available character to be used for an alternate WCS
@@ -638,6 +650,7 @@ def next_wcskey(fobj, ext=None):
else:
return None
+
def getKeyFromName(header, wcsname):
"""
If WCSNAME is found in header, return its key, else return
@@ -663,6 +676,7 @@ def getKeyFromName(header, wcsname):
wkey = None
return wkey
+
def pc2cd(hdr, key=' '):
"""
Convert a CD matrix to a CD matrix.
@@ -690,6 +704,7 @@ def pc2cd(hdr, key=' '):
hdr['CD{0}{1}'.format(c, key)] = val
return hdr
+
def _parpasscheck(fobj, ext, wcskey, fromext=None, toext=None, reusekey=False):
"""
Check input parameters to altwcs functions
@@ -722,7 +737,7 @@ def _parpasscheck(fobj, ext, wcskey, fromext=None, toext=None, reusekey=False):
return False
if not isinstance(ext, int) and not isinstance(ext, tuple) \
- and not isinstance(ext,str) \
+ and not isinstance(ext, str) \
and not isinstance(ext, list) and ext is not None:
print("Ext must be integer, tuple, string,a list of int extension numbers, \n\
or a list of tuples representing a fits extension, for example ('sci', 1).")
@@ -733,7 +748,7 @@ def _parpasscheck(fobj, ext, wcskey, fromext=None, toext=None, reusekey=False):
return False
if not isinstance(toext, list) and not isinstance(toext, str) and \
- toext is not None :
+ toext is not None:
print("toext must be a string or a list of strings representing extname")
return False
@@ -743,6 +758,7 @@ def _parpasscheck(fobj, ext, wcskey, fromext=None, toext=None, reusekey=False):
return True
+
def closefobj(fname, f):
"""
Functions in this module accept as input a file name or a file object.
@@ -752,6 +768,7 @@ def closefobj(fname, f):
if isinstance(fname, str):
f.close()
+
def mapFitsExt2HDUListInd(fname, extname):
"""
Map FITS extensions with 'EXTNAME' to HDUList indexes.
diff --git a/stwcs/wcsutil/convertwcs.py b/stwcs/wcsutil/convertwcs.py
index a384eb1..e47a829 100644
--- a/stwcs/wcsutil/convertwcs.py
+++ b/stwcs/wcsutil/convertwcs.py
@@ -1,4 +1,5 @@
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function
+from astropy.io import fits
try:
import stwcs
from stwcs import wcsutil
@@ -7,12 +8,12 @@ except:
from stsci.tools import fileutil
-OPUS_WCSKEYS = ['OCRVAL1','OCRVAL2','OCRPIX1','OCRPIX2',
- 'OCD1_1','OCD1_2','OCD2_1','OCD2_2',
- 'OCTYPE1','OCTYPE2']
+OPUS_WCSKEYS = ['OCRVAL1', 'OCRVAL2', 'OCRPIX1', 'OCRPIX2',
+ 'OCD1_1', 'OCD1_2', 'OCD2_1', 'OCD2_2',
+ 'OCTYPE1', 'OCTYPE2']
-def archive_prefix_OPUS_WCS(fobj,extname='SCI'):
+def archive_prefix_OPUS_WCS(fobj, extname='SCI'):
""" Identifies WCS keywords which were generated by OPUS and archived
using a prefix of 'O' for all 'SCI' extensions in the file
@@ -28,43 +29,42 @@ def archive_prefix_OPUS_WCS(fobj,extname='SCI'):
print('=====================')
raise ImportError
-
closefits = False
- if isinstance(fobj,str):
+ if isinstance(fobj, str):
# A filename was provided as input
- fobj = fits.open(fobj,mode='update')
- closefits=True
+ fobj = fits.open(fobj, mode='update')
+ closefits = True
# Define the header
- ext = ('sci',1)
+ ext = ('sci', 1)
hdr = fobj[ext].header
numextn = fileutil.countExtn(fobj)
extlist = []
- for e in range(1,numextn+1):
- extlist.append(('sci',e))
+ for e in range(1, numextn + 1):
+ extlist.append(('sci', e))
# Insure that the 'O' alternate WCS is present
if 'O' not in wcsutil.wcskeys(hdr):
# if not, archive the Primary WCS as the default OPUS WCS
- wcsutil.archiveWCS(fobj,extlist, wcskey='O', wcsname='OPUS')
+ wcsutil.archiveWCS(fobj, extlist, wcskey='O', wcsname='OPUS')
# find out how many SCI extensions are in the image
- numextn = fileutil.countExtn(fobj,extname=extname)
+ numextn = fileutil.countExtn(fobj, extname=extname)
if numextn == 0:
extname = 'PRIMARY'
# create HSTWCS object from PRIMARY WCS
- wcsobj = wcsutil.HSTWCS(fobj,ext=ext,wcskey='O')
+ wcsobj = wcsutil.HSTWCS(fobj, ext=ext, wcskey='O')
# get list of WCS keywords
wcskeys = list(wcsobj.wcs2header().keys())
# For each SCI extension...
- for e in range(1,numextn+1):
+ for e in range(1, numextn + 1):
# Now, look for any WCS keywords with a prefix of 'O'
for key in wcskeys:
- okey = 'O'+key[:7]
- hdr = fobj[(extname,e)].header
+ okey = 'O' + key[: 7]
+ hdr = fobj[(extname, e)].header
if okey in hdr:
# Update alternate WCS keyword with prefix-O OPUS keyword value
hdr[key] = hdr[okey]
@@ -72,7 +72,8 @@ def archive_prefix_OPUS_WCS(fobj,extname='SCI'):
if closefits:
fobj.close()
-def create_prefix_OPUS_WCS(fobj,extname='SCI'):
+
+def create_prefix_OPUS_WCS(fobj, extname='SCI'):
""" Creates alternate WCS with a prefix of 'O' for OPUS generated WCS values
to work with old MultiDrizzle.
@@ -91,10 +92,10 @@ def create_prefix_OPUS_WCS(fobj,extname='SCI'):
owcskeys = OPUS_WCSKEYS
closefits = False
- if isinstance(fobj,str):
+ if isinstance(fobj, str):
# A filename was provided as input
fobj = fits.open(fobj, mode='update')
- closefits=True
+ closefits = True
else:
# check to make sure this FITS obj has been opened in update mode
if fobj.fileinfo(0)['filemode'] != 'update':
@@ -102,16 +103,16 @@ def create_prefix_OPUS_WCS(fobj,extname='SCI'):
raise IOError
# check for existance of O-prefix WCS
- if owcskeys[0] not in fobj['sci',1].header:
+ if owcskeys[0] not in fobj['sci', 1].header:
# find out how many SCI extensions are in the image
- numextn = fileutil.countExtn(fobj,extname=extname)
+ numextn = fileutil.countExtn(fobj, extname=extname)
if numextn == 0:
extname = ''
- for extn in range(1,numextn+1):
- hdr = fobj[(extname,extn)].header
+ for extn in range(1, numextn + 1):
+ hdr = fobj[(extname, extn)].header
for okey in owcskeys:
- hdr[okey] = hdr[okey[1:]+'O']
+ hdr[okey] = hdr[okey[1: ] + 'O']
# Close FITS image if we had to open it...
if closefits:
diff --git a/stwcs/wcsutil/getinput.py b/stwcs/wcsutil/getinput.py
index 8ee1123..c8b3b1b 100644
--- a/stwcs/wcsutil/getinput.py
+++ b/stwcs/wcsutil/getinput.py
@@ -1,23 +1,27 @@
+from __future__ import absolute_import, division, print_function
+
from astropy.io import fits
from stsci.tools import irafglob, fileutil, parseinput
+#from . import HSTWCS
+
def parseSingleInput(f=None, ext=None):
if isinstance(f, str):
# create an HSTWCS object from a filename
- if ext != None:
+ if ext is not None:
filename = f
- if isinstance(ext,tuple):
+ if isinstance(ext, tuple):
if ext[0] == '':
- extnum = ext[1] # handle ext=('',1)
+ extnum = ext[1] # handle ext=('',1)
else:
extnum = ext
else:
extnum = int(ext)
- elif ext == None:
+ elif ext is None:
filename, ext = fileutil.parseFilename(f)
ext = fileutil.parseExtn(ext)
if ext[0] == '':
- extnum = int(ext[1]) #handle ext=('',extnum)
+ extnum = int(ext[1]) # handle ext=('',extnum)
else:
extnum = ext
phdu = fits.open(filename)
@@ -29,7 +33,7 @@ def parseSingleInput(f=None, ext=None):
elif isinstance(f, fits.HDUList):
phdu = f
- if ext == None:
+ if ext is None:
extnum = 0
else:
extnum = ext
@@ -54,9 +58,9 @@ def parseMultipleInput(input):
filelist, output = parseinput.parseinput(input)
except IOError: raise
elif isinstance(input, list):
- if isinstance(input[0], wcsutil.HSTWCS):
- # a list of HSTWCS objects
- return input
- else:
- filelist = input[:]
+ #if isinstance(input[0], HSTWCS):
+ ## a list of HSTWCS objects
+ #return input
+ #else:
+ filelist = input[:]
return filelist
diff --git a/stwcs/wcsutil/headerlet.py b/stwcs/wcsutil/headerlet.py
index 5a980aa..03861df 100644
--- a/stwcs/wcsutil/headerlet.py
+++ b/stwcs/wcsutil/headerlet.py
@@ -34,7 +34,9 @@ from . import wcscorr
from .hstwcs import HSTWCS
from .mappings import basic_wcs
-#### Logging support functions
+# Logging support functions
+
+
class FuncNameLoggingFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
if '%(funcName)s' not in fmt:
@@ -59,8 +61,8 @@ logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
FITS_STD_KW = ['XTENSION', 'BITPIX', 'NAXIS', 'PCOUNT',
- 'GCOUNT', 'EXTNAME', 'EXTVER', 'ORIGIN',
- 'INHERIT', 'DATE', 'IRAF-TLM']
+ 'GCOUNT', 'EXTNAME', 'EXTVER', 'ORIGIN',
+ 'INHERIT', 'DATE', 'IRAF-TLM']
DEFAULT_SUMMARY_COLS = ['HDRNAME', 'WCSNAME', 'DISTNAME', 'AUTHOR', 'DATE',
'SIPNAME', 'NPOLFILE', 'D2IMFILE', 'DESCRIP']
@@ -120,10 +122,13 @@ def with_logging(func):
return func(*args, **kw)
return wrapped
-#### Utility functions
+# Utility functions
+
+
def is_par_blank(par):
return par in ['', ' ', 'INDEF', "None", None]
+
def parse_filename(fname, mode='readonly'):
"""
Interprets the input as either a filename of a file that needs to be opened
@@ -174,6 +179,7 @@ def parse_filename(fname, mode='readonly'):
fname = ''
return fobj, fname, close_fobj
+
def get_headerlet_kw_names(fobj, kw='HDRNAME'):
"""
Returns a list of specified keywords from all HeaderletHDU
@@ -198,6 +204,7 @@ def get_headerlet_kw_names(fobj, kw='HDRNAME'):
return hdrnames
+
def get_header_kw_vals(hdr, kwname, kwval, default=0):
if kwval is None:
if kwname in hdr:
@@ -206,6 +213,7 @@ def get_header_kw_vals(hdr, kwname, kwval, default=0):
kwval = default
return kwval
+
@with_logging
def find_headerlet_HDUs(fobj, hdrext=None, hdrname=None, distname=None,
strict=True, logging=False, logmode='w'):
@@ -259,9 +267,9 @@ def find_headerlet_HDUs(fobj, hdrext=None, hdrname=None, distname=None,
hdrlets = []
if hdrext is not None and isinstance(hdrext, int):
- if hdrext in range(len(fobj)): # insure specified hdrext is in fobj
+ if hdrext in range(len(fobj)): # insure specified hdrext is in fobj
if isinstance(fobj[hdrext], fits.hdu.base.NonstandardExtHDU) and \
- fobj[hdrext].header['EXTNAME'] == 'HDRLET':
+ fobj[hdrext].header['EXTNAME'] == 'HDRLET':
hdrlets.append(hdrext)
else:
for ext in fobj:
@@ -280,9 +288,9 @@ def find_headerlet_HDUs(fobj, hdrext=None, hdrname=None, distname=None,
(hdrextnum == ext.header['EXTVER']) and
(hdrextname == ext.header['EXTNAME']))
hdrname_match = ((hdrname is not None) and
- (hdrname == ext.header['HDRNAME']))
+ (hdrname == ext.header['HDRNAME']))
distname_match = ((distname is not None) and
- (distname == ext.header['DISTNAME']))
+ (distname == ext.header['DISTNAME']))
if hdrext_match or hdrname_match or distname_match:
hdrlets.append(fobj.index(ext))
@@ -310,6 +318,7 @@ def find_headerlet_HDUs(fobj, hdrext=None, hdrname=None, distname=None,
return hdrlets
+
def verify_hdrname_is_unique(fobj, hdrname):
"""
Verifies that no other HeaderletHDU extension has the specified hdrname.
@@ -331,6 +340,7 @@ def verify_hdrname_is_unique(fobj, hdrname):
return unique
+
def update_versions(sourcehdr, desthdr):
"""
Update keywords which store version numbers
@@ -344,6 +354,7 @@ def update_versions(sourcehdr, desthdr):
except KeyError:
desthdr[key] = (" ", phdukw[key])
+
def update_ref_files(source, dest):
"""
Update the reference files name in the primary header of 'dest'
@@ -372,8 +383,9 @@ def update_ref_files(source, dest):
phdukw[key] = False
return phdukw
+
def print_summary(summary_cols, summary_dict, pad=2, maxwidth=None, idcol=None,
- output=None, clobber=True, quiet=False ):
+ output=None, clobber=True, quiet=False ):
"""
Print out summary dictionary to STDOUT, and possibly an output file
@@ -404,9 +416,9 @@ def print_summary(summary_cols, summary_dict, pad=2, maxwidth=None, idcol=None,
for row in range(nrows):
if idcol:
outstr += COLUMN_FMT.format(idcol['vals'][row],
- width=idcol['width']+pad)
+ width=idcol['width'] + pad)
for kw in summary_cols:
- val = summary_dict[kw]['vals'][row][:(column_widths[kw]-pad)]
+ val = summary_dict[kw]['vals'][row][:(column_widths[kw] - pad)]
outstr += COLUMN_FMT.format(val, width=column_widths[kw])
outstr += '\n'
if not quiet:
@@ -415,7 +427,7 @@ def print_summary(summary_cols, summary_dict, pad=2, maxwidth=None, idcol=None,
# If specified, write info to separate text file
write_file = False
if output:
- output = fu.osfn(output) # Expand any environment variables in filename
+ output = fu.osfn(output) # Expand any environment variables in filename
write_file = True
if os.path.exists(output):
if clobber:
@@ -430,11 +442,13 @@ def print_summary(summary_cols, summary_dict, pad=2, maxwidth=None, idcol=None,
fout.write(outstr)
fout.close()
-#### Private utility functions
+# Private utility functions
+
+
def _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
- sipname, npolfile, d2imfile,
- nmatch,catalog, wcskey,
- author, descrip, history):
+ sipname, npolfile, d2imfile,
+ nmatch, catalog, wcskey,
+ author, descrip, history):
# convert input values into valid FITS kw values
if author is None:
author = ''
@@ -447,7 +461,7 @@ def _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
npolname, npolfile = utils.build_npolname(fobj, npolfile)
logger.info("Setting npolfile value to %s" % npolname)
- d2imname, d2imfile = utils.build_d2imname(fobj,d2imfile)
+ d2imname, d2imfile = utils.build_d2imname(fobj, d2imfile)
logger.info("Setting d2imfile value to %s" % d2imname)
distname = utils.build_distname(sipname, npolname, d2imname)
@@ -461,23 +475,14 @@ def _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
else:
history = ''
- rms_ra = fobj[wcsext].header.get("CRDER1"+wcskey, 0)
- rms_dec = fobj[wcsext].header.get("CRDER2"+wcskey, 0)
+ rms_ra = fobj[wcsext].header.get("CRDER1" + wcskey, 0)
+ rms_dec = fobj[wcsext].header.get("CRDER2" + wcskey, 0)
if not nmatch:
- nmatch = fobj[wcsext].header.get("NMATCH"+wcskey, 0)
+ nmatch = fobj[wcsext].header.get("NMATCH" + wcskey, 0)
if not catalog:
- catalog = fobj[wcsext].header.get('CATALOG'+wcskey, "")
+ catalog = fobj[wcsext].header.get('CATALOG' + wcskey, "")
# get the version of STWCS used to create the WCS of the science file.
- #try:
- #upwcsver = fobj[0].header.cards[fobj[0].header.index('UPWCSVER')]
- #except KeyError:
- #upwcsver = pyfits.Card("UPWCSVER", " ",
- #"Version of STWCS used to update the WCS")
- #try:
- #pywcsver = fobj[0].header.cards[fobj[0].header.index('PYWCSVER')]
- #except KeyError:
- #pywcsver = pyfits.Card("PYWCSVER", " ",
- #"Version of PYWCS used to update the WCS")
+
upwcsver = fobj[0].header.get('UPWCSVER', "")
pywcsver = fobj[0].header.get('PYWCSVER', "")
# build Primary HDU
@@ -495,7 +500,7 @@ def _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
phdu.header['D2IMFILE'] = (d2imfile,
'origin of detector to image correction')
phdu.header['IDCTAB'] = (idctab,
- 'origin of Polynomial Distortion')
+ 'origin of Polynomial Distortion')
phdu.header['AUTHOR'] = (author, 'headerlet created by this user')
phdu.header['DESCRIP'] = (descrip,
'Short description of headerlet solution')
@@ -526,7 +531,9 @@ def _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
return phdu
-#### Public Interface functions
+# Public Interface functions
+
+
@with_logging
def extract_headerlet(filename, output, extnum=None, hdrname=None,
clobber=False, logging=True):
@@ -611,11 +618,11 @@ def extract_headerlet(filename, output, extnum=None, hdrname=None,
@with_logging
def write_headerlet(filename, hdrname, output=None, sciext='SCI',
- wcsname=None, wcskey=None, destim=None,
- sipname=None, npolfile=None, d2imfile=None,
- author=None, descrip=None, history=None,
- nmatch=None, catalog=None,
- attach=True, clobber=False, logging=False):
+ wcsname=None, wcskey=None, destim=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ nmatch=None, catalog=None,
+ attach=True, clobber=False, logging=False):
"""
Save a WCS as a headerlet FITS file.
@@ -733,22 +740,22 @@ def write_headerlet(filename, hdrname, output=None, sciext='SCI',
# Interpret sciext input for this file
if isinstance(sciext, int):
- sciextlist = [sciext] # allow for specification of simple FITS header
+ sciextlist = [sciext] # allow for specification of simple FITS header
elif isinstance(sciext, str):
numsciext = countExtn(fobj, sciext)
if numsciext > 0:
- sciextlist = [tuple((sciext,i)) for i in range(1, numsciext+1)]
+ sciextlist = [tuple((sciext, i)) for i in range(1, numsciext + 1)]
else:
sciextlist = [0]
elif isinstance(sciext, list):
sciextlist = sciext
else:
- errstr = "Expected sciext to be a list of FITS extensions with science data\n"+\
- " a valid EXTNAME string, or an integer."
+ errstr = "Expected sciext to be a list of FITS extensions with science data\n" + \
+ " a valid EXTNAME string, or an integer."
logger.critical(errstr)
raise ValueError
- wnames = altwcs.wcsnames(fobj,ext=sciextlist[0])
+ wnames = altwcs.wcsnames(fobj, ext=sciextlist[0])
# Insure that WCSCORR table has been created with all original
# WCS's recorded prior to adding the headerlet WCS
@@ -756,7 +763,7 @@ def write_headerlet(filename, hdrname, output=None, sciext='SCI',
if wcsname is None:
scihdr = fobj[sciextlist[0]].header
- wname = scihdr['wcsname'+wcskey]
+ wname = scihdr['wcsname' + wcskey]
else:
wname = wcsname
if hdrname in [None, ' ', '']:
@@ -764,17 +771,17 @@ def write_headerlet(filename, hdrname, output=None, sciext='SCI',
logger.critical('Creating the headerlet from image %s' % fname)
hdrletobj = create_headerlet(fobj, sciext=sciextlist,
- wcsname=wname, wcskey=wcskey,
- hdrname=hdrname,
- sipname=sipname, npolfile=npolfile,
- d2imfile=d2imfile, author=author,
- descrip=descrip, history=history,
- nmatch=nmatch, catalog=catalog,
- logging=False)
+ wcsname=wname, wcskey=wcskey,
+ hdrname=hdrname,
+ sipname=sipname, npolfile=npolfile,
+ d2imfile=d2imfile, author=author,
+ descrip=descrip, history=history,
+ nmatch=nmatch, catalog=catalog,
+ logging=False)
if attach:
# Check to see whether or not a HeaderletHDU with
- #this hdrname already exists
+ # this hdrname already exists
hdrnames = get_headerlet_kw_names(fobj)
if hdrname not in hdrnames:
hdrlet_hdu = HeaderletHDU.fromheaderlet(hdrletobj)
@@ -810,14 +817,15 @@ def write_headerlet(filename, hdrname, output=None, sciext='SCI',
outname = output
if not outname.endswith('.fits'):
- outname = '{0}_{1}_hlet.fits'.format(frootname,outname)
+ outname = '{0}_{1}_hlet.fits'.format(frootname, outname)
# If user specifies an output filename for headerlet, write it out
hdrletobj.tofile(outname, clobber=clobber)
- logger.critical( 'Created Headerlet file %s ' % outname)
+ logger.critical('Created Headerlet file %s ' % outname)
del hdrletobj
+
@with_logging
def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
wcskey=" ", wcsname=None,
@@ -856,7 +864,8 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
if " ", use the primary (default)
if None use wcsname
wcsname: string or None
- if wcskey is None use wcsname specified here to choose an alternate WCS for the headerlet
+ if wcskey is None use wcsname specified here to choose an alternate WCS
+ for the headerlet
sipname: string or None (default)
Name of unique file where the polynomial distortion coefficients were
read from. If None, the behavior is:
@@ -935,7 +944,7 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
if not wcsname:
# User did not specify a value for 'wcsname'
if wcsnamekw in fobj[wcsext].header:
- #check if there's a WCSNAME for this wcskey in the header
+ # check if there's a WCSNAME for this wcskey in the header
wcsname = fobj[wcsext].header[wcsnamekw]
logger.info("Setting wcsname from header[%s] to %s" % (wcsnamekw, wcsname))
else:
@@ -973,8 +982,9 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
wkeys = altwcs.wcskeys(fobj, ext=wcsext)
if wcskey != ' ':
if wcskey not in wkeys:
- logger.critical('No WCS with wcskey=%s found in extension %s. Skipping...' % (wcskey, str(wcsext)))
- raise ValueError("No WCS with wcskey=%s found in extension %s. Skipping...' % (wcskey, str(wcsext))")
+ mess = "Skipping extension {0} - no WCS with wcskey={1} found.".format(wcsext, wcskey)
+ logger.critical(mess)
+ raise ValueError(mess)
# get remaining required keywords
if destim is None:
@@ -1005,13 +1015,11 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
logger.critical(message)
raise KeyError
-
-
hdul = []
phdu = _create_primary_HDU(fobj, fname, wcsext, destim, hdrname, wcsname,
- sipname, npolfile, d2imfile,
- nmatch, catalog, wcskey,
- author, descrip, history)
+ sipname, npolfile, d2imfile,
+ nmatch, catalog, wcskey,
+ author, descrip, history)
hdul.append(phdu)
wcsdvarr_extns = []
"""
@@ -1100,14 +1108,6 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
whdu.update_ext_version(ihdu.header['D2IM2.EXTVER'])
hdul.append(whdu)
-
- #if hwcs.det2im1 or hwcs.det2im2:
- #try:
- #darr = hdul[('D2IMARR', 1)]
- #except KeyError:
- #whdu = whdul[('D2IMARR')]
- #whdu.update_ext_version(1)
- #hdul.append(whdu)
if close_file:
fobj.close()
@@ -1115,9 +1115,10 @@ def create_headerlet(filename, sciext='SCI', hdrname=None, destim=None,
hlet.init_attrs()
return hlet
+
@with_logging
def apply_headerlet_as_primary(filename, hdrlet, attach=True, archive=True,
- force=False, logging=False, logmode='a'):
+ force=False, logging=False, logmode='a'):
"""
Apply headerlet 'hdrfile' to a science observation 'destfile' as the primary WCS
@@ -1146,18 +1147,19 @@ def apply_headerlet_as_primary(filename, hdrlet, attach=True, archive=True,
hdrlet = [hdrlet]
if len(hdrlet) != len(filename):
logger.critical("Filenames must have matching headerlets. "
- "{0:d} filenames and {1:d} headerlets specified".format(len(filename),len(hdrlet)))
+ "{0:d} filenames and {1:d} headerlets specified".format(len(filename),
+ len(hdrlet)))
- for fname,h in zip(filename,hdrlet):
- print("Applying {0} as Primary WCS to {1}".format(h,fname))
+ for fname, h in zip(filename, hdrlet):
+ print("Applying {0} as Primary WCS to {1}".format(h, fname))
hlet = Headerlet.fromfile(h, logging=logging, logmode=logmode)
hlet.apply_as_primary(fname, attach=attach, archive=archive,
- force=force)
+ force=force)
@with_logging
def apply_headerlet_as_alternate(filename, hdrlet, attach=True, wcskey=None,
- wcsname=None, logging=False, logmode='w'):
+ wcsname=None, logging=False, logmode='w'):
"""
Apply headerlet to a science observation as an alternate WCS
@@ -1188,13 +1190,14 @@ def apply_headerlet_as_alternate(filename, hdrlet, attach=True, wcskey=None,
hdrlet = [hdrlet]
if len(hdrlet) != len(filename):
logger.critical("Filenames must have matching headerlets. "
- "{0:d} filenames and {1:d} headerlets specified".format(len(filename),len(hdrlet)))
+ "{0:d} filenames and {1:d} headerlets specified".format(len(filename),
+ len(hdrlet)))
- for fname,h in zip(filename,hdrlet):
- print('Applying {0} as an alternate WCS to {1}'.format(h,fname))
+ for fname, h in zip(filename, hdrlet):
+ print('Applying {0} as an alternate WCS to {1}'.format(h, fname))
hlet = Headerlet.fromfile(h, logging=logging, logmode=logmode)
hlet.apply_as_alternate(fname, attach=attach,
- wcsname=wcsname, wcskey=wcskey)
+ wcsname=wcsname, wcskey=wcskey)
@with_logging
@@ -1218,12 +1221,13 @@ def attach_headerlet(filename, hdrlet, logging=False, logmode='a'):
hdrlet = [hdrlet]
if len(hdrlet) != len(filename):
logger.critical("Filenames must have matching headerlets. "
- "{0:d} filenames and {1:d} headerlets specified".format(len(filename),len(hdrlet)))
+ "{0:d} filenames and {1:d} headerlets specified".format(len(filename),
+ len(hdrlet)))
- for fname,h in zip(filename,hdrlet):
- print('Attaching {0} as Headerlet extension to {1}'.format(h,fname))
+ for fname, h in zip(filename, hdrlet):
+ print('Attaching {0} as Headerlet extension to {1}'.format(h, fname))
hlet = Headerlet.fromfile(h, logging=logging, logmode=logmode)
- hlet.attach_to_file(fname,archive=True)
+ hlet.attach_to_file(fname, archive=True)
@with_logging
@@ -1262,12 +1266,13 @@ def delete_headerlet(filename, hdrname=None, hdrext=None, distname=None,
filename = [filename]
for f in filename:
- print("Deleting Headerlet from ",f)
+ print("Deleting Headerlet from ", f)
_delete_single_headerlet(f, hdrname=hdrname, hdrext=hdrext,
- distname=distname, logging=logging, logmode='a')
+ distname=distname, logging=logging, logmode='a')
+
def _delete_single_headerlet(filename, hdrname=None, hdrext=None, distname=None,
- logging=False, logmode='w'):
+ logging=False, logmode='w'):
"""
Deletes HeaderletHDU(s) from a SINGLE science file
@@ -1297,7 +1302,7 @@ def _delete_single_headerlet(filename, hdrname=None, hdrext=None, distname=None,
logmode: 'a' or 'w'
"""
hdrlet_ind = find_headerlet_HDUs(filename, hdrname=hdrname, hdrext=hdrext,
- distname=distname, logging=logging, logmode='a')
+ distname=distname, logging=logging, logmode='a')
if len(hdrlet_ind) == 0:
message = """
No HDUs deleted... No Headerlet HDUs found with '
@@ -1404,8 +1409,8 @@ def headerlet_summary(filename, columns=None, pad=2, maxwidth=None,
# Print out the summary dictionary
print_summary(summary_cols, summary_dict, pad=pad, maxwidth=maxwidth,
- idcol=extnums_col, output=output,
- clobber=clobber, quiet=quiet)
+ idcol=extnums_col, output=output,
+ clobber=clobber, quiet=quiet)
@with_logging
@@ -1452,7 +1457,7 @@ def restore_from_headerlet(filename, hdrname=None, hdrext=None, archive=True,
message = """
Multiple Headerlet extensions found with the same name.
%d Headerlets with "%s" = %s found in %s.
- """% (len(hdrlet_ind), kwerr, kwval, fname)
+ """ % (len(hdrlet_ind), kwerr, kwval, fname)
if close_fobj:
fobj.close()
logger.critical(message)
@@ -1464,7 +1469,7 @@ def restore_from_headerlet(filename, hdrname=None, hdrext=None, archive=True,
if hasattr(fobj[hdrlet_ind[0]], 'hdulist'):
hdrlet = fobj[hdrlet_indx].hdulist
else:
- hdrlet = fobj[hdrlet_indx].headerlet # older convention in PyFITS
+ hdrlet = fobj[hdrlet_indx].headerlet # older convention in PyFITS
# read in the names of the extensions which HeaderletHDU updates
extlist = []
@@ -1503,7 +1508,7 @@ def restore_from_headerlet(filename, hdrname=None, hdrext=None, archive=True,
else:
if 'idctab' in scihdr:
priwcs_hdrname = ''.join(['IDC_',
- utils.extract_rootname(scihdr['idctab'], suffix='_idc')])
+ utils.extract_rootname(scihdr['idctab'], suffix='_idc')])
else:
priwcs_hdrname = 'UNKNOWN'
priwcs_name = priwcs_hdrname
@@ -1513,7 +1518,7 @@ def restore_from_headerlet(filename, hdrname=None, hdrext=None, archive=True,
if archive and priwcs_unique:
if priwcs_unique:
newhdrlet = create_headerlet(fobj, sciext=scihdr['extname'],
- hdrname=priwcs_hdrname)
+ hdrname=priwcs_hdrname)
newhdrlet.attach_to_file(fobj)
#
# copy hdrlet as a primary
@@ -1598,7 +1603,7 @@ def restore_all_with_distname(filename, distname, primary, archive=True,
if hasattr(fobj[primary_ind], 'hdulist'):
primary_hdrlet = fobj[primary_ind].hdulist
else:
- primary_hdrlet = fobj[primary_ind].headerlet # older convention in PyFITS
+ primary_hdrlet = fobj[primary_ind].headerlet # older convention in PyFITS
pri_distname = primary_hdrlet[0].header['distname']
if pri_distname != distname:
if close_fobj:
@@ -1625,7 +1630,7 @@ def restore_all_with_distname(filename, distname, primary, archive=True,
if hasattr(fobj[hlet], 'hdulist'):
hdrlet = fobj[hlet].hdulist
else:
- hdrlet = fobj[hlet].headerlet # older convention in PyFITS
+ hdrlet = fobj[hlet].headerlet # older convention in PyFITS
if hlet == primary_ind:
hdrlet.apply_as_primary(fobj, attach=False,
archive=archive, force=True)
@@ -1641,11 +1646,11 @@ def restore_all_with_distname(filename, distname, primary, archive=True,
@with_logging
def archive_as_headerlet(filename, hdrname, sciext='SCI',
- wcsname=None, wcskey=None, destim=None,
- sipname=None, npolfile=None, d2imfile=None,
- author=None, descrip=None, history=None,
- nmatch=None, catalog=None,
- logging=False, logmode='w'):
+ wcsname=None, wcskey=None, destim=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ nmatch=None, catalog=None,
+ logging=False, logmode='w'):
"""
Save a WCS as a headerlet extension and write it out to a file.
@@ -1735,7 +1740,7 @@ def archive_as_headerlet(filename, hdrname, sciext='SCI',
if wcsname is None:
scihdr = fobj[sciext, 1].header
- wcsname = scihdr['wcsname'+wcskey]
+ wcsname = scihdr['wcsname' + wcskey]
if hdrname in [None, ' ', '']:
hdrname = wcsname
@@ -1745,13 +1750,13 @@ def archive_as_headerlet(filename, hdrname, sciext='SCI',
hdrnames = get_headerlet_kw_names(fobj)
if hdrname not in hdrnames:
hdrletobj = create_headerlet(fobj, sciext=sciext,
- wcsname=wcsname, wcskey=wcskey,
- hdrname=hdrname,
- sipname=sipname, npolfile=npolfile,
- d2imfile=d2imfile, author=author,
- descrip=descrip, history=history,
- nmatch=nmatch, catalog=catalog,
- logging=False)
+ wcsname=wcsname, wcskey=wcskey,
+ hdrname=hdrname,
+ sipname=sipname, npolfile=npolfile,
+ d2imfile=d2imfile, author=author,
+ descrip=descrip, history=history,
+ nmatch=nmatch, catalog=catalog,
+ logging=False)
hlt_hdu = HeaderletHDU.fromheaderlet(hdrletobj)
if destim is not None:
@@ -1771,7 +1776,10 @@ def archive_as_headerlet(filename, hdrname, sciext='SCI',
if close_fobj:
fobj.close()
-#### Headerlet Class definitions
+
+# Headerlet Class definitions
+
+
class Headerlet(fits.HDUList):
"""
A Headerlet class
@@ -1811,9 +1819,9 @@ class Headerlet(fits.HDUList):
self.distname = self[0].header["DISTNAME"]
try:
- self.vafactor = self[("SIPWCS", 1)].header.get("VAFACTOR", 1) #None instead of 1?
+ self.vafactor = self[("SIPWCS", 1)].header.get("VAFACTOR", 1) # None instead of 1?
except (IndexError, KeyError):
- self.vafactor = self[0].header.get("VAFACTOR", 1) #None instead of 1?
+ self.vafactor = self[0].header.get("VAFACTOR", 1) # None instead of 1?
self.author = self[0].header["AUTHOR"]
self.descrip = self[0].header["DESCRIP"]
@@ -1846,7 +1854,7 @@ class Headerlet(fits.HDUList):
init_logging('class Headerlet', level=logging, mode=logmode)
return hlet
- def apply_as_primary(self, fobj, attach=True, archive=True, force=False):
+ def apply_as_primary(self, fobj, attach=True, archive=True, force=False):
"""
Copy this headerlet as a primary WCS to fobj
@@ -1877,7 +1885,8 @@ class Headerlet(fits.HDUList):
if close_dest:
fobj.close()
raise ValueError("Destination name does not match headerlet"
- "Observation %s cannot be updated with headerlet %s" % (fname, self.hdrname))
+ "Observation {0} cannot be updated with"
+ "headerlet {1}".format((fname, self.hdrname)))
# Check to see whether the distortion model in the destination
# matches the distortion model in the headerlet being applied
@@ -1886,7 +1895,7 @@ class Headerlet(fits.HDUList):
dist_models_equal = self.equal_distmodel(dname)
if not dist_models_equal and not force:
raise ValueError("Distortion models do not match"
- " To overwrite the distortion model, set force=True")
+ " To overwrite the distortion model, set force=True")
orig_hlt_hdu = None
numhlt = countExtn(fobj, 'HDRLET')
@@ -1896,15 +1905,14 @@ class Headerlet(fits.HDUList):
# WCS's recorded prior to adding the headerlet WCS
wcscorr.init_wcscorr(fobj)
-
- ### start archive
+ # start archive
# If archive has been specified
- # regardless of whether or not the distortion models are equal...
+ # regardless of whether or not the distortion models are equal...
numsip = countExtn(self, 'SIPWCS')
sciext_list = []
alt_hlethdu = []
- for i in range(1, numsip+1):
+ for i in range(1, numsip + 1):
sipheader = self[('SIPWCS', i)].header
sciext_list.append((sipheader['TG_ENAME'], sipheader['TG_EVER']))
target_ext = sciext_list[0]
@@ -1920,10 +1928,10 @@ class Headerlet(fits.HDUList):
# Create a headerlet for the original Primary WCS data in the file,
# create an HDU from the original headerlet, and append it to
# the file
- orig_hlt = create_headerlet(fobj, sciext=sciext_list, #[target_ext],
- wcsname=wcsname,
- hdrname=hdrname,
- logging=self.logging)
+ orig_hlt = create_headerlet(fobj, sciext=sciext_list, # [target_ext],
+ wcsname=wcsname,
+ hdrname=hdrname,
+ logging=self.logging)
orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt)
numhlt += 1
orig_hlt_hdu.header['EXTVER'] = numhlt
@@ -1931,7 +1939,6 @@ class Headerlet(fits.HDUList):
else:
logger.info("Headerlet with name %s is already attached" % hdrname)
-
if dist_models_equal:
# Use the WCSNAME to determine whether or not to archive
# Primary WCS as altwcs
@@ -1945,8 +1952,8 @@ class Headerlet(fits.HDUList):
else:
if 'idctab' in scihdr:
priwcs_name = ''.join(['IDC_',
- utils.extract_rootname(scihdr['idctab'],
- suffix='_idc')])
+ utils.extract_rootname(scihdr['idctab'],
+ suffix='_idc')])
else:
priwcs_name = 'UNKNOWN'
nextkey = altwcs.next_wcskey(fobj, ext=target_ext)
@@ -1958,11 +1965,11 @@ class Headerlet(fits.HDUList):
if hname != 'OPUS' and hname not in hdrlet_extnames:
# get HeaderletHDU for alternate WCS as well
alt_hlet = create_headerlet(fobj, sciext=sciext_list,
- wcsname=hname, wcskey=wcskey,
- hdrname=hname, sipname=None,
- npolfile=None, d2imfile=None,
- author=None, descrip=None, history=None,
- logging=self.logging)
+ wcsname=hname, wcskey=wcskey,
+ hdrname=hname, sipname=None,
+ npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ logging=self.logging)
numhlt += 1
alt_hlet_hdu = HeaderletHDU.fromheaderlet(alt_hlet)
alt_hlet_hdu.header['EXTVER'] = numhlt
@@ -1970,8 +1977,8 @@ class Headerlet(fits.HDUList):
hdrlet_extnames.append(hname)
self._del_dest_WCS_ext(fobj)
- for i in range(1, numsip+1):
- target_ext = sciext_list[i-1]
+ for i in range(1, numsip + 1):
+ target_ext = sciext_list[i - 1]
self._del_dest_WCS(fobj, target_ext)
sipwcs = HSTWCS(self, ('SIPWCS', i))
idckw = sipwcs._idc2hdr()
@@ -2035,19 +2042,19 @@ class Headerlet(fits.HDUList):
fobj[target_ext].header.extend(priwcs[0].header)
if sipwcs.cpdis1:
- whdu = priwcs[('WCSDVARR', (i-1)*numnpol+1)].copy()
+ whdu = priwcs[('WCSDVARR', (i - 1) * numnpol + 1)].copy()
whdu.update_ext_version(self[('SIPWCS', i)].header['DP1.EXTVER'])
fobj.append(whdu)
if sipwcs.cpdis2:
- whdu = priwcs[('WCSDVARR', i*numnpol)].copy()
+ whdu = priwcs[('WCSDVARR', i * numnpol)].copy()
whdu.update_ext_version(self[('SIPWCS', i)].header['DP2.EXTVER'])
fobj.append(whdu)
- if sipwcs.det2im1: #or sipwcs.det2im2:
- whdu = priwcs[('D2IMARR', (i-1)*numd2im+1)].copy()
+ if sipwcs.det2im1: # or sipwcs.det2im2:
+ whdu = priwcs[('D2IMARR', (i - 1) * numd2im + 1)].copy()
whdu.update_ext_version(self[('SIPWCS', i)].header['D2IM1.EXTVER'])
fobj.append(whdu)
if sipwcs.det2im2:
- whdu = priwcs[('D2IMARR', i*numd2im)].copy()
+ whdu = priwcs[('D2IMARR', i * numd2im)].copy()
whdu.update_ext_version(self[('SIPWCS', i)].header['D2IM2.EXTVER'])
fobj.append(whdu)
@@ -2070,7 +2077,6 @@ class Headerlet(fits.HDUList):
if close_dest:
fobj.close()
-
def apply_as_alternate(self, fobj, attach=True, wcskey=None, wcsname=None):
"""
Copy this headerlet as an alternate WCS to fobj
@@ -2098,17 +2104,19 @@ class Headerlet(fits.HDUList):
if close_dest:
fobj.close()
raise ValueError("Destination name does not match headerlet"
- "Observation %s cannot be updated with headerlet %s" % (fname, self.hdrname))
+ "Observation %s cannot be updated with"
+ "headerlet %s" % (fname, self.hdrname))
# Verify whether this headerlet has the same distortion
- #found in the image being updated
+ # found in the image being updated
dname = self.get_destination_model(fobj)
dist_models_equal = self.equal_distmodel(dname)
if not dist_models_equal:
raise ValueError("Distortion models do not match \n"
"Headerlet: %s \n"
"Destination file: %s\n"
- "attach_to_file() can be used to append this headerlet" %(self.distname, dname))
+ "attach_to_file() can be used to append this headerlet" %
+ (self.distname, dname))
# Insure that WCSCORR table has been created with all original
# WCS's recorded prior to adding the headerlet WCS
@@ -2148,12 +2156,12 @@ class Headerlet(fits.HDUList):
hwcs_header = altwcs.pc2cd(hwcs_header, key=wkey)
for ax in range(1, hwcs.naxis + 1):
hwcs_header['CTYPE{0}{1}'.format(ax, wkey)] = \
- self[('SIPWCS', 1)].header['CTYPE{0}'.format(ax)]
+ self[('SIPWCS', 1)].header['CTYPE{0}'.format(ax)]
fhdr.extend(hwcs_header)
fhdr['WCSNAME' + wkey] = wname
# also update with HDRNAME (a non-WCS-standard kw)
for kw in self.fit_kws:
- #fhdr.insert(wind, pyfits.Card(kw + wkey,
+ # fhdr.insert(wind, pyfits.Card(kw + wkey,
# self[0].header[kw]))
fhdr.append(fits.Card(kw + wkey, self[0].header[kw]))
# Update the WCSCORR table with new rows from the headerlet's WCSs
@@ -2210,7 +2218,7 @@ class Headerlet(fits.HDUList):
fobj.close()
def info(self, columns=None, pad=2, maxwidth=None,
- output=None, clobber=True, quiet=False):
+ output=None, clobber=True, quiet=False):
"""
Prints a summary of this headerlet
The summary includes:
@@ -2240,7 +2248,7 @@ class Headerlet(fits.HDUList):
"""
summary_cols, summary_dict = self.summary(columns=columns)
print_summary(summary_cols, summary_dict, pad=pad, maxwidth=maxwidth,
- idcol=None, output=output, clobber=clobber, quiet=quiet)
+ idcol=None, output=output, clobber=clobber, quiet=quiet)
def summary(self, columns=None):
"""
@@ -2301,7 +2309,7 @@ class Headerlet(fits.HDUList):
HDRNAME.
"""
unique = verify_hdrname_is_unique(dest, self.hdrname)
- logger.debug("verify_hdrname() returned %s"%unique)
+ logger.debug("verify_hdrname() returned %s" % unique)
return unique
def get_destination_model(self, dest):
@@ -2318,7 +2326,7 @@ class Headerlet(fits.HDUList):
else:
destim = dest
dname = destim[0].header['DISTNAME'] if 'distname' in destim[0].header \
- else self.build_distname(dest)
+ else self.build_distname(dest)
if destim_opened:
destim.close()
return dname
@@ -2357,7 +2365,8 @@ class Headerlet(fits.HDUList):
else:
logger.debug("verify_destim() returned False")
logger.critical("Destination name does not match headerlet. "
- "Observation %s cannot be updated with headerlet %s" % (fname, self.hdrname))
+ "Observation %s cannot be updated with"
+ "headerlet %s" % (fname, self.hdrname))
return False
def build_distname(self, dest):
@@ -2377,7 +2386,7 @@ class Headerlet(fits.HDUList):
sipname, idctab = utils.build_sipname(dest, dest, None)
npolname, npolfile = utils.build_npolname(dest, npolfile)
d2imname, d2imfile = utils.build_d2imname(dest, d2imfile)
- dname = utils.build_distname(sipname,npolname,d2imname)
+ dname = utils.build_distname(sipname, npolname, d2imname)
return dname
def tofile(self, fname, destim=None, hdrname=None, clobber=False):
@@ -2418,8 +2427,7 @@ class Headerlet(fits.HDUList):
else:
for idx in range(numext):
# Only delete WCS from extensions which may have WCS keywords
- if ('XTENSION' in dest[idx].header and
- dest[idx].header['XTENSION'] == 'IMAGE'):
+ if ('XTENSION' in dest[idx].header and dest[idx].header['XTENSION'] == 'IMAGE'):
self._remove_d2im(dest[idx])
self._remove_sip(dest[idx])
self._remove_lut(dest[idx])
@@ -2427,12 +2435,7 @@ class Headerlet(fits.HDUList):
self._remove_idc_coeffs(dest[idx])
self._remove_fit_values(dest[idx])
self._remove_ref_files(dest[0])
- """
- if not ext:
- self._remove_alt_WCS(dest, ext=range(numext))
- else:
- self._remove_alt_WCS(dest, ext=ext)
- """
+
def _del_dest_WCS_ext(self, dest):
numwdvarr = countExtn(dest, 'WCSDVARR')
numd2im = countExtn(dest, 'D2IMARR')
@@ -2459,13 +2462,13 @@ class Headerlet(fits.HDUList):
Remove the any existing astrometric fit values from a FITS extension
"""
- logger.debug("Removing astrometric fit values from (%s, %s)"%
+ logger.debug("Removing astrometric fit values from (%s, %s)" %
(ext.name, ext.ver))
dkeys = altwcs.wcskeys(ext.header)
- if 'O' in dkeys: dkeys.remove('O') # Do not remove wcskey='O' values
+ if 'O' in dkeys: dkeys.remove('O') # Do not remove wcskey='O' values
for fitkw in ['NMATCH', 'CATALOG']:
for k in dkeys:
- fkw = (fitkw+k).rstrip()
+ fkw = (fitkw + k).rstrip()
if fkw in ext.header:
del ext.header[fkw]
@@ -2544,7 +2547,7 @@ class Headerlet(fits.HDUList):
dkeys = altwcs.wcskeys(dest[('SCI', 1)].header)
for val in ['O', '', ' ']:
if val in dkeys:
- dkeys.remove(val) # Never delete WCS with wcskey='O'
+ dkeys.remove(val) # Never delete WCS with wcskey='O'
logger.debug("Removing alternate WCSs with keys %s from %s"
% (dkeys, dest.filename()))
@@ -2588,6 +2591,7 @@ class Headerlet(fits.HDUList):
except KeyError:
pass
+
@with_logging
def _idc2hdr(fromhdr, tohdr, towkey=' '):
"""
@@ -2598,7 +2602,7 @@ def _idc2hdr(fromhdr, tohdr, towkey=' '):
coeffs = ['OCX10', 'OCX11', 'OCY10', 'OCY11', 'IDCSCALE']
for c in coeffs:
try:
- tohdr[c+towkey] = fromhdr[c]
+ tohdr[c + towkey] = fromhdr[c]
logger.debug("Copied %s to header")
except KeyError:
continue
@@ -2663,8 +2667,8 @@ def get_extname_extver_list(fobj, sciext):
else:
extlist = sciext[:]
else:
- errstr = "Expected sciext to be a list of FITS extensions with science data\n"+\
- " a valid EXTNAME string, or an integer."
+ errstr = "Expected sciext to be a list of FITS extensions with science data\n" + \
+ " a valid EXTNAME string, or an integer."
logger.critical(errstr)
raise ValueError
return extlist
diff --git a/stwcs/wcsutil/hstwcs.py b/stwcs/wcsutil/hstwcs.py
index bfebcfc..27f6467 100644
--- a/stwcs/wcsutil/hstwcs.py
+++ b/stwcs/wcsutil/hstwcs.py
@@ -1,25 +1,21 @@
-from __future__ import absolute_import, division, print_function # confidence high
+from __future__ import absolute_import, division, print_function
import os
from astropy.wcs import WCS
from astropy.io import fits
-from stwcs.distortion import models, coeff_converter
+from ..distortion import models, coeff_converter
import numpy as np
from stsci.tools import fileutil
-from . import altwcs
+from . import pc2cd
from . import getinput
-from . import mappings
from . import instruments
from .mappings import inst_mappings, ins_spec_kw
-from .mappings import basic_wcs
-__docformat__ = 'restructuredtext'
+__all__ = ['HSTWCS']
-#
-#### Utility functions copied from 'updatewcs.utils' to avoid circular imports
-#
-def extract_rootname(kwvalue,suffix=""):
+
+def extract_rootname(kwvalue, suffix=""):
""" Returns the rootname from a full reference filename
If a non-valid value (any of ['','N/A','NONE','INDEF',None]) is input,
@@ -28,13 +24,13 @@ def extract_rootname(kwvalue,suffix=""):
This function will also replace any 'suffix' specified with a blank.
"""
# check to see whether a valid kwvalue has been provided as input
- if kwvalue.strip() in ['','N/A','NONE','INDEF',None]:
- return 'NONE' # no valid value, so return 'NONE'
+ if kwvalue.strip() in ['', 'N/A', 'NONE', 'INDEF', None]:
+ return 'NONE' # no valid value, so return 'NONE'
# for a valid kwvalue, parse out the rootname
# strip off any environment variable from input filename, if any are given
if '$' in kwvalue:
- fullval = kwvalue[kwvalue.find('$')+1:]
+ fullval = kwvalue[kwvalue.find('$') + 1:]
else:
fullval = kwvalue
# Extract filename without path from kwvalue
@@ -44,12 +40,13 @@ def extract_rootname(kwvalue,suffix=""):
rootname = fileutil.buildNewRootname(fname)
# Now, remove any known suffix from rootname
- rootname = rootname.replace(suffix,'')
+ rootname = rootname.replace(suffix, '')
return rootname.strip()
+
def build_default_wcsname(idctab):
- idcname = extract_rootname(idctab,suffix='_idc')
+ idcname = extract_rootname(idctab, suffix='_idc')
wcsname = 'IDC_' + idcname
return wcsname
@@ -93,12 +90,9 @@ class NoConvergence(Exception):
self.accuracy = kwargs.pop('accuracy', None)
self.niter = kwargs.pop('niter', None)
self.divergent = kwargs.pop('divergent', None)
- self.failed2converge= kwargs.pop('failed2converge', None)
+ self.failed2converge = kwargs.pop('failed2converge', None)
-#
-#### HSTWCS Class definition
-#
class HSTWCS(WCS):
def __init__(self, fobj=None, ext=None, minerr=0.0, wcskey=" "):
@@ -138,7 +132,6 @@ class HSTWCS(WCS):
self.filename = filename
instrument_name = hdr0.get('INSTRUME', 'DEFAULT')
if instrument_name == 'DEFAULT' or instrument_name not in list(inst_mappings.keys()):
- #['IRAF/ARTDATA','',' ','N/A']:
self.instrument = 'DEFAULT'
else:
self.instrument = instrument_name
@@ -189,7 +182,7 @@ class HSTWCS(WCS):
Reads in first order IDCTAB coefficients if present in the header
"""
coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale',
- 'idcv2ref','idcv3ref', 'idctheta']
+ 'idcv2ref', 'idcv3ref', 'idctheta']
for c in coeffs:
self.__setattr__(c, header.get(c, None))
@@ -225,7 +218,7 @@ class HSTWCS(WCS):
pass
else:
- raise KeyError("Unsupported instrument - %s" %self.instrument)
+ raise KeyError("Unsupported instrument - %s" % self.instrument)
def setPscale(self):
"""
@@ -234,7 +227,7 @@ class HSTWCS(WCS):
try:
cd11 = self.wcs.cd[0][0]
cd21 = self.wcs.cd[1][0]
- self.pscale = np.sqrt(np.power(cd11,2)+np.power(cd21,2)) * 3600.
+ self.pscale = np.sqrt(np.power(cd11, 2) + np.power(cd21, 2)) * 3600.
except AttributeError:
if self.wcs.has_cd():
print("This file has a PC matrix. You may want to convert it \n \
@@ -249,7 +242,7 @@ class HSTWCS(WCS):
try:
cd12 = self.wcs.cd[0][1]
cd22 = self.wcs.cd[1][1]
- self.orientat = np.rad2deg(np.arctan2(cd12,cd22))
+ self.orientat = np.rad2deg(np.arctan2(cd12, cd22))
except AttributeError:
if self.wcs.has_cd():
print("This file has a PC matrix. You may want to convert it \n \
@@ -261,7 +254,7 @@ class HSTWCS(WCS):
"""
Updates the CD matrix with a new plate scale
"""
- self.wcs.cd = self.wcs.cd/self.pscale*scale
+ self.wcs.cd = self.wcs.cd / self.pscale * scale
self.setPscale()
def readModel(self, update=False, header=None):
@@ -282,8 +275,8 @@ class HSTWCS(WCS):
CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF,
IDCV2REF, IDCV3REF
"""
- if self.idctab in [None, '', ' ','N/A']:
- #Keyword idctab is not present in header - check for sip coefficients
+ if self.idctab in [None, '', ' ', 'N/A']:
+ # Keyword idctab is not present in header - check for sip coefficients
if header is not None and 'IDCSCALE' in header:
self._readModelFromHeader(header)
else:
@@ -319,7 +312,6 @@ class HSTWCS(WCS):
self.idcmodel = model
-
def readModelFromIDCTAB(self, header=None, update=False):
"""
Read distortion model from idc table.
@@ -334,25 +326,26 @@ class HSTWCS(WCS):
IDCV2REF, IDCV3REF
"""
- if self.date_obs == None:
+ if self.date_obs is None:
print('date_obs not available\n')
self.idcmodel = None
return
- if self.filter1 == None and self.filter2 == None:
+ if self.filter1 is None and self.filter2 is None:
'No filter information available\n'
self.idcmodel = None
return
self.idcmodel = models.IDCModel(self.idctab,
- chip=self.chip, direction='forward', date=self.date_obs,
- filter1=self.filter1, filter2=self.filter2,
- offtab=self.offtab, binned=self.binned)
+ chip=self.chip, direction='forward',
+ date=self.date_obs,
+ filter1=self.filter1, filter2=self.filter2,
+ offtab=self.offtab, binned=self.binned)
if self.ltv1 != 0. or self.ltv2 != 0.:
self.resetLTV()
if update:
- if header==None:
+ if header is None:
print('Update header with IDC model kw requested but header was not provided\n.')
else:
self._updatehdr(header)
@@ -393,7 +386,7 @@ class HSTWCS(WCS):
if not wcskey:
wcskey = self.wcs.alt
if self.wcs.has_cd():
- h = altwcs.pc2cd(h, key=wcskey)
+ h = pc2cd(h, key=wcskey)
if 'wcsname' not in h:
if self.idctab is not None:
@@ -440,13 +433,13 @@ class HSTWCS(WCS):
k - one of 'a', 'b', 'ap', 'bp'
"""
- cards = [] #fits.CardList()
- korder = self.sip.__getattribute__(k+'_order')
- cards.append(fits.Card(keyword=k.upper()+'_ORDER', value=korder))
+ cards = []
+ korder = self.sip.__getattribute__(k + '_order')
+ cards.append(fits.Card(keyword=k.upper() + '_ORDER', value=korder))
coeffs = self.sip.__getattribute__(k)
ind = coeffs.nonzero()
for i in range(len(ind[0])):
- card = fits.Card(keyword=k.upper()+'_'+str(ind[0][i])+'_'+str(ind[1][i]),
+ card = fits.Card(keyword=k.upper() + '_' + str(ind[0][i]) + '_' + str(ind[1][i]),
value=coeffs[ind[0][i], ind[1][i]])
cards.append(card)
return cards
@@ -454,7 +447,7 @@ class HSTWCS(WCS):
def _idc2hdr(self):
# save some of the idc coefficients
coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale']
- cards = [] #fits.CardList()
+ cards = []
for c in coeffs:
try:
val = self.__getattribute__(c)
@@ -711,27 +704,25 @@ adaptive=False, detect_divergence=False, quiet=False)
try:
ra = np.asarray(args[0], dtype=np.float64)
dec = np.asarray(args[1], dtype=np.float64)
- #assert( len(ra.shape) == 1 and len(dec.shape) == 1 )
+ # assert( len(ra.shape) == 1 and len(dec.shape) == 1 )
origin = int(args[2])
vect1D = True
except:
- raise TypeError("When providing three arguments, they must " \
- "be (Ra, Dec, origin) where Ra and Dec are " \
- "Nx1 vectors.")
+ raise TypeError("When providing three arguments, they must "
+ "be (Ra, Dec, origin) where Ra and Dec are "
+ "Nx1 vectors.")
elif nargs == 2:
try:
rd = np.asarray(args[0], dtype=np.float64)
- #assert( rd.shape[1] == 2 )
- ra = rd[:,0]
- dec = rd[:,1]
+ ra = rd[:, 0]
+ dec = rd[:, 1]
origin = int(args[1])
vect1D = False
except:
- raise TypeError("When providing two arguments, they must " \
- "be (RaDec, origin) where RaDec is a Nx2 array.")
+ raise TypeError("When providing two arguments, they must "
+ "be (RaDec, origin) where RaDec is a Nx2 array.")
else:
- raise TypeError("Expected 2 or 3 arguments, {:d} given." \
- .format(nargs))
+ raise TypeError("Expected 2 or 3 arguments, {:d} given.".format(nargs))
# process optional arguments:
accuracy = kwargs.pop('accuracy', 1.0e-4)
@@ -743,8 +734,8 @@ adaptive=False, detect_divergence=False, quiet=False)
#####################################################################
## INITIALIZE ITERATIVE PROCESS: ##
#####################################################################
- x0, y0 = self.wcs_world2pix(ra, dec, origin) # <-- initial approximation
- # (WCS based only)
+ x0, y0 = self.wcs_world2pix(ra, dec, origin) # <-- initial approximation
+ # (WCS based only)
# see if an iterative solution is required (when any of the
# non-CD-matrix corrections are present). If not required
@@ -757,17 +748,17 @@ adaptive=False, detect_divergence=False, quiet=False)
if vect1D:
return [x0, y0]
else:
- return np.dstack([x0,y0])[0]
+ return np.dstack([x0, y0])[0]
- x = x0.copy() # 0-order solution
- y = y0.copy() # 0-order solution
+ x = x0.copy() # 0-order solution
+ y = y0.copy() # 0-order solution
# initial correction:
dx, dy = self.pix2foc(x, y, origin)
# If pix2foc does not apply all the required distortion
# corrections then replace the above line with:
- #r0, d0 = self.all_pix2world(x, y, origin)
- #dx, dy = self.wcs_world2pix(r0, d0, origin )
+ # r0, d0 = self.all_pix2world(x, y, origin)
+ # dx, dy = self.wcs_world2pix(r0, d0, origin )
dx -= x0
dy -= y0
@@ -776,21 +767,21 @@ adaptive=False, detect_divergence=False, quiet=False)
y -= dy
# norn (L2) squared of the correction:
- dn2prev = dx**2+dy**2
- dn2 = dn2prev
+ dn2prev = dx ** 2 + dy ** 2
+ dn2 = dn2prev
# prepare for iterative process
- iterlist = list(range(1, maxiter+1))
- accuracy2 = accuracy**2
- ind = None
- inddiv = None
+ iterlist = list(range(1, maxiter + 1))
+ accuracy2 = accuracy ** 2
+ ind = None
+ inddiv = None
- npts = x.shape[0]
+ npts = x.shape[0]
# turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
- old_over = np.geterr()['over']
- np.seterr(invalid = 'ignore', over = 'ignore')
+ old_over = np.geterr()['over']
+ np.seterr(invalid='ignore', over='ignore')
#####################################################################
## NON-ADAPTIVE ITERATIONS: ##
@@ -805,13 +796,13 @@ adaptive=False, detect_divergence=False, quiet=False)
dx, dy = self.pix2foc(x, y, origin)
# If pix2foc does not apply all the required distortion
# corrections then replace the above line with:
- #r0, d0 = self.all_pix2world(x, y, origin)
- #dx, dy = self.wcs_world2pix(r0, d0, origin )
+ # r0, d0 = self.all_pix2world(x, y, origin)
+ # dx, dy = self.wcs_world2pix(r0, d0, origin )
dx -= x0
dy -= y0
# update norn (L2) squared of the correction:
- dn2 = dx**2+dy**2
+ dn2 = dx ** 2 + dy ** 2
# check for divergence (we do this in two stages
# to optimize performance for the most common
@@ -826,12 +817,12 @@ adaptive=False, detect_divergence=False, quiet=False)
x[ind] -= dx[ind]
y[ind] -= dy[ind]
# switch to adaptive iterations:
- ind, = np.where((dn2 >= accuracy2) & \
- (dn2 <= dn2prev) & np.isfinite(dn2))
+ ind, = np.where((dn2 >= accuracy2) &
+ (dn2 <= dn2prev) & np.isfinite(dn2))
iterlist = iterlist[k:]
adaptive = True
break
- #dn2prev[ind] = dn2[ind]
+ # dn2prev[ind] = dn2[ind]
dn2prev = dn2
# apply correction:
@@ -854,22 +845,22 @@ adaptive=False, detect_divergence=False, quiet=False)
dx[ind], dy[ind] = self.pix2foc(x[ind], y[ind], origin)
# If pix2foc does not apply all the required distortion
# corrections then replace the above line with:
- #r0[ind], d0[ind] = self.all_pix2world(x[ind], y[ind], origin)
- #dx[ind], dy[ind] = self.wcs_world2pix(r0[ind], d0[ind], origin)
+ # r0[ind], d0[ind] = self.all_pix2world(x[ind], y[ind], origin)
+ # dx[ind], dy[ind] = self.wcs_world2pix(r0[ind], d0[ind], origin)
dx[ind] -= x0[ind]
dy[ind] -= y0[ind]
# update norn (L2) squared of the correction:
- dn2 = dx**2+dy**2
+ dn2 = dx ** 2 + dy ** 2
# update indices of elements that still need correction:
if detect_divergence:
ind, = np.where((dn2 >= accuracy2) & (dn2 <= dn2prev))
- #ind = ind[np.where((dn2[ind] >= accuracy2) & (dn2[ind] <= dn2prev))]
+ # ind = ind[np.where((dn2[ind] >= accuracy2) & (dn2[ind] <= dn2prev))]
dn2prev[ind] = dn2[ind]
else:
ind, = np.where(dn2 >= accuracy2)
- #ind = ind[np.where(dn2[ind] >= accuracy2)]
+ # ind = ind[np.where(dn2[ind] >= accuracy2)]
# apply correction:
x[ind] -= dx[ind]
@@ -880,9 +871,9 @@ adaptive=False, detect_divergence=False, quiet=False)
## AND FAILED-TO-CONVERGE POINTS ##
#####################################################################
# Identify diverging and/or invalid points:
- invalid = (((~np.isfinite(y)) | (~np.isfinite(x)) | \
- (~np.isfinite(dn2))) & \
- (np.isfinite(ra)) & (np.isfinite(dec)))
+ invalid = (((~np.isfinite(y)) | (~np.isfinite(x)) |
+ (~np.isfinite(dn2))) &
+ (np.isfinite(ra)) & (np.isfinite(dec)))
# When detect_divergence==False, dn2prev is outdated (it is the
# norm^2 of the very first correction). Still better than nothing...
inddiv, = np.where(((dn2 >= accuracy2) & (dn2 > dn2prev)) | invalid)
@@ -891,7 +882,7 @@ adaptive=False, detect_divergence=False, quiet=False)
# identify points that did not converge within
# 'maxiter' iterations:
if k >= maxiter:
- ind,= np.where((dn2 >= accuracy2) & (dn2 <= dn2prev) & (~invalid))
+ ind, = np.where((dn2 >= accuracy2) & (dn2 <= dn2prev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
@@ -907,50 +898,50 @@ adaptive=False, detect_divergence=False, quiet=False)
sol = [x, y]
err = [np.abs(dx), np.abs(dy)]
else:
- sol = np.dstack( [x, y] )[0]
- err = np.dstack( [np.abs(dx), np.abs(dy)] )[0]
+ sol = np.dstack([x, y] )[0]
+ err = np.dstack([np.abs(dx), np.abs(dy)] )[0]
# restore previous numpy error settings:
- np.seterr(invalid = old_invalid, over = old_over)
+ np.seterr(invalid=old_invalid, over=old_over)
if inddiv is None:
- raise NoConvergence("'HSTWCS.all_world2pix' failed to " \
- "converge to the requested accuracy after {:d} " \
- "iterations.".format(k), best_solution = sol, \
- accuracy = err, niter = k, failed2converge = ind, \
- divergent = None)
+ raise NoConvergence("'HSTWCS.all_world2pix' failed to "
+ "converge to the requested accuracy after {:d} "
+ "iterations.".format(k), best_solution=sol,
+ accuracy=err, niter=k, failed2converge=ind,
+ divergent=None)
else:
- raise NoConvergence("'HSTWCS.all_world2pix' failed to " \
- "converge to the requested accuracy.{0:s}" \
- "After {1:d} iterations, the solution is diverging " \
- "at least for one input point." \
- .format(os.linesep, k), best_solution = sol, \
- accuracy = err, niter = k, failed2converge = ind, \
- divergent = inddiv)
+ raise NoConvergence("'HSTWCS.all_world2pix' failed to "
+ "converge to the requested accuracy.{0:s}"
+ "After {1:d} iterations, the solution is diverging "
+ "at least for one input point."
+ .format(os.linesep, k), best_solution=sol,
+ accuracy=err, niter=k, failed2converge=ind,
+ divergent=inddiv)
#####################################################################
## FINALIZE AND FORMAT DATA FOR RETURN: ##
#####################################################################
# restore previous numpy error settings:
- np.seterr(invalid = old_invalid, over = old_over)
+ np.seterr(invalid=old_invalid, over=old_over)
if vect1D:
return [x, y]
else:
- return np.dstack( [x, y] )[0]
+ return np.dstack([x, y] )[0]
def _updatehdr(self, ext_hdr):
- #kw2add : OCX10, OCX11, OCY10, OCY11
+ # kw2add : OCX10, OCX11, OCY10, OCY11
# record the model in the header for use by pydrizzle
- ext_hdr['OCX10'] = self.idcmodel.cx[1,0]
- ext_hdr['OCX11'] = self.idcmodel.cx[1,1]
- ext_hdr['OCY10'] = self.idcmodel.cy[1,0]
- ext_hdr['OCY11'] = self.idcmodel.cy[1,1]
+ ext_hdr['OCX10'] = self.idcmodel.cx[1, 0]
+ ext_hdr['OCX11'] = self.idcmodel.cx[1, 1]
+ ext_hdr['OCY10'] = self.idcmodel.cy[1, 0]
+ ext_hdr['OCY11'] = self.idcmodel.cy[1, 1]
ext_hdr['IDCSCALE'] = self.idcmodel.refpix['PSCALE']
ext_hdr['IDCTHETA'] = self.idcmodel.refpix['THETA']
ext_hdr['IDCXREF'] = self.idcmodel.refpix['XREF']
ext_hdr['IDCYREF'] = self.idcmodel.refpix['YREF']
- ext_hdr['IDCV2REF'] = self.idcmodel.refpix['V2REF']
+ ext_hdr['IDCV2REF'] = self.idcmodel.refpix['V2REF']
ext_hdr['IDCV3REF'] = self.idcmodel.refpix['V3REF']
def printwcs(self):
@@ -958,8 +949,8 @@ adaptive=False, detect_divergence=False, quiet=False)
Print the basic WCS keywords.
"""
print('WCS Keywords\n')
- print('CD_11 CD_12: %r %r' % (self.wcs.cd[0,0], self.wcs.cd[0,1]))
- print('CD_21 CD_22: %r %r' % (self.wcs.cd[1,0], self.wcs.cd[1,1]))
+ print('CD_11 CD_12: %r %r' % (self.wcs.cd[0, 0], self.wcs.cd[0, 1]))
+ print('CD_21 CD_22: %r %r' % (self.wcs.cd[1, 0], self.wcs.cd[1, 1]))
print('CRVAL : %r %r' % (self.wcs.crval[0], self.wcs.crval[1]))
print('CRPIX : %r %r' % (self.wcs.crpix[0], self.wcs.crpix[1]))
print('NAXIS : %d %d' % (self.naxis1, self.naxis2))
diff --git a/stwcs/wcsutil/instruments.py b/stwcs/wcsutil/instruments.py
index f662513..83fb1bd 100644
--- a/stwcs/wcsutil/instruments.py
+++ b/stwcs/wcsutil/instruments.py
@@ -1,6 +1,5 @@
-from __future__ import absolute_import, division, print_function # confidence high
+from __future__ import absolute_import, division, print_function
-from .mappings import ins_spec_kw
class InstrWCS(object):
"""
@@ -135,13 +134,14 @@ class InstrWCS(object):
self.chip = 1
def set_parity(self):
- self.parity = [[1.0,0.0],[0.0,-1.0]]
+ self.parity = [[1.0, 0.0], [0.0, -1.0]]
def set_detector(self):
# each instrument has a different kw for detector and it can be
# in a different header, so this is to be handled by the instrument classes
self.detector = 'DEFAULT'
+
class ACSWCS(InstrWCS):
"""
get instrument specific kw
@@ -150,7 +150,7 @@ class ACSWCS(InstrWCS):
def __init__(self, hdr0, hdr):
self.primhdr = hdr0
self.exthdr = hdr
- InstrWCS.__init__(self,hdr0, hdr)
+ InstrWCS.__init__(self, hdr0, hdr)
self.set_ins_spec_kw()
def set_detector(self):
@@ -161,9 +161,9 @@ class ACSWCS(InstrWCS):
raise
def set_parity(self):
- parity = {'WFC':[[1.0,0.0],[0.0,-1.0]],
- 'HRC':[[-1.0,0.0],[0.0,1.0]],
- 'SBC':[[-1.0,0.0],[0.0,1.0]]}
+ parity = {'WFC': [[1.0, 0.0], [0.0, -1.0]],
+ 'HRC': [[-1.0, 0.0], [0.0, 1.0]],
+ 'SBC': [[-1.0, 0.0], [0.0, 1.0]]}
if self.detector not in list(parity.keys()):
parity = InstrWCS.set_parity(self)
@@ -173,24 +173,22 @@ class ACSWCS(InstrWCS):
class WFPC2WCS(InstrWCS):
-
def __init__(self, hdr0, hdr):
self.primhdr = hdr0
self.exthdr = hdr
- InstrWCS.__init__(self,hdr0, hdr)
+ InstrWCS.__init__(self, hdr0, hdr)
self.set_ins_spec_kw()
def set_filter1(self):
self.filter1 = self.primhdr.get('FILTNAM1', None)
- if self.filter1 == " " or self.filter1 == None:
+ if self.filter1 == " " or self.filter1 is None:
self.filter1 = 'CLEAR1'
def set_filter2(self):
self.filter2 = self.primhdr.get('FILTNAM2', None)
- if self.filter2 == " " or self.filter2 == None:
+ if self.filter2 == " " or self.filter2 is None:
self.filter2 = 'CLEAR2'
-
def set_binned(self):
mode = self.primhdr.get('MODE', 1)
if mode == 'FULL':
@@ -202,7 +200,7 @@ class WFPC2WCS(InstrWCS):
self.chip = self.exthdr.get('DETECTOR', 1)
def set_parity(self):
- self.parity = [[-1.0,0.],[0.,1.0]]
+ self.parity = [[-1.0, 0.], [0., 1.0]]
def set_detector(self):
try:
@@ -220,7 +218,7 @@ class WFC3WCS(InstrWCS):
def __init__(self, hdr0, hdr):
self.primhdr = hdr0
self.exthdr = hdr
- InstrWCS.__init__(self,hdr0, hdr)
+ InstrWCS.__init__(self, hdr0, hdr)
self.set_ins_spec_kw()
def set_detector(self):
@@ -232,22 +230,23 @@ class WFC3WCS(InstrWCS):
def set_filter1(self):
self.filter1 = self.primhdr.get('FILTER', None)
- if self.filter1 == " " or self.filter1 == None:
+ if self.filter1 == " " or self.filter1 is None:
self.filter1 = 'CLEAR'
def set_filter2(self):
- #Nicmos idc tables do not allow 2 filters.
+ # Nicmos idc tables do not allow 2 filters.
self.filter2 = 'CLEAR'
def set_parity(self):
- parity = {'UVIS':[[-1.0,0.0],[0.0,1.0]],
- 'IR':[[-1.0,0.0],[0.0,1.0]]}
+ parity = {'UVIS': [[-1.0, 0.0], [0.0, 1.0]],
+ 'IR': [[-1.0, 0.0], [0.0, 1.0]]}
if self.detector not in list(parity.keys()):
parity = InstrWCS.set_parity(self)
else:
self.parity = parity[self.detector]
+
class NICMOSWCS(InstrWCS):
"""
Create a NICMOS specific class
@@ -256,19 +255,19 @@ class NICMOSWCS(InstrWCS):
def __init__(self, hdr0, hdr):
self.primhdr = hdr0
self.exthdr = hdr
- InstrWCS.__init__(self,hdr0, hdr)
+ InstrWCS.__init__(self, hdr0, hdr)
self.set_ins_spec_kw()
def set_parity(self):
- self.parity = [[-1.0,0.],[0.,1.0]]
+ self.parity = [[-1.0, 0.], [0., 1.0]]
def set_filter1(self):
self.filter1 = self.primhdr.get('FILTER', None)
- if self.filter1 == " " or self.filter1 == None:
+ if self.filter1 == " " or self.filter1 is None:
self.filter1 = 'CLEAR'
def set_filter2(self):
- #Nicmos idc tables do not allow 2 filters.
+ # Nicmos idc tables do not allow 2 filters.
self.filter2 = 'CLEAR'
def set_chip(self):
@@ -281,6 +280,7 @@ class NICMOSWCS(InstrWCS):
print('ERROR: Detector kw not found.\n')
raise
+
class STISWCS(InstrWCS):
"""
A STIS specific class
@@ -289,20 +289,20 @@ class STISWCS(InstrWCS):
def __init__(self, hdr0, hdr):
self.primhdr = hdr0
self.exthdr = hdr
- InstrWCS.__init__(self,hdr0, hdr)
+ InstrWCS.__init__(self, hdr0, hdr)
self.set_ins_spec_kw()
def set_parity(self):
- self.parity = [[-1.0,0.],[0.,1.0]]
+ self.parity = [[-1.0, 0.], [0., 1.0]]
def set_filter1(self):
self.filter1 = self.exthdr.get('OPT_ELEM', None)
- if self.filter1 == " " or self.filter1 == None:
+ if self.filter1 == " " or self.filter1 is None:
self.filter1 = 'CLEAR1'
def set_filter2(self):
self.filter2 = self.exthdr.get('FILTER', None)
- if self.filter2 == " " or self.filter2 == None:
+ if self.filter2 == " " or self.filter2 is None:
self.filter2 = 'CLEAR2'
def set_detector(self):
diff --git a/stwcs/wcsutil/mappings.py b/stwcs/wcsutil/mappings.py
index 24038bf..a85df13 100644
--- a/stwcs/wcsutil/mappings.py
+++ b/stwcs/wcsutil/mappings.py
@@ -1,29 +1,29 @@
-from __future__ import division # confidence high
+from __future__ import division
# This dictionary maps an instrument into an instrument class
# The instrument class handles instrument specific keywords
-inst_mappings={'WFPC2': 'WFPC2WCS',
- 'ACS': 'ACSWCS',
- 'NICMOS': 'NICMOSWCS',
- 'STIS': 'STISWCS',
- 'WFC3': 'WFC3WCS',
- 'DEFAULT': 'InstrWCS'
- }
+inst_mappings = {'WFPC2': 'WFPC2WCS',
+ 'ACS': 'ACSWCS',
+ 'NICMOS': 'NICMOSWCS',
+ 'STIS': 'STISWCS',
+ 'WFC3': 'WFC3WCS',
+ 'DEFAULT': 'InstrWCS'
+ }
# A list of instrument specific keywords
# Every instrument class must have methods which define each of these
# as class attributes.
-ins_spec_kw = [ 'idctab', 'offtab', 'date_obs', 'ra_targ', 'dec_targ', 'pav3', \
- 'detector', 'ltv1', 'ltv2', 'parity', 'binned','vafactor', \
- 'chip', 'naxis1', 'naxis2', 'filter1', 'filter2']
+ins_spec_kw = ['idctab', 'offtab', 'date_obs', 'ra_targ', 'dec_targ',
+ 'pav3', 'detector', 'ltv1', 'ltv2', 'parity', 'binned',
+ 'vafactor', 'chip', 'naxis1', 'naxis2', 'filter1', 'filter2']
# A list of keywords defined in the primary header.
-# The HSTWCS class sets this as attributes
-prim_hdr_kw = ['detector', 'offtab', 'idctab', 'date-obs',
- 'pa_v3', 'ra_targ', 'dec_targ']
+# The HSTWCS class sets this as attributes
+prim_hdr_kw = ['detector', 'offtab', 'idctab', 'date-obs',
+ 'pa_v3', 'ra_targ', 'dec_targ']
# These are the keywords which are archived before MakeWCS is run
-basic_wcs = ['CD1_', 'CD2_', 'CRVAL', 'CTYPE', 'CRPIX', 'CTYPE', 'CDELT', 'CUNIT']
-
+basic_wcs = ['CD1_', 'CD2_', 'CRVAL', 'CTYPE', 'CRPIX', 'CTYPE',
+ 'CDELT', 'CUNIT']
diff --git a/stwcs/wcsutil/mosaic.py b/stwcs/wcsutil/mosaic.py
index 9d2d0a3..a757e0c 100644
--- a/stwcs/wcsutil/mosaic.py
+++ b/stwcs/wcsutil/mosaic.py
@@ -1,15 +1,17 @@
-from __future__ import division, print_function
+from __future__ import absolute_import, division, print_function
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import fits
import string
from stsci.tools import parseinput, irafglob
-from stwcs.distortion import utils
-from stwcs import updatewcs, wcsutil
-from stwcs.wcsutil import altwcs
+from ..distortion import utils
+from .. import wcsutil
+from ..wcsutil import altwcs
-def vmosaic(fnames, outwcs=None, ref_wcs=None, ext=None, extname=None, undistort=True, wkey='V', wname='VirtualMosaic', plot=False, clobber=False):
+
+def vmosaic(fnames, outwcs=None, ref_wcs=None, ext=None, extname=None, undistort=True,
+ wkey='V', wname='VirtualMosaic', plot=False, clobber=False):
"""
Create a virtual mosaic using the WCS of the input images.
@@ -61,27 +63,29 @@ def vmosaic(fnames, outwcs=None, ref_wcs=None, ext=None, extname=None, undistort
tangent plane and the virtual WCS is recorded in the header.
"""
wcsobjects = readWCS(fnames, ext, extname)
- if outwcs != None:
+ if outwcs is not None:
outwcs = outwcs.deepcopy()
else:
- if ref_wcs != None:
+ if ref_wcs is not None:
outwcs = utils.output_wcs(wcsobjects, ref_wcs=ref_wcs, undistort=undistort)
else:
outwcs = utils.output_wcs(wcsobjects, undistort=undistort)
if plot:
- outc=np.array([[0.,0], [outwcs._naxis1, 0],
- [outwcs._naxis1, outwcs._naxis2],
- [0, outwcs._naxis2], [0, 0]])
- plt.plot(outc[:,0], outc[:,1])
+ outc = np.array([[0., 0], [outwcs._naxis1, 0],
+ [outwcs._naxis1, outwcs._naxis2],
+ [0, outwcs._naxis2], [0, 0]])
+ plt.plot(outc[:, 0], outc[:, 1])
for wobj in wcsobjects:
- outcorners = outwcs.wcs_world2pix(wobj.calc_footprint(),1)
+ outcorners = outwcs.wcs_world2pix(wobj.calc_footprint(), 1)
if plot:
- plt.plot(outcorners[:,0], outcorners[:,1])
+ plt.plot(outcorners[:, 0], outcorners[:, 1])
objwcs = outwcs.deepcopy()
objwcs.wcs.crpix = objwcs.wcs.crpix - (outcorners[0])
- updatehdr(wobj.filename, objwcs,wkey=wkey, wcsname=wname, ext=wobj.extname, clobber=clobber)
+ updatehdr(wobj.filename, objwcs, wkey=wkey, wcsname=wname, ext=wobj.extname,
+ clobber=clobber)
return outwcs
+
def updatehdr(fname, wcsobj, wkey, wcsname, ext=1, clobber=False):
hdr = fits.getheader(fname, ext=ext)
all_keys = list(string.ascii_uppercase)
@@ -89,7 +93,9 @@ def updatehdr(fname, wcsobj, wkey, wcsname, ext=1, clobber=False):
raise KeyError("wkey must be one character: A-Z")
if wkey not in altwcs.available_wcskeys(hdr):
if not clobber:
- raise ValueError("wkey %s is already in use. Use clobber=True to overwrite it or specify a different key." %wkey)
+ raise ValueError("wkey {0} is already in use."
+ "Use clobber=True to overwrite it or"
+ "specify a different key.".format(wkey))
else:
altwcs.deleteWCS(fname, ext=ext, wcskey='V')
f = fits.open(fname, mode='update')
@@ -98,24 +104,25 @@ def updatehdr(fname, wcsobj, wkey, wcsname, ext=1, clobber=False):
wcsnamekey = 'WCSNAME' + wkey
f[ext].header[wcsnamekey] = wcsname
for k in hwcs:
- f[ext].header[k[:7]+wkey] = hwcs[k]
+ f[ext].header[k[: 7] + wkey] = hwcs[k]
f.close()
-def wcs2header(wcsobj):
+def wcs2header(wcsobj):
h = wcsobj.to_header()
if wcsobj.wcs.has_cd():
altwcs.pc2cd(h)
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
- norient = np.rad2deg(np.arctan2(h['CD1_2'],h['CD2_2']))
- #okey = 'ORIENT%s' % wkey
+ norient = np.rad2deg(np.arctan2(h['CD1_2'], h['CD2_2']))
+ # okey = 'ORIENT%s' % wkey
okey = 'ORIENT'
h[okey] = norient
return h
+
def readWCS(input, exts=None, extname=None):
if isinstance(input, str):
if input[0] == '@':
@@ -134,15 +141,15 @@ def readWCS(input, exts=None, extname=None):
wcso = []
fomited = []
# figure out which FITS extension(s) to use
- if exts == None and extname == None:
- #Assume it's simple FITS and the data is in the primary HDU
+ if exts is None and extname is None:
+ # Assume it's simple FITS and the data is in the primary HDU
for f in filelist:
try:
wcso = wcsutil.HSTWCS(f)
except AttributeError:
fomited.append(f)
continue
- elif exts != None and validateExt(exts):
+ elif exts is not None and validateExt(exts):
exts = [exts]
for f in filelist:
try:
@@ -150,7 +157,7 @@ def readWCS(input, exts=None, extname=None):
except KeyError:
fomited.append(f)
continue
- elif extname != None:
+ elif extname is not None:
for f in filelist:
fobj = fits.open(f)
for i in range(len(fobj)):
@@ -159,7 +166,7 @@ def readWCS(input, exts=None, extname=None):
except KeyError:
continue
if ename.lower() == extname.lower():
- wcso.append(wcsutil.HSTWCS(f,ext=i))
+ wcso.append(wcsutil.HSTWCS(f, ext=i))
else:
continue
fobj.close()
@@ -179,5 +186,3 @@ def validateExt(ext):
else:
return True
-
-
diff --git a/stwcs/wcsutil/wcscorr.py b/stwcs/wcsutil/wcscorr.py
index 3f9b7d5..48a1978 100644
--- a/stwcs/wcsutil/wcscorr.py
+++ b/stwcs/wcsutil/wcscorr.py
@@ -1,21 +1,20 @@
from __future__ import absolute_import, division, print_function
-import os,copy
+import copy
import numpy as np
from astropy.io import fits
import stwcs
-from stwcs.wcsutil import altwcs
-from stwcs.updatewcs import utils
+from . import altwcs
+from ..updatewcs import utils
from stsci.tools import fileutil
-from . import convertwcs
-DEFAULT_WCS_KEYS = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2',
- 'CD1_1','CD1_2','CD2_1','CD2_2',
- 'CTYPE1','CTYPE2','ORIENTAT']
-DEFAULT_PRI_KEYS = ['HDRNAME','SIPNAME','NPOLNAME','D2IMNAME','DESCRIP']
-COL_FITSKW_DICT = {'RMS_RA':'sci.crder1','RMS_DEC':'sci.crder2',
- 'NMatch':'sci.nmatch','Catalog':'sci.catalog'}
+DEFAULT_WCS_KEYS = ['CRVAL1', 'CRVAL2', 'CRPIX1', 'CRPIX2',
+ 'CD1_1', 'CD1_2', 'CD2_1', 'CD2_2',
+ 'CTYPE1', 'CTYPE2', 'ORIENTAT']
+DEFAULT_PRI_KEYS = ['HDRNAME', 'SIPNAME', 'NPOLNAME', 'D2IMNAME', 'DESCRIP']
+COL_FITSKW_DICT = {'RMS_RA': 'sci.crder1', 'RMS_DEC': 'sci.crder2',
+ 'NMatch': 'sci.nmatch', 'Catalog': 'sci.catalog'}
###
### WCSEXT table related keyword archive functions
@@ -53,7 +52,7 @@ def init_wcscorr(input, force=False):
return
else:
del fimg['wcscorr']
- print('Initializing new WCSCORR table for ',fimg.filename())
+ print('Initializing new WCSCORR table for ', fimg.filename())
used_wcskeys = altwcs.wcskeys(fimg['SCI', 1].header)
@@ -64,14 +63,14 @@ def init_wcscorr(input, force=False):
# create new table with more rows than needed initially to make it easier to
# add new rows later
- wcsext = create_wcscorr(descrip=True,numrows=numsci, padding=(numsci*numwcs) + numsci * 4)
+ wcsext = create_wcscorr(descrip=True, numrows=numsci, padding=(numsci * numwcs) + numsci * 4)
# Assign the correct EXTNAME value to this table extension
wcsext.header['TROWS'] = (numsci * 2, 'Number of updated rows in table')
wcsext.header['EXTNAME'] = ('WCSCORR', 'Table with WCS Update history')
wcsext.header['EXTVER'] = 1
# define set of WCS keywords which need to be managed and copied to the table
- wcs1 = stwcs.wcsutil.HSTWCS(fimg,ext=('SCI',1))
+ wcs1 = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', 1))
idc2header = True
if wcs1.idcscale is None:
idc2header = False
@@ -79,18 +78,18 @@ def init_wcscorr(input, force=False):
prihdr = fimg[0].header
prihdr_keys = DEFAULT_PRI_KEYS
- pri_funcs = {'SIPNAME':stwcs.updatewcs.utils.build_sipname,
- 'NPOLNAME':stwcs.updatewcs.utils.build_npolname,
- 'D2IMNAME':stwcs.updatewcs.utils.build_d2imname}
+ pri_funcs = {'SIPNAME': stwcs.updatewcs.utils.build_sipname,
+ 'NPOLNAME': stwcs.updatewcs.utils.build_npolname,
+ 'D2IMNAME': stwcs.updatewcs.utils.build_d2imname}
# Now copy original OPUS values into table
for extver in range(1, numsci + 1):
rowind = find_wcscorr_row(wcsext.data,
{'WCS_ID': 'OPUS', 'EXTVER': extver,
- 'WCS_key':'O'})
+ 'WCS_key': 'O'})
# There should only EVER be a single row for each extension with OPUS values
rownum = np.where(rowind)[0][0]
- #print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver
+ # print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver
hdr = fimg['SCI', extver].header
# define set of WCS keywords which need to be managed and copied to the table
@@ -100,7 +99,7 @@ def init_wcscorr(input, force=False):
# if so, get its values directly, otherwise, archive the PRIMARY WCS
# as the OPUS values in the WCSCORR table
if 'O' not in used_wcskeys:
- altwcs.archiveWCS(fimg,('SCI',extver),wcskey='O', wcsname='OPUS')
+ altwcs.archiveWCS(fimg, ('SCI', extver), wcskey='O', wcsname='OPUS')
wkey = 'O'
wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey)
@@ -113,7 +112,7 @@ def init_wcscorr(input, force=False):
break
for key in wcs_keywords:
if key in wcsext.data.names:
- wcsext.data.field(key)[rownum] = wcshdr[(key+wkey)[:8]]
+ wcsext.data.field(key)[rownum] = wcshdr[(key + wkey)[:8]]
# Now get any keywords from PRIMARY header needed for WCS updates
for key in prihdr_keys:
if key in prihdr:
@@ -143,7 +142,7 @@ def init_wcscorr(input, force=False):
# identify next empty row
rowind = find_wcscorr_row(wcsext.data,
- selections={'wcs_id':['','0.0']})
+ selections={'wcs_id': ['', '0.0']})
rows = np.where(rowind)
if len(rows[0]) > 0:
rownum = np.where(rowind)[0][0]
@@ -196,26 +195,26 @@ def find_wcscorr_row(wcstab, selections):
mask = None
for i in selections:
icol = wcstab.field(i)
- if isinstance(icol,np.chararray): icol = icol.rstrip()
+ if isinstance(icol, np.chararray): icol = icol.rstrip()
selecti = selections[i]
- if not isinstance(selecti,list):
- if isinstance(selecti,str):
+ if not isinstance(selecti, list):
+ if isinstance(selecti, str):
selecti = selecti.rstrip()
bmask = (icol == selecti)
if mask is None:
mask = bmask.copy()
else:
- mask = np.logical_and(mask,bmask)
+ mask = np.logical_and(mask, bmask)
del bmask
else:
for si in selecti:
- if isinstance(si,str):
+ if isinstance(si, str):
si = si.rstrip()
bmask = (icol == si)
if mask is None:
mask = bmask.copy()
else:
- mask = np.logical_or(mask,bmask)
+ mask = np.logical_or(mask, bmask)
del bmask
return mask
@@ -268,7 +267,7 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
headerlet
"""
if not isinstance(dest, fits.HDUList):
- dest = fits.open(dest,mode='update')
+ dest = fits.open(dest, mode='update')
fname = dest.filename()
if source is None:
@@ -280,7 +279,7 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
numext = fileutil.countExtn(source, extname)
if numext == 0:
raise ValueError('No %s extensions found in the source HDU list.'
- % extname)
+ % extname)
# Initialize the WCSCORR table extension in dest if not already present
init_wcscorr(dest)
try:
@@ -291,7 +290,7 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
# check to see whether or not this is an up-to-date table
# replace with newly initialized table with current format
old_table = dest['WCSCORR']
- wcscorr_cols = ['WCS_ID','EXTVER', 'SIPNAME',
+ wcscorr_cols = ['WCS_ID', 'EXTVER', 'SIPNAME',
'HDRNAME', 'NPOLNAME', 'D2IMNAME']
for colname in wcscorr_cols:
@@ -308,7 +307,7 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
# modified...
wcs_keys = altwcs.wcskeys(source[(extname, 1)].header)
wcs_keys = [kk for kk in wcs_keys if kk]
- if ' ' not in wcs_keys: wcs_keys.append(' ') # Insure that primary WCS gets used
+ if ' ' not in wcs_keys: wcs_keys.append(' ') # Insure that primary WCS gets used
# apply logic for only updating WCSCORR table with specified keywords
# corresponding to the WCS with WCSNAME=wcs_id
if wcs_id is not None:
@@ -324,10 +323,10 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
wcs_keywords = list(wcshdr.keys())
if 'O' in wcs_keys:
- wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS
+ wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS
# create new table for hdr and populate it with the newly updated values
- new_table = create_wcscorr(descrip=True,numrows=0, padding=len(wcs_keys)*numext)
+ new_table = create_wcscorr(descrip=True, numrows=0, padding=len(wcs_keys) * numext)
prihdr = source[0].header
# Get headerlet related keywords here
@@ -344,18 +343,18 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
for extver in range(1, numext + 1):
extn = (extname, extver)
if 'SIPWCS' in extname and not active:
- tab_extver = 0 # Since it has not been added to the SCI header yet
+ tab_extver = 0 # Since it has not been added to the SCI header yet
else:
tab_extver = extver
hdr = source[extn].header
- if 'WCSNAME'+wcs_key in hdr:
+ if 'WCSNAME' + wcs_key in hdr:
wcsname = hdr['WCSNAME' + wcs_key]
else:
wcsname = utils.build_default_wcsname(hdr['idctab'])
selection = {'WCS_ID': wcsname, 'EXTVER': tab_extver,
- 'SIPNAME':sipname, 'HDRNAME': hdrname,
- 'NPOLNAME': npolname, 'D2IMNAME':d2imname
+ 'SIPNAME': sipname, 'HDRNAME': hdrname,
+ 'NPOLNAME': npolname, 'D2IMNAME': d2imname
}
# Ensure that an entry for this WCS is not already in the dest
@@ -387,15 +386,14 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
# Interpret any 'pri.hdrname' or
# 'sci.crpix1' formatted keyword names
if '.' in fitkw:
- srchdr,fitkw = fitkw.split('.')
+ srchdr, fitkw = fitkw.split('.')
if 'pri' in srchdr.lower(): srchdr = prihdr
else: srchdr = source[extn].header
else:
srchdr = source[extn].header
- if fitkw+wcs_key in srchdr:
- new_table.data.field(key)[idx] = srchdr[fitkw+wcs_key]
-
+ if fitkw + wcs_key in srchdr:
+ new_table.data.field(key)[idx] = srchdr[fitkw + wcs_key]
# If idx was never incremented, no rows were added, so there's nothing else
# to do...
@@ -403,16 +401,16 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
return
# Now, we need to merge this into the existing table
- rowind = find_wcscorr_row(old_table.data, {'wcs_id':['','0.0']})
+ rowind = find_wcscorr_row(old_table.data, {'wcs_id': ['', '0.0']})
old_nrows = np.where(rowind)[0][0]
new_nrows = new_table.data.shape[0]
# check to see if there is room for the new row
- if (old_nrows + new_nrows) > old_table.data.shape[0]-1:
+ if (old_nrows + new_nrows) > old_table.data.shape[0] - 1:
pad_rows = 2 * new_nrows
# if not, create a new table with 'pad_rows' new empty rows
- upd_table = fits.new_table(old_table.columns,header=old_table.header,
- nrows=old_table.data.shape[0]+pad_rows)
+ upd_table = fits.new_table(old_table.columns, header=old_table.header,
+ nrows=old_table.data.shape[0] + pad_rows)
else:
upd_table = old_table
pad_rows = 0
@@ -421,10 +419,10 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
if name in new_table.data.names:
# reset the default values to ones specific to the row definitions
for i in range(pad_rows):
- upd_table.data.field(name)[old_nrows+i] = old_table.data.field(name)[-1]
+ upd_table.data.field(name)[old_nrows + i] = old_table.data.field(name)[-1]
# Now populate with values from new table
upd_table.data.field(name)[old_nrows:old_nrows + new_nrows] = \
- new_table.data.field(name)
+ new_table.data.field(name)
upd_table.header['TROWS'] = old_nrows + new_nrows
# replace old extension with newly updated table extension
@@ -447,14 +445,14 @@ def restore_file_from_wcscorr(image, id='OPUS', wcskey=''):
wcs_table = fimg['WCSCORR']
orig_rows = (wcs_table.data.field('WCS_ID') == 'OPUS')
# create an HSTWCS object to figure out what WCS keywords need to be updated
- wcsobj = stwcs.wcsutil.HSTWCS(fimg,ext=('sci',1))
+ wcsobj = stwcs.wcsutil.HSTWCS(fimg, ext=('sci', 1))
wcshdr = wcsobj.wcs2header()
- for extn in range(1,numsci+1):
+ for extn in range(1, numsci + 1):
# find corresponding row from table
ext_rows = (wcs_table.data.field('EXTVER') == extn)
- erow = np.where(np.logical_and(ext_rows,orig_rows))[0][0]
+ erow = np.where(np.logical_and(ext_rows, orig_rows))[0][0]
for key in wcshdr:
- if key in wcs_table.data.names: # insure that keyword is column in table
+ if key in wcs_table.data.names: # insure that keyword is column in table
tkey = key
if 'orient' in key.lower():
@@ -462,14 +460,14 @@ def restore_file_from_wcscorr(image, id='OPUS', wcskey=''):
if wcskey == '':
skey = key
else:
- skey = key[:7]+wcskey
- fimg['sci',extn].header[skey] = wcs_table.data.field(tkey)[erow]
+ skey = key[:7] + wcskey
+ fimg['sci', extn].header[skey] = wcs_table.data.field(tkey)[erow]
for key in DEFAULT_PRI_KEYS:
if key in wcs_table.data.names:
if wcskey == '':
pkey = key
else:
- pkey = key[:7]+wcskey
+ pkey = key[: 7] + wcskey
fimg[0].header[pkey] = wcs_table.data.field(key)[erow]
utils.updateNEXTENDKw(fimg)
@@ -498,25 +496,25 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
def_float64_zeros = np.array([0.0] * trows, dtype=np.float64)
def_float64_ones = def_float64_zeros + 1.0
def_float_col = {'format': 'D', 'array': def_float64_zeros.copy()}
- def_float1_col = {'format': 'D', 'array':def_float64_ones.copy()}
+ def_float1_col = {'format': 'D', 'array': def_float64_ones.copy()}
def_str40_col = {'format': '40A',
'array': np.array([''] * trows, dtype='S40')}
def_str24_col = {'format': '24A',
'array': np.array([''] * trows, dtype='S24')}
def_int32_col = {'format': 'J',
- 'array': np.array([0]*trows,dtype=np.int32)}
+ 'array': np.array([0] * trows, dtype=np.int32)}
# If more columns are needed, simply add their definitions to this list
- col_names = [('HDRNAME', def_str24_col), ('SIPNAME', def_str24_col),
- ('NPOLNAME', def_str24_col), ('D2IMNAME', def_str24_col),
- ('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
- ('CRPIX1', def_float_col), ('CRPIX2', def_float_col),
- ('CD1_1', def_float_col), ('CD1_2', def_float_col),
- ('CD2_1', def_float_col), ('CD2_2', def_float_col),
- ('CTYPE1', def_str24_col), ('CTYPE2', def_str24_col),
- ('ORIENTAT', def_float_col), ('PA_V3', def_float_col),
- ('RMS_RA', def_float_col), ('RMS_Dec', def_float_col),
- ('NMatch', def_int32_col), ('Catalog', def_str40_col)]
+ col_names = [('HDRNAME', def_str24_col), ('SIPNAME', def_str24_col),
+ ('NPOLNAME', def_str24_col), ('D2IMNAME', def_str24_col),
+ ('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
+ ('CRPIX1', def_float_col), ('CRPIX2', def_float_col),
+ ('CD1_1', def_float_col), ('CD1_2', def_float_col),
+ ('CD2_1', def_float_col), ('CD2_2', def_float_col),
+ ('CTYPE1', def_str24_col), ('CTYPE2', def_str24_col),
+ ('ORIENTAT', def_float_col), ('PA_V3', def_float_col),
+ ('RMS_RA', def_float_col), ('RMS_Dec', def_float_col),
+ ('NMatch', def_int32_col), ('Catalog', def_str40_col)]
# Define selector columns
id_col = fits.Column(name='WCS_ID', format='40A',
@@ -529,7 +527,7 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
array=np.array(['O'] * numrows + [''] * padding,
dtype='S'))
# create list of remaining columns to be added to table
- col_list = [id_col, extver_col, wcskey_col] # start with selector columns
+ col_list = [id_col, extver_col, wcskey_col] # start with selector columns
for c in col_names:
cdef = copy.deepcopy(c[1])
@@ -552,7 +550,8 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
return newtab
-def delete_wcscorr_row(wcstab,selections=None,rows=None):
+
+def delete_wcscorr_row(wcstab, selections=None, rows=None):
"""
Sets all values in a specified row or set of rows to default values
@@ -588,7 +587,7 @@ def delete_wcscorr_row(wcstab,selections=None,rows=None):
rowind = find_wcscorr_row(wcstab, selections=selections)
delete_rows = np.where(rowind)[0].tolist()
else:
- if not isinstance(rows,list):
+ if not isinstance(rows, list):
rows = [rows]
delete_rows = rows
@@ -601,13 +600,14 @@ def delete_wcscorr_row(wcstab,selections=None,rows=None):
return
# identify next empty row
- rowind = find_wcscorr_row(wcstab, selections={'wcs_id':['','0.0']})
+ rowind = find_wcscorr_row(wcstab, selections={'wcs_id': ['', '0.0']})
last_blank_row = np.where(rowind)[0][-1]
# copy values from blank row into user-specified rows
for colname in wcstab.names:
wcstab[colname][delete_rows] = wcstab[colname][last_blank_row]
+
def update_wcscorr_column(wcstab, column, values, selections=None, rows=None):
"""
Update the values in 'column' with 'values' for selected rows
@@ -645,7 +645,7 @@ def update_wcscorr_column(wcstab, column, values, selections=None, rows=None):
rowind = find_wcscorr_row(wcstab, selections=selections)
update_rows = np.where(rowind)[0].tolist()
else:
- if not isinstance(rows,list):
+ if not isinstance(rows, list):
rows = [rows]
update_rows = rows
@@ -654,8 +654,8 @@ def update_wcscorr_column(wcstab, column, values, selections=None, rows=None):
# Expand single input value to apply to all selected rows
if len(values) > 1 and len(values) < len(update_rows):
- print('ERROR: Number of new values',len(values))
- print(' does not match number of rows',len(update_rows),' to be updated!')
+ print('ERROR: Number of new values', len(values))
+ print(' does not match number of rows', len(update_rows), ' to be updated!')
print(' Please enter either 1 value or the same number of values')
print(' as there are rows to be updated.')
print(' Table will not be updated...')
diff --git a/stwcs/wcsutil/wcsdiff.py b/stwcs/wcsutil/wcsdiff.py
index cfc2d66..9f00f1a 100644
--- a/stwcs/wcsutil/wcsdiff.py
+++ b/stwcs/wcsutil/wcsdiff.py
@@ -1,10 +1,12 @@
-from __future__ import print_function
+from __future__ import absolute_import, print_function
+
from astropy import wcs as pywcs
from collections import OrderedDict
from astropy.io import fits
from .headerlet import parse_filename
import numpy as np
+
def is_wcs_identical(scifile, file2, sciextlist, fextlist, scikey=" ",
file2key=" ", verbose=False):
"""
@@ -48,28 +50,29 @@ def is_wcs_identical(scifile, file2, sciextlist, fextlist, scikey=" ",
sciobj, sciname, close_scifile = parse_filename(scifile)
diff['file_names'] = [scifile, file2]
if get_rootname(scifile) != get_rootname(file2):
- #logger.info('Rootnames do not match.')
- diff['rootname'] = ("%s: %s", "%s: %s") % (sciname, get_rootname(scifile), file2, get_rootname(file2))
+ # logger.info('Rootnames do not match.')
+            diff['rootname'] = "%s: %s, %s: %s" % (sciname, get_rootname(scifile), file2,
+                                                   get_rootname(file2))
result = False
for i, j in zip(sciextlist, fextlist):
w1 = pywcs.WCS(sciobj[i].header, sciobj, key=scikey)
w2 = pywcs.WCS(fobj[j].header, fobj, key=file2key)
diff['extension'] = [get_extname_extnum(sciobj[i]), get_extname_extnum(fobj[j])]
if not np.allclose(w1.wcs.crval, w2.wcs.crval, rtol=10**(-7)):
- #logger.info('CRVALs do not match')
+ # logger.info('CRVALs do not match')
diff['CRVAL'] = w1.wcs.crval, w2.wcs.crval
result = False
if not np.allclose(w1.wcs.crpix, w2.wcs.crpix, rtol=10**(-7)):
- #logger.info('CRPIX do not match')
- diff ['CRPIX'] = w1.wcs.crpix, w2.wcs.crpix
+ # logger.info('CRPIX do not match')
+ diff['CRPIX'] = w1.wcs.crpix, w2.wcs.crpix
result = False
if not np.allclose(w1.wcs.cd, w2.wcs.cd, rtol=10**(-7)):
- #logger.info('CDs do not match')
- diff ['CD'] = w1.wcs.cd, w2.wcs.cd
+ # logger.info('CDs do not match')
+ diff['CD'] = w1.wcs.cd, w2.wcs.cd
result = False
if not (np.array(w1.wcs.ctype) == np.array(w2.wcs.ctype)).all():
- #logger.info('CTYPEs do not match')
- diff ['CTYPE'] = w1.wcs.ctype, w2.wcs.ctype
+ # logger.info('CTYPEs do not match')
+ diff['CTYPE'] = w1.wcs.ctype, w2.wcs.ctype
result = False
if w1.sip or w2.sip:
if (w2.sip and not w1.sip) or (w1.sip and not w2.sip):
@@ -79,41 +82,41 @@ def is_wcs_identical(scifile, file2, sciextlist, fextlist, scikey=" ",
diff['SIP_A'] = 'SIP_A differ'
result = False
if not np.allclose(w1.sip.b, w2.sip.b, rtol=10**(-7)):
- #logger.info('SIP coefficients do not match')
- diff ['SIP_B'] = (w1.sip.b, w2.sip.b)
+ # logger.info('SIP coefficients do not match')
+ diff['SIP_B'] = (w1.sip.b, w2.sip.b)
result = False
if w1.cpdis1 or w2.cpdis1:
if w1.cpdis1 and not w2.cpdis1 or w2.cpdis1 and not w1.cpdis1:
diff['CPDIS1'] = "CPDIS1 missing"
- result=False
+ result = False
if w1.cpdis2 and not w2.cpdis2 or w2.cpdis2 and not w1.cpdis2:
diff['CPDIS2'] = "CPDIS2 missing"
result = False
if not np.allclose(w1.cpdis1.data, w2.cpdis1.data, rtol=10**(-7)):
- #logger.info('NPOL distortions do not match')
- diff ['CPDIS1_data'] = (w1.cpdis1.data, w2.cpdis1.data)
+ # logger.info('NPOL distortions do not match')
+ diff['CPDIS1_data'] = (w1.cpdis1.data, w2.cpdis1.data)
result = False
if not np.allclose(w1.cpdis2.data, w2.cpdis2.data, rtol=10**(-7)):
- #logger.info('NPOL distortions do not match')
- diff ['CPDIS2_data'] = (w1.cpdis2.data, w2.cpdis2.data)
+ # logger.info('NPOL distortions do not match')
+ diff['CPDIS2_data'] = (w1.cpdis2.data, w2.cpdis2.data)
result = False
if w1.det2im1 or w2.det2im1:
if w1.det2im1 and not w2.det2im1 or \
- w2.det2im1 and not w1.det2im1:
+ w2.det2im1 and not w1.det2im1:
diff['DET2IM'] = "Det2im1 missing"
result = False
if not np.allclose(w1.det2im1.data, w2.det2im1.data, rtol=10**(-7)):
- #logger.info('Det2Im corrections do not match')
- diff ['D2IM1_data'] = (w1.det2im1.data, w2.det2im1.data)
- result = False
+                # logger.info('Det2Im corrections do not match')
+                diff['D2IM1_data'] = (w1.det2im1.data, w2.det2im1.data)
+                result = False
if w1.det2im2 or w2.det2im2:
if w1.det2im2 and not w2.det2im2 or \
w2.det2im2 and not w1.det2im2:
diff['DET2IM2'] = "Det2im2 missing"
result = False
if not np.allclose(w1.det2im2.data, w2.det2im2.data, rtol=10**(-7)):
- #logger.info('Det2Im corrections do not match')
- diff ['D2IM2_data'] = (w1.det2im2.data, w2.det2im2.data)
+ # logger.info('Det2Im corrections do not match')
+ diff['D2IM2_data'] = (w1.det2im2.data, w2.det2im2.data)
result = False
if not result and verbose:
for key in diff:
@@ -124,6 +127,7 @@ def is_wcs_identical(scifile, file2, sciextlist, fextlist, scikey=" ",
sciobj.close()
return result, diff
+
def get_rootname(fname):
"""
Returns the value of ROOTNAME or DESTIM
@@ -139,12 +143,13 @@ def get_rootname(fname):
rootname = fname
return rootname
+
def get_extname_extnum(ext):
"""
Return (EXTNAME, EXTNUM) of a FITS extension
"""
extname = ""
- extnum=1
+ extnum = 1
extname = ext.header.get('EXTNAME', extname)
extnum = ext.header.get('EXTVER', extnum)
return (extname, extnum)