Diffstat (limited to 'lib')
-rw-r--r--  lib/stwcs/__init__.py (renamed from lib/__init__.py)      2
-rw-r--r--  lib/stwcs/distortion/__init__.py                          0
-rw-r--r--  lib/stwcs/distortion/coeff_converter.py                 141
-rw-r--r--  lib/stwcs/distortion/models.py                          361
-rw-r--r--  lib/stwcs/distortion/mutil.py                           663
-rw-r--r--  lib/stwcs/distortion/utils.py                           204
-rw-r--r--  lib/stwcs/updatewcs/__init__.py                         396
-rw-r--r--  lib/stwcs/updatewcs/apply_corrections.py                223
-rw-r--r--  lib/stwcs/updatewcs/corrections.py                      230
-rw-r--r--  lib/stwcs/updatewcs/det2im.py                           200
-rw-r--r--  lib/stwcs/updatewcs/makewcs.py                          251
-rw-r--r--  lib/stwcs/updatewcs/npol.py                             319
-rw-r--r--  lib/stwcs/updatewcs/utils.py                             28
-rw-r--r--  lib/stwcs/wcsutil/__init__.py                            35
-rw-r--r--  lib/stwcs/wcsutil/altwcs.py                             451
-rw-r--r--  lib/stwcs/wcsutil/convertwcs.py                         118
-rw-r--r--  lib/stwcs/wcsutil/getinput.py                            62
-rw-r--r--  lib/stwcs/wcsutil/headerlet.py                          898
-rw-r--r--  lib/stwcs/wcsutil/hstwcs.py                             451
-rw-r--r--  lib/stwcs/wcsutil/instruments.py                        321
-rw-r--r--  lib/stwcs/wcsutil/mappings.py                            29
-rw-r--r--  lib/stwcs/wcsutil/mosaic.py                             183
-rw-r--r--  lib/stwcs/wcsutil/wcscorr.py                            458
23 files changed, 6023 insertions(+), 1 deletion(-)
diff --git a/lib/__init__.py b/lib/stwcs/__init__.py
index e2380a8..a32f8b9 100644
--- a/lib/__init__.py
+++ b/lib/stwcs/__init__.py
@@ -13,7 +13,7 @@ from __future__ import division # confidence high
import distortion
import pywcs
-from pytools import fileutil
+from stsci.tools import fileutil
__docformat__ = 'restructuredtext'
diff --git a/lib/stwcs/distortion/__init__.py b/lib/stwcs/distortion/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/stwcs/distortion/__init__.py
diff --git a/lib/stwcs/distortion/coeff_converter.py b/lib/stwcs/distortion/coeff_converter.py
new file mode 100644
index 0000000..f2eb4ad
--- /dev/null
+++ b/lib/stwcs/distortion/coeff_converter.py
@@ -0,0 +1,141 @@
+from __future__ import division # confidence high
+
+import numpy as np
+import pyfits
+import pywcs
+
+def sip2idc(wcs):
+ """
+ Converts SIP style coefficients to IDCTAB coefficients.
+
+ :Parameters:
+ `wcs`: pyfits.Header or pywcs.WCS object
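+
+    Examples
+    --------
+    A usage sketch (the file name is hypothetical):
+
+    >>> import pyfits
+    >>> hdr = pyfits.getheader('j94f05bgq_flt.fits', ext=1)
+    >>> cx, cy = sip2idc(hdr)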
+ """
+ if isinstance(wcs,pyfits.Header):
+ ocx10 = wcs.get('OCX10', None)
+ ocx11 = wcs.get('OCX11', None)
+ ocy10 = wcs.get('OCY10', None)
+ ocy11 = wcs.get('OCY11', None)
+ order = wcs.get('A_ORDER', None)
+ sipa, sipb = _read_sip_kw(wcs)
+ if None in [ocx10, ocx11, ocy10, ocy11, sipa, sipb]:
+ print 'Cannot convert SIP to IDC coefficients.\n'
+ return None, None
+ elif isinstance(wcs,pywcs.WCS):
+ try:
+ ocx10 = wcs.ocx10
+ ocx11 = wcs.ocx11
+ ocy10 = wcs.ocy10
+ ocy11 = wcs.ocy11
+ except AttributeError:
+ print 'First order IDCTAB coefficients are not available.\n'
+ print 'Cannot convert SIP to IDC coefficients.\n'
+ return None, None
+ try:
+ sipa = wcs.sip.a
+ sipb = wcs.sip.b
+ except AttributeError:
+ print 'SIP coefficients are not available.'
+ print 'Cannot convert SIP to IDC coefficients.\n'
+ return None, None
+ try:
+ order = wcs.sip.a_order
+ except AttributeError:
+ print 'SIP model order unknown, exiting ...\n'
+ return None, None
+
+ else:
+ print 'Input to sip2idc must be a PyFITS header or a wcsutil.HSTWCS object\n'
+ return
+
+
+ if None in [ocx10, ocx11, ocy10, ocy11]:
+ print 'First order IDC coefficients not found, exiting ...\n'
+ return
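+    # The first order IDCTAB coefficients form a 2x2 matrix which maps each
+    # pair of SIP coefficients (a, b) into the IDC frame:
+    #   [cx, cy]^T = [[ocx11, ocx10], [ocy11, ocy10]] . [a, b]^T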
+ idc_coeff = np.array([[ocx11, ocx10], [ocy11, ocy10]])
+ cx = np.zeros((order+1,order+1), dtype=np.double)
+ cy = np.zeros((order+1,order+1), dtype=np.double)
+ for n in range(order+1):
+ for m in range(order+1):
+ if n >= m and n>=2:
+ sipval = np.array([[sipa[m,n-m]],[sipb[m,n-m]]])
+ idcval = np.dot(idc_coeff, sipval)
+ cx[n,m] = idcval[0]
+ cy[n,m] = idcval[1]
+
+ cx[1,0] = ocx10
+ cx[1,1] = ocx11
+ cy[1,0] = ocy10
+ cy[1,1] = ocy11
+
+ return cx, cy
+
+def _read_sip_kw(header):
+ """
+ Reads SIP header keywords and returns an array of coefficients.
+
+ If no SIP header keywords are found, None is returned.
+ """
+ if header.has_key("A_ORDER"):
+ if not header.has_key("B_ORDER"):
+ raise ValueError(
+ "A_ORDER provided without corresponding B_ORDER "
+ "keyword for SIP distortion")
+
+ m = int(header["A_ORDER"])
+ a = np.zeros((m+1, m+1), np.double)
+ for i in range(m+1):
+ for j in range(m-i+1):
+ a[i, j] = header.get("A_%d_%d" % (i, j), 0.0)
+
+ m = int(header["B_ORDER"])
+ b = np.zeros((m+1, m+1), np.double)
+ for i in range(m+1):
+ for j in range(m-i+1):
+ b[i, j] = header.get("B_%d_%d" % (i, j), 0.0)
+ elif header.has_key("B_ORDER"):
+ raise ValueError(
+ "B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion")
+ else:
+ a = None
+ b = None
+
+ return a , b
+
+
+"""
+def idc2sip(wcsobj, idctab = None):
+    if isinstance(wcsobj, pywcs.WCS):
+        try:
+            ocx10 = wcsobj.ocx10
+            ocx11 = wcsobj.ocx11
+            ocy10 = wcsobj.ocy10
+            ocy11 = wcsobj.ocy11
+        except AttributeError:
+            print 'First order IDCTAB coefficients are not available.\n'
+            return
+        try:
+            order = wcsobj.sip.a_order
+        except AttributeError:
+            print 'SIP model order unknown, exiting ...\n'
+            return
+    else:
+        print 'Input to idc2sip must be a wcsutil.HSTWCS object\n'
+        return
+
+    if None in [ocx10, ocx11, ocy10, ocy11]:
+        print 'First order IDC coefficients not found, exiting ...\n'
+        return
+    idc_coeff = np.array([[ocx11, ocx10], [ocy11, ocy10]])
+    cx = np.zeros((order+1,order+1), dtype=np.double)
+    cy = np.zeros((order+1,order+1), dtype=np.double)
+    for n in range(order+1):
+        for m in range(order+1):
+            if n >= m and n>=2:
+                sipval = np.array([[wcsobj.sip.a[n,m]],[wcsobj.sip.b[n,m]]])
+                idcval = np.dot(idc_coeff, sipval)
+                cx[m,n-m] = idcval[0]
+                cy[m,n-m] = idcval[1]
+
+    return cx, cy
+""" \ No newline at end of file
diff --git a/lib/stwcs/distortion/models.py b/lib/stwcs/distortion/models.py
new file mode 100644
index 0000000..96d5d72
--- /dev/null
+++ b/lib/stwcs/distortion/models.py
@@ -0,0 +1,361 @@
+from __future__ import division # confidence high
+
+import types
+# Import PyDrizzle utility modules
+import mutil
+from mutil import combin
+import numpy as np
+
+yes = True
+no = False
+
+#################
+#
+#
+# Geometry/Distortion Classes
+#
+#
+#################
+
+class GeometryModel:
+ """
+ Base class for Distortion model.
+ There will be a separate class for each type of
+ model/filetype used with drizzle, i.e., IDCModel and
+ DrizzleModel.
+
+ Each class will know how to apply the distortion to a
+ single point and how to convert coefficients to an input table
+ suitable for the drizzle task.
+
+ Coefficients will be stored in CX,CY arrays.
+ """
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ NORDER = 3
+
+ def __init__(self):
+ " This will open the given file and determine its type and norder."
+
+ # Method to read in coefficients from given table and
+ # populate the n arrays 'cx' and 'cy'.
+ # This will be different for each type of input file,
+ # IDCTAB vs. drizzle table.
+
+ # Set these up here for all sub-classes to use...
+ # But, calculate norder and cx,cy arrays in detector specific classes.
+ self.cx = None
+ self.cy = None
+ self.refpix = None
+ self.norder = self.NORDER
+ # Keep track of computed zero-point for distortion coeffs
+ self.x0 = None
+ self.y0 = None
+
+ # default values for these attributes
+ self.direction = 'forward'
+
+ self.pscale = 1.0
+
+ def shift(self,cx,cy,xs,ys):
+ """
+ Shift reference position of coefficients to new center
+ where (xs,ys) = old-reference-position - subarray/image center.
+ This will support creating coeffs files for drizzle which will
+ be applied relative to the center of the image, rather than relative
+ to the reference position of the chip.
+ """
+
+ _cxs = np.zeros(shape=cx.shape,dtype=cx.dtype)
+ _cys = np.zeros(shape=cy.shape,dtype=cy.dtype)
+ _k = self.norder + 1
+ # loop over each input coefficient
+ for m in xrange(_k):
+ for n in xrange(_k):
+ if m >= n:
+ # For this coefficient, shift by xs/ys.
+ _ilist = range(m, _k)
+ # sum from m to k
+ for i in _ilist:
+ _jlist = range(n, i - (m-n)+1)
+ # sum from n to i-(m-n)
+ for j in _jlist:
+ _cxs[m,n] += cx[i,j]*combin(j,n)*combin((i-j),(m-n))*pow(xs,(j-n))*pow(ys,((i-j)-(m-n)))
+ _cys[m,n] += cy[i,j]*combin(j,n)*combin((i-j),(m-n))*pow(xs,(j-n))*pow(ys,((i-j)-(m-n)))
+ return _cxs,_cys
+
+ def convert(self, tmpname, xref=None,yref=None,delta=yes):
+ """
+ Open up an ASCII file, output coefficients in drizzle
+ format after converting them as necessary.
+ First, normalize these coefficients to what drizzle expects
+ Normalize the coefficients by the MODEL/output plate scale.
+
+ 16-May-2002:
+ Revised to work with higher order polynomials by John Blakeslee.
+ 27-June-2002:
+ Added ability to shift coefficients to new center for support
+ of subarrays.
+ """
+ cx = self.cx/self.pscale
+ cy = self.cy/self.pscale
+ x0 = self.refpix['XDELTA'] + cx[0,0]
+ y0 = self.refpix['YDELTA'] + cy[0,0]
+ #xr = self.refpix['XREF']
+ #yr = self.refpix['YREF']
+ xr = self.refpix['CHIP_XREF']
+ yr = self.refpix['CHIP_YREF']
+
+
+
+ '''
+ if xref != None:
+ # Shift coefficients for use with drizzle
+ _xs = xref - self.refpix['XREF'] + 1.0
+ _ys = yref - self.refpix['YREF'] + 1.0
+
+
+ if _xs != 0 or _ys != 0:
+ cxs,cys= self.shift(cx, cy, _xs, _ys)
+ cx = cxs
+ cy = cys
+
+ # We only want to apply this shift to coeffs
+ # for subarray images.
+ if delta == no:
+ cxs[0,0] = cxs[0,0] - _xs
+ cys[0,0] = cys[0,0] - _ys
+
+ # Now, apply only the difference introduced by the distortion..
+ # i.e., (undistorted - original) shift.
+ x0 += cxs[0,0]
+ y0 += cys[0,0]
+ '''
+ self.x0 = x0 #+ 1.0
+ self.y0 = y0 #+ 1.0
+
+ # Now, write out the coefficients into an ASCII
+ # file in 'drizzle' format.
+ lines = []
+
+
+ lines.append('# Polynomial distortion coefficients\n')
+ lines.append('# Extracted from "%s" \n'%self.name)
+ lines.append('refpix %f %f \n'%(xr,yr))
+ if self.norder==3:
+ lines.append('cubic\n')
+ elif self.norder==4:
+ lines.append('quartic\n')
+ elif self.norder==5:
+ lines.append('quintic\n')
+ else:
+ raise ValueError, "Drizzle cannot handle poly distortions of order %d"%self.norder
+
+        line = '%16.8f %16.8g %16.8g %16.8g %16.8g \n'% (x0,cx[1,1],cx[1,0],cx[2,2],cx[2,1])
+        lines.append(line)
+        line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cx[2,0],cx[3,3],cx[3,2],cx[3,1],cx[3,0])
+        lines.append(line)
+        if self.norder>3:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cx[4,4],cx[4,3],cx[4,2],cx[4,1],cx[4,0])
+            lines.append(line)
+        if self.norder>4:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cx[5,5],cx[5,4],cx[5,3],cx[5,2],cx[5,1],cx[5,0])
+            lines.append(line)
+        lines.append("\n")
+
+        line = '%16.8f %16.8g %16.8g %16.8g %16.8g \n'% (y0,cy[1,1],cy[1,0],cy[2,2],cy[2,1])
+        lines.append(line)
+        line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cy[2,0],cy[3,3],cy[3,2],cy[3,1],cy[3,0])
+        lines.append(line)
+        if self.norder>3:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cy[4,4],cy[4,3],cy[4,2],cy[4,1],cy[4,0])
+            lines.append(line)
+        if self.norder>4:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g %16.8g \n'% (cy[5,5],cy[5,4],cy[5,3],cy[5,2],cy[5,1],cy[5,0])
+            lines.append(line)
+
+ output = open(tmpname,'w')
+ output.writelines(lines)
+ output.close()
+
+
+ def apply(self, pixpos,scale=1.0,order=None):
+ """
+ Apply coefficients to a pixel position or a list of positions.
+ This should be the same for all coefficients tables.
+ Return the geometrically-adjusted position
+ in arcseconds from the reference position as a tuple (x,y).
+
+ Compute delta from reference position
+ """
+
+ """
+ scale actually is a ratio of pscale/self.model.pscale
+ what is pscale?
+ """
+        if self.cx is None:
+ return pixpos[:,0],pixpos[:,1]
+
+ if order is None:
+ order = self.norder
+
+ # Apply in the same way that 'drizzle' would...
+ _cx = self.cx / (self.pscale * scale)
+ _cy = self.cy / (self.pscale * scale)
+ _convert = no
+ _p = pixpos
+
+ # Do NOT include any zero-point terms in CX,CY here
+ # as they should not be scaled by plate-scale like rest
+ # of coeffs... This makes the computations consistent
+ # with 'drizzle'. WJH 17-Feb-2004
+ _cx[0,0] = 0.
+ _cy[0,0] = 0.
+
+ if isinstance(_p,types.ListType) or isinstance(_p,types.TupleType):
+ _p = np.array(_p,dtype=np.float64)
+ _convert = yes
+
+ dxy = _p - (self.refpix['XREF'],self.refpix['YREF'])
+ # Apply coefficients from distortion model here...
+ c = _p * 0.
+ for i in range(order+1):
+ for j in range(i+1):
+ c[:,0] = c[:,0] + _cx[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ c[:,1] = c[:,1] + _cy[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ xc = c[:,0]
+ yc = c[:,1]
+
+ # Convert results back to same form as original input
+ if _convert:
+ xc = xc.tolist()
+ yc = yc.tolist()
+ # If a single tuple was input, return just a single tuple
+ if len(xc) == 1:
+ xc = xc[0]
+ yc = yc[0]
+
+ return xc,yc
+
+ def setPScaleCoeffs(self,pscale):
+ self.cx[1,1] = pscale
+ self.cy[1,0] = pscale
+
+ self.refpix['PSCALE'] = pscale
+ self.pscale = pscale
+
+
+class IDCModel(GeometryModel):
+ """
+ This class will open the IDCTAB, select proper row based on
+ chip/direction and populate cx,cy arrays.
+ We also need to read in SCALE, XCOM,YCOM, XREF,YREF as well.
+ """
+ def __init__(self, idcfile, date=None, chip=1, direction='forward',
+ filter1='CLEAR1',filter2='CLEAR2',offtab=None, binned=1):
+ GeometryModel.__init__(self)
+ #
+ # Norder must be derived from the coeffs file itself,
+ # then the arrays can be setup. Thus, it needs to be
+ # done in the sub-class, not in the base class.
+ # Read in table.
+ # Populate cx,cy,scale, and other variables here.
+ #
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readIDCtab(idcfile,
+ chip=chip,direction=direction,filter1=filter1,filter2=filter2,
+ date=date, offtab=offtab)
+
+ if self.refpix.has_key('empty_model') and self.refpix['empty_model']:
+ pass
+ else:
+ self.refpix['PSCALE'] = self.refpix['PSCALE'] * binned
+ self.cx = self.cx * binned
+ self.cy = self.cy * binned
+ self.refpix['XREF'] = self.refpix['XREF'] / binned
+ self.refpix['YREF'] = self.refpix['YREF'] / binned
+ self.refpix['XSIZE'] = self.refpix['XSIZE'] / binned
+ self.refpix['YSIZE'] = self.refpix['YSIZE'] / binned
+
+ self.pscale = self.refpix['PSCALE']
+
+
+class WCSModel(GeometryModel):
+ """
+ This class sets up a distortion model based on coefficients
+ found in the image header.
+ """
+ def __init__(self,header,rootname):
+ GeometryModel.__init__(self)
+
+
+ if header.has_key('rootname'):
+ self.name = header['rootname']
+ else:
+ self.name = rootname
+ # Initialize all necessary distortion arrays with
+ # default model...
+ #self.cx,self.cy,self.refpix,self.order = mutil.defaultModel()
+
+ # Read in values from header, and update distortion arrays.
+ self.cx,self.cy,self.refpix,self.norder = mutil.readWCSCoeffs(header)
+
+ self.pscale = self.refpix['PSCALE']
+
+
+
+class DrizzleModel(GeometryModel):
+ """
+ This class will read in an ASCII Cubic
+ drizzle coeffs file and populate the cx,cy arrays.
+ """
+
+ def __init__(self, idcfile, scale = None):
+ GeometryModel.__init__(self)
+ #
+ # We now need to read in the file, populate cx,cy, and
+ # other variables as necessary.
+ #
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readCubicTable(idcfile)
+
+ # scale is the ratio wcs.pscale/model.pscale.
+ # model.pscale for WFPC2 is passed from REFDATA.
+ # This is needed for WFPC2 binned data.
+
+ if scale != None:
+ self.pscale = scale
+ else:
+ self.pscale = self.refpix['PSCALE']
+
+ """
+ The above definition looks wrong.
+ In one case it's a ratio in the other it's pscale.
+
+ """
+
+class TraugerModel(GeometryModel):
+ """
+ This class will read in the ASCII Trauger coeffs
+ file, convert them to SIAF coefficients, then populate
+ the cx,cy arrays.
+ """
+ NORDER = 3
+
+ def __init__(self, idcfile,lam):
+ GeometryModel.__init__(self)
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readTraugerTable(idcfile,lam)
+ self.pscale = self.refpix['PSCALE']
+ #
+ # Read in file here.
+ # Populate cx,cy, and other variables.
+ #
+
+
diff --git a/lib/stwcs/distortion/mutil.py b/lib/stwcs/distortion/mutil.py
new file mode 100644
index 0000000..ab9eac2
--- /dev/null
+++ b/lib/stwcs/distortion/mutil.py
@@ -0,0 +1,663 @@
+from __future__ import division # confidence high
+
+from stsci.tools import fileutil
+import numpy as np
+import string
+import calendar
+
+# Set up IRAF-compatible Boolean values
+yes = True
+no = False
+
+# This function reads the IDC table and generates the two matrices with
+# the geometric correction coefficients.
+#
+# INPUT: FITS object of open IDC table
+# OUTPUT: coefficient matrices for Fx and Fy
+#
+#### If 'tabname' == None: This should return a default, undistorted
+#### solution.
+#
+
+def readIDCtab (tabname, chip=1, date=None, direction='forward',
+ filter1=None,filter2=None, offtab=None):
+
+ """
+    Read IDCTAB, and optional OFFTAB if specified, and generate
+ the two matrices with the geometric correction coefficients.
+
+ If tabname == None, then return a default, undistorted solution.
+ If offtab is specified, dateobs also needs to be given.
+
+ """
+
+ # Return a default geometry model if no IDCTAB filename
+ # is given. This model will not distort the data in any way.
+ if tabname == None:
+ print 'Warning: No IDCTAB specified! No distortion correction will be applied.'
+ return defaultModel()
+
+ # Implement default values for filters here to avoid the default
+ # being overwritten by values of None passed by user.
+ if filter1 == None or filter1.find('CLEAR') == 0:
+ filter1 = 'CLEAR'
+ if filter2 == None or filter2.find('CLEAR') == 0:
+ filter2 = 'CLEAR'
+
+    # Ensure that tabname is the full filename with fully expanded
+ # IRAF variables; i.e. 'jref$mc41442gj_idc.fits' should get
+ # expanded to '/data/cdbs7/jref/mc41442gj_idc.fits' before
+ # being used here.
+ # Open up IDC table now...
+ try:
+ ftab = fileutil.openImage(tabname)
+ except:
+ err_str = "------------------------------------------------------------------------ \n"
+ err_str += "WARNING: the IDCTAB geometric distortion file specified in the image \n"
+ err_str += "header was not found on disk. Please verify that your environment \n"
+ err_str += "variable ('jref'/'uref'/'oref'/'nref') has been correctly defined. If \n"
+ err_str += "you do not have the IDCTAB file, you may obtain the latest version \n"
+ err_str += "of it from the relevant instrument page on the STScI HST website: \n"
+ err_str += "http://www.stsci.edu/hst/ For WFPC2, STIS and NICMOS data, the \n"
+ err_str += "present run will continue using the old coefficients provided in \n"
+ err_str += "the Dither Package (ca. 1995-1998). \n"
+ err_str += "------------------------------------------------------------------------ \n"
+ raise IOError,err_str
+
+    # First, read in the coefficients from the IDC
+    # table and populate the Fx and Fy matrices.
+
+ if ftab['PRIMARY'].header.has_key('DETECTOR'):
+ detector = ftab['PRIMARY'].header['DETECTOR']
+ else:
+ if ftab['PRIMARY'].header.has_key('CAMERA'):
+ detector = str(ftab['PRIMARY'].header['CAMERA'])
+ else:
+ detector = 1
+ # First, read in TDD coeffs if present
+ phdr = ftab['PRIMARY'].header
+ instrument = phdr['INSTRUME']
+ if instrument == 'ACS' and detector == 'WFC':
+ skew_coeffs = read_tdd_coeffs(phdr)
+ else:
+ skew_coeffs = None
+
+ # Set default filters for SBC
+ if detector == 'SBC':
+ if filter1 == 'CLEAR':
+ filter1 = 'F115LP'
+ filter2 = 'N/A'
+ if filter2 == 'CLEAR':
+ filter2 = 'N/A'
+
+ # Read FITS header to determine order of fit, i.e. k
+ norder = ftab['PRIMARY'].header['NORDER']
+ if norder < 3:
+ order = 3
+ else:
+ order = norder
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ #Determine row from which to get the coefficients.
+ # How many rows do we have in the table...
+ fshape = ftab[1].data.shape
+ colnames = ftab[1].data._names
+ row = -1
+
+ # Loop over all the rows looking for the one which corresponds
+ # to the value of CCDCHIP we are working on...
+ for i in xrange(fshape[0]):
+
+ try:
+ # Match FILTER combo to appropriate row,
+ #if there is a filter column in the IDCTAB...
+ if 'FILTER1' in colnames and 'FILTER2' in colnames:
+
+ filt1 = ftab[1].data.field('FILTER1')[i]
+ if filt1.find('CLEAR') > -1: filt1 = filt1[:5]
+
+ filt2 = ftab[1].data.field('FILTER2')[i]
+ if filt2.find('CLEAR') > -1: filt2 = filt2[:5]
+ else:
+ if 'OPT_ELEM' in colnames:
+                    filt1 = ftab[1].data.field('OPT_ELEM')[i]
+ if filt1.find('CLEAR') > -1: filt1 = filt1[:5]
+ else:
+ filt1 = filter1
+
+ if 'FILTER' in colnames:
+ _filt = ftab[1].data.field('FILTER')[i]
+ if _filt.find('CLEAR') > -1: _filt = _filt[:5]
+ if 'OPT_ELEM' in colnames:
+ filt2 = _filt
+ else:
+ filt1 = _filt
+ filt2 = 'CLEAR'
+ else:
+ filt2 = filter2
+ except:
+ # Otherwise assume all rows apply and compare to input filters...
+ filt1 = filter1
+ filt2 = filter2
+
+ if 'DETCHIP' in colnames:
+ detchip = ftab[1].data.field('DETCHIP')[i]
+ if not str(detchip).isdigit():
+ detchip = 1
+ else:
+ detchip = 1
+
+ if 'DIRECTION' in colnames:
+ direct = string.strip(string.lower(ftab[1].data.field('DIRECTION')[i]))
+ else:
+ direct = 'forward'
+
+ if filt1 == filter1.strip() and filt2 == filter2.strip():
+ if direct == direction.strip():
+ if int(detchip) == int(chip) or int(detchip) == -999:
+ row = i
+ break
+
+ joinstr = ','
+ if 'CLEAR' in filter1:
+ f1str = ''
+ joinstr = ''
+ else:
+ f1str = filter1.strip()
+ if 'CLEAR' in filter2:
+ f2str = ''
+ joinstr = ''
+ else:
+ f2str = filter2.strip()
+ filtstr = (joinstr.join([f1str,f2str])).strip()
+ if row < 0:
+ err_str = '\nProblem finding row in IDCTAB! Could not find row matching:\n'
+ err_str += ' CHIP: '+str(detchip)+'\n'
+        err_str += '    FILTERS: '+filtstr+'\n'
+ ftab.close()
+ del ftab
+ raise LookupError,err_str
+ else:
+ print '- IDCTAB: Distortion model from row',str(row+1),'for chip',detchip,':',filtstr
+
+ # Read in V2REF and V3REF: this can either come from current table,
+ # or from an OFFTAB if time-dependent (i.e., for WFPC2)
+ theta = None
+ if 'V2REF' in colnames:
+ v2ref = ftab[1].data.field('V2REF')[row]
+ v3ref = ftab[1].data.field('V3REF')[row]
+ else:
+ # Read V2REF/V3REF from offset table (OFFTAB)
+ if offtab:
+ v2ref,v3ref,theta = readOfftab(offtab, date, chip=detchip)
+ else:
+ v2ref = 0.0
+ v3ref = 0.0
+
+ if theta == None:
+ if 'THETA' in colnames:
+ theta = ftab[1].data.field('THETA')[row]
+ else:
+ theta = 0.0
+
+ refpix = {}
+ refpix['XREF'] = ftab[1].data.field('XREF')[row]
+ refpix['YREF'] = ftab[1].data.field('YREF')[row]
+ refpix['XSIZE'] = ftab[1].data.field('XSIZE')[row]
+ refpix['YSIZE'] = ftab[1].data.field('YSIZE')[row]
+ refpix['PSCALE'] = round(ftab[1].data.field('SCALE')[row],8)
+ refpix['V2REF'] = v2ref
+ refpix['V3REF'] = v3ref
+ refpix['THETA'] = theta
+ refpix['XDELTA'] = 0.0
+ refpix['YDELTA'] = 0.0
+ refpix['DEFAULT_SCALE'] = yes
+ refpix['centered'] = no
+ refpix['skew_coeffs'] = skew_coeffs
+
+ # Now that we know which row to look at, read coefficients into the
+ # numeric arrays we have set up...
+ # Setup which column name convention the IDCTAB follows
+ # either: A,B or CX,CY
+ if 'CX10' in ftab[1].data._names:
+ cxstr = 'CX'
+ cystr = 'CY'
+ else:
+ cxstr = 'A'
+ cystr = 'B'
+
+ for i in xrange(norder+1):
+ if i > 0:
+ for j in xrange(i+1):
+ xcname = cxstr+str(i)+str(j)
+ ycname = cystr+str(i)+str(j)
+ fx[i,j] = ftab[1].data.field(xcname)[row]
+ fy[i,j] = ftab[1].data.field(ycname)[row]
+
+ ftab.close()
+ del ftab
+
+ # If CX11 is 1.0 and not equal to the PSCALE, then the
+ # coeffs need to be scaled
+
+ if fx[1,1] == 1.0 and abs(fx[1,1]) != refpix['PSCALE']:
+ fx *= refpix['PSCALE']
+ fy *= refpix['PSCALE']
+
+ # Return arrays and polynomial order read in from table.
+    # NOTE: XREF and YREF are stored in the refpix dictionary.
+ return fx,fy,refpix,order
+#
+#
+# Time-dependent skew correction coefficients (only ACS/WFC)
+#
+#
+def read_tdd_coeffs(phdr):
+ ''' Read in the TDD related keywords from the PRIMARY header of the IDCTAB
+ '''
+ skew_coeffs = {}
+ skew_coeffs['TDDORDER'] = 0
+ skew_coeffs['TDD_DATE'] = ""
+ skew_coeffs['TDD_A'] = None
+ skew_coeffs['TDD_B'] = None
+
+ if phdr.has_key("TDDORDER"):
+ n = int(phdr["TDDORDER"])
+ else:
+ print 'TDDORDER kw not present, using default TDD correction'
+ return None
+
+ a = np.zeros((n+1,), np.float64)
+ b = np.zeros((n+1,), np.float64)
+ for i in range(n+1):
+ a[i] = phdr.get(("TDD_A%d" % i), 0.0)
+ b[i] = phdr.get(("TDD_B%d" % i), 0.0)
+ if (a==0).all() and (b==0).all():
+        print 'Warning: TDD_A and TDD_B coefficients have values of 0, \n \
+        but TDDORDER is %d.' % n
+
+ skew_coeffs['TDDORDER'] = n
+ skew_coeffs['TDD_DATE'] = phdr['TDD_DATE']
+ skew_coeffs['TDD_A'] = a
+ skew_coeffs['TDD_B'] = b
+
+ return skew_coeffs
+
+def readOfftab(offtab, date, chip=None):
+    """
+    Read V2REF, V3REF from a specified offset table (OFFTAB),
+    interpolating between observation dates when necessary.
+    If no OFFTAB filename is given, return (0., 0.) so that
+    no offset is applied.
+    """
+
+ if offtab == None:
+ return 0.,0.
+
+ # Provide a default value for chip
+ if chip:
+ detchip = chip
+ else:
+ detchip = 1
+
+ # Open up IDC table now...
+ try:
+ ftab = fileutil.openImage(offtab)
+ except:
+ raise IOError,"Offset table '%s' not valid as specified!" % offtab
+
+ #Determine row from which to get the coefficients.
+ # How many rows do we have in the table...
+ fshape = ftab[1].data.shape
+ colnames = ftab[1].data._names
+ row = -1
+
+ row_start = None
+ row_end = None
+
+ v2end = None
+ v3end = None
+ date_end = None
+ theta_end = None
+
+ num_date = convertDate(date)
+ # Loop over all the rows looking for the one which corresponds
+ # to the value of CCDCHIP we are working on...
+ for ri in xrange(fshape[0]):
+ i = fshape[0] - ri - 1
+ if 'DETCHIP' in colnames:
+ detchip = ftab[1].data.field('DETCHIP')[i]
+ else:
+ detchip = 1
+
+ obsdate = convertDate(ftab[1].data.field('OBSDATE')[i])
+
+ # If the row is appropriate for the chip...
+ # Interpolate between dates
+ if int(detchip) == int(chip) or int(detchip) == -999:
+ if num_date <= obsdate:
+ date_end = obsdate
+ v2end = ftab[1].data.field('V2REF')[i]
+ v3end = ftab[1].data.field('V3REF')[i]
+ theta_end = ftab[1].data.field('THETA')[i]
+ row_end = i
+ continue
+
+ if row_end == None and (num_date > obsdate):
+ date_end = obsdate
+ v2end = ftab[1].data.field('V2REF')[i]
+ v3end = ftab[1].data.field('V3REF')[i]
+ theta_end = ftab[1].data.field('THETA')[i]
+ row_end = i
+ continue
+
+ if num_date > obsdate:
+ date_start = obsdate
+ v2start = ftab[1].data.field('V2REF')[i]
+ v3start = ftab[1].data.field('V3REF')[i]
+ theta_start = ftab[1].data.field('THETA')[i]
+ row_start = i
+ break
+
+ ftab.close()
+ del ftab
+
+ if row_start == None and row_end == None:
+ print 'Row corresponding to DETCHIP of ',detchip,' was not found!'
+ raise LookupError
+ elif row_start == None:
+ print '- OFFTAB: Offset defined by row',str(row_end+1)
+ else:
+ print '- OFFTAB: Offset interpolated from rows',str(row_start+1),'and',str(row_end+1)
+
+ # Now, do the interpolation for v2ref, v3ref, and theta
+ if row_start == None or row_end == row_start:
+ # We are processing an observation taken after the last calibration
+ date_start = date_end
+ v2start = v2end
+ v3start = v3end
+ _fraction = 0.
+ theta_start = theta_end
+ else:
+ _fraction = float((num_date - date_start)) / float((date_end - date_start))
+
+ v2ref = _fraction * (v2end - v2start) + v2start
+ v3ref = _fraction * (v3end - v3start) + v3start
+ theta = _fraction * (theta_end - theta_start) + theta_start
+
+ return v2ref,v3ref,theta
+
+def readWCSCoeffs(header):
+    """
+    Read distortion coeffs from WCS header keywords and
+    populate distortion coeffs arrays.
+    """
+
+ # Read in order for polynomials
+ _xorder = header['a_order']
+ _yorder = header['b_order']
+ order = max(max(_xorder,_yorder),3)
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ # Read in CD matrix
+ _cd11 = header['cd1_1']
+ _cd12 = header['cd1_2']
+ _cd21 = header['cd2_1']
+ _cd22 = header['cd2_2']
+ _cdmat = np.array([[_cd11,_cd12],[_cd21,_cd22]])
+ _theta = np.arctan2(-_cd12,_cd22)
+ _rotmat = np.array([[np.cos(_theta),np.sin(_theta)],
+ [-np.sin(_theta),np.cos(_theta)]])
+ _rCD = np.dot(_rotmat,_cdmat)
+ _skew = np.arcsin(-_rCD[1][0] / _rCD[0][0])
+ _scale = _rCD[0][0] * np.cos(_skew) * 3600.
+ _scale2 = _rCD[1][1] * 3600.
+
+ # Set up refpix
+ refpix = {}
+ refpix['XREF'] = header['crpix1']
+ refpix['YREF'] = header['crpix2']
+ refpix['XSIZE'] = header['naxis1']
+ refpix['YSIZE'] = header['naxis2']
+ refpix['PSCALE'] = _scale
+ refpix['V2REF'] = 0.
+ refpix['V3REF'] = 0.
+    refpix['THETA'] = fileutil.RADTODEG(_theta)
+ refpix['XDELTA'] = 0.0
+ refpix['YDELTA'] = 0.0
+ refpix['DEFAULT_SCALE'] = yes
+ refpix['centered'] = yes
+
+
+ # Set up template for coeffs keyword names
+ cxstr = 'A_'
+ cystr = 'B_'
+ # Read coeffs into their own matrix
+ for i in xrange(_xorder+1):
+ for j in xrange(i+1):
+ xcname = cxstr+str(j)+'_'+str(i-j)
+ if header.has_key(xcname):
+ fx[i,j] = header[xcname]
+
+ # Extract Y coeffs separately as a different order may
+ # have been used to fit it.
+ for i in xrange(_yorder+1):
+ for j in xrange(i+1):
+ ycname = cystr+str(j)+'_'+str(i-j)
+ if header.has_key(ycname):
+ fy[i,j] = header[ycname]
+
+ # Now set the linear terms
+ fx[0][0] = 1.0
+ fy[0][0] = 1.0
+
+ return fx,fy,refpix,order
+
+
+def readTraugerTable(idcfile,wavelength):
+
+ # Return a default geometry model if no coefficients filename
+ # is given. This model will not distort the data in any way.
+ if idcfile == None:
+ return fileutil.defaultModel()
+
+ # Trauger coefficients only result in a cubic file...
+ order = 3
+ numco = 10
+ a_coeffs = [0] * numco
+ b_coeffs = [0] * numco
+ indx = _MgF2(wavelength)
+
+ ifile = open(idcfile,'r')
+ # Search for the first line of the coefficients
+ _line = fileutil.rAsciiLine(ifile)
+ while string.lower(_line[:7]) != 'trauger':
+ _line = fileutil.rAsciiLine(ifile)
+ # Read in each row of coefficients,split them into their values,
+ # and convert them into cubic coefficients based on
+ # index of refraction value for the given wavelength
+ # Build X coefficients from first 10 rows of Trauger coefficients
+ j = 0
+ while j < 20:
+ _line = fileutil.rAsciiLine(ifile)
+ if _line == '': continue
+ _lc = string.split(_line)
+ if j < 10:
+ a_coeffs[j] = float(_lc[0])+float(_lc[1])*(indx-1.5)+float(_lc[2])*(indx-1.5)**2
+ else:
+ b_coeffs[j-10] = float(_lc[0])+float(_lc[1])*(indx-1.5)+float(_lc[2])*(indx-1.5)**2
+ j = j + 1
+
+ ifile.close()
+ del ifile
+
+ # Now, convert the coefficients into a Numeric array
+ # with the right coefficients in the right place.
+ # Populate output values now...
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ # Assign the coefficients to their array positions
+ fx[0,0] = 0.
+ fx[1] = np.array([a_coeffs[2],a_coeffs[1],0.,0.],dtype=np.float64)
+ fx[2] = np.array([a_coeffs[5],a_coeffs[4],a_coeffs[3],0.],dtype=np.float64)
+ fx[3] = np.array([a_coeffs[9],a_coeffs[8],a_coeffs[7],a_coeffs[6]],dtype=np.float64)
+ fy[0,0] = 0.
+ fy[1] = np.array([b_coeffs[2],b_coeffs[1],0.,0.],dtype=np.float64)
+ fy[2] = np.array([b_coeffs[5],b_coeffs[4],b_coeffs[3],0.],dtype=np.float64)
+ fy[3] = np.array([b_coeffs[9],b_coeffs[8],b_coeffs[7],b_coeffs[6]],dtype=np.float64)
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = None
+ refpix['V3REF'] = None
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['centered'] = yes
+
+ return fx,fy,refpix,order
+
+
+def readCubicTable(idcfile):
+ # Assumption: this will only be used for cubic file...
+ order = 3
+ # Also, this function does NOT perform any scaling on
+ # the coefficients, it simply passes along what is found
+ # in the file as is...
+
+ # Return a default geometry model if no coefficients filename
+ # is given. This model will not distort the data in any way.
+ if idcfile == None:
+ return fileutil.defaultModel()
+
+ ifile = open(idcfile,'r')
+ # Search for the first line of the coefficients
+ _line = fileutil.rAsciiLine(ifile)
+
+ _found = no
+ while _found == no:
+ if _line[:7] in ['cubic','quartic','quintic'] or _line[:4] == 'poly':
+            _found = yes
+ break
+ _line = fileutil.rAsciiLine(ifile)
+
+ # Read in each row of coefficients, without line breaks or newlines
+ # split them into their values, and create a list for A coefficients
+ # and another list for the B coefficients
+ _line = fileutil.rAsciiLine(ifile)
+ a_coeffs = string.split(_line)
+
+ x0 = float(a_coeffs[0])
+ _line = fileutil.rAsciiLine(ifile)
+ a_coeffs[len(a_coeffs):] = string.split(_line)
+ # Scale coefficients for use within PyDrizzle
+ for i in range(len(a_coeffs)):
+ a_coeffs[i] = float(a_coeffs[i])
+
+ _line = fileutil.rAsciiLine(ifile)
+ b_coeffs = string.split(_line)
+ y0 = float(b_coeffs[0])
+ _line = fileutil.rAsciiLine(ifile)
+ b_coeffs[len(b_coeffs):] = string.split(_line)
+ # Scale coefficients for use within PyDrizzle
+ for i in range(len(b_coeffs)):
+ b_coeffs[i] = float(b_coeffs[i])
+
+ ifile.close()
+ del ifile
+ # Now, convert the coefficients into a Numeric array
+ # with the right coefficients in the right place.
+ # Populate output values now...
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ # Assign the coefficients to their array positions
+ fx[0,0] = 0.
+ fx[1] = np.array([a_coeffs[2],a_coeffs[1],0.,0.],dtype=np.float64)
+ fx[2] = np.array([a_coeffs[5],a_coeffs[4],a_coeffs[3],0.],dtype=np.float64)
+ fx[3] = np.array([a_coeffs[9],a_coeffs[8],a_coeffs[7],a_coeffs[6]],dtype=np.float64)
+ fy[0,0] = 0.
+ fy[1] = np.array([b_coeffs[2],b_coeffs[1],0.,0.],dtype=np.float64)
+ fy[2] = np.array([b_coeffs[5],b_coeffs[4],b_coeffs[3],0.],dtype=np.float64)
+ fy[3] = np.array([b_coeffs[9],b_coeffs[8],b_coeffs[7],b_coeffs[6]],dtype=np.float64)
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = x0
+ refpix['V3REF'] = y0
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['centered'] = yes
+
+ return fx,fy,refpix,order
+
+def factorial(n):
+ """ Compute a factorial for integer n. """
+ m = 1
+ for i in range(int(n)):
+ m = m * (i+1)
+ return m
+
+def combin(j,n):
+ """ Return the combinatorial factor for j in n."""
+ return (factorial(j) / (factorial(n) * factorial( (j-n) ) ) )
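+
+# For reference: combin(5, 2) == 10, i.e. the binomial coefficient C(5, 2).
+# (With the true division import above, the result is returned as a float.)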
+
+
+def defaultModel():
+ """ This function returns a default, non-distorting model
+ that can be used with the data.
+ """
+ order = 3
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ fx[1,1] = 1.
+ fy[1,0] = 1.
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['empty_model'] = yes
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = 0.
+ refpix['XSIZE'] = 0.
+ refpix['YSIZE'] = 0.
+ refpix['V3REF'] = 0.
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['THETA'] = 0.
+ refpix['centered'] = yes
+ return fx,fy,refpix,order
+
+# Function to compute the index of refraction for MgF2 at
+# the specified wavelength for use with Trauger coefficients
+def _MgF2(lam):
+ _sig = pow((1.0e7/lam),2)
+ return np.sqrt(1.0 + 2.590355e10/(5.312993e10-_sig) +
+ 4.4543708e9/(11.17083e9-_sig) + 4.0838897e5/(1.766361e5-_sig))
+
+
+def convertDate(date):
+ """ Converts the DATE-OBS date string into an integer of the
+ number of seconds since 1970.0 using calendar.timegm().
+
+ INPUT: DATE-OBS in format of 'YYYY-MM-DD'.
+ OUTPUT: Date (integer) in seconds.
+ """
+
+    _dates = date.split('-')
+    _date_tuple = (int(_dates[0]), int(_dates[1]), int(_dates[2]), 0, 0, 0, 0, 0, 0)
+
+ return calendar.timegm(_date_tuple)
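+
+# For reference: convertDate('1997-01-01') returns 852076800, the UTC
+# timestamp (in seconds since 1970-01-01) of that date.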
diff --git a/lib/stwcs/distortion/utils.py b/lib/stwcs/distortion/utils.py
new file mode 100644
index 0000000..7f937f7
--- /dev/null
+++ b/lib/stwcs/distortion/utils.py
@@ -0,0 +1,204 @@
+from __future__ import division # confidence high
+import os
+import numpy as np
+import pywcs
+import pyfits
+from stwcs import wcsutil
+from numpy import sqrt, arctan2
+from stsci.tools import fileutil
+
+def output_wcs(list_of_wcsobj, ref_wcs=None, owcs=None, undistort=True):
+ """
+ Create an output WCS.
+
+ Parameters
+ ----------
+ list_of_wcsobj: Python list
+ a list of HSTWCS objects
+    ref_wcs: an HSTWCS object
+        to be used as a reference WCS, in case owcs is None.
+        if ref_wcs is None (default), the first member of the list
+        is used as a reference
+    owcs: an HSTWCS object
+        the tangent plane defined by this object is used as a reference
+    undistort: boolean (default: True)
+        a flag indicating whether to create an undistorted output WCS
+ """
+ fra_dec = np.vstack([w.calcFootprint() for w in list_of_wcsobj])
+
+ # This new algorithm may not be strictly necessary, but it may be more
+ # robust in handling regions near the poles or at 0h RA.
+ crval1,crval2 = computeFootprintCenter(fra_dec)
+
+ crval = np.array([crval1,crval2], dtype=np.float64) # this value is now zero-based
+ if owcs is None:
+        if ref_wcs is None:
+ ref_wcs = list_of_wcsobj[0].deepcopy()
+ if undistort:
+ outwcs = undistortWCS(ref_wcs)
+ else:
+ outwcs = ref_wcs.deepcopy()
+ outwcs.wcs.crval = crval
+ outwcs.wcs.set()
+ outwcs.pscale = sqrt(outwcs.wcs.cd[0,0]**2 + outwcs.wcs.cd[1,0]**2)*3600.
+ outwcs.orientat = arctan2(outwcs.wcs.cd[0,1],outwcs.wcs.cd[1,1]) * 180./np.pi
+ else:
+ outwcs = owcs.deepcopy()
+ outwcs.pscale = sqrt(outwcs.wcs.cd[0,0]**2 + outwcs.wcs.cd[1,0]**2)*3600.
+ outwcs.orientat = arctan2(outwcs.wcs.cd[0,1],outwcs.wcs.cd[1,1]) * 180./np.pi
+
+ tanpix = outwcs.wcs.s2p(fra_dec, 0)['pixcrd']
+
+ outwcs.naxis1 = int(np.ceil(tanpix[:,0].max() - tanpix[:,0].min()))
+ outwcs.naxis2 = int(np.ceil(tanpix[:,1].max() - tanpix[:,1].min()))
+ crpix = np.array([outwcs.naxis1/2., outwcs.naxis2/2.], dtype=np.float64)
+ outwcs.wcs.crpix = crpix
+ outwcs.wcs.set()
+ tanpix = outwcs.wcs.s2p(fra_dec, 0)['pixcrd']
+
+ # shift crpix to take into account (floating-point value of) position of
+ # corner pixel relative to output frame size: no rounding necessary...
+ newcrpix = np.array([crpix[0]+tanpix[:,0].min(), crpix[1]+
+ tanpix[:,1].min()])
+
+ newcrval = outwcs.wcs.p2s([newcrpix], 1)['world'][0]
+ outwcs.wcs.crval = newcrval
+ outwcs.wcs.set()
+ return outwcs
+
+def computeFootprintCenter(edges):
+ """ Geographic midpoint in spherical coords for points defined by footprints.
+ Algorithm derived from: http://www.geomidpoint.com/calculation.html
+
+ This algorithm should be more robust against discontinuities at the poles.
+ """
+ alpha = fileutil.DEGTORAD(edges[:,0])
+ dec = fileutil.DEGTORAD(edges[:,1])
+
+ xmean = np.mean(np.cos(dec)*np.cos(alpha))
+ ymean = np.mean(np.cos(dec)*np.sin(alpha))
+ zmean = np.mean(np.sin(dec))
+
+ crval1 = fileutil.RADTODEG(np.arctan2(ymean,xmean))%360.0
+ crval2 = fileutil.RADTODEG(np.arctan2(zmean,np.sqrt(xmean*xmean+ymean*ymean)))
+
+ return crval1,crval2
+
+def undistortWCS(wcsobj):
+ """
+ Creates an undistorted linear WCS by applying the IDCTAB distortion model
+ to a 3-point square. The new ORIENTAT angle is calculated as well as the
+ plate scale in the undistorted frame.
+ """
+ assert isinstance(wcsobj, pywcs.WCS)
+ import coeff_converter
+
+ cx, cy = coeff_converter.sip2idc(wcsobj)
+ # cx, cy can be None because either there is no model available
+ # or updatewcs was not run.
+    if cx is None or cy is None:
+ if foundIDCTAB(wcsobj.idctab):
+ m = """IDCTAB is present but distortion model is missing.
+ Run updatewcs() to update the headers or
+ pass 'undistort=False' keyword to output_wcs().\n
+ """
+ raise RuntimeError, m
+ else:
+ print 'Distortion model is not available, using input reference image for output WCS.\n'
+ return wcsobj.copy()
+ crpix1 = wcsobj.wcs.crpix[0]
+ crpix2 = wcsobj.wcs.crpix[1]
+ xy = np.array([(crpix1,crpix2),(crpix1+1.,crpix2),(crpix1,crpix2+1.)],dtype=np.double)
+ offsets = np.array([wcsobj.ltv1, wcsobj.ltv2])
+ px = xy + offsets
+ #order = wcsobj.sip.a_order
+ pscale = wcsobj.idcscale
+ #pixref = np.array([wcsobj.sip.SIPREF1, wcsobj.sip.SIPREF2])
+
+ tan_pix = apply_idc(px, cx, cy, wcsobj.wcs.crpix, pscale, order=1)
+ xc = tan_pix[:,0]
+ yc = tan_pix[:,1]
+ am = xc[1] - xc[0]
+ bm = xc[2] - xc[0]
+ cm = yc[1] - yc[0]
+ dm = yc[2] - yc[0]
+ cd_mat = np.array([[am,bm],[cm,dm]],dtype=np.double)
+
+ # Check the determinant for singularity
+ _det = (am * dm) - (bm * cm)
+ if ( _det == 0.0):
+ print 'Singular matrix in updateWCS, aborting ...'
+ return
+
+ lin_wcsobj = pywcs.WCS()
+ cd_inv = np.linalg.inv(cd_mat)
+ cd = np.dot(wcsobj.wcs.cd, cd_inv).astype(np.float64)
+ lin_wcsobj.wcs.cd = cd
+ lin_wcsobj.wcs.set()
+ lin_wcsobj.orientat = arctan2(lin_wcsobj.wcs.cd[0,1],lin_wcsobj.wcs.cd[1,1]) * 180./np.pi
+ lin_wcsobj.pscale = sqrt(lin_wcsobj.wcs.cd[0,0]**2 + lin_wcsobj.wcs.cd[1,0]**2)*3600.
+ lin_wcsobj.wcs.crval = np.array([0.,0.])
+ lin_wcsobj.wcs.crpix = np.array([0.,0.])
+ lin_wcsobj.wcs.ctype = ['RA---TAN', 'DEC--TAN']
+ lin_wcsobj.wcs.set()
+ return lin_wcsobj
+
+def apply_idc(pixpos, cx, cy, pixref, pscale= None, order=None):
+ """
+ Apply the IDCTAB polynomial distortion model to pixel positions.
+ pixpos must be already corrected for ltv1/2.
+
+ Parameters
+ ----------
+ pixpos: a 2D numpy array of (x,y) pixel positions to be distortion corrected
+ cx, cy: IDC model distortion coefficients
+    pixref: reference pixel position
+
+ """
+    if cx is None:
+ return pixpos
+
+ if order is None:
+ print 'Unknown order of distortion model \n'
+ return pixpos
+ if pscale is None:
+ print 'Unknown model plate scale\n'
+ return pixpos
+
+ # Apply in the same way that 'drizzle' would...
+ _cx = cx/pscale
+ _cy = cy/ pscale
+ _p = pixpos
+
+ # Do NOT include any zero-point terms in CX,CY here
+ # as they should not be scaled by plate-scale like rest
+ # of coeffs... This makes the computations consistent
+ # with 'drizzle'. WJH 17-Feb-2004
+ _cx[0,0] = 0.
+ _cy[0,0] = 0.
+
+ dxy = _p - pixref
+ # Apply coefficients from distortion model here...
+
+ c = _p * 0.
+ for i in range(order+1):
+ for j in range(i+1):
+ c[:,0] = c[:,0] + _cx[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ c[:,1] = c[:,1] + _cy[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+
+ return c
+
+def foundIDCTAB(idctab):
+ idctab_found = True
+ try:
+ idctab = fileutil.osfn(idctab)
+ if idctab == 'N/A' or idctab == "":
+ idctab_found = False
+ if os.path.exists(idctab):
+ idctab_found = True
+ else:
+ idctab_found = False
+ except KeyError:
+ idctab_found = False
+ return idctab_found
+
diff --git a/lib/stwcs/updatewcs/__init__.py b/lib/stwcs/updatewcs/__init__.py
new file mode 100644
index 0000000..53e1c21
--- /dev/null
+++ b/lib/stwcs/updatewcs/__init__.py
@@ -0,0 +1,396 @@
+from __future__ import division # confidence high
+
+import os
+import pyfits
+import numpy as np
+from stwcs import wcsutil
+from stwcs.wcsutil import HSTWCS
+from stwcs import __version__ as stwcsversion
+import pywcs
+
+import utils, corrections, makewcs
+import npol, det2im
+from stsci.tools import parseinput, fileutil
+import apply_corrections
+
+import time
+import logging
+logger = logging.getLogger('stwcs.updatewcs')
+
+import atexit
+atexit.register(logging.shutdown)
+
+#Note: The order of corrections is important
+
+__docformat__ = 'restructuredtext'
+
+def updatewcs(input, vacorr=True, tddcorr=True, npolcorr=True, d2imcorr=True,
+ checkfiles=True, wcskey=" ", wcsname=" ", clobber=False, verbose=False):
+ """
+
+ Updates HST science files with the best available calibration information.
+ This allows users to retrieve from the archive self contained science files
+ which do not require additional reference files.
+
+ Basic WCS keywords are updated in the process and new keywords (following WCS
+ Paper IV and the SIP convention) as well as new extensions are added to the science files.
+
+
+ Example
+ -------
+    >>> from stwcs import updatewcs
+    >>> updatewcs.updatewcs(filename)
+
+ Dependencies
+ ------------
+ `stsci.tools`
+ `pyfits`
+ `pywcs`
+
+ Parameters
+ ----------
+ input: a python list of file names or a string (wild card characters allowed)
+ input files may be in fits, geis or waiver fits format
+ vacorr: boolean
+        If True, velocity aberration correction will be applied
+ tddcorr: boolean
+ If True, time dependent distortion correction will be applied
+ npolcorr: boolean
+ If True, a Lookup table distortion will be applied
+ d2imcorr: boolean
+ If True, detector to image correction will be applied
+ checkfiles: boolean
+ If True, the format of the input files will be checked,
+ geis and waiver fits files will be converted to MEF format.
+ Default value is True for standalone mode.
+ wcskey: None, one character string A-Z or an empty string of length 1
+ If None - the primary WCS is not archived
+ If an empty string - the next available wcskey is used for the archive
+ A-Z - use this key to archive the WCS
+ wcsname: a string
+ The name under which the primary WCS is archived after it is updated.
+ If an empty string (default), the name of the idctable is used as
+ a base.
+ clobber: boolean
+ a flag for reusing the wcskey when archiving the primary WCS
+ """
+ if verbose == False:
+ logger.setLevel(100)
+ else:
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ log_filename = 'stwcs.log'
+ fh = logging.FileHandler(log_filename, mode='w')
+ fh.setLevel(logging.DEBUG)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.setLevel(verbose)
+ args = "vacorr=%s, tddcorr=%s, npolcorr=%s, d2imcorr=%s, checkfiles=%s, \
+ wcskey=%s, wcsname=%s, clobber=%s" % (str(vacorr), str(tddcorr), str(npolcorr),
+ str(d2imcorr), str(checkfiles), str(wcskey),
+ str(wcsname), str(clobber))
+ logger.info('\n\tStarting UPDATEWCS: %s', time.asctime())
+
+ files = parseinput.parseinput(input)[0]
+ logger.info("\n\tInput files: %s, " % [i for i in files])
+ logger.info("\n\tInput arguments: %s" %args)
+ if checkfiles:
+ files = checkFiles(files)
+ if not files:
+ print 'No valid input, quitting ...\n'
+ return
+
+ for f in files:
+ acorr = apply_corrections.setCorrections(f, vacorr=vacorr, \
+ tddcorr=tddcorr,npolcorr=npolcorr, d2imcorr=d2imcorr)
+ if 'MakeWCS' in acorr and newIDCTAB(f):
+ logger.warning("\n\tNew IDCTAB file detected. All current WCSs will be deleted")
+ cleanWCS(f)
+
+        # apply all applicable corrections to this file
+        makecorr(f, acorr, wkey=wcskey, wname=wcsname, clobber=clobber)
+ return files
+
+def makecorr(fname, allowed_corr, wkey=" ", wname=" ", clobber=False):
+ """
+ Purpose
+ =======
+ Applies corrections to the WCS of a single file
+
+ :Parameters:
+ `fname`: string
+ file name
+        `allowed_corr`: list
+ list of corrections to be applied
+ `wkey`: None, one character string A-Z or an empty string of length 1
+ If None - the primary WCS is not archived
+ If an empty string - the next available wcskey is used for the archive
+ A-Z - use this key to archive the WCS
+ `wname`: a string
+ The name under which the primary WCS is archived after it is updated.
+ If an empty string (default), the name of the idctable is used as
+ a base.
+ `clobber`: boolean
+ a flag for reusing the wcskey when archiving the primary WCS
+ """
+ f = pyfits.open(fname, mode='update')
+ #restore the original WCS keywords
+ #wcsutil.restoreWCS(f, ext=[], wcskey='O', clobber=True)
+ #Determine the reference chip and create the reference HSTWCS object
+ nrefchip, nrefext = getNrefchip(f)
+ wcsutil.restoreWCS(f, nrefext, wcskey='O', clobber=True)
+ rwcs = HSTWCS(fobj=f, ext=nrefext)
+ rwcs.readModel(update=True,header=f[nrefext].header)
+
+ wcsutil.archiveWCS(f, nrefext, 'O', wcsname='OPUS', clobber=True)
+
+ if 'DET2IMCorr' in allowed_corr:
+ det2im.DET2IMCorr.updateWCS(f)
+
+ # get a wcskey and wcsname from the first extension header
+ idcname = fileutil.osfn(rwcs.idctab)
+ key, name = getKeyName(f[1].header, wkey, wname, idcname)
+
+ for i in range(len(f))[1:]:
+ extn = f[i]
+
+ if extn.header.has_key('extname'):
+ extname = extn.header['extname'].lower()
+ if extname == 'sci':
+ wcsutil.restoreWCS(f, ext=i, wcskey='O', clobber=True)
+ sciextver = extn.header['extver']
+ ref_wcs = rwcs.deepcopy()
+ hdr = extn.header
+ ext_wcs = HSTWCS(fobj=f, ext=i)
+ wcsutil.archiveWCS(f, ext=i, wcskey="O", wcsname="OPUS", clobber=True)
+ ext_wcs.readModel(update=True,header=hdr)
+ for c in allowed_corr:
+ if c != 'NPOLCorr' and c != 'DET2IMCorr':
+ corr_klass = corrections.__getattribute__(c)
+ kw2update = corr_klass.updateWCS(ext_wcs, ref_wcs)
+ for kw in kw2update:
+ hdr.update(kw, kw2update[kw])
+ #if wkey is None, do not archive the primary WCS
+ if key is not None:
+ wcsutil.archiveWCS(f, ext=i, wcskey=key, wcsname=name, clobber=True)
+ elif extname in ['err', 'dq', 'sdq', 'samp', 'time']:
+ cextver = extn.header['extver']
+ if cextver == sciextver:
+ hdr = f[('SCI',sciextver)].header
+ w = pywcs.WCS(hdr, f)
+ copyWCS(w, extn.header, key, name)
+ else:
+ continue
+
+ if 'NPOLCorr' in allowed_corr:
+ kw2update = npol.NPOLCorr.updateWCS(f)
+ for kw in kw2update:
+ f[1].header.update(kw, kw2update[kw])
+ # Finally record the version of the software which updated the WCS
+ if f[0].header.has_key('HISTORY'):
+ f[0].header.update(key='UPWCSVER', value=stwcsversion,
+ comment="Version of STWCS used to updated the WCS", before='HISTORY')
+ elif f[0].header.has_key('ASN_MTYP'):
+ f[0].header.update(key='UPWCSVER', value=stwcsversion,
+ comment="Version of STWCS used to updated the WCS", after='ASN_MTYP')
+ else:
+ # Find index of last non-blank card, and insert this new keyword after that card
+ for i in range(len(f[0].header.ascard)-1,0,-1):
+ if f[0].header[i].strip() != '': break
+
+ f[0].header.update(key='UPWCSVER', value=stwcsversion,
+ comment="Version of STWCS used to updated the WCS",after=i)
+ f.close()
+
+def getKeyName(hdr, wkey, wname, idcname):
+ if wkey is not None: # archive the primary WCS
+ if wkey == " ":
+ if wname == " " :
+ # get the next available key and use the IDCTABLE name as WCSNAME
+ idcname = os.path.split(idcname)[1]
+ name = ''.join(['IDC_',idcname.split('_idc.fits')[0]])
+ key = wcsutil.getKeyFromName(hdr, name)
+ if not key:
+ key = wcsutil.next_wcskey(hdr)
+ else:
+ #try to get a key from WCSNAME
+            # if not - get the next available key
+ name = wname
+ key = wcsutil.getKeyFromName(hdr, wname)
+ if not key:
+ key = wcsutil.next_wcskey(hdr)
+ else:
+ key = wkey
+ name = wname
+ return key, name
+
+def copyWCS(w, hdr, wkey, wname):
+ """
+ This is a convenience function to copy a WCS object
+ to a header as a primary WCS. It is used only to copy the
+ WCS of the 'SCI' extension to the headers of 'ERR', 'DQ', 'SDQ',
+ 'TIME' or 'SAMP' extensions.
+ """
+ hwcs = w.to_header()
+
+ if w.wcs.has_cd():
+ wcsutil.pc2cd(hwcs)
+ for k in hwcs.keys():
+ key = k[:7] + wkey
+ hdr.update(key=key, value=hwcs[k])
+ norient = np.rad2deg(np.arctan2(hwcs['CD1_2'],hwcs['CD2_2']))
+ okey = 'ORIENT%s' % wkey
+ hdr.update(key=okey, value=norient)
+
+def getNrefchip(fobj):
+ """
+
+ Finds which FITS extension holds the reference chip.
+
+ The reference chip has EXTNAME='SCI', can be in any extension and
+    is instrument specific. This function provides mappings between
+ sci extensions, chips and fits extensions.
+ In the case of a subarray when the reference chip is missing, the
+ first 'SCI' extension is the reference chip.
+
+ Parameters
+ ----------
+ fobj: pyfits HDUList object
+ """
+ nrefext = 1
+ nrefchip = 1
+ instrument = fobj[0].header['INSTRUME']
+
+ if instrument == 'WFPC2':
+ chipkw = 'DETECTOR'
+ extvers = [("SCI",img.header['EXTVER']) for img in
+ fobj[1:] if img.header['EXTNAME'].lower()=='sci']
+ detectors = [img.header[chipkw] for img in fobj[1:] if
+ img.header['EXTNAME'].lower()=='sci']
+ fitsext = [i for i in range(len(fobj))[1:] if
+ fobj[i].header['EXTNAME'].lower()=='sci']
+ det2ext=dict(map(None, detectors,extvers))
+ ext2det=dict(map(None, extvers, detectors))
+ ext2fitsext=dict(map(None, extvers, fitsext))
+
+ if 3 not in detectors:
+ nrefchip = ext2det.pop(extvers[0])
+ nrefext = ext2fitsext.pop(extvers[0])
+ else:
+ nrefchip = 3
+ extname = det2ext.pop(nrefchip)
+ nrefext = ext2fitsext.pop(extname)
+
+ elif (instrument == 'ACS' and fobj[0].header['DETECTOR'] == 'WFC') or \
+ (instrument == 'WFC3' and fobj[0].header['DETECTOR'] == 'UVIS'):
+ chipkw = 'CCDCHIP'
+ extvers = [("SCI",img.header['EXTVER']) for img in
+ fobj[1:] if img.header['EXTNAME'].lower()=='sci']
+ detectors = [img.header[chipkw] for img in fobj[1:] if
+ img.header['EXTNAME'].lower()=='sci']
+ fitsext = [i for i in range(len(fobj))[1:] if
+ fobj[i].header['EXTNAME'].lower()=='sci']
+ det2ext=dict(map(None, detectors,extvers))
+ ext2det=dict(map(None, extvers, detectors))
+ ext2fitsext=dict(map(None, extvers, fitsext))
+
+ if 2 not in detectors:
+ nrefchip = ext2det.pop(extvers[0])
+ nrefext = ext2fitsext.pop(extvers[0])
+ else:
+ nrefchip = 2
+ extname = det2ext.pop(nrefchip)
+ nrefext = ext2fitsext.pop(extname)
+ else:
+ for i in range(len(fobj)):
+ extname = fobj[i].header.get('EXTNAME', None)
+            if extname is not None and extname.lower() == 'sci':
+ nrefext = i
+ break
+
+ return nrefchip, nrefext
+
+def checkFiles(input):
+ """
+ Checks that input files are in the correct format.
+ Converts geis and waiver fits files to multiextension fits.
+ """
+ from stsci.tools.check_files import geis2mef, waiver2mef, checkFiles
+ logger.info("\n\tChecking files %s" % input)
+ removed_files = []
+ newfiles = []
+ if not isinstance(input, list):
+ input=[input]
+
+ for file in input:
+ try:
+ imgfits,imgtype = fileutil.isFits(file)
+ except IOError:
+ logger.warning( "\n\tFile %s could not be found, removing it from the input list.\n" %file)
+ removed_files.append(file)
+ continue
+        # If a waiver FITS file is provided, convert it to MEF;
+        # drop it from the input list if the conversion fails.
+ if imgfits:
+ if imgtype == 'waiver':
+ newfilename = waiver2mef(file, convert_dq=True)
+ if newfilename == None:
+ logger.warning("\n\tRemoving file %s from input list - could not convert waiver to mef" %file)
+ removed_files.append(file)
+ else:
+ newfiles.append(newfilename)
+ else:
+ newfiles.append(file)
+
+ # If a GEIS image is provided as input, create a new MEF file with
+ # a name generated using 'buildFITSName()'
+ # Convert the corresponding data quality file if present
+ if not imgfits:
+ newfilename = geis2mef(file, convert_dq=True)
+ if newfilename == None:
+ logger.warning("\n\tRemoving file %s from input list - could not convert geis to mef" %file)
+ removed_files.append(file)
+ else:
+ newfiles.append(newfilename)
+ if removed_files:
+ logger.warning('\n\tThe following files will be removed from the list of files to be processed %s' % removed_files)
+ #for f in removed_files:
+ # print f
+
+ newfiles = checkFiles(newfiles)[0]
+ logger.info("\n\tThese files passed the input check and will be processed: %s" % newfiles)
+ return newfiles
+
+def newIDCTAB(fname):
+ #When this is called we know there's a kw IDCTAB in the header
+ idctab = fileutil.osfn(pyfits.getval(fname, 'IDCTAB'))
+ try:
+ #check for the presence of IDCTAB in the first extension
+ oldidctab = fileutil.osfn(pyfits.getval(fname, 'IDCTAB', ext=1))
+ except KeyError:
+ return False
+ if idctab == oldidctab:
+ return False
+ else:
+ return True
+
+def cleanWCS(fname):
+    # A new IDCTAB invalidates all previously computed WCS's.
+    # We are deleting all of them except the original OPUS WCS.
+ keys = wcsutil.wcskeys(pyfits.getheader(fname, ext=1))
+ f = pyfits.open(fname, mode='update')
+ fext = range(len(f))
+ for key in keys:
+ wcsutil.deleteWCS(fname, ext=fext,wcskey=key)
+
+def getCorrections(instrument):
+ """
+ Print corrections available for an instrument
+
+ :Parameters:
+ `instrument`: string, one of 'WFPC2', 'NICMOS', 'STIS', 'ACS', 'WFC3'
+ """
+ acorr = apply_corrections.allowed_corrections[instrument]
+
+ print "The following corrections will be performed for instrument %s\n" % instrument
+ for c in acorr: print c,': ' , apply_corrections.cnames[c]
+
diff --git a/lib/stwcs/updatewcs/apply_corrections.py b/lib/stwcs/updatewcs/apply_corrections.py
new file mode 100644
index 0000000..fbcb502
--- /dev/null
+++ b/lib/stwcs/updatewcs/apply_corrections.py
@@ -0,0 +1,223 @@
+from __future__ import division # confidence high
+
+import os
+import pyfits
+import time
+from stsci.tools import fileutil
+import os.path
+from stwcs.wcsutil import altwcs
+
+import logging
+logger = logging.getLogger("stwcs.updatewcs.apply_corrections")
+
+#Note: The order of corrections is important
+
+__docformat__ = 'restructuredtext'
+
+# A dictionary which lists the allowed corrections for each instrument.
+# These are the default corrections applied also in the pipeline.
+
+allowed_corrections={'WFPC2': ['DET2IMCorr', 'MakeWCS','CompSIP', 'VACorr'],
+ 'ACS': ['DET2IMCorr', 'TDDCorr', 'MakeWCS', 'CompSIP','VACorr', 'NPOLCorr'],
+ 'STIS': ['MakeWCS', 'CompSIP','VACorr'],
+ 'NICMOS': ['MakeWCS', 'CompSIP','VACorr'],
+ 'WFC3': ['MakeWCS', 'CompSIP','VACorr'],
+ }
+
+cnames = {'DET2IMCorr': 'Detector to Image Correction',
+ 'TDDCorr': 'Time Dependent Distortion Correction',
+ 'MakeWCS': 'Recalculate basic WCS keywords based on the distortion model',
+ 'CompSIP': 'Given IDCTAB distortion model calculate the SIP coefficients',
+ 'VACorr': 'Velocity Aberration Correction',
+ 'NPOLCorr': 'Lookup Table Distortion'
+ }
+
+def setCorrections(fname, vacorr=True, tddcorr=True, npolcorr=True, d2imcorr=True):
+ """
+ Creates a list of corrections to be applied to a file
+    based on user input parameters and allowed corrections
+ for the instrument.
+ """
+ instrument = pyfits.getval(fname, 'INSTRUME')
+ # make a copy of this list !
+ acorr = allowed_corrections[instrument][:]
+
+ # Check if idctab is present on disk
+ # If kw IDCTAB is present in the header but the file is
+    # not found on disk, do not run TDDCorr, MakeWCS and CompSIP
+ if not foundIDCTAB(fname):
+ if 'TDDCorr' in acorr: acorr.remove('TDDCorr')
+ if 'MakeWCS' in acorr: acorr.remove('MakeWCS')
+ if 'CompSIP' in acorr: acorr.remove('CompSIP')
+
+ if 'VACorr' in acorr and vacorr==False: acorr.remove('VACorr')
+ if 'TDDCorr' in acorr:
+ tddcorr = applyTDDCorr(fname, tddcorr)
+ if tddcorr == False: acorr.remove('TDDCorr')
+
+ if 'NPOLCorr' in acorr:
+ npolcorr = applyNpolCorr(fname, npolcorr)
+ if npolcorr == False: acorr.remove('NPOLCorr')
+ if 'DET2IMCorr' in acorr:
+ d2imcorr = applyD2ImCorr(fname, d2imcorr)
+ if d2imcorr == False: acorr.remove('DET2IMCorr')
+ logger.info("\n\tCorrections to be applied to %s: %s " % (fname, str(acorr)))
+ return acorr
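+
+# Editor's sketch (not part of the original module): for a hypothetical
+# ACS/WFC exposure with all reference files on disk, setCorrections returns
+# the subset of allowed corrections that will actually be performed:
+#
+#     >>> setCorrections('j94f05bgq_flt.fits')
+#     ['DET2IMCorr', 'TDDCorr', 'MakeWCS', 'CompSIP', 'VACorr', 'NPOLCorr']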
+
+def foundIDCTAB(fname):
+ try:
+ idctab = fileutil.osfn(pyfits.getval(fname, 'IDCTAB'))
+ except KeyError:
+ return False
+ if idctab == 'N/A' or idctab == "":
+ return False
+ if os.path.exists(idctab):
+ return True
+ else:
+ return False
+
+def applyTDDCorr(fname, utddcorr):
+ """
+ The default value of tddcorr for all ACS images is True.
+ This correction will be performed if all conditions below are True:
+ - the user did not turn it off on the command line
+ - the detector is WFC
+ - the idc table specified in the primary header is available.
+ """
+ instrument = pyfits.getval(fname, 'INSTRUME')
+ try:
+ detector = pyfits.getval(fname, 'DETECTOR')
+ except KeyError:
+ detector = None
+ try:
+ tddswitch = pyfits.getval(fname, 'TDDCORR')
+ except KeyError:
+ tddswitch = 'PERFORM'
+
+ if instrument == 'ACS' and detector == 'WFC' and utddcorr == True and tddswitch == 'PERFORM':
+ tddcorr = True
+        try:
+            idctab = pyfits.getval(fname, 'IDCTAB')
+        except KeyError:
+            # IDCTAB keyword not found - not applying TDD correction
+            return False
+        if os.path.exists(fileutil.osfn(idctab)):
+            tddcorr = True
+        else:
+            # IDCTAB file not found - not applying TDD correction
+            tddcorr = False
+ else:
+ tddcorr = False
+
+ return tddcorr
+
+def applyNpolCorr(fname, unpolcorr):
+ """
+ Determines whether non-polynomial distortion lookup tables should be added
+ as extensions to the science file based on the 'NPOLFILE' keyword in the
+ primary header and NPOLEXT kw in the first extension.
+ This is a default correction and will always run in the pipeline.
+ The file used to generate the extensions is
+ recorded in the NPOLEXT keyword in the first science extension.
+ If 'NPOLFILE' in the primary header is different from 'NPOLEXT' in the
+ extension header and the file exists on disk and is a 'new type' npolfile,
+ then the lookup tables will be updated as 'WCSDVARR' extensions.
+ """
+ applyNPOLCorr = True
+ try:
+ # get NPOLFILE kw from primary header
+ fnpol0 = pyfits.getval(fname, 'NPOLFILE')
+ if fnpol0 == 'N/A':
+ return False
+ fnpol0 = fileutil.osfn(fnpol0)
+ if not fileutil.findFile(fnpol0):
+ msg = """\n\tKw "NPOLFILE" exists in primary header but file %s not found
+ Non-polynomial distortion correction will not be applied\n
+ """ % fnpol0
+ logger.critical(msg)
+ applyNPOLCorr = False
+ return applyNPOLCorr
+ try:
+ # get NPOLEXT kw from first extension header
+ fnpol1 = pyfits.getval(fname, 'NPOLEXT', ext=1)
+ fnpol1 = fileutil.osfn(fnpol1)
+ if fnpol1 and fileutil.findFile(fnpol1):
+ if fnpol0 != fnpol1:
+ applyNPOLCorr = True
+ else:
+ msg = """\n\tNPOLEXT with the same value as NPOLFILE found in first extension.
+ NPOL correction will not be applied."""
+ logger.info(msg)
+ applyNPOLCorr = False
+ else:
+ # npl file defined in first extension may not be found
+ # but if a valid kw exists in the primary header, non-polynomial
+ #distortion correction should be applied.
+ applyNPOLCorr = True
+ except KeyError:
+ # the case of "NPOLFILE" kw present in primary header but "NPOLEXT" missing
+ # in first extension header
+ applyNPOLCorr = True
+ except KeyError:
+ logger.info('\n\t"NPOLFILE" keyword not found in primary header')
+ applyNPOLCorr = False
+ return applyNPOLCorr
+
+ if isOldStyleDGEO(fname, fnpol0):
+        applyNPOLCorr = False
+ return (applyNPOLCorr and unpolcorr)
+
+def isOldStyleDGEO(fname, dgname):
+ # checks if the file defined in a NPOLFILE kw is a full size
+ # (old style) image
+
+ sci_naxis1 = pyfits.getval(fname, 'NAXIS1', ext=1)
+ sci_naxis2 = pyfits.getval(fname, 'NAXIS2', ext=1)
+ dg_naxis1 = pyfits.getval(dgname, 'NAXIS1', ext=1)
+ dg_naxis2 = pyfits.getval(dgname, 'NAXIS2', ext=1)
+ if sci_naxis1 <= dg_naxis1 or sci_naxis2 <= dg_naxis2:
+ msg = """\n\tOnly full size (old style) DGEO file was found.\n
+ Non-polynomial distortion correction will not be applied."""
+ logger.critical(msg)
+ return True
+ else:
+ return False
+
+def applyD2ImCorr(fname, d2imcorr):
+ applyD2IMCorr = True
+ try:
+ # get D2IMFILE kw from primary header
+ fd2im0 = pyfits.getval(fname, 'D2IMFILE')
+ if fd2im0 == 'N/A':
+ return False
+ fd2im0 = fileutil.osfn(fd2im0)
+ if not fileutil.findFile(fd2im0):
+ msg = """\n\tKw D2IMFILE exists in primary header but file %s not found\n
+ Detector to image correction will not be applied\n""" % fd2im0
+ logger.critical(msg)
+ print msg
+ applyD2IMCorr = False
+ return applyD2IMCorr
+ try:
+ # get D2IMEXT kw from first extension header
+ fd2imext = pyfits.getval(fname, 'D2IMEXT', ext=1)
+ fd2imext = fileutil.osfn(fd2imext)
+ if fd2imext and fileutil.findFile(fd2imext):
+ if fd2im0 != fd2imext:
+ applyD2IMCorr = True
+ else:
+ applyD2IMCorr = False
+ else:
+ # D2IM file defined in first extension may not be found
+ # but if a valid kw exists in the primary header,
+ # detector to image correction should be applied.
+ applyD2IMCorr = True
+ except KeyError:
+ # the case of D2IMFILE kw present in primary header but D2IMEXT missing
+ # in first extension header
+ applyD2IMCorr = True
+ except KeyError:
+ print 'D2IMFILE keyword not found in primary header'
+ applyD2IMCorr = False
+ return applyD2IMCorr
+
diff --git a/lib/stwcs/updatewcs/corrections.py b/lib/stwcs/updatewcs/corrections.py
new file mode 100644
index 0000000..b227d05
--- /dev/null
+++ b/lib/stwcs/updatewcs/corrections.py
@@ -0,0 +1,230 @@
+from __future__ import division # confidence high
+
+import datetime
+import numpy as np
+from numpy import linalg
+from stsci.tools import fileutil
+from utils import diff_angles
+import makewcs, npol
+
+import logging, time
+logger=logging.getLogger('stwcs.updatewcs.corrections')
+
+MakeWCS = makewcs.MakeWCS
+NPOLCorr = npol.NPOLCorr
+
+class TDDCorr(object):
+ """
+ Apply time dependent distortion correction to distortion coefficients and basic
+ WCS keywords. This correction **must** be done before any other WCS correction.
+
+ Parameters
+ ----------
+ ext_wcs: HSTWCS object
+ An HSTWCS object to be modified
+ ref_wcs: HSTWCS object
+ A reference HSTWCS object
+
+ Notes
+ -----
+ Compute the ACS/WFC time dependent distortion terms as described
+ in [1]_ and apply the correction to the WCS of the observation.
+
+ The model coefficients are stored in the primary header of the IDCTAB.
+ :math:`D_{ref}` is the reference date. The computed corrections are saved
+ in the science extension header as TDDALPHA and TDDBETA keywords.
+
+ .. math:: TDDALPHA = A_{0} + {A_{1}*(obsdate - D_{ref})}
+
+ .. math:: TDDBETA = B_{0} + B_{1}*(obsdate - D_{ref})
+
+
+ The time dependent distortion affects the IDCTAB coefficients, and the
+ relative location of the two chips. Because the linear order IDCTAB
+    coefficients are used in the computation of the NPOL extensions,
+ the TDD correction affects all components of the distortion model.
+
+ Application of TDD to the IDCTAB polynomial coefficients:
+ The TDD model is computed in Jay's frame, while the IDCTAB coefficients
+ are in the HST V2/V3 frame. The coefficients are transformed to Jay's frame,
+ TDD is applied and they are transformed back to the V2/V3 frame. This
+ correction is performed in this class.
+
+ Application of TDD to the relative location of the two chips is
+ done in `makewcs`.
+
+ References
+ ----------
+ .. [1] Jay Anderson, "Variation of the Distortion Solution of the WFC", ACS ISR 2007-08.
+
+ """
+ def updateWCS(cls, ext_wcs, ref_wcs):
+ """
+ - Calculates alpha and beta for ACS/WFC data.
+ - Writes 2 new kw to the extension header: TDDALPHA and TDDBETA
+ """
+ logger.info("\n\tStarting TDDCorr: %s" % time.asctime())
+ alpha, beta = cls.compute_alpha_beta(ext_wcs)
+ cls.apply_tdd2idc(ref_wcs, alpha, beta)
+ cls.apply_tdd2idc(ext_wcs, alpha, beta)
+ ext_wcs.idcmodel.refpix['TDDALPHA'] = alpha
+ ext_wcs.idcmodel.refpix['TDDBETA'] = beta
+ ref_wcs.idcmodel.refpix['TDDALPHA'] = alpha
+ ref_wcs.idcmodel.refpix['TDDBETA'] = beta
+
+ newkw = {'TDDALPHA': alpha, 'TDDBETA':beta, 'OCX10':ext_wcs.idcmodel.cx[1,0],
+ 'OCX11':ext_wcs.idcmodel.cx[1,1],'OCY10':ext_wcs.idcmodel.cy[1,0],
+ 'OCY11':ext_wcs.idcmodel.cy[1,1],}
+
+ return newkw
+ updateWCS = classmethod(updateWCS)
+
+ def apply_tdd2idc(cls, hwcs, alpha, beta):
+ """
+ Applies TDD to the idctab coefficients of a ACS/WFC observation.
+ This should be always the first correction.
+ """
+ theta_v2v3 = 2.234529
+ mrotp = fileutil.buildRotMatrix(theta_v2v3)
+ mrotn = fileutil.buildRotMatrix(-theta_v2v3)
+ tdd_mat = np.array([[1+(beta/2048.), alpha/2048.],[alpha/2048.,1-(beta/2048.)]],np.float64)
+ abmat1 = np.dot(tdd_mat, mrotn)
+ abmat2 = np.dot(mrotp,abmat1)
+ xshape, yshape = hwcs.idcmodel.cx.shape, hwcs.idcmodel.cy.shape
+ icxy = np.dot(abmat2,[hwcs.idcmodel.cx.ravel(), hwcs.idcmodel.cy.ravel()])
+ hwcs.idcmodel.cx = icxy[0]
+ hwcs.idcmodel.cy = icxy[1]
+ hwcs.idcmodel.cx.shape = xshape
+ hwcs.idcmodel.cy.shape = yshape
+
+ apply_tdd2idc = classmethod(apply_tdd2idc)
+
+ def compute_alpha_beta(cls, ext_wcs):
+ """
+ Compute the ACS time dependent distortion skew terms
+ as described in ACS ISR 07-08 by J. Anderson.
+
+ Jay's code only computes the alpha/beta values based on a decimal year
+ with only 3 digits, so this line reproduces that when needed for comparison
+ with his results.
+ rday = float(('%0.3f')%rday)
+
+ The zero-point terms account for the skew accumulated between
+ 2002.0 and 2004.5, when the latest IDCTAB was delivered.
+ alpha = 0.095 + 0.090*(rday-dday)/2.5
+ beta = -0.029 - 0.030*(rday-dday)/2.5
+ """
+ if not isinstance(ext_wcs.date_obs,float):
+ year,month,day = ext_wcs.date_obs.split('-')
+ rdate = datetime.datetime(int(year),int(month),int(day))
+ rday = float(rdate.strftime("%j"))/365.25 + rdate.year
+ else:
+ rday = ext_wcs.date_obs
+
+ skew_coeffs = ext_wcs.idcmodel.refpix['skew_coeffs']
+ if skew_coeffs is None:
+ # Only print out warning for post-SM4 data where this may matter
+ if rday > 2009.0:
+ err_str = "------------------------------------------------------------------------ \n"
+ err_str += "WARNING: the IDCTAB geometric distortion file specified in the image \n"
+ err_str += " header did not have the time-dependent distortion coefficients. \n"
+ err_str += " The pre-SM4 time-dependent skew solution will be used by default.\n"
+ err_str += " Please update IDCTAB with new reference file from HST archive. \n"
+ err_str += "------------------------------------------------------------------------ \n"
+ print err_str
+ # Using default pre-SM4 coefficients
+ skew_coeffs = {'TDD_A':[0.095,0.090/2.5],
+ 'TDD_B':[-0.029,-0.030/2.5],
+ 'TDD_DATE':2004.5,'TDDORDER':1}
+
+ alpha = 0
+ beta = 0
+ # Compute skew terms, allowing for non-linear coefficients as well
+ for c in range(skew_coeffs['TDDORDER']+1):
+ alpha += skew_coeffs['TDD_A'][c]* np.power((rday-skew_coeffs['TDD_DATE']),c)
+ beta += skew_coeffs['TDD_B'][c]*np.power((rday-skew_coeffs['TDD_DATE']),c)
+
+ return alpha,beta
+ compute_alpha_beta = classmethod(compute_alpha_beta)
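+
+    # Editor's sketch (not in the original source): with the default pre-SM4
+    # coefficients above, an observation taken at decimal year 2007.0 gives
+    #     alpha =  0.095 + (0.090/2.5)*(2007.0 - 2004.5) =  0.185
+    #     beta  = -0.029 + (-0.030/2.5)*(2007.0 - 2004.5) = -0.059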
+
+
+class VACorr(object):
+ """
+    Apply velocity aberration correction to WCS keywords.
+
+ Notes
+ -----
+    The velocity aberration factor is stored in the extension header keyword 'VAFACTOR'.
+ The correction is applied to the CD matrix and CRVALs.
+
+ """
+ def updateWCS(cls, ext_wcs, ref_wcs):
+ logger.info("\n\tStarting VACorr: %s" % time.asctime())
+ if ext_wcs.vafactor != 1:
+ ext_wcs.wcs.cd = ext_wcs.wcs.cd * ext_wcs.vafactor
+ crval0 = ref_wcs.wcs.crval[0] + ext_wcs.vafactor*diff_angles(ext_wcs.wcs.crval[0],
+ ref_wcs.wcs.crval[0])
+ crval1 = ref_wcs.wcs.crval[1] + ext_wcs.vafactor*diff_angles(ext_wcs.wcs.crval[1],
+ ref_wcs.wcs.crval[1])
+ crval = np.array([crval0,crval1])
+ ext_wcs.wcs.crval = crval
+ ext_wcs.wcs.set()
+ else:
+ pass
+ kw2update={'CD1_1': ext_wcs.wcs.cd[0,0], 'CD1_2':ext_wcs.wcs.cd[0,1],
+ 'CD2_1':ext_wcs.wcs.cd[1,0], 'CD2_2':ext_wcs.wcs.cd[1,1],
+ 'CRVAL1':ext_wcs.wcs.crval[0], 'CRVAL2':ext_wcs.wcs.crval[1]}
+ return kw2update
+
+ updateWCS = classmethod(updateWCS)
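+
+    # Editor's sketch (not in the original source): the correction scales the
+    # CD matrix by VAFACTOR and moves CRVAL toward the reference position,
+    # e.g. for vafactor = 1.00003:
+    #     cd_new    = cd * 1.00003
+    #     crval_new = ref_crval + 1.00003 * diff_angles(crval, ref_crval)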
+
+
+class CompSIP(object):
+ """
+ Compute Simple Imaging Polynomial (SIP) coefficients as defined in [2]_
+ from IDC table coefficients.
+
+ This class transforms the TDD corrected IDCTAB coefficients into SIP format.
+ It also applies a binning factor to the coefficients if the observation was
+ binned.
+
+ References
+ ----------
+ .. [2] David Shupe, et al, "The SIP Convention of representing Distortion
+ in FITS Image headers", Astronomical Data Analysis Software And Systems, ASP
+ Conference Series, Vol. 347, 2005
+
+ """
+ def updateWCS(cls, ext_wcs, ref_wcs):
+ logger.info("\n\tStarting CompSIP: %s" %time.asctime())
+ kw2update = {}
+ order = ext_wcs.idcmodel.norder
+ kw2update['A_ORDER'] = order
+ kw2update['B_ORDER'] = order
+ pscale = ext_wcs.idcmodel.refpix['PSCALE']
+
+ cx = ext_wcs.idcmodel.cx
+ cy = ext_wcs.idcmodel.cy
+
+ matr = np.array([[cx[1,1],cx[1,0]], [cy[1,1],cy[1,0]]], dtype=np.float64)
+ imatr = linalg.inv(matr)
+ akeys1 = np.zeros((order+1,order+1), dtype=np.float64)
+ bkeys1 = np.zeros((order+1,order+1), dtype=np.float64)
+ for n in range(order+1):
+ for m in range(order+1):
+ if n >= m and n>=2:
+ idcval = np.array([[cx[n,m]],[cy[n,m]]])
+ sipval = np.dot(imatr, idcval)
+ akeys1[m,n-m] = sipval[0]
+ bkeys1[m,n-m] = sipval[1]
+ Akey="A_%d_%d" % (m,n-m)
+ Bkey="B_%d_%d" % (m,n-m)
+ kw2update[Akey] = sipval[0,0] * ext_wcs.binned
+ kw2update[Bkey] = sipval[1,0] * ext_wcs.binned
+ kw2update['CTYPE1'] = 'RA---TAN-SIP'
+ kw2update['CTYPE2'] = 'DEC--TAN-SIP'
+ return kw2update
+
+ updateWCS = classmethod(updateWCS)
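+
+    # Editor's sketch (not in the original source): the loop above expresses
+    # each higher order IDC coefficient pair in the frame of the first order
+    # terms, i.e. for every n >= 2 and m <= n:
+    #     [A_m_(n-m), B_m_(n-m)]^T = M^-1 * [cx[n,m], cy[n,m]]^T
+    # with M = [[cx[1,1], cx[1,0]], [cy[1,1], cy[1,0]]], and the resulting
+    # keyword values are scaled by the binning factor before being written out.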
+
+
diff --git a/lib/stwcs/updatewcs/det2im.py b/lib/stwcs/updatewcs/det2im.py
new file mode 100644
index 0000000..fe683b4
--- /dev/null
+++ b/lib/stwcs/updatewcs/det2im.py
@@ -0,0 +1,200 @@
+from __future__ import division # confidence high
+
+import time
+import pyfits
+from stsci.tools import fileutil
+import utils
+
+import logging
+logger = logging.getLogger('stwcs.updatewcs.Det2IM')
+
+class DET2IMCorr(object):
+ """
+ Stores a small correction to the detector coordinates as a d2imarr
+ extension in the science file.
+
+ Notes
+ -----
+ For the case of ACS/WFC every 68th column is wider than the rest.
+ To compensate for this a small correction needs to be applied to the
+ detector coordinates. We call this a detector to image transformation.
+    The image coordinates obtained in this way are the input to all other distortion
+ corrections. The correction is originally stored in an external
+ reference file pointed to by 'D2IMFILE' keyword in the primary header.
+ This class attaches the correction array as an extension to the science
+ file with extname = `d2imarr`.
+
+ Other keywords used in this correction are:
+
+ `AXISCORR`: integer (1 or 2) - axis to which the detector to image
+ correction is applied
+
+ `D2IMEXT`: string = name of reference file which was used to create
+ the lookup table extension
+
+ `D2IMERR`: float, optional - maximum value of the correction
+
+ """
+ def updateWCS(cls, fobj):
+ """
+ Parameters
+ ----------
+ fobj: pyfits object
+ Science file, for which a detector to image correction
+ is available
+
+ Notes
+ -----
+ Uses the file pointed to in the primary header keyword 'D2IMFILE'
+ to create an extension with a detector to image correction.
+ """
+ logger.info("\n\tStarting Det2IM Correction: %s" % time.asctime())
+ try:
+ assert isinstance(fobj, pyfits.HDUList)
+ except AssertionError:
+ logger.exception('\n\tInput must be a pyfits.HDUList object')
+ raise
+
+ d2imfile = fileutil.osfn(fobj[0].header['D2IMFILE'])
+ axiscorr = cls.getAxisCorr(d2imfile)
+ d2imerr = pyfits.getdata(d2imfile, ext=1).max()
+ if axiscorr == None:
+ new_kw = {}
+ else:
+ new_kw = {'D2IMEXT': d2imfile, 'AXISCORR': axiscorr, 'D2IMERR': d2imerr}
+ cls.applyDet2ImCorr(fobj,axiscorr)
+ cls.updatehdr(fobj, new_kw)
+
+ updateWCS = classmethod(updateWCS)
+
+ def getAxisCorr(cls, refname):
+ try:
+ direction = pyfits.getval(refname, ext=1, key='EXTNAME')
+ if direction == 'DX': return 1
+ elif direction == 'DY': return 2
+ else:
+                logger.warning('\n\tDET2IM correction expects the reference file to have \
+                an EXTNAME keyword of value "DX" or "DY"; EXTNAME %s detected' % direction)
+ return None
+ except KeyError:
+ logger.exception("\n\tD2IMFILE %s is missing EXTNAME keyword. Unable to determine axis \
+ to which to apply the correction." % refname)
+ direction = None
+ return direction
+ getAxisCorr = classmethod(getAxisCorr)
+
+ def applyDet2ImCorr(cls,fobj, axiscorr):
+ binned = utils.getBinning(fobj)
+ hdu = cls.createDgeoHDU(fobj, axiscorr, binned)
+ d2imarr_ind = cls.getD2imIndex(fobj)
+ if d2imarr_ind:
+ fobj[d2imarr_ind] = hdu
+ else:
+ fobj.append(hdu)
+ applyDet2ImCorr = classmethod(applyDet2ImCorr)
+
+ def getD2imIndex(cls,fobj):
+ index = None
+ for e in range(len(fobj)):
+ try:
+ ename = fobj[e].header['EXTNAME']
+ except KeyError:
+ continue
+ if ename == 'D2IMARR':
+ index = e
+ return index
+ getD2imIndex = classmethod(getD2imIndex)
+
+ def createDgeoHDU(cls, fobj, axiscorr, binned=1):
+ d2imfile = fileutil.osfn(fobj[0].header['D2IMFILE'])
+ d2im_data = pyfits.getdata(d2imfile, ext=1)
+ sci_hdr = fobj['sci',1].header
+ d2im_hdr = cls.createDet2ImHdr(fobj, binned)
+ hdu = pyfits.ImageHDU(header=d2im_hdr, data=d2im_data)
+
+ return hdu
+
+ createDgeoHDU = classmethod(createDgeoHDU)
+
+ def createDet2ImHdr(cls, fobj, binned=1):
+ """
+ Creates a header for the D2IMARR extension based on the
+ reference file recorded in D2IMFILE keyword in the primary header.
+ fobj - the science file
+
+ """
+ d2imfile = fileutil.osfn(fobj[0].header['D2IMFILE'])
+ axiscorr = cls.getAxisCorr(d2imfile)
+ sci_hdr = fobj[1].header
+ data_shape = pyfits.getdata(d2imfile, ext=1).shape
+ naxis = pyfits.getval(d2imfile, ext=1, key='NAXIS')
+
+ kw = { 'NAXIS': 'Size of the axis',
+ 'CRPIX': 'Coordinate system reference pixel',
+ 'CRVAL': 'Coordinate system value at reference pixel',
+ 'CDELT': 'Coordinate increment along axis'}
+
+ kw_comm1 = {}
+ kw_val1 = {}
+ for key in kw.keys():
+ for i in range(1, naxis+1):
+ si = str(i)
+ kw_comm1[key+si] = kw[key]
+
+ for i in range(1, naxis+1):
+ si = str(i)
+ kw_val1['NAXIS'+si] = data_shape[i-1]
+ kw_val1['CRPIX'+si] = data_shape[i-1]/2.
+ kw_val1['CDELT'+si] = 1./binned
+ kw_val1['CRVAL'+si] = (sci_hdr.get('NAXIS'+si, 1)/2. + \
+ sci_hdr.get('LTV'+si, 0.)) / binned
+
+
+ kw_comm0 = {'XTENSION': 'Image extension',
+ 'BITPIX': 'IEEE floating point',
+ 'NAXIS': 'Number of axes',
+ 'EXTNAME': 'WCS distortion array',
+ 'EXTVER': 'Distortion array version number',
+ 'PCOUNT': 'Special data area of size 0',
+ 'GCOUNT': 'One data group',
+ 'AXISCORR': 'Direction in which the det2im correction is applied'}
+
+ kw_val0 = { 'XTENSION': 'IMAGE',
+ 'BITPIX': -32,
+ 'NAXIS': naxis,
+ 'EXTNAME': 'D2IMARR',
+ 'EXTVER': 1,
+ 'PCOUNT': 0,
+ 'GCOUNT': 1,
+ 'AXISCORR': axiscorr
+ }
+
+
+ cdl = pyfits.CardList()
+ for key in kw_comm0.keys():
+ cdl.append(pyfits.Card(key=key, value=kw_val0[key], comment=kw_comm0[key]))
+ for key in kw_comm1.keys():
+ cdl.append(pyfits.Card(key=key, value=kw_val1[key], comment=kw_comm1[key]))
+
+ hdr = pyfits.Header(cards=cdl)
+ return hdr
+
+ createDet2ImHdr = classmethod(createDet2ImHdr)
+
+ def updatehdr(cls, fobj, kwdict):
+ """
+ Update extension headers to keep record of the files used for the
+ detector to image correction.
+ """
+ for ext in fobj:
+ try:
+ extname = ext.header['EXTNAME'].lower()
+ except KeyError:
+ continue
+ if extname == 'sci':
+ for kw in kwdict:
+ ext.header.update(kw, kwdict[kw])
+ else:
+ continue
+ updatehdr = classmethod(updatehdr)
+
diff --git a/lib/stwcs/updatewcs/makewcs.py b/lib/stwcs/updatewcs/makewcs.py
new file mode 100644
index 0000000..bb86be9
--- /dev/null
+++ b/lib/stwcs/updatewcs/makewcs.py
@@ -0,0 +1,251 @@
+from __future__ import division # confidence high
+
+from stwcs import DEGTORAD, RADTODEG
+import numpy as np
+from math import sin, sqrt, pow, cos, asin, atan2,pi
+import utils
+from stsci.tools import fileutil
+
+import logging, time
+logger = logging.getLogger(__name__)
+
+class MakeWCS(object):
+ """
+ Recompute basic WCS keywords based on PA_V3 and distortion model.
+
+ Notes
+ -----
+ - Compute the reference chip WCS:
+
+ -- CRVAL: transform the model XREF/YREF to the sky
+ -- PA_V3 is calculated at the target position and adjusted
+ for each chip orientation
+      -- CD: PA_V3 and the model scale are used to construct a CD matrix
+
+ - Compute the second chip WCS:
+
+ -- CRVAL: - the distance between the zero points of the two
+ chip models on the sky
+ -- CD matrix: first order coefficients are added to the components
+         of this distance and transferred to the sky. The difference
+ between CRVAL and these vectors is the new CD matrix for each chip.
+ -- CRPIX: chip's model zero point in pixel space (XREF/YREF)
+
+ - Time dependent distortion correction is applied to both chips' V2/V3 values.
+
+ """
+ tdd_xyref = {1: [2048, 3072], 2:[2048, 1024]}
+ def updateWCS(cls, ext_wcs, ref_wcs):
+ """
+ recomputes the basic WCS kw
+ """
+ logger.info("\n\tStarting MakeWCS: %s" % time.asctime())
+ ltvoff, offshift = cls.getOffsets(ext_wcs)
+
+ v23_corr = cls.zero_point_corr(ext_wcs)
+ rv23_corr = cls.zero_point_corr(ref_wcs)
+
+ cls.uprefwcs(ext_wcs, ref_wcs, rv23_corr, ltvoff, offshift)
+ cls.upextwcs(ext_wcs, ref_wcs, v23_corr, rv23_corr, ltvoff, offshift)
+
+ kw2update = {'CD1_1': ext_wcs.wcs.cd[0,0],
+ 'CD1_2': ext_wcs.wcs.cd[0,1],
+ 'CD2_1': ext_wcs.wcs.cd[1,0],
+ 'CD2_2': ext_wcs.wcs.cd[1,1],
+ 'CRVAL1': ext_wcs.wcs.crval[0],
+ 'CRVAL2': ext_wcs.wcs.crval[1],
+ 'CRPIX1': ext_wcs.wcs.crpix[0],
+ 'CRPIX2': ext_wcs.wcs.crpix[1],
+ 'IDCTAB': ext_wcs.idctab,
+ }
+ return kw2update
+
+ updateWCS = classmethod(updateWCS)
+
+ def upextwcs(cls, ext_wcs, ref_wcs, v23_corr, rv23_corr, loff, offsh):
+ """
+ updates an extension wcs
+ """
+ ltvoffx, ltvoffy = loff[0], loff[1]
+ offshiftx, offshifty = offsh[0], offsh[1]
+ ltv1 = ext_wcs.ltv1
+ ltv2 = ext_wcs.ltv2
+ if ltv1 != 0. or ltv2 != 0.:
+ offsetx = ext_wcs.wcs.crpix[0] - ltv1 - ext_wcs.idcmodel.refpix['XREF']
+ offsety = ext_wcs.wcs.crpix[1] - ltv2 - ext_wcs.idcmodel.refpix['YREF']
+ fx,fy = ext_wcs.idcmodel.shift(ext_wcs.idcmodel.cx,ext_wcs.idcmodel.cy,offsetx,offsety)
+ else:
+ fx, fy = ext_wcs.idcmodel.cx, ext_wcs.idcmodel.cy
+
+ tddscale = (ref_wcs.pscale/fx[1,1])
+ v2 = ext_wcs.idcmodel.refpix['V2REF'] + v23_corr[0,0] * tddscale
+ v3 = ext_wcs.idcmodel.refpix['V3REF'] - v23_corr[1,0] * tddscale
+ v2ref = ref_wcs.idcmodel.refpix['V2REF'] + rv23_corr[0,0] * tddscale
+ v3ref = ref_wcs.idcmodel.refpix['V3REF'] - rv23_corr[1,0] * tddscale
+
+ R_scale = ref_wcs.idcmodel.refpix['PSCALE']/3600.0
+ off = sqrt((v2-v2ref)**2 + (v3-v3ref)**2)/(R_scale*3600.0)
+
+ if v3 == v3ref:
+ theta=0.0
+ else:
+ theta = atan2(ext_wcs.parity[0][0]*(v2-v2ref), ext_wcs.parity[1][1]*(v3-v3ref))
+
+ if ref_wcs.idcmodel.refpix['THETA']: theta += ref_wcs.idcmodel.refpix['THETA']*pi/180.0
+
+ dX=(off*sin(theta)) + offshiftx
+ dY=(off*cos(theta)) + offshifty
+
+ px = np.array([[dX,dY]])
+ newcrval = ref_wcs.wcs.p2s(px, 1)['world'][0]
+ newcrpix = np.array([ext_wcs.idcmodel.refpix['XREF'] + ltvoffx,
+ ext_wcs.idcmodel.refpix['YREF'] + ltvoffy])
+ ext_wcs.wcs.crval = newcrval
+ ext_wcs.wcs.crpix = newcrpix
+ ext_wcs.wcs.set()
+
+ # Create a small vector, in reference image pixel scale
+ delmat = np.array([[fx[1,1], fy[1,1]], \
+ [fx[1,0], fy[1,0]]]) / R_scale/3600.
+
+ # Account for subarray offset
+        # Angle of chip relative to the reference chip
+ if ext_wcs.idcmodel.refpix['THETA']:
+ dtheta = ext_wcs.idcmodel.refpix['THETA'] - ref_wcs.idcmodel.refpix['THETA']
+ else:
+ dtheta = 0.0
+
+ rrmat = fileutil.buildRotMatrix(dtheta)
+ # Rotate the vectors
+ dxy = np.dot(delmat, rrmat)
+ wc = ref_wcs.wcs.p2s((px + dxy), 1)['world']
+
+ # Calculate the new CDs and convert to degrees
+ cd11 = utils.diff_angles(wc[0,0],newcrval[0])*cos(newcrval[1]*pi/180.0)
+ cd12 = utils.diff_angles(wc[1,0],newcrval[0])*cos(newcrval[1]*pi/180.0)
+ cd21 = utils.diff_angles(wc[0,1],newcrval[1])
+ cd22 = utils.diff_angles(wc[1,1],newcrval[1])
+ cd = np.array([[cd11, cd12], [cd21, cd22]])
+ ext_wcs.wcs.cd = cd
+ ext_wcs.wcs.set()
+
+ upextwcs = classmethod(upextwcs)
+
+ def uprefwcs(cls, ext_wcs, ref_wcs, rv23_corr_tdd, loff, offsh):
+ """
+ Updates the reference chip
+ """
+ ltvoffx, ltvoffy = loff[0], loff[1]
+ offshift = offsh
+ dec = ref_wcs.wcs.crval[1]
+ tddscale = (ref_wcs.pscale/ext_wcs.idcmodel.cx[1,1])
+ rv23 = [ref_wcs.idcmodel.refpix['V2REF'] + (rv23_corr_tdd[0,0] *tddscale),
+ ref_wcs.idcmodel.refpix['V3REF'] - (rv23_corr_tdd[1,0] * tddscale)]
+ # Get an approximate reference position on the sky
+ rref = np.array([[ref_wcs.idcmodel.refpix['XREF']+ltvoffx ,
+ ref_wcs.idcmodel.refpix['YREF']+ltvoffy]])
+
+ crval = ref_wcs.wcs.p2s(rref, 1)['world'][0]
+ # Convert the PA_V3 orientation to the orientation at the aperture
+ # This is for the reference chip only - we use this for the
+ # reference tangent plane definition
+ # It has the same orientation as the reference chip
+ pv = troll(ext_wcs.pav3,dec,rv23[0], rv23[1])
+ # Add the chip rotation angle
+ if ref_wcs.idcmodel.refpix['THETA']:
+ pv += ref_wcs.idcmodel.refpix['THETA']
+
+
+ # Set values for the rest of the reference WCS
+ ref_wcs.wcs.crval = crval
+ ref_wcs.wcs.crpix = np.array([0.0,0.0])+offsh
+ parity = ref_wcs.parity
+ R_scale = ref_wcs.idcmodel.refpix['PSCALE']/3600.0
+ cd11 = parity[0][0] * cos(pv*pi/180.0)*R_scale
+ cd12 = parity[0][0] * -sin(pv*pi/180.0)*R_scale
+ cd21 = parity[1][1] * sin(pv*pi/180.0)*R_scale
+ cd22 = parity[1][1] * cos(pv*pi/180.0)*R_scale
+
+ rcd = np.array([[cd11, cd12], [cd21, cd22]])
+ ref_wcs.wcs.cd = rcd
+ ref_wcs.wcs.set()
+
+ uprefwcs = classmethod(uprefwcs)
+
+ def zero_point_corr(cls,hwcs):
+ try:
+ alpha = hwcs.idcmodel.refpix['TDDALPHA']
+ beta = hwcs.idcmodel.refpix['TDDBETA']
+ except KeyError:
+ alpha = 0.0
+ beta = 0.0
+ v23_corr = np.array([[0.],[0.]])
+ logger.debug("\n\tTDD Zero point correction for chip %s defaulted to: %s" % (hwcs.chip, v23_corr))
+ return v23_corr
+
+ tdd = np.array([[beta, alpha], [alpha, -beta]])
+ mrotp = fileutil.buildRotMatrix(2.234529)/2048.
+ xy0 = np.array([[cls.tdd_xyref[hwcs.chip][0]-2048.], [cls.tdd_xyref[hwcs.chip][1]-2048.]])
+ v23_corr = np.dot(mrotp,np.dot(tdd,xy0)) * 0.05
+ logger.debug("\n\tTDD Zero point correction for chip %s: %s" % (hwcs.chip, v23_corr))
+ return v23_corr
+
+ zero_point_corr = classmethod(zero_point_corr)
+
+ def getOffsets(cls, ext_wcs):
+ ltv1 = ext_wcs.ltv1
+ ltv2 = ext_wcs.ltv2
+
+ offsetx = ext_wcs.wcs.crpix[0] - ltv1 - ext_wcs.idcmodel.refpix['XREF']
+ offsety = ext_wcs.wcs.crpix[1] - ltv2 - ext_wcs.idcmodel.refpix['YREF']
+
+ shiftx = ext_wcs.idcmodel.refpix['XREF'] + ltv1
+ shifty = ext_wcs.idcmodel.refpix['YREF'] + ltv2
+ if ltv1 != 0. or ltv2 != 0.:
+ ltvoffx = ltv1 + offsetx
+ ltvoffy = ltv2 + offsety
+ offshiftx = offsetx + shiftx
+ offshifty = offsety + shifty
+ else:
+ ltvoffx = 0.
+ ltvoffy = 0.
+ offshiftx = 0.
+ offshifty = 0.
+
+ ltvoff = np.array([ltvoffx, ltvoffy])
+ offshift = np.array([offshiftx, offshifty])
+ return ltvoff, offshift
+
+ getOffsets = classmethod(getOffsets)
+
+
+def troll(roll, dec, v2, v3):
+ """ Computes the roll angle at the target position based on:
+ the roll angle at the V1 axis(roll),
+ the dec of the target(dec), and
+ the V2/V3 position of the aperture (v2,v3) in arcseconds.
+
+ Based on algorithm provided by Colin Cox and used in
+ Generic Conversion at STScI.
+ """
+ # Convert all angles to radians
+ _roll = DEGTORAD(roll)
+ _dec = DEGTORAD(dec)
+ _v2 = DEGTORAD(v2 / 3600.)
+ _v3 = DEGTORAD(v3 / 3600.)
+
+ # compute components
+ sin_rho = sqrt((pow(sin(_v2),2)+pow(sin(_v3),2)) - (pow(sin(_v2),2)*pow(sin(_v3),2)))
+ rho = asin(sin_rho)
+ beta = asin(sin(_v3)/sin_rho)
+ if _v2 < 0: beta = pi - beta
+ gamma = asin(sin(_v2)/sin_rho)
+ if _v3 < 0: gamma = pi - gamma
+ A = pi/2. + _roll - beta
+ B = atan2( sin(A)*cos(_dec), (sin(_dec)*sin_rho - cos(_dec)*cos(rho)*cos(A)))
+
+ # compute final value
+ troll = RADTODEG(pi - (gamma+B))
+
+ return troll
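+
+# Editor's sketch (illustrative, not part of the original module): a typical
+# call passes PA_V3 and the target declination in degrees and the aperture
+# V2/V3 position in arcseconds (hypothetical values):
+#
+#     >>> troll(45.0, -30.0, 256.6, 302.3)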
+
diff --git a/lib/stwcs/updatewcs/npol.py b/lib/stwcs/updatewcs/npol.py
new file mode 100644
index 0000000..db928bf
--- /dev/null
+++ b/lib/stwcs/updatewcs/npol.py
@@ -0,0 +1,319 @@
+from __future__ import division # confidence high
+
+import pyfits
+from stsci.tools import fileutil
+import utils
+import numpy as np
+
+import logging, time
+logger = logging.getLogger('stwcs.updatewcs.npol')
+
+class NPOLCorr(object):
+ """
+ Defines a Lookup table prior distortion correction as per WCS paper IV.
+ It uses a reference file defined by the NPOLFILE (suffix 'NPL') keyword
+ in the primary header.
+
+ Notes
+ -----
+    - Using the extensions in the reference file, create WCSDVARR extensions
+ and add them to the science file.
+ - Add record-valued keywords to the science extension header to describe
+ the lookup tables.
+ - Add a keyword 'NPOLEXT' to the science extension header to store
+ the name of the reference file used to create the WCSDVARR extensions.
+
+ If WCSDVARR extensions exist and `NPOLFILE` is different from `NPOLEXT`,
+ a subsequent update will overwrite the existing extensions.
+ If WCSDVARR extensions were not found in the science file, they will be added.
+
+ It is assumed that the NPL reference files were created to work with IDC tables
+ but will be applied with SIP coefficients. A transformation is applied to correct
+ for the fact that the lookup tables will be applied before the first order coefficients
+ which are in the CD matrix when the SIP convention is used.
+ """
+
+ def updateWCS(cls, fobj):
+ """
+ Parameters
+ ----------
+ fobj: pyfits object
+ Science file, for which a distortion correction in a NPOLFILE is available
+
+ """
+        logger.info("\n\tStarting NPOLCorr: %s" % time.asctime())
+ try:
+ assert isinstance(fobj, pyfits.HDUList)
+ except AssertionError:
+ logger.exception('\n\tInput must be a pyfits.HDUList object')
+ raise
+
+ cls.applyNPOLCorr(fobj)
+ nplfile = fobj[0].header['NPOLFILE']
+
+ new_kw = {'NPOLEXT': nplfile}
+ return new_kw
+
+ updateWCS = classmethod(updateWCS)
+
+ def applyNPOLCorr(cls, fobj):
+ """
+ For each science extension in a pyfits file object:
+ - create a WCSDVARR extension
+ - update science header
+ - add/update NPOLEXT keyword
+ """
+ nplfile = fileutil.osfn(fobj[0].header['NPOLFILE'])
+ # Map WCSDVARR EXTVER numbers to extension numbers
+ wcsdvarr_ind = cls.getWCSIndex(fobj)
+ for ext in fobj:
+ try:
+ extname = ext.header['EXTNAME'].lower()
+ except KeyError:
+ continue
+ if extname == 'sci':
+ extversion = ext.header['EXTVER']
+ ccdchip = cls.get_ccdchip(fobj, extname='SCI', extver=extversion)
+ binned = utils.getBinning(fobj, extversion)
+ header = ext.header
+ # get the data arrays from the reference file and transform them for use with SIP
+ dx,dy = cls.getData(nplfile, ccdchip)
+ idccoeffs = cls.getIDCCoeffs(header)
+
+ if idccoeffs != None:
+ dx, dy = cls.transformData(dx,dy, idccoeffs)
+
+ # Determine EXTVER for the WCSDVARR extension from the NPL file (EXTNAME, EXTVER) kw.
+ # This is used to populate DPj.EXTVER kw
+ wcsdvarr_x_version = 2 * extversion -1
+ wcsdvarr_y_version = 2 * extversion
+
+ for ename in zip(['DX', 'DY'], [wcsdvarr_x_version,wcsdvarr_y_version],[dx, dy]):
+ cls.addSciExtKw(header, wdvarr_ver=ename[1], npol_extname=ename[0])
+ hdu = cls.createNpolHDU(header, npolfile=nplfile, \
+ wdvarr_ver=ename[1], npl_extname=ename[0], data=ename[2],ccdchip=ccdchip, binned=binned)
+ if wcsdvarr_ind:
+ fobj[wcsdvarr_ind[ename[1]]] = hdu
+ else:
+ fobj.append(hdu)
+
+
+ applyNPOLCorr = classmethod(applyNPOLCorr)
+
+ def getWCSIndex(cls, fobj):
+
+ """
+ If fobj has WCSDVARR extensions:
+ returns a mapping of their EXTVER kw to file object extension numbers
+ if fobj does not have WCSDVARR extensions:
+ an empty dictionary is returned
+ """
+ wcsd = {}
+ for e in range(len(fobj)):
+ try:
+ ename = fobj[e].header['EXTNAME']
+ except KeyError:
+ continue
+ if ename == 'WCSDVARR':
+ wcsd[fobj[e].header['EXTVER']] = e
+        logger.debug("A map of WCSDVARR extensions %s" % wcsd)
+ return wcsd
+
+ getWCSIndex = classmethod(getWCSIndex)
+
+ def addSciExtKw(cls, hdr, wdvarr_ver=None, npol_extname=None):
+ """
+ Adds kw to sci extension to define WCSDVARR lookup table extensions
+
+ """
+ if npol_extname =='DX':
+ j=1
+ else:
+ j=2
+
+ cperror = 'CPERROR%s' %j
+ cpdis = 'CPDIS%s' %j
+ dpext = 'DP%s.' %j + 'EXTVER'
+ dpnaxes = 'DP%s.' %j +'NAXES'
+ dpaxis1 = 'DP%s.' %j+'AXIS.1'
+ dpaxis2 = 'DP%s.' %j+'AXIS.2'
+ keys = [cperror, cpdis, dpext, dpnaxes, dpaxis1, dpaxis2]
+ values = {cperror: 0.0, cpdis: 'Lookup', dpext: wdvarr_ver, dpnaxes: 2,
+ dpaxis1: 1, dpaxis2: 2}
+
+ comments = {cperror: 'Maximum error of NPOL correction for axis %s' % j,
+                    cpdis: 'Prior distortion function type',
+ dpext: 'Version number of WCSDVARR extension containing lookup distortion table',
+ dpnaxes: 'Number of independent variables in distortion function',
+ dpaxis1: 'Axis number of the jth independent variable in a distortion function',
+ dpaxis2: 'Axis number of the jth independent variable in a distortion function'
+ }
+
+ for key in keys:
+ hdr.update(key=key, value=values[key], comment=comments[key], before='HISTORY')
+
+ addSciExtKw = classmethod(addSciExtKw)
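+
+    # Editor's sketch (not in the original source): for the 'DX' table of the
+    # first chip (wdvarr_ver=1, j=1) the keywords written to the SCI header are
+    #     CPERROR1 = 0.0, CPDIS1 = 'Lookup', DP1.EXTVER = 1,
+    #     DP1.NAXES = 2, DP1.AXIS.1 = 1, DP1.AXIS.2 = 2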
+
+ def getData(cls,nplfile, ccdchip):
+ """
+ Get the data arrays from the reference NPOL files
+        Make sure 'CCDCHIP' in the npolfile matches 'CCDCHIP' in the science file.
+ """
+ npl = pyfits.open(nplfile)
+ for ext in npl:
+ nplextname = ext.header.get('EXTNAME',"")
+ nplccdchip = ext.header.get('CCDCHIP',1)
+ if nplextname == 'DX' and nplccdchip == ccdchip:
+ xdata = ext.data.copy()
+ continue
+ elif nplextname == 'DY' and nplccdchip == ccdchip:
+ ydata = ext.data.copy()
+ continue
+ else:
+ continue
+ npl.close()
+ return xdata, ydata
+ getData = classmethod(getData)
+
+ def transformData(cls, dx, dy, coeffs):
+ """
+ Transform the NPOL data arrays for use with SIP
+ """
+ ndx, ndy = np.dot(coeffs, [dx.ravel(), dy.ravel()])
+ ndx.shape = dx.shape
+ ndy.shape=dy.shape
+ return ndx, ndy
+
+ transformData = classmethod(transformData)
+
+ def getIDCCoeffs(cls, header):
+ """
+ Return a matrix of the scaled first order IDC coefficients.
+ """
+ try:
+ ocx10 = header['OCX10']
+ ocx11 = header['OCX11']
+ ocy10 = header['OCY10']
+ ocy11 = header['OCY11']
+ coeffs = np.array([[ocx11, ocx10], [ocy11,ocy10]], dtype=np.float32)
+ except KeyError:
+ logger.exception('\n\tFirst order IDCTAB coefficients are not available. \n\
+ Cannot convert SIP to IDC coefficients.')
+ return None
+ try:
+ idcscale = header['IDCSCALE']
+ except KeyError:
+ logger.exception("IDCSCALE not found in header - setting it to 1.")
+ idcscale = 1
+
+ return np.linalg.inv(coeffs/idcscale)
+
+ getIDCCoeffs = classmethod(getIDCCoeffs)
+
+ def createNpolHDU(cls, sciheader, npolfile=None, wdvarr_ver=1, npl_extname=None,data = None, ccdchip=1, binned=1):
+ """
+ Creates an HDU to be added to the file object.
+ """
+ hdr = cls.createNpolHdr(sciheader, npolfile=npolfile, wdvarr_ver=wdvarr_ver, npl_extname=npl_extname, ccdchip=ccdchip, binned=binned)
+ hdu=pyfits.ImageHDU(header=hdr, data=data)
+ return hdu
+
+ createNpolHDU = classmethod(createNpolHDU)
+
+ def createNpolHdr(cls, sciheader, npolfile, wdvarr_ver, npl_extname, ccdchip, binned):
+ """
+ Creates a header for the WCSDVARR extension based on the NPOL reference file
+ and sci extension header. The goal is to always work in image coordinates
+        (also for subarrays and binned images). The WCS for the WCSDVARR extension
+        is such that a full size npol table is created and then shifted or scaled
+ if the science image is a subarray or binned image.
+ """
+ npl = pyfits.open(npolfile)
+ for ext in npl:
+ try:
+ nplextname = ext.header['EXTNAME']
+ nplextver = ext.header['EXTVER']
+ except KeyError:
+ continue
+ nplccdchip = cls.get_ccdchip(npl, extname=nplextname, extver=nplextver)
+ if nplextname == npl_extname and nplccdchip == ccdchip:
+ npol_header = ext.header
+ break
+ else:
+ continue
+ npl.close()
+
+ naxis = pyfits.getval(npolfile, ext=1, key='NAXIS')
+    # keep the CCDCHIP value rather than overwriting it with the extension name
+    ccdchip = npol_header.get('CCDCHIP', ccdchip)
+
+ kw = { 'NAXIS': 'Size of the axis',
+ 'CDELT': 'Coordinate increment along axis',
+ 'CRPIX': 'Coordinate system reference pixel',
+ 'CRVAL': 'Coordinate system value at reference pixel',
+ }
+
+ kw_comm1 = {}
+ kw_val1 = {}
+ for key in kw.keys():
+ for i in range(1, naxis+1):
+ si = str(i)
+ kw_comm1[key+si] = kw[key]
+
+ for i in range(1, naxis+1):
+ si = str(i)
+ kw_val1['NAXIS'+si] = npol_header.get('NAXIS'+si)
+ kw_val1['CDELT'+si] = npol_header.get('CDELT'+si, 1.0)
+ kw_val1['CRPIX'+si] = npol_header.get('CRPIX'+si, 0.0)
+ kw_val1['CRVAL'+si] = npol_header.get('CRVAL'+si, 0.0)
+
+ kw_comm0 = {'XTENSION': 'Image extension',
+ 'BITPIX': 'IEEE floating point',
+ 'NAXIS': 'Number of axes',
+ 'EXTNAME': 'WCS distortion array',
+ 'EXTVER': 'Distortion array version number',
+ 'PCOUNT': 'Special data area of size 0',
+ 'GCOUNT': 'One data group',
+ }
+
+ kw_val0 = { 'XTENSION': 'IMAGE',
+ 'BITPIX': -32,
+ 'NAXIS': naxis,
+ 'EXTNAME': 'WCSDVARR',
+ 'EXTVER': wdvarr_ver,
+ 'PCOUNT': 0,
+ 'GCOUNT': 1,
+ 'CCDCHIP': ccdchip,
+ }
+
+ cdl = pyfits.CardList()
+ for key in kw_comm0.keys():
+ cdl.append(pyfits.Card(key=key, value=kw_val0[key], comment=kw_comm0[key]))
+ for key in kw_comm1.keys():
+ cdl.append(pyfits.Card(key=key, value=kw_val1[key], comment=kw_comm1[key]))
+
+
+ hdr = pyfits.Header(cards=cdl)
+
+ return hdr
+
+ createNpolHdr = classmethod(createNpolHdr)
+
+ def get_ccdchip(cls, fobj, extname, extver):
+ """
+ Given a science file or npol file determine CCDCHIP
+ """
+ ccdchip = 1
+ if fobj[0].header['INSTRUME'] == 'ACS' and fobj[0].header['DETECTOR'] == 'WFC':
+ ccdchip = fobj[extname, extver].header['CCDCHIP']
+ elif fobj[0].header['INSTRUME'] == 'WFC3' and fobj[0].header['DETECTOR'] == 'UVIS':
+ ccdchip = fobj[extname, extver].header['CCDCHIP']
+ elif fobj[0].header['INSTRUME'] == 'WFPC2':
+ ccdchip = fobj[extname, extver].header['DETECTOR']
+ elif fobj[0].header['INSTRUME'] == 'STIS':
+ ccdchip = fobj[extname, extver].header['DETECTOR']
+ elif fobj[0].header['INSTRUME'] == 'NICMOS':
+ ccdchip = fobj[extname, extver].header['CAMERA']
+ return ccdchip
+
+ get_ccdchip = classmethod(get_ccdchip)
+ \ No newline at end of file
diff --git a/lib/stwcs/updatewcs/utils.py b/lib/stwcs/updatewcs/utils.py
new file mode 100644
index 0000000..29ba5f3
--- /dev/null
+++ b/lib/stwcs/updatewcs/utils.py
@@ -0,0 +1,28 @@
+from __future__ import division # confidence high
+
+def diff_angles(a,b):
+ """
+ Perform angle subtraction a-b taking into account
+    small-angle differences across the 360 degree line.
+ """
+
+ diff = a - b
+
+ if diff > 180.0:
+ diff -= 360.0
+
+ if diff < -180.0:
+ diff += 360.0
+
+ return diff
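+
+# Editor's sketch (not in the original source): the wrap-around handling in
+# action:
+#     diff_angles(359.0, 1.0)  ->  -2.0   (not  358.0)
+#     diff_angles(1.0, 359.0)  ->   2.0   (not -358.0)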
+
+def getBinning(fobj, extver=1):
+ # Return the binning factor
+ binned = 1
+ if fobj[0].header['INSTRUME'] == 'WFPC2':
+ mode = fobj[0].header.get('MODE', "")
+ if mode == 'AREA': binned = 2
+ else:
+ binned = fobj['SCI', extver].header.get('BINAXIS',1)
+ return binned
+
diff --git a/lib/stwcs/wcsutil/__init__.py b/lib/stwcs/wcsutil/__init__.py
new file mode 100644
index 0000000..9b7ed8c
--- /dev/null
+++ b/lib/stwcs/wcsutil/__init__.py
@@ -0,0 +1,35 @@
+from __future__ import division # confidence high
+
+from altwcs import *
+from hstwcs import HSTWCS
+
+__docformat__ = 'restructuredtext'
+
+
+def help():
+ print 'How to create an HSTWCS object:\n\n'
+ print """ \
+ 1. Using a pyfits HDUList object and an extension number \n
+ Example:\n
+
+ fobj = pyfits.open('some_file.fits')\n
+ w = wcsutil.HSTWCS(fobj, 3)\n\n
+
+ 2. Create an HSTWCS object using a qualified file name. \n
+ Example:\n
+ w = wcsutil.HSTWCS('j9irw4b1q_flt.fits[sci,1]')\n\n
+
+ 3. Create an HSTWCS object using a file name and an extension number. \n
+ Example:\n
+ w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=2)\n\n
+
+ 4. Create an HSTWCS object from WCS with key 'O'.\n
+ Example:\n
+
+ w = wcsutil.HSTWCS('j9irw4b1q_flt.fits', ext=2, wcskey='O')\n\n
+
+ 5. Create a template HSTWCS object for a DEFAULT object.\n
+ Example:\n
+ w = wcsutil.HSTWCS(instrument='DEFAULT')\n\n
+ """
+
diff --git a/lib/stwcs/wcsutil/altwcs.py b/lib/stwcs/wcsutil/altwcs.py
new file mode 100644
index 0000000..d250b52
--- /dev/null
+++ b/lib/stwcs/wcsutil/altwcs.py
@@ -0,0 +1,451 @@
+from __future__ import division # confidence high
+import os
+import string
+
+import numpy as np
+import pywcs
+import pyfits
+
+altwcskw = ['WCSAXES', 'CRVAL', 'CRPIX', 'PC', 'CDELT', 'CD', 'CTYPE', 'CUNIT',
+ 'PV', 'PS']
+
+# file operations
+def archiveWCS(fname, ext, wcskey=" ", wcsname=" ", clobber=False):
+ """
+    Copy the primary WCS to the header as an alternate WCS
+ with wcskey and name WCSNAME. It loops over all extensions in 'ext'
+
+ Parameters
+ ----------
+ fname: string or pyfits.HDUList
+ a file name or a file object
+ ext: an int, a tuple, a python list of integers or a python list of tuples (e.g.('sci',1))
+ fits extensions to work with
+ wcskey: string "A"-"Z" or " "
+ if " ": get next available key if wcsname is also " " or try
+ to get a key from WCSNAME value
+ wcsname: string
+ Name of alternate WCS description
+ clobber: boolean
+        if True - overwrites a WCS with the same key
+
+ See Also
+ --------
+    wcsutil.restoreWCS: Copy an alternate WCS to the primary WCS
+
+ """
+
+ if isinstance(fname, str):
+ f = pyfits.open(fname, mode='update')
+ else:
+ f = fname
+
+ if not parpasscheck(f, ext, wcskey, wcsname):
+ closefobj(fname, f)
+ return
+
+ if isinstance(ext, int) or isinstance(ext, tuple):
+ ext = [ext]
+
+ wcsext = 0
+ eindx = 0
+ for e in f:
+ if 'extname' in e.header and 'sci' in e.header['extname'].lower():
+ wcsext = eindx
+ break
+ eindx += 1
+
+ if wcskey == " ":
+ # try getting the key from WCSNAME
+ if not wcsname.strip():
+ wkey = next_wcskey(f[wcsext].header)
+ else:
+ wkey = getKeyFromName(f[wcsext].header, wcsname)
+ else:
+ if wcskey not in available_wcskeys(f[wcsext].header):
+ # reuse wcsname
+ if not wcsname.strip():
+ wcsname = f[wcsext].header["WCSNAME"+wcskey]
+ wname = wcsname
+ wkey = wcskey
+ else:
+ wkey = wcskey
+ wname = wcsname
+ else:
+ wkey = wcskey
+ wname = wcsname
+
+ for e in ext:
+ w = pywcs.WCS(f[e].header, fobj=f)
+ hwcs = w.to_header()
+ wcsnamekey = 'WCSNAME' + wkey
+ f[e].header.update(key=wcsnamekey, value=wcsname)
+ if w.wcs.has_cd():
+ pc2cd(hwcs)
+ for k in hwcs.keys():
+ key = k[:7] + wkey
+ f[e].header.update(key=key, value=hwcs[k])
+ #norient = np.rad2deg(np.arctan2(hwcs['CD1_2'],hwcs['CD2_2']))
+ #okey = 'ORIENT%s' % wkey
+ #f[e].header.update(key=okey, value=norient)
+ closefobj(fname, f)
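+
+# Editor's sketch (illustrative, not part of the original module): archive the
+# primary WCS of both SCI extensions of a hypothetical file under key 'B':
+#
+#     >>> archiveWCS('j94f05bgq_flt.fits', ext=[('sci', 1), ('sci', 2)],
+#     ...            wcskey='B', wcsname='BACKUP')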
+
+def restoreWCS(f, ext, wcskey=" ", wcsname=" ", clobber=False):
+ """
+ Copy a WCS with key "WCSKEY" to a primary WCS
+
+ Reads in a WCS defined with wcskey and saves it as the primary WCS.
+ If clobber is False, writes out new files whose names are the original
+ names with an attached 3 character string _'WCSKEY'_.
+ Otherwise overwrites the files. Goes sequentially through the list of extensions
+ The WCS is restored from the 'SCI' extension but the primary WCS of all
+ extensions with the same EXTVER are updated.
+
+
+ Parameters
+ ----------
+ f: string or pyfits.HDUList object
+ a file name or a file object
+ ext: an int, a tuple, a python list of integers or a python list
+ of tuples (e.g.('sci',1))
+ fits extensions to work with
+    wcskey: a character
+              "A"-"Z" - Used for one of 26 alternate WCS definitions.
+               or " " - find a key from WCSNAME value
+ wcsname: string (optional)
+ if given and wcskey is " ", will try to restore by WCSNAME value
+ clobber: boolean
+ A flag to define if the original files should be overwritten
+
+ See Also
+ --------
+ wcsutil.archiveWCS - copy the primary WCS as an alternate WCS
+
+ """
+ if isinstance(f, str):
+ if clobber:
+ fobj = pyfits.open(f, mode='update')
+ else:
+ fobj = pyfits.open(f)
+ else:
+ fobj = f
+
+ if not parpasscheck(fobj, ext, wcskey, wcsname):
+ closefobj(f, fobj)
+ return
+
+ if isinstance(ext, int) or isinstance(ext, tuple):
+ ext = [ext]
+
+ if not clobber:
+ name = (fobj.filename().split('.fits')[0] + '_%s_' + '.fits') %wcskey
+ else:
+ name = fobj.filename()
+
+ if wcskey == " ":
+ if wcsname.strip():
+ wkey = getKeyFromName(fobj[1].header, wcsname)
+ if not wkey:
+ print 'Could not get a key from wcsname %s .' % wcsname
+ closefobj(f, fobj)
+ return
+ else:
+ if wcskey not in wcskeys(fobj[1].header):
+ print "Could not find alternate WCS with key %s in this file" % wcskey
+ closefobj(f, fobj)
+ return
+ wkey = wcskey
+
+ for e in ext:
+ try:
+ extname = fobj[e].header['EXTNAME'].lower()
+ except KeyError:
+ continue
+ #Restore always from a 'SCI' extension but write it out to 'ERR' and 'DQ'
+ if extname == 'sci':
+ sciver = fobj[e].header['extver']
+ try:
+ nwcs = pywcs.WCS(fobj[e].header, fobj=fobj, key=wkey)
+ except KeyError:
+ print 'restoreWCS: Could not read WCS with key %s in file %s, \
+ extension %d' % (wkey, fobj.filename(), e)
+ closefobj(f, fobj)
+ return #raise
+ hwcs = nwcs.to_header()
+
+ if nwcs.wcs.has_cd():
+ pc2cd(hwcs, key=wkey)
+ for k in hwcs.keys():
+ key = k[:-1]
+ if key in fobj[e].header.keys():
+ fobj[e].header.update(key=key, value = hwcs[k])
+ else:
+ continue
+ if wcskey == 'O' and fobj[e].header.has_key('TDDALPHA'):
+ fobj[e].header['TDDALPHA'] = 0.0
+ fobj[e].header['TDDBETA'] = 0.0
+ if fobj[e].header.has_key('ORIENTAT'):
+ norient = np.rad2deg(np.arctan2(hwcs['CD1_2'+'%s' %wkey],hwcs['CD2_2'+'%s' %wkey]))
+ fobj[e].header.update(key='ORIENTAT', value=norient)
+ elif extname in ['err', 'dq', 'sdq', 'time', 'samp']:
+ cextver = fobj[e].header['extver']
+ if cextver == sciver:
+ for k in hwcs.keys():
+ key = k[:-1]
+ fobj[e].header.update(key=key, value = hwcs[k])
+ if fobj[e].header.has_key('ORIENTAT'):
+ norient = np.rad2deg(np.arctan2(hwcs['CD1_2'+'%s' %wkey],hwcs['CD2_2'+'%s' %wkey]))
+ fobj[e].header.update(key='ORIENTAT', value=norient)
+ else:
+ continue
+
+ if not clobber:
+ fobj.writeto(name)
+ closefobj(f, fobj)
+
+def deleteWCS(fname, ext, wcskey=" ", wcsname=" "):
+ """
+ Delete an alternate WCS defined with wcskey.
+ If wcskey is " " try to get a key from WCSNAME.
+
+ Parameters
+ ----------
+    fname: string or a pyfits.HDUList object
+ ext: an int, a tuple, a python list of integers or a python list of tuples (e.g.('sci',1))
+ fits extensions to work with
+ wcskey: one of 'A'-'Z' or " "
+ wcsname: string
+ Name of alternate WCS description
+ """
+ if isinstance(fname, str):
+ fobj = pyfits.open(fname, mode='update')
+ else:
+ fobj = fname
+
+ if not parpasscheck(fobj, ext, wcskey, wcsname):
+ closefobj(fname, fobj)
+ return
+
+ if isinstance(ext, int) or isinstance(ext, tuple):
+ ext = [ext]
+
+ # Do not allow deleting the original WCS.
+ if wcskey == 'O':
+ print "Wcskey 'O' is reserved for the original WCS and should not be deleted."
+ closefobj(fname, fobj)
+ return
+
+ if wcskey == " ":
+ # try getting the key from WCSNAME
+ if wcsname == " ":
+ print "Could not get a valid key from header"
+ closefobj(fname, fobj)
+ return
+ else:
+ wkey = getKeyFromName(fobj[1].header, wcsname)
+ if not wkey:
+ print 'Could not get a key: wcsname "%s" not found in header.' % wcsname
+ closefobj(fname, fobj)
+ return
+ else:
+ if wcskey not in wcskeys(fobj[1].header):
+ print "Could not find alternate WCS with key %s in this file" % wcskey
+ closefobj(fname, fobj)
+ return
+ wkey = wcskey
+
+ prexts = []
+ for i in ext:
+ hdr = fobj[i].header
+ try:
+ w = pywcs.WCS(hdr, fobj, key=wkey)
+ except KeyError:
+ continue
+ hwcs = w.to_header()
+ if w.wcs.has_cd():
+ pc2cd(hwcs, key=wkey)
+ for k in hwcs.keys():
+ del hdr[k]
+ #del hdr['ORIENT'+wkey]
+ prexts.append(i)
+ if prexts != []:
+ print 'Deleted all instances of WCS with key %s in extensions' % wkey, prexts
+ else:
+ print "Did not find WCS with key %s in any of the extensions" % wkey
+ closefobj(fname, fobj)
+
+#header operations
+def wcskeys(header):
+ """
+ Returns a list of characters used in the header for alternate
+ WCS description with WCSNAME keyword
+
+ Parameters
+ ----------
+ hdr: pyfits.Header
+ """
+ assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input"
+ names = header["WCSNAME*"]
+ return [key.split('WCSNAME')[1].upper() for key in names.keys()]
+
+def wcsnames(header):
+ """
+ Returns a dictionary of wcskey: WCSNAME pairs
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ """
+ names = header["WCSNAME*"]
+ d = {}
+ for c in names:
+ d[c.key[-1]] = c.value
+ return d
+
+def available_wcskeys(header):
+ """
+ Returns a list of characters which are not used in the header
+ with WCSNAME keyword. Any of them can be used to save a new
+ WCS.
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ """
+ assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input"
+ all_keys = list(string.ascii_uppercase)
+ used_keys = wcskeys(header)
+ try:
+ used_keys.remove("")
+ except ValueError:
+ pass
+ [all_keys.remove(key) for key in used_keys]
+ return all_keys
+
+def next_wcskey(header):
+ """
+ Returns next available character to be used for an alternate WCS
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ """
+ assert isinstance(header, pyfits.Header), "Requires a pyfits.Header object as input"
+ allkeys = available_wcskeys(header)
+ if allkeys != []:
+ return allkeys[0]
+ else:
+ return None
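+
+# Editor's sketch (not in the original source): for a header that defines
+# WCSNAME and WCSNAMEA,
+#     wcskeys(hdr)           -> ['', 'A']
+#     available_wcskeys(hdr) -> ['B', 'C', ..., 'Z']
+#     next_wcskey(hdr)       -> 'B'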
+
+def getKeyFromName(header, wcsname):
+ """
+ If WCSNAME is found in header, return its key, else return
+ None. This is used to update an alternate WCS
+ repeatedly and not generate new keys every time.
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ wcsname: str
+ Value of WCSNAME
+ """
+ wkey = None
+ names = wcsnames(header)
+ for item in names.items():
+ if item[1].lower() == wcsname.lower():
+ wkey = item[0]
+ break
+ return wkey
+
+def pc2cd(hdr, key=' '):
+ """
+    Convert a PC matrix to a CD matrix.
+
+ WCSLIB (and PyWCS) recognizes CD keywords as input
+ but converts them and works internally with the PC matrix.
+    to_header() returns the PC matrix even if the input was a
+ CD matrix. To keep input and output consistent we check
+ for has_cd and convert the PC back to CD.
+
+ Parameters
+ ----------
+ hdr: pyfits.Header
+
+ """
+ for c in ['1_1', '1_2', '2_1', '2_2']:
+ try:
+ val = hdr['PC'+c+'%s' % key]
+ del hdr['PC'+c+ '%s' % key]
+ except KeyError:
+ if c=='1_1' or c == '2_2':
+ val = 1.
+ else:
+ val = 0.
+ hdr.update(key='CD'+c+'%s' %key, value=val)
+ return hdr
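+
+# Editor's sketch (not in the original source): for a header containing only
+# PC1_1A = 0.9 and PC1_2A = 0.1, pc2cd(hdr, key='A') deletes the PC keywords
+# and writes CD keywords, filling in the missing elements with defaults:
+#     CD1_1A = 0.9, CD1_2A = 0.1, CD2_1A = 0.0, CD2_2A = 1.0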
+
+def parpasscheck(fobj, ext, wcskey, wcsname, clobber=True):
+
+ if not isinstance(fobj,pyfits.HDUList):
+ print "First parameter must be a fits file object or a file name."
+ return False
+ try:
+ assert (fobj.fileinfo(0)['filemode'] == 'update')
+ except AssertionError:
+ print "First parameter must be a file name or a file object opened in 'update' mode."
+ return False
+
+ if not isinstance(ext, int) and not isinstance(ext, tuple) \
+ and not isinstance(ext, list):
+ print "Ext must be integer, tuple, a list of int extension numbers, \
+ or a list of tuples representing a fits extension, for example ('sci', 1)."
+ return False
+
+ if len(wcskey) != 1:
+ print 'Parameter wcskey must be a character - one of "A"-"Z" or " "'
+ return False
+
+ wcsext = 0
+ eindx = 0
+ for e in fobj:
+ if 'extname' in e.header and 'sci' in e.header['extname'].lower():
+ wcsext = eindx
+ break
+ eindx += 1
+
+
+ if wcskey == " ":
+ # try getting the key from WCSNAME
+ """
+ if wcsname == " " or wcsname == "":
+ #wkey = next_wcskey(f[1].header)
+ #if not wkey:
+ # print "Could not get a valid key from header"
+ return False
+ """
+ if wcsname.strip():
+ wkey = getKeyFromName(fobj[wcsext].header, wcsname)
+ if wkey and not clobber:
+ print 'Wcsname %s is already used.' % wcsname
+ print 'Use "wcsutil.next_wcskey" to obtain a valid wcskey'
+ print 'or use "clobber=True" to overwrite the values.'
+ return False
+ else:
+ if wcskey not in available_wcskeys(fobj[wcsext].header):
+ if clobber==False:
+ print 'Wcskey %s is already used.' % wcskey
+ print 'Use "wcsutil.next_wcskey" to obtain a valid wcskey'
+ print 'or use "clobber=True" to overwrite the values.'
+ return False
+
+
+ return True
+
+def closefobj(fname, f):
+ """
+ Functions in this module accept as input a file name or a file object.
+ If the input was a file name (string) we close the object. If the user
+ passed a file object we leave it to the user to close it.
+ """
+ if isinstance(fname, str):
+ f.close()
diff --git a/lib/stwcs/wcsutil/convertwcs.py b/lib/stwcs/wcsutil/convertwcs.py
new file mode 100644
index 0000000..80276c3
--- /dev/null
+++ b/lib/stwcs/wcsutil/convertwcs.py
@@ -0,0 +1,118 @@
+import pyfits
+try:
+ import stwcs
+ from stwcs import wcsutil
+except ImportError:
+ stwcs = None
+
+from stsci.tools import fileutil
+
+OPUS_WCSKEYS = ['OCRVAL1','OCRVAL2','OCRPIX1','OCRPIX2',
+ 'OCD1_1','OCD1_2','OCD2_1','OCD2_2',
+ 'OCTYPE1','OCTYPE2']
+
+
+def archive_prefix_OPUS_WCS(fobj,extname='SCI'):
+ """ Identifies WCS keywords which were generated by OPUS and archived
+ using a prefix of 'O' for all 'SCI' extensions in the file
+
+ Parameters
+ ----------
+ fobj: string or pyfits.HDUList
+ Filename or pyfits object of a file
+
+ """
+ if stwcs is None:
+ print '====================='
+ print 'The STWCS package is needed to convert an old-style OPUS WCS to an alternate WCS'
+ print '====================='
+ raise ImportError
+
+
+ closefits = False
+ if isinstance(fobj,str):
+ # A filename was provided as input
+ fobj = pyfits.open(fobj,mode='update')
+ closefits=True
+
+ # Define the header
+ ext = ('sci',1)
+ hdr = fobj[ext].header
+
+ numextn = fileutil.countExtn(fobj)
+ extlist = []
+ for e in xrange(1,numextn+1):
+ extlist.append(('sci',e))
+
+    # Ensure that the 'O' alternate WCS is present
+ if 'O' not in wcsutil.wcskeys(hdr):
+ # if not, archive the Primary WCS as the default OPUS WCS
+ wcsutil.archiveWCS(fobj,extlist, wcskey='O', wcsname='OPUS')
+
+ # find out how many SCI extensions are in the image
+ numextn = fileutil.countExtn(fobj,extname=extname)
+ if numextn == 0:
+ extname = 'PRIMARY'
+
+ # create HSTWCS object from PRIMARY WCS
+ wcsobj = wcsutil.HSTWCS(fobj,ext=ext,wcskey='O')
+ # get list of WCS keywords
+ wcskeys = wcsobj.wcs2header().keys()
+
+ # For each SCI extension...
+ for e in xrange(1,numextn+1):
+ # Now, look for any WCS keywords with a prefix of 'O'
+ for key in wcskeys:
+ okey = 'O'+key[:7]
+ hdr = fobj[(extname,e)].header
+ if hdr.has_key(okey):
+ # Update alternate WCS keyword with prefix-O OPUS keyword value
+ hdr[key] = hdr[okey]
+
+ if closefits:
+ fobj.close()
+
+def create_prefix_OPUS_WCS(fobj,extname='SCI'):
+ """ Creates alternate WCS with a prefix of 'O' for OPUS generated WCS values
+ to work with old MultiDrizzle.
+
+ Parameters
+ ----------
+ fobj: string or pyfits.HDUList
+ Filename or pyfits object of a file
+
+ Raises
+ ------
+ IOError:
+ if input PyFITS object was not opened in 'update' mode
+
+ """
+ # List of O-prefix keywords to create
+ owcskeys = OPUS_WCSKEYS
+
+ closefits = False
+ if isinstance(fobj,str):
+ # A filename was provided as input
+ fobj = pyfits.open(fobj,mode='update')
+ closefits=True
+ else:
+ # check to make sure this FITS obj has been opened in update mode
+ if fobj.fileinfo(0)['filemode'] != 'update':
+ print 'File not opened with "mode=update". Quitting...'
+ raise IOError
+
+ # check for existence of O-prefix WCS
+ if not fobj['sci',1].header.has_key(owcskeys[0]):
+
+ # find out how many SCI extensions are in the image
+ numextn = fileutil.countExtn(fobj,extname=extname)
+ if numextn == 0:
+ extname = ''
+ for extn in xrange(1,numextn+1):
+ hdr = fobj[(extname,extn)].header
+ for okey in owcskeys:
+ hdr.update(okey,hdr[okey[1:]+'O'])
+
+ # Close FITS image if we had to open it...
+ if closefits:
+ fobj.close()
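
A minimal usage sketch for the two helpers above, assuming a hypothetical ACS file 'j94f05bgq_flt.fits' with 'SCI' extensions; both helpers open the file in update mode when given a filename:

    from stwcs.wcsutil import convertwcs
    # copy any prefix-O OPUS keywords into the 'O' alternate WCS
    convertwcs.archive_prefix_OPUS_WCS('j94f05bgq_flt.fits')
    # or re-create the prefix-O keywords from the 'O' alternate WCS
    convertwcs.create_prefix_OPUS_WCS('j94f05bgq_flt.fits')
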
diff --git a/lib/stwcs/wcsutil/getinput.py b/lib/stwcs/wcsutil/getinput.py
new file mode 100644
index 0000000..2f64f46
--- /dev/null
+++ b/lib/stwcs/wcsutil/getinput.py
@@ -0,0 +1,62 @@
+import pyfits
+from stsci.tools import irafglob, fileutil, parseinput
+
+def parseSingleInput(f=None, ext=None):
+ if isinstance(f, str):
+ # create an HSTWCS object from a filename
+ if ext != None:
+ filename = f
+ if isinstance(ext,tuple):
+ if ext[0] == '':
+ extnum = ext[1] # handle ext=('',1)
+ else:
+ extnum = ext
+ else:
+ extnum = int(ext)
+ elif ext == None:
+ filename, ext = fileutil.parseFilename(f)
+ ext = fileutil.parseExtn(ext)
+ if ext[0] == '':
+ extnum = int(ext[1]) #handle ext=('',extnum)
+ else:
+ extnum = ext
+ phdu = pyfits.open(filename)
+ hdr0 = pyfits.getheader(filename)
+ try:
+ ehdr = pyfits.getheader(filename, ext=extnum)
+ except (IndexError,KeyError):
+ print 'Unable to get extension.', extnum
+ raise
+
+ elif isinstance(f, pyfits.HDUList):
+ phdu = f
+ if ext == None:
+ extnum = 0
+ else:
+ extnum = ext
+ ehdr = f[extnum].header
+ hdr0 = f[0].header
+ filename = hdr0.get('FILENAME', "")
+
+ else:
+ raise ValueError('Input must be a file name string or a pyfits file object')
+
+ return filename, hdr0, ehdr, phdu
+
+
+def parseMultipleInput(input):
+ if isinstance(input, str):
+ if input[0] == '@':
+ # input is an @ file
+ filelist = irafglob.irafglob(input)
+ else:
+ filelist, output = parseinput.parseinput(input)
+ elif isinstance(input, list):
+ from hstwcs import HSTWCS # deferred import avoids a circular import with hstwcs
+ if isinstance(input[0], HSTWCS):
+ # a list of HSTWCS objects
+ return input
+ else:
+ filelist = input[:]
+ return filelist
\ No newline at end of file
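
A minimal sketch of the input forms parseSingleInput accepts, assuming the hypothetical file 'j94f05bgq_flt.fits':

    import pyfits
    from stwcs.wcsutil import getinput
    # filename plus an explicit (extname, extver) tuple
    fname, hdr0, ehdr, phdu = getinput.parseSingleInput('j94f05bgq_flt.fits',
                                                        ext=('sci', 1))
    # an already opened HDUList; the caller keeps responsibility for closing it
    fobj = pyfits.open('j94f05bgq_flt.fits')
    fname, hdr0, ehdr, phdu = getinput.parseSingleInput(fobj, ext=1)
    fobj.close()
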
diff --git a/lib/stwcs/wcsutil/headerlet.py b/lib/stwcs/wcsutil/headerlet.py
new file mode 100644
index 0000000..0318bf2
--- /dev/null
+++ b/lib/stwcs/wcsutil/headerlet.py
@@ -0,0 +1,898 @@
+from __future__ import division
+import logging
+import os
+import tarfile
+import tempfile
+import time
+import warnings
+from cStringIO import StringIO
+
+import numpy as np
+import pyfits
+
+import altwcs
+import wcscorr
+from hstwcs import HSTWCS
+from mappings import basic_wcs
+from stsci.tools.fileutil import countExtn
+
+module_logger = logging.getLogger('headerlet')
+
+import atexit
+atexit.register(logging.shutdown)
+
+def setLogger(logger, level, mode='w'):
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+ log_filename = 'headerlet.log'
+ fh = logging.FileHandler(log_filename, mode=mode)
+ fh.setLevel(logging.DEBUG)
+ fh.setFormatter(formatter)
+ logger.addHandler(fh)
+ logger.setLevel(level)
+
+def isWCSIdentical(scifile, file2, verbose=False):
+ """
+ Compares the WCS solution of 2 files.
+
+ Parameters
+ ----------
+ scifile: string
+ name of the first file (science observation or headerlet)
+ file2: string
+ name of the second file (science observation or headerlet)
+ verbose: False or a python logging level
+ (one of 'INFO', 'DEBUG' logging levels)
+ (an integer representing a logging level)
+
+ Notes
+ -----
+ These can be 2 science observations or 2 headerlets
+ or a science observation and a headerlet. The two files
+ have the same WCS solution if the following are the same:
+
+ - rootname/destim
+ - primary WCS
+ - SIP coefficients
+ - NPOL distortion
+ - D2IM correction
+ - Velocity aberration
+
+ """
+ if verbose:
+ setLogger(module_logger, verbose)
+ else:
+ module_logger.setLevel(100)
+
+ module_logger.info("Starting isWCSIdentical: %s" % time.asctime())
+
+ result = True
+ numsci1 = max(countExtn(scifile), countExtn(scifile, 'SIPWCS'))
+ numsci2 = max(countExtn(file2), countExtn(file2, 'SIPWCS'))
+
+ if numsci1 == 0 or numsci2 == 0 or numsci1 != numsci2:
+ module_logger.info("Number of SCI and SIPWCS extensions do not match.")
+ result = False
+
+ if getRootname(scifile) != getRootname(file2):
+ module_logger.info('Rootnames do not match.')
+ result = False
+ try:
+ extname1 = pyfits.getval(scifile, 'EXTNAME', ext=('SCI', 1))
+ except KeyError:
+ extname1 = 'SIPWCS'
+ try:
+ extname2 = pyfits.getval(file2, 'EXTNAME', ext=('SCI', 1))
+ except KeyError:
+ extname2 = 'SIPWCS'
+
+ for i in range(1, numsci1 + 1):
+ w1 = HSTWCS(scifile, ext=(extname1, i))
+ w2 = HSTWCS(file2, ext=(extname2, i))
+ if not np.allclose(w1.wcs.crval, w2.wcs.crval, rtol=1e-7) or \
+ not np.allclose(w1.wcs.crpix, w2.wcs.crpix, rtol=1e-7) or \
+ not np.allclose(w1.wcs.cd, w2.wcs.cd, rtol=1e-7) or \
+ not (np.array(w1.wcs.ctype) == np.array(w2.wcs.ctype)).all():
+ module_logger.info('Primary WCSs do not match')
+ result = False
+ if w1.sip or w2.sip:
+ if (w2.sip and not w1.sip) or (w1.sip and not w2.sip) or \
+ not np.allclose(w1.sip.a, w2.sip.a, rtol=1e-7) or \
+ not np.allclose(w1.sip.b, w2.sip.b, rtol=1e-7):
+ module_logger.info('SIP coefficients do not match')
+ result = False
+ if w1.cpdis1 or w2.cpdis1:
+ if w1.cpdis1 and not w2.cpdis1 or \
+ w2.cpdis1 and not w1.cpdis1 or \
+ not np.allclose(w1.cpdis1.data, w2.cpdis1.data):
+ module_logger.info('NPOL distortions do not match')
+ result = False
+ if w1.cpdis2 or w2.cpdis2:
+ if w1.cpdis2 and not w2.cpdis2 or \
+ w2.cpdis2 and not w1.cpdis2 or \
+ not np.allclose(w1.cpdis2.data, w2.cpdis2.data):
+ module_logger.info('NPOL distortions do not match')
+ result = False
+ if w1.det2im1 or w2.det2im1:
+ if w1.det2im1 and not w2.det2im1 or \
+ w2.det2im1 and not w1.det2im1 or\
+ not np.allclose(w1.det2im1.data, w2.det2im1.data):
+ module_logger.info('Det2Im corrections do not match')
+ result = False
+ if w1.det2im2 or w2.det2im2:
+ if w1.det2im2 and not w2.det2im2 or \
+ w2.det2im2 and not w1.det2im2 or\
+ not np.allclose(w1.det2im2.data, w2.det2im2.data):
+ module_logger.info('Det2Im corrections do not match')
+ result = False
+ if w1.vafactor != w2.vafactor:
+ module_logger.info('VA factors do not match')
+ result = False
+
+ return result
+
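
A usage sketch with two hypothetical files; 'verbose' accepts the logging levels described in the docstring:

    from stwcs.wcsutil import headerlet
    same = headerlet.isWCSIdentical('j94f05bgq_flt.fits',
                                    'j94f05bgq_v1_hdr.fits', verbose='INFO')
    if not same:
        print 'WCS solutions differ'
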
+
+# TODO: It would be logical for this to be part of the Headerlet class, perhaps
+# as a classmethod
+def createHeaderlet(fname, hdrname, destim=None, output=None, verbose=False, logmode='w'):
+ """
+ Create a headerlet from a science observation
+
+ Parameters
+ ----------
+ fname: string
+ Name of file with science observation
+ hdrname: string
+ Name for the headerlet, stored in the primary header of the headerlet
+ destim: string
+ Destination image, stored in the primary header of the headerlet.
+ If None, the ROOTNAME of the science observation is used.
+ ROOTNAME takes precedence; destim is only used for observations
+ without ROOTNAME in the primary header
+ output: string
+ Save the headerlet to the given filename.
+ verbose: False or a python logging level
+ (one of 'INFO', 'DEBUG' logging levels)
+ (an integer representing a logging level)
+ logmode: 'w', 'a'
+ used internally for controlling access to the log file
+ """
+
+ if verbose:
+ setLogger(module_logger, verbose, mode=logmode)
+ else:
+ module_logger.setLevel(100)
+
+ module_logger.info("Starting createHeaderlet: %s" % time.asctime())
+ phdukw = {'IDCTAB': True,
+ 'NPOLFILE': True,
+ 'D2IMFILE': True}
+ if not isinstance(fname, pyfits.HDUList):
+ fobj = pyfits.open(fname)
+ close_file = True
+ else:
+ fobj = fname
+ close_file = False
+ if destim is None:
+ try:
+ destim = fobj[0].header['ROOTNAME']
+ except KeyError:
+ module_logger.exception('Required keyword "DESTIM" not found')
+ print 'Please provide a value for the DESTIM keyword'
+ raise
+ if hdrname is None:
+ module_logger.critical("Required keyword 'HDRNAME' not given")
+ raise ValueError("Please provide a name for the headerlet, HDRNAME is "
+ "a required parameter.")
+
+ # get the version of STWCS used to create the WCS of the science file.
+ try:
+ upwcsver = fobj[0].header.ascard['STWCSVER']
+ except KeyError:
+ upwcsver = pyfits.Card("STWCSVER", " ",
+ "Version of STWCS used to update the WCS")
+
+ # get all keys for alternate WCSs
+ altkeys = altwcs.wcskeys(fobj[('SCI', 1)].header)
+
+ if 'O' in altkeys:
+ altkeys.remove('O')
+
+ numsci = countExtn(fname, 'SCI')
+ module_logger.debug("Number of 'SCI' extensions in file %s is %s"
+ % (fname, numsci))
+ hdul = pyfits.HDUList()
+ phdu = pyfits.PrimaryHDU()
+ phdu.header.update('DESTIM', destim,
+ comment='Destination observation root name')
+ phdu.header.update('HDRNAME', hdrname, comment='Headerlet name')
+ fmt="%Y-%m-%dT%H:%M:%S"
+ phdu.header.update('DATE', time.strftime(fmt),
+ comment='Date FITS file was generated')
+ phdu.header.ascard.append(upwcsver)
+
+ refs = updateRefFiles(fobj[0].header.ascard, phdu.header.ascard, verbose=verbose)
+ phdukw.update(refs)
+ phdu.header.update(key='VAFACTOR',
+ value=fobj[('SCI',1)].header.get('VAFACTOR', 1.))
+ hdul.append(phdu)
+
+ for e in range(1, numsci + 1):
+ hwcs = HSTWCS(fname, ext=('SCI', e))
+ h = hwcs.wcs2header(sip2hdr=True).ascard
+ for ak in altkeys:
+ awcs = HSTWCS(fname,ext=('SCI', e), wcskey=ak)
+ h.extend(awcs.wcs2header(idc2hdr=False).ascard)
+ h.append(pyfits.Card(key='VAFACTOR', value=hwcs.vafactor,
+ comment='Velocity aberration plate scale factor'))
+ h.insert(0, pyfits.Card(key='EXTNAME', value='SIPWCS',
+ comment='Extension name'))
+ h.insert(1, pyfits.Card(key='EXTVER', value=e,
+ comment='Extension version'))
+
+ fhdr = fobj[('SCI', e)].header.ascard
+ if phdukw['NPOLFILE']:
+ cpdis = fhdr['CPDIS*...']
+ for c in range(1, len(cpdis) + 1):
+ h.append(cpdis[c - 1])
+ dp = fhdr['DP%s.*...' % c]
+ h.extend(dp)
+
+ try:
+ h.append(fhdr['CPERROR%s' % c])
+ except KeyError:
+ pass
+
+ try:
+ h.append(fhdr['NPOLEXT'])
+ except KeyError:
+ pass
+
+ if phdukw['D2IMFILE']:
+ try:
+ h.append(fhdr['D2IMEXT'])
+ except KeyError:
+ pass
+
+ try:
+ h.append(fhdr['AXISCORR'])
+ except KeyError:
+ module_logger.exception("'D2IMFILE' kw exists but keyword 'AXISCORR' was not found in "
+ "%s['SCI',%d]" % (fname, e))
+ raise
+
+ try:
+ h.append(fhdr['D2IMERR'])
+ except KeyError:
+ h.append(pyfits.Card(key='DPERROR', value=0,
+ comment='Maximum error of D2IMARR'))
+
+ hdu = pyfits.ImageHDU(header=pyfits.Header(h))
+ # temporary fix for pyfits ticket # 48
+ hdu._extver = e
+ hdul.append(hdu)
+ numwdvarr = countExtn(fname, 'WCSDVARR')
+ numd2im = countExtn(fname, 'D2IMARR')
+ for w in range(1, numwdvarr + 1):
+ hdu = fobj[('WCSDVARR', w)].copy()
+ # temporary fix for pyfits ticket # 48
+ hdu._extver = w
+ hdul.append(hdu)
+ for d in range(1, numd2im + 1):
+ hdu = fobj[('D2IMARR', d)].copy()
+ # temporary fix for pyfits ticket # 48
+ hdu._extver = d
+ hdul.append(hdu)
+ if output is not None:
+ # write the headerlet to a file
+ if not output.endswith('_hdr.fits'):
+ output = output + '_hdr.fits'
+ hdul.writeto(output, clobber=True)
+ if close_file:
+ fobj.close()
+ return Headerlet(hdul,verbose=verbose, logmode='a')
+
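
A minimal sketch with hypothetical file and headerlet names; the '_hdr.fits' suffix is appended automatically when missing:

    from stwcs.wcsutil import headerlet
    # capture the current WCS solution of the science file in a headerlet
    hlet = headerlet.createHeaderlet('j94f05bgq_flt.fits',
                                     hdrname='j94f05bgq_v1',
                                     output='j94f05bgq_v1')
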
+def applyHeaderlet(hdrfile, destfile, createheaderlet=True, hdrname=None,
+ verbose=False):
+ """
+ Apply headerlet 'hdrfile' to a science observation 'destfile'
+
+ Parameters
+ ----------
+ hdrfile: string
+ Headerlet file
+ destfile: string
+ File name of science observation whose WCS solution will be updated
+ createheaderlet: boolean
+ True (default): before updating, create a headerlet with the
+ old WCS solution.
+ hdrname: string or None (default)
+ will be the value of the HDRNAME keyword in the headerlet generated
+ for the old WCS solution. If not specified, a sensible default
+ will be used. Not required if createheaderlet is False
+ verbose: False or a python logging level
+ (one of 'INFO', 'DEBUG' logging levels)
+ (an integer representing a logging level)
+ """
+ if verbose:
+ setLogger(module_logger, verbose)
+ else:
+ module_logger.setLevel(100)
+ module_logger.info("Starting applyHeaderlet: %s" % time.asctime())
+ hlet = Headerlet(hdrfile, verbose=verbose, logmode='a')
+ hlet.apply(destfile, createheaderlet=createheaderlet, hdrname=hdrname)
+
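
A matching sketch for applying that hypothetical headerlet back to a science file:

    from stwcs.wcsutil import headerlet
    # archive the current WCS as a headerlet named 'j94f05bgq_old',
    # then install the WCS carried by the headerlet file
    headerlet.applyHeaderlet('j94f05bgq_v1_hdr.fits', 'j94f05bgq_flt.fits',
                             createheaderlet=True, hdrname='j94f05bgq_old')
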
+def updateRefFiles(source, dest, verbose=False):
+ """
+ Update the reference file names in the primary header of 'dest'
+ using values from 'source'
+
+ Parameters
+ ----------
+ source: pyfits.Header.ascardlist
+ dest: pyfits.Header.ascardlist
+ """
+ module_logger.info("Updating reference files")
+ phdukw = {'IDCTAB': True,
+ 'NPOLFILE': True,
+ 'D2IMFILE': True}
+
+ try:
+ wind = dest.index_of('HISTORY')
+ except KeyError:
+ wind = len(dest)
+ for key in phdukw.keys():
+ try:
+ value = source[key]
+ dest.insert(wind, value)
+ except KeyError:
+ # TODO: I don't understand what the point of this is. Is it meant
+ # for logging purposes? Right now it isn't used.
+ phdukw[key] = False
+ return phdukw
+
+def getRootname(fname):
+ """
+ returns the value of ROOTNAME or DESTIM
+ """
+
+ try:
+ rootname = pyfits.getval(fname, 'ROOTNAME')
+ except KeyError:
+ rootname = pyfits.getval(fname, 'DESTIM')
+ return rootname
+
+def mapFitsExt2HDUListInd(fname, extname):
+ """
+ Map FITS extensions with 'EXTNAME' to HDUList indexes.
+ """
+
+ if not isinstance(fname, pyfits.HDUList):
+ f = pyfits.open(fname)
+ close_file = True
+ else:
+ f = fname
+ close_file = False
+ d = {}
+ for hdu in f:
+ # TODO: Replace calls to header.has_key() with `in header` once
+ # pyfits refactoring branch is in production use
+ if hdu.header.has_key('EXTNAME') and hdu.header['EXTNAME'] == extname:
+ extver = hdu.header['EXTVER']
+ d[(extname, extver)] = f.index_of((extname, extver))
+ if close_file:
+ f.close()
+ return d
+
+
+class Headerlet(pyfits.HDUList):
+ """
+ A Headerlet class
+ Ref: http://stsdas.stsci.edu/stsci_python_sphinxdocs/stwcs/headerlet_def.html
+ """
+
+ def __init__(self, fobj, wcskeys=[], mode='copyonwrite', verbose=False, logmode='w'):
+ """
+ Parameters
+ ----------
+ fobj: string
+ Name of headerlet file, file-like object, a list of HDU
+ instances, or an HDUList instance
+ wcskeys: python list
+ a list of wcskeys to be included in the headerlet
+ created from the old WCS solution before the
+ science file is updated. If empty: all alternate (if any)
+ WCSs are copied to the headerlet.
+ mode: string, optional
+ Mode with which to open the given file object
+ """
+ self.verbose = verbose
+ self.hdr_logger = logging.getLogger('headerlet.Headerlet')
+ if self.verbose:
+ setLogger(self.hdr_logger, self.verbose, mode=logmode)
+ else:
+ self.hdr_logger.setLevel(100)
+
+ self.hdr_logger.info("Creating a Headerlet object from wcskeys %s" % wcskeys)
+ self.wcskeys = wcskeys
+ if not isinstance(fobj, list):
+ fobj = pyfits.open(fobj, mode=mode)
+
+ super(Headerlet, self).__init__(fobj)
+ self.fname = self.filename()
+ self.hdrname = self[0].header["HDRNAME"]
+ self.stwcsver = self[0].header.get("STWCSVER", "")
+ self.destim = self[0].header["DESTIM"]
+ self.idctab = self[0].header.get("IDCTAB", "")
+ self.npolfile = self[0].header.get("NPOLFILE", "")
+ self.d2imfile = self[0].header.get("D2IMFILE", "")
+ self.vafactor = self[1].header.get("VAFACTOR", 1) #None instead of 1?
+ self.d2imerr = 0
+ self.axiscorr = 1
+
+ def apply(self, dest, createheaderlet=True, hdrname=None, attach=True, createsummary=True):
+ """
+ Apply this headerlet to a file.
+
+ Parameters
+ ----------
+ dest: string
+ Name of file to which to apply the WCS in the headerlet
+ hdrname: string
+ A unique name for the headerlet
+ createheaderlet: boolean
+ A flag which indicates if a headerlet should be created
+ from the old WCS and attached to the science file (default: True)
+ attach: boolean, default: True
+ By default the headerlet being applied will be attached
+ as an extension to the science file. Set 'attach' to False
+ to disable this.
+ createsummary: boolean, default: True
+ Set this to False to disable creating and updating of wcscorr table.
+ This is used primarily for testing.
+ """
+ self.hverify()
+ if self.verify_dest(dest):
+ if not isinstance(dest, pyfits.HDUList):
+ fobj = pyfits.open(dest, mode='update')
+ close_dest = True
+ else:
+ fobj = dest
+ close_dest = False
+
+ # Create the WCSCORR HDU/table from the existing WCS keywords if
+ # necessary
+ if createsummary:
+ try:
+ # TODO: in the pyfits refactoring branch if will be easier to
+ # test whether an HDUList contains a certain extension HDU
+ # without relying on try/except
+ wcscorr_table = fobj['WCSCORR']
+ except KeyError:
+ # The WCSCORR table needs to be created
+ wcscorr.init_wcscorr(fobj)
+
+ orig_hlt_hdu = None
+ numhlt = countExtn(fobj, 'HDRLET')
+ if createheaderlet:
+ # Create a headerlet for the original WCS data in the file,
+ # create an HDU from the original headerlet, and append it to
+ # the file
+ if not hdrname:
+ hdrname = fobj[0].header['ROOTNAME'] + '_orig'
+ orig_hlt = createHeaderlet(fobj, hdrname, verbose=self.verbose, logmode='a')
+ orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt)
+ orig_hlt_hdu.update_ext_version(numhlt + 1)
+ numhlt += 1
+
+ self._delDestWCS(fobj)
+ refs = updateRefFiles(self[0].header.ascard, fobj[0].header.ascard, verbose=self.verbose)
+ numsip = countExtn(self, 'SIPWCS')
+ for idx in range(1, numsip + 1):
+ fhdr = fobj[('SCI', idx)].header.ascard
+ siphdr = self[('SIPWCS', idx)].header.ascard
+ # a minimal attempt to get the position of the WCS keywords group
+ # in the header by looking for the PA_APER kw.
+ # at least make sure the WCS kw are written before the HISTORY kw
+ # if everything fails, append the kw to the header
+ try:
+ wind = fhdr.index_of('PA_APER')
+ except KeyError:
+ try:
+ wind = fhdr.index_of('HISTORY')
+ except KeyError:
+ wind = len(fhdr)
+ self.hdr_logger.debug("Inserting WCS keywords at index %s" % wind)
+ # TODO: Drop .keys() when refactored pyfits comes into use
+ for k in siphdr.keys():
+ if k not in ['XTENSION', 'BITPIX', 'NAXIS', 'PCOUNT',
+ 'GCOUNT','EXTNAME', 'EXTVER', 'ORIGIN',
+ 'INHERIT', 'DATE', 'IRAF-TLM']:
+ fhdr.insert(wind, siphdr[k])
+ else:
+ pass
+
+ #! Always attach these extensions last. Otherwise their headers may
+ # get updated with the other WCS kw.
+ numwdvar = countExtn(self, 'WCSDVARR')
+ numd2im = countExtn(self, 'D2IMARR')
+ for idx in range(1, numwdvar + 1):
+ fobj.append(self[('WCSDVARR', idx)].copy())
+ for idx in range(1, numd2im + 1):
+ fobj.append(self[('D2IMARR', idx)].copy())
+
+ # Update the WCSCORR table with new rows from the headerlet's WCSs
+ if createsummary:
+ wcscorr.update_wcscorr(fobj, self, 'SIPWCS')
+
+ # Append the original headerlet
+ if createheaderlet and orig_hlt_hdu:
+ fobj.append(orig_hlt_hdu)
+
+ if attach:
+ # Finally, append an HDU for this headerlet
+ new_hlt = HeaderletHDU.fromheaderlet(self)
+ new_hlt.update_ext_version(numhlt + 1)
+ fobj.append(new_hlt)
+
+ if close_dest:
+ fobj.close()
+ else:
+ self.hdr_logger.critical("Observation %s cannot be updated with headerlet "
+ "%s" % (fobj.filename(), self.hdrname))
+ print "Observation %s cannot be updated with headerlet %s" \
+ % (fobj.filename(), self.hdrname)
+
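
The equivalent object-level sketch, under the same hypothetical names:

    from stwcs.wcsutil import headerlet
    hlet = headerlet.Headerlet('j94f05bgq_v1_hdr.fits')
    if hlet.verify_dest('j94f05bgq_flt.fits'):
        hlet.apply('j94f05bgq_flt.fits', attach=True)
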
+
+ def hverify(self):
+ self.verify()
+ assert(self[0].header.has_key('DESTIM') and
+ self[0].header['DESTIM'].strip())
+ assert(self[0].header.has_key('HDRNAME') and
+ self[0].header['HDRNAME'].strip())
+ assert(self[0].header.has_key('STWCSVER'))
+
+
+ def verify_dest(self, dest):
+ """
+ verifies that the headerlet can be applied to the observation
+
+ DESTIM in the primary header of the headerlet must match ROOTNAME
+ of the science file (or the name of the destination file)
+ """
+
+ try:
+ if not isinstance(dest, pyfits.HDUList):
+ droot = pyfits.getval(dest, 'ROOTNAME')
+ else:
+ droot = dest[0].header['ROOTNAME']
+ except KeyError:
+ self.hdr_logger.debug("Keyword 'ROOTNAME' not found in destination file")
+ droot = dest.split('.fits')[0]
+ if droot == self.destim:
+ self.hdr_logger.debug("verify_destim() returned True")
+ return True
+ else:
+ self.hdr_logger.debug("verify_destim() returned False")
+ return False
+
+ def tofile(self, fname, destim=None, hdrname=None, clobber=False):
+ if not destim or not hdrname:
+ self.hverify()
+ self.writeto(fname, clobber=clobber)
+
+ def _delDestWCS(self, dest):
+ """
+ Delete the WCS of a science file
+ """
+
+ self.hdr_logger.info("Deleting all WCSs of file %s" % dest.filename())
+ numext = len(dest)
+
+ for idx in range(numext):
+ # Only delete WCS from extensions which may have WCS keywords
+ if dest[idx].__dict__.has_key('_xtn') and "IMAGE" in dest[idx]._xtn:
+ self._removeD2IM(dest[idx])
+ self._removeSIP(dest[idx])
+ self._removeLUT(dest[idx])
+ self._removePrimaryWCS(dest[idx])
+ self._removeIDCCoeffs(dest[idx])
+ try:
+ del dest[idx].header.ascard['VAFACTOR']
+ except KeyError:
+ pass
+
+ self._removeRefFiles(dest[0])
+ self._removeAltWCS(dest, ext=range(numext))
+ numwdvarr = countExtn(dest, 'WCSDVARR')
+ numd2im = countExtn(dest, 'D2IMARR')
+ for idx in range(1, numwdvarr + 1):
+ del dest[('WCSDVARR', idx)]
+ for idx in range(1, numd2im + 1):
+ del dest[('D2IMARR', idx)]
+
+ def _removeRefFiles(self, phdu):
+ """
+ phdu: Primary HDU
+ """
+ refkw = ['IDCTAB', 'NPOLFILE', 'D2IMFILE']
+ for kw in refkw:
+ try:
+ del phdu.header.ascard[kw]
+ except KeyError:
+ pass
+
+ def _removeSIP(self, ext):
+ """
+ Remove the SIP distortion of a FITS extension
+ """
+
+ self.hdr_logger.debug("Removing SIP distortion from (%s, %s)"
+ % (ext.name, ext._extver))
+ for prefix in ['A', 'B', 'AP', 'BP']:
+ try:
+ order = ext.header[prefix + '_ORDER']
+ del ext.header[prefix + '_ORDER']
+ except KeyError:
+ continue
+ for i in range(order + 1):
+ for j in range(order + 1):
+ key = prefix + '_%d_%d' % (i, j)
+ try:
+ del ext.header[key]
+ except KeyError:
+ pass
+ try:
+ del ext.header['IDCTAB']
+ except KeyError:
+ pass
+
+ def _removeLUT(self, ext):
+ """
+ Remove the Lookup Table distortion of a FITS extension
+ """
+
+ self.hdr_logger.debug("Removing LUT distortion from (%s, %s)"
+ % (ext.name, ext._extver))
+ try:
+ cpdis = ext.header['CPDIS*']
+ except KeyError:
+ return
+ try:
+ for c in range(1, len(cpdis) + 1):
+ del ext.header['DP%s.*...' % c]
+ del ext.header[cpdis[c - 1].key]
+ del ext.header['CPERR*']
+ del ext.header['NPOLFILE']
+ del ext.header['NPOLEXT']
+ except KeyError:
+ pass
+
+ def _removeD2IM(self, ext):
+ """
+ Remove the Detector to Image correction of a FITS extension
+ """
+
+ self.hdr_logger.debug("Removing D2IM correction from (%s, %s)"
+ % (ext.name, ext._extver))
+ d2imkeys = ['D2IMFILE', 'AXISCORR', 'D2IMEXT', 'D2IMERR']
+ for k in d2imkeys:
+ try:
+ del ext.header[k]
+ except KeyError:
+ pass
+
+ def _removeAltWCS(self, dest, ext):
+ """
+ Remove Alternate WCSs of a FITS extension.
+ A WCS with wcskey 'O' is never deleted.
+ """
+ dkeys = altwcs.wcskeys(dest[('SCI', 1)].header)
+ self.hdr_logger.debug("Removing alternate WCSs with keys %s from %s"
+ % (dkeys, dest.filename()))
+ for k in dkeys:
+ altwcs.deleteWCS(dest, ext=ext, wcskey=k)
+
+ def _removePrimaryWCS(self, ext):
+ """
+ Remove the primary WCS of a FITS extension
+ """
+
+ self.hdr_logger.debug("Removing Primary WCS from (%s, %s)"
+ % (ext.name, ext._extver))
+ naxis = ext.header.ascard['NAXIS'].value
+ for key in basic_wcs:
+ for i in range(1, naxis + 1):
+ try:
+ del ext.header.ascard[key + str(i)]
+ except KeyError:
+ pass
+ try:
+ del ext.header.ascard['WCSAXES']
+ except KeyError:
+ pass
+
+ def _removeIDCCoeffs(self, ext):
+ """
+ Remove IDC coefficients of a FITS extension
+ """
+
+ self.hdr_logger.debug("Removing IDC coefficient from (%s, %s)"
+ % (ext.name, ext._extver))
+ coeffs = ['OCX10', 'OCX11', 'OCY10', 'OCY11', 'IDCSCALE']
+ for k in coeffs:
+ try:
+ del ext.header.ascard[k]
+ except KeyError:
+ pass
+
+
+class HeaderletHDU(pyfits.core._NonstandardExtHDU):
+ """
+ A non-standard extension HDU for encapsulating Headerlets in a file. These
+ HDUs have an extension type of HDRLET and their EXTNAME is derived from the
+ Headerlet's HDRNAME.
+
+ The data itself is a tar file containing a single file, which is the
+ Headerlet file itself. The file name is derived from the HDRNAME keyword,
+ and should be in the form `<HDRNAME>_hdr.fits`. If the COMPRESS keyword
+ evaluates to `True`, the tar file is compressed with gzip compression.
+
+ The Headerlet contained in the HDU's data can be accessed by the
+ `headerlet` attribute.
+ """
+
+ _xtn = _extension = 'HDRLET'
+
+ def __init__(self, data=None, header=None):
+ super(HeaderletHDU, self).__init__(data=data, header=header)
+ # TODO: This can be removed after the next pyfits release, but for now
+ # the _ExtensionHDU base class sets self._xtn = '' in its __init__().
+ self._xtn = self._extension
+ # For some reason _NonstandardExtHDU.__init__ sets self.name = None,
+ # even if it's already been set by the EXTNAME keyword in
+ # _ExtensionHDU.__init__() -_-;
+ if header and header.has_key('EXTNAME') and not self.name:
+ self.name = header['EXTNAME']
+ # self._extver, if set, is still preserved. From
+ # _ExtensionHDU.__init__()...totally inconsistent.
+
+ def __getattr__(self, attr):
+ if attr == 'data':
+ size = self.size()
+ self._file.seek(self._datLoc)
+ self.__dict__[attr] = self._file.read(size)
+ elif attr == 'headerlet':
+ self._file.seek(self._datLoc)
+ s = StringIO()
+ # Read the data into a StringIO--reading directly from the file
+ # won't work (at least for gzipped files) due to problems deep
+ # within the gzip module that make it difficult to read gzip files
+ # embedded in another file
+ s.write(self._file.read(self.size()))
+ s.seek(0)
+ if self._header['COMPRESS']:
+ mode = 'r:gz'
+ else:
+ mode = 'r'
+ t = tarfile.open(mode=mode, fileobj=s)
+ members = t.getmembers()
+ if not len(members):
+ raise ValueError('The Headerlet contents are missing.')
+ elif len(members) > 1:
+ warnings.warn('More than one file is contained in this '
+ 'HDU data; only the headerlet file should be present.')
+ hlt_name = self._header['HDRNAME'] + '_hdr.fits'
+ try:
+ hlt_info = t.getmember(hlt_name)
+ except KeyError:
+ warnings.warn('The file %s was missing from the HDU data. '
+ 'Assuming that the first file in the data is '
+ 'the headerlet file.' % hlt_name)
+ hlt_info = members[0]
+ hlt_file = t.extractfile(hlt_info)
+ # hlt_file is a file-like object
+ hlt = Headerlet(hlt_file, mode='readonly')
+ self.__dict__[attr] = hlt
+ else:
+ return pyfits.core._ValidHDU.__getattr__(self, attr)
+ try:
+ return self.__dict__[attr]
+ except KeyError:
+ raise AttributeError(attr)
+
+ @classmethod
+ def fromheaderlet(cls, headerlet, compress=False):
+ """
+ Creates a new HeaderletHDU from a given Headerlet object.
+
+ Parameters
+ ----------
+ headerlet : Headerlet
+ A valid Headerlet object.
+ compress : bool, optional
+ Gzip compress the headerlet data.
+ """
+
+ phdu = headerlet[0]
+ phduhdr = phdu.header
+ hlt_filename = phdu.header['HDRNAME'] + '_hdr.fits'
+
+ # TODO: As it stands there's no good way to write out an HDUList in
+ # memory, since it automatically closes the given file-like object when
+ # it's done writing. I'd argue that if passed an open file handler it
+ # should not close it, but for now we'll have to write to a temp file.
+ fd, name = tempfile.mkstemp()
+ try:
+ f = os.fdopen(fd, 'rb+')
+ headerlet.writeto(f)
+ # The tar file itself we'll write in memory, as it should be
+ # relatively small
+ if compress:
+ mode = 'w:gz'
+ else:
+ mode = 'w'
+ s = StringIO()
+ t = tarfile.open(mode=mode, fileobj=s)
+ t.add(name, arcname=hlt_filename)
+ t.close()
+ finally:
+ os.remove(name)
+
+ cards = [
+ pyfits.Card('XTENSION', cls._extension, 'Headerlet extension'),
+ pyfits.Card('BITPIX', 8, 'array data type'),
+ pyfits.Card('NAXIS', 1, 'number of array dimensions'),
+ pyfits.Card('NAXIS1', len(s.getvalue()), 'Axis length'),
+ pyfits.Card('PCOUNT', 0, 'number of parameters'),
+ pyfits.Card('GCOUNT', 1, 'number of groups'),
+ pyfits.Card('EXTNAME', cls._extension,
+ 'name of the headerlet extension'),
+ phdu.header.ascard['HDRNAME'],
+ phdu.header.ascard['DATE'],
+ pyfits.Card('SIPNAME', headerlet['SIPWCS', 1].header['WCSNAMEA'],
+ 'SIP distortion model name'),
+ phdu.header.ascard['NPOLFILE'],
+ phdu.header.ascard['D2IMFILE'],
+ pyfits.Card('COMPRESS', compress, 'Uses gzip compression')
+ ]
+
+ header = pyfits.Header(pyfits.CardList(cards))
+
+ hdu = cls(data=pyfits.DELAYED, header=header)
+ hdu._file = s
+ hdu._datLoc = 0
+ return hdu
+
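
A sketch of packing a headerlet into an HDU and attaching it by hand, under the same assumptions:

    import pyfits
    from stwcs.wcsutil import headerlet
    hlet = headerlet.Headerlet('j94f05bgq_v1_hdr.fits')
    hdu = headerlet.HeaderletHDU.fromheaderlet(hlet, compress=True)
    fobj = pyfits.open('j94f05bgq_flt.fits', mode='update')
    fobj.append(hdu)
    fobj.close()
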
+ @classmethod
+ def match_header(cls, header):
+ """
+ This is a class method used in the pyfits refactoring branch to
+ recognize whether or not this class should be used for instantiating
+ an HDU object based on values in the header.
+
+ It is included here for forward-compatibility.
+ """
+
+ card = header.ascard[0]
+ if card.key != 'XTENSION':
+ return False
+ xtension = card.value.rstrip()
+ return xtension == cls._extension
+
+ # TODO: Add header verification
+
+ def _summary(self):
+ # TODO: Perhaps make this more descriptive...
+ return '%-10s %-12s %4d' % (self.name, self.__class__.__name__,
+ len(self._header.ascard))
+
+# Monkey-patch pyfits to add minimal support for HeaderletHDUs
+# TODO: Get rid of this ASAP!!! (it won't be necessary with the pyfits
+# refactoring branch)
+if not hasattr(pyfits.Header._updateHDUtype, '_patched'):
+ __old_updateHDUtype = pyfits.Header._updateHDUtype
+ def __updateHDUtype(self):
+ if HeaderletHDU.match_header(self):
+ self._hdutype = HeaderletHDU
+ else:
+ __old_updateHDUtype(self)
+ __updateHDUtype._patched = True
+ pyfits.Header._updateHDUtype = __updateHDUtype
diff --git a/lib/stwcs/wcsutil/hstwcs.py b/lib/stwcs/wcsutil/hstwcs.py
new file mode 100644
index 0000000..11c1f42
--- /dev/null
+++ b/lib/stwcs/wcsutil/hstwcs.py
@@ -0,0 +1,451 @@
+from __future__ import division # confidence high
+
+import os.path
+from pywcs import WCS
+import pyfits
+import instruments
+from stwcs.distortion import models, coeff_converter
+import altwcs
+import numpy as np
+from stsci.tools import fileutil
+from stsci.tools.fileutil import DEGTORAD, RADTODEG
+
+import getinput
+import mappings
+from mappings import inst_mappings, ins_spec_kw
+from mappings import basic_wcs
+
+
+__docformat__ = 'restructuredtext'
+
+class HSTWCS(WCS):
+
+ def __init__(self, fobj=None, ext=None, minerr=0.0, wcskey=" "):
+ """
+ Create a WCS object based on the instrument.
+
+ In addition to basic WCS keywords this class provides
+ instrument specific information needed in distortion computation.
+
+ Parameters
+ ----------
+ fobj: string or PyFITS HDUList object or None
+ a file name, e.g j9irw4b1q_flt.fits
+ a fully qualified filename[EXTNAME,EXTNUM], e.g. j9irw4b1q_flt.fits[sci,1]
+ a pyfits file object, e.g pyfits.open('j9irw4b1q_flt.fits'), in which case the
+ user is responsible for closing the file object.
+ ext: int or None
+ extension number
+ if ext==None, it is assumed the data is in the primary hdu
+ minerr: float
+ minimum value a distortion correction must have in order to be applied.
+ If CPERRja, CQERRja are smaller than minerr, the corresponding
+ distortion is not applied.
+ wcskey: str
+ A one character A-Z or " " used to retrieve and define an
+ alternate WCS description.
+ """
+
+ self.inst_kw = ins_spec_kw
+ self.minerr = minerr
+ self.wcskey = wcskey
+
+ if fobj is not None:
+ filename, hdr0, ehdr, phdu = getinput.parseSingleInput(f=fobj,
+ ext=ext)
+ self.filename = filename
+ instrument_name = hdr0.get('INSTRUME', 'DEFAULT')
+ if instrument_name in ['IRAF/ARTDATA','',' ','N/A']:
+ self.instrument = 'DEFAULT'
+ else:
+ self.instrument = instrument_name
+ WCS.__init__(self, ehdr, fobj=phdu, minerr=self.minerr,
+ key=self.wcskey)
+ # If input was a pyfits HDUList object, it's the user's
+ # responsibility to close it, otherwise, it's closed here.
+ if not isinstance(fobj, pyfits.HDUList):
+ phdu.close()
+ self.setInstrSpecKw(hdr0, ehdr)
+ self.readIDCCoeffs(ehdr)
+ extname = ehdr.get('EXTNAME', '')
+ extnum = ehdr.get('EXTVER', None)
+ self.extname = (extname, extnum)
+ else:
+ # create a default HSTWCS object
+ self.instrument = 'DEFAULT'
+ WCS.__init__(self, minerr=self.minerr, key=self.wcskey)
+ self.pc2cd()
+ self.setInstrSpecKw()
+ self.setPscale()
+ self.setOrient()
+
+ def readIDCCoeffs(self, header):
+ """
+ Reads in first order IDCTAB coefficients if present in the header
+ """
+ coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale']
+ for c in coeffs:
+ self.__setattr__(c, header.get(c, None))
+
+ def setInstrSpecKw(self, prim_hdr=None, ext_hdr=None):
+ """
+ Populate the instrument specific attributes:
+
+ These can be in different headers but each instrument class has knowledge
+ of where to look for them.
+
+ Parameters
+ ----------
+ prim_hdr: pyfits.Header
+ primary header
+ ext_hdr: pyfits.Header
+ extension header
+
+ """
+ if self.instrument in inst_mappings.keys():
+ inst_kl = inst_mappings[self.instrument]
+ inst_kl = instruments.__dict__[inst_kl]
+ insobj = inst_kl(prim_hdr, ext_hdr)
+
+ for key in self.inst_kw:
+ try:
+ self.__setattr__(key, insobj.__getattribute__(key))
+ except AttributeError:
+ # Some of the instrument's attributes are recorded in the primary header and
+ # were already set, (e.g. 'DETECTOR'), the code below is a check for that case.
+ if not self.__getattribute__(key):
+ raise
+ else:
+ pass
+
+ else:
+ raise KeyError, "Unsupported instrument - %s" %self.instrument
+
+ def setPscale(self):
+ """
+ Calculates the plate scale from the CD matrix
+ """
+ try:
+ cd11 = self.wcs.cd[0][0]
+ cd21 = self.wcs.cd[1][0]
+ self.pscale = np.sqrt(np.power(cd11,2)+np.power(cd21,2)) * 3600.
+ except AttributeError:
+ print "This file has a PC matrix. You may want to convert it \n \
+ to a CD matrix, if reasonable, by running the pc2cd() method.\n \
+ The plate scale can then be set by calling the setPscale() method.\n"
+ self.pscale = None
+
+ def setOrient(self):
+ """
+ Computes ORIENTAT from the CD matrix
+ """
+ try:
+ cd12 = self.wcs.cd[0][1]
+ cd22 = self.wcs.cd[1][1]
+ self.orientat = RADTODEG(np.arctan2(cd12,cd22))
+ except AttributeError:
+ print "This file has a PC matrix. You may want to convert it \n \
+ to a CD matrix, if reasonable, by running the pc2cd() method.\n \
+ The orientation can then be set by calling the setOrient() method.\n"
+ self.orientat = None
+
+ def updatePscale(self, scale):
+ """
+ Updates the CD matrix with a new plate scale
+ """
+ self.wcs.cd = self.wcs.cd/self.pscale*scale
+ self.setPscale()
+
+ def readModel(self, update=False, header=None):
+ """
+ Reads distortion model from IDCTAB.
+
+ If IDCTAB is not found ('N/A', "", or not found on disk), then
+ if SIP coefficients and first order IDCTAB coefficients are present
+ in the header, restore the idcmodel from the header.
+ If not - assign None to self.idcmodel.
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ fits extension header
+ update: boolean (False)
+ if True - record the following IDCTAB quantities as header keywords:
+ CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF,
+ IDCV2REF, IDCV3REF
+ """
+ if self.idctab in [None, '', ' ','N/A']:
+ #Keyword idctab is not present in header - check for sip coefficients
+ if header is not None and header.has_key('IDCSCALE'):
+ self._readModelFromHeader(header)
+ else:
+ print "Distortion model is not available: IDCTAB=None\n"
+ self.idcmodel = None
+ elif not os.path.exists(fileutil.osfn(self.idctab)):
+ if header is not None and header.has_key('IDCSCALE'):
+ self._readModelFromHeader(header)
+ else:
+ print 'Distortion model is not available: IDCTAB file %s not found\n' % self.idctab
+ self.idcmodel = None
+ else:
+ self.readModelFromIDCTAB(header=header, update=update)
+
+ def _readModelFromHeader(self, header):
+ # Recreate idc model from SIP coefficients and header kw
+ print 'Restoring IDC model from SIP coefficients\n'
+ model = models.GeometryModel()
+ cx, cy = coeff_converter.sip2idc(self)
+ model.cx = cx
+ model.cy = cy
+ model.name = "sip"
+ model.norder = header['A_ORDER']
+
+ refpix = {}
+ refpix['XREF'] = header['IDCXREF']
+ refpix['YREF'] = header['IDCYREF']
+ refpix['PSCALE'] = header['IDCSCALE']
+ refpix['V2REF'] = header['IDCV2REF']
+ refpix['V3REF'] = header['IDCV3REF']
+ refpix['THETA'] = header['IDCTHETA']
+ model.refpix = refpix
+
+ self.idcmodel = model
+
+
+ def readModelFromIDCTAB(self, header=None, update=False):
+ """
+ Read distortion model from idc table.
+
+ Parameters
+ ----------
+ header: pyfits.Header
+ fits extension header
+ update: boolean (False)
+ if True - save the following as header keywords:
+ CX10, CX11, CY10, CY11, IDCSCALE, IDCTHETA, IDCXREF, IDCYREF,
+ IDCV2REF, IDCV3REF
+
+ """
+ if self.date_obs == None:
+ print 'date_obs not available\n'
+ self.idcmodel = None
+ return
+ if self.filter1 == None and self.filter2 == None:
+ print 'No filter information available\n'
+ self.idcmodel = None
+ return
+
+ self.idcmodel = models.IDCModel(self.idctab,
+ chip=self.chip, direction='forward', date=self.date_obs,
+ filter1=self.filter1, filter2=self.filter2,
+ offtab=self.offtab, binned=self.binned)
+
+ if update:
+ if header==None:
+ print 'Update header with IDC model kw requested but header was not provided.\n'
+ else:
+ self._updatehdr(header)
+
+ def wcs2header(self, sip2hdr=False, idc2hdr=True):
+ """
+ Create a pyfits.Header object from WCS keywords.
+
+ If the original header had a CD matrix, return a CD matrix,
+ otherwise return a PC matrix.
+
+ Parameters
+ ----------
+ sip2hdr: boolean
+ If True - include SIP coefficients
+ """
+ h = self.to_header()
+ if self.wcs.has_cd():
+ h = altwcs.pc2cd(h, key=self.wcskey)
+
+ if idc2hdr:
+ for card in self._idc2hdr():
+ h.update(card.key,value=card.value,comment=card.comment)
+ try:
+ del h.ascard['RESTFRQ']
+ del h.ascard['RESTWAV']
+ except KeyError: pass
+
+ if sip2hdr and self.sip:
+ for card in self._sip2hdr('a'):
+ h.update(card.key,value=card.value,comment=card.comment)
+ for card in self._sip2hdr('b'):
+ h.update(card.key,value=card.value,comment=card.comment)
+
+ try:
+ ap = self.sip.ap
+ except AssertionError:
+ ap = None
+ try:
+ bp = self.sip.bp
+ except AssertionError:
+ bp = None
+
+ if ap:
+ for card in self._sip2hdr('ap'):
+ h.update(card.key,value=card.value,comment=card.comment)
+ if bp:
+ for card in self._sip2hdr('bp'):
+ h.update(card.key,value=card.value,comment=card.comment)
+ return h
+
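
A sketch of exporting the WCS back to header cards, assuming the hypothetical file used earlier:

    from stwcs import wcsutil
    w = wcsutil.HSTWCS('j94f05bgq_flt.fits', ext=('sci', 1))
    h = w.wcs2header(sip2hdr=True)  # a pyfits.Header with CD matrix and SIP cards
    print h.ascard
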
+
+ def _sip2hdr(self, k):
+ """
+ Get a set of SIP coefficients in the form of an array
+ and turn them into a pyfits.CardList.
+ k - one of 'a', 'b', 'ap', 'bp'
+ """
+
+ cards = pyfits.CardList()
+ korder = self.sip.__getattribute__(k+'_order')
+ cards.append(pyfits.Card(key=k.upper()+'_ORDER', value=korder))
+ coeffs = self.sip.__getattribute__(k)
+ ind = coeffs.nonzero()
+ for i in range(len(ind[0])):
+ card = pyfits.Card(key=k.upper()+'_'+str(ind[0][i])+'_'+str(ind[1][i]),
+ value=coeffs[ind[0][i], ind[1][i]])
+ cards.append(card)
+ return cards
+
+ def _idc2hdr(self):
+ # save some of the idc coefficients
+ coeffs = ['ocx10', 'ocx11', 'ocy10', 'ocy11', 'idcscale']
+ cards = pyfits.CardList()
+ for c in coeffs:
+ try:
+ val = self.__getattribute__(c)
+ except AttributeError:
+ continue
+ cards.append(pyfits.Card(key=c, value=val))
+ return cards
+
+ def pc2cd(self):
+ self.wcs.cd = self.wcs.pc.copy()
+
+ def all_sky2pix(self,ra,dec,origin):
+ """
+ Performs full inverse transformation using iterative solution
+ on full forward transformation with complete distortion model.
+
+ NOTES
+ -----
+ We now need to find the position we want by iterative
+ improvement of an initial guess - the centre of the chip
+
+ The method is to derive an "effective CD matrix" and use that
+ to apply a correction until we are close enough (as defined by
+ the ERR variable)
+
+ Code from the drizzle task TRANBACK (dither$drizzle/tranback.f)
+ defined the algorithm for this implementation
+
+ """
+ from stwcs.distortion import utils
+
+ # Define some output arrays
+ xout = np.zeros(len(ra),dtype=np.float64)
+ yout = np.zeros(len(ra),dtype=np.float64)
+ # ... and internal arrays
+ x = np.zeros(3,dtype=np.float64)
+ y = np.zeros(3,dtype=np.float64)
+
+ # define delta for comparison
+ err = 0.0001
+
+ # Use linear WCS as frame in which to perform fit
+ # rather than on the sky
+ undistort = True
+ if self.sip is None:
+ # Only apply distortion if distortion coeffs are present.
+ undistort = False
+ wcslin = utils.output_wcs([self],undistort=undistort)
+
+ # We can only transform 1 position at a time
+ for r,d,n in zip(ra,dec,xrange(len(ra))):
+ # First guess for output
+ x[0],y[0] = self.wcs_sky2pix(r,d,origin)
+ # also convert RA,Dec into undistorted linear pixel positions
+ lx,ly = wcslin.wcs_sky2pix(r,d,origin)
+
+ # Loop around until we are close enough (max 20 iterations)
+ ev_old = None
+ for i in xrange(20):
+ x[1] = x[0] + 1.0
+ y[1] = y[0]
+ x[2] = x[0]
+ y[2] = y[0] + 1.0
+ # Perform full transformation on pixel position
+ rao,deco = self.all_pix2sky(x,y,origin)
+ # convert RA,Dec into linear pixel positions for fitting
+ xo,yo = wcslin.wcs_sky2pix(rao,deco,origin)
+
+ # Compute deltas between output and initial guess as
+ # an affine transform then invert that transformation
+ dxymat = np.array([[xo[1]-xo[0],yo[1]-yo[0]],
+ [xo[2]-xo[0],yo[2]-yo[0]]],dtype=np.float64)
+
+ invmat = np.linalg.inv(dxymat)
+ # compute error in output position
+ dx = lx - xo[0]
+ dy = ly - yo[0]
+
+ # record the old position
+ xold = x[0]
+ yold = y[0]
+
+ # Update the initial guess position using the inverted transform
+ x[0] = xold + dx*invmat[0][0] + dy*invmat[1][0]
+ y[0] = yold + dx*invmat[0][1] + dy*invmat[1][1]
+
+ # Work out the error vector length
+ ev = np.sqrt((x[0] - xold)**2 + (y[0] - yold)**2)
+
+ # initialize record of previous error measurement during 1st iteration
+ if ev_old is None:
+ ev_old = ev
+
+ # Check to see whether we have reached the limit or
+ # the new error is greater than error from previous iteration
+ if ev < err or (np.abs(ev) > np.abs(ev_old)):
+ break
+ # remember error measurement from previous iteration
+ ev_old = ev
+
+ xout[n] = x[0]
+ yout[n] = y[0]
+
+ return xout,yout
+
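
A round-trip sketch of the iterative inverse, same hypothetical file:

    import numpy as np
    from stwcs import wcsutil
    w = wcsutil.HSTWCS('j94f05bgq_flt.fits', ext=('sci', 1))
    ra, dec = w.all_pix2sky(np.array([100.]), np.array([200.]), 1)
    x, y = w.all_sky2pix(ra, dec, 1)
    print x, y  # should recover ~(100., 200.) to within the 1e-4 pixel tolerance
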
+ def _updatehdr(self, ext_hdr):
+ #kw2add : OCX10, OCX11, OCY10, OCY11
+ # record the model in the header for use by pydrizzle
+ ext_hdr.update('OCX10', self.idcmodel.cx[1,0])
+ ext_hdr.update('OCX11', self.idcmodel.cx[1,1])
+ ext_hdr.update('OCY10', self.idcmodel.cy[1,0])
+ ext_hdr.update('OCY11', self.idcmodel.cy[1,1])
+ ext_hdr.update('IDCSCALE', self.idcmodel.refpix['PSCALE'])
+ ext_hdr.update('IDCTHETA', self.idcmodel.refpix['THETA'])
+ ext_hdr.update('IDCXREF', self.idcmodel.refpix['XREF'])
+ ext_hdr.update('IDCYREF', self.idcmodel.refpix['YREF'])
+ ext_hdr.update('IDCV2REF', self.idcmodel.refpix['V2REF'])
+ ext_hdr.update('IDCV3REF', self.idcmodel.refpix['V3REF'])
+
+ def printwcs(self):
+ """
+ Print the basic WCS keywords.
+ """
+ print 'WCS Keywords\n'
+ print 'CD_11 CD_12: %r %r' % (self.wcs.cd[0,0], self.wcs.cd[0,1])
+ print 'CD_21 CD_22: %r %r' % (self.wcs.cd[1,0], self.wcs.cd[1,1])
+ print 'CRVAL : %r %r' % (self.wcs.crval[0], self.wcs.crval[1])
+ print 'CRPIX : %r %r' % (self.wcs.crpix[0], self.wcs.crpix[1])
+ print 'NAXIS : %d %d' % (self.naxis1, self.naxis2)
+ print 'Plate Scale : %r' % self.pscale
+ print 'ORIENTAT : %r' % self.orientat
+
+
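
A minimal construction sketch for the class above, with the hypothetical file used throughout:

    from stwcs import wcsutil
    w = wcsutil.HSTWCS('j94f05bgq_flt.fits', ext=('sci', 1))
    w.readModel(update=False)  # attach the IDCTAB distortion model, if available
    w.printwcs()
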
diff --git a/lib/stwcs/wcsutil/instruments.py b/lib/stwcs/wcsutil/instruments.py
new file mode 100644
index 0000000..997bdc8
--- /dev/null
+++ b/lib/stwcs/wcsutil/instruments.py
@@ -0,0 +1,321 @@
+from __future__ import division # confidence high
+
+import pyfits
+from mappings import ins_spec_kw
+
+class InstrWCS(object):
+ """
+ A base class for instrument specific keyword definition.
+ It provides a default implementation (modeled on ACS) for
+ all set_kw methods.
+ """
+ def __init__(self, hdr0=None, hdr=None):
+ self.exthdr = hdr
+ self.primhdr = hdr0
+ self.set_ins_spec_kw()
+
+ def set_ins_spec_kw(self):
+ """
+ This method MUST call all set_kw methods.
+ There should be a set_kw method for all kw listed in
+ mappings.ins_spec_kw. Catching TypeError handles the case when
+ no headers are available (i.e. fobj='DEFAULT').
+ """
+ self.set_idctab()
+ self.set_offtab()
+ self.set_date_obs()
+ self.set_ra_targ()
+ self.set_dec_targ()
+ self.set_pav3()
+ self.set_detector()
+ self.set_filter1()
+ self.set_filter2()
+ self.set_vafactor()
+ self.set_naxis1()
+ self.set_naxis2()
+ self.set_ltv1()
+ self.set_ltv2()
+ self.set_binned()
+ self.set_chip()
+ self.set_parity()
+
+ def set_idctab(self):
+ try:
+ self.idctab = self.primhdr['IDCTAB']
+ except (KeyError, TypeError):
+ self.idctab = None
+
+ def set_offtab(self):
+ try:
+ self.offtab = self.primhdr['OFFTAB']
+ except (KeyError, TypeError):
+ self.offtab = None
+
+ def set_date_obs(self):
+ try:
+ self.date_obs = self.primhdr['DATE-OBS']
+ except (KeyError, TypeError):
+ self.date_obs = None
+
+ def set_ra_targ(self):
+ try:
+ self.ra_targ = self.primhdr['RA-TARG']
+ except (KeyError, TypeError):
+ self.ra_targ = None
+
+ def set_dec_targ(self):
+ try:
+ self.dec_targ = self.primhdr['DEC-TARG']
+ except (KeyError, TypeError):
+ self.dec_targ = None
+
+ def set_pav3(self):
+ try:
+ self.pav3 = self.primhdr['PA_V3']
+ except (KeyError, TypeError):
+ self.pav3 = None
+
+ def set_filter1(self):
+ try:
+ self.filter1 = self.primhdr['FILTER1']
+ except (KeyError, TypeError):
+ self.filter1 = None
+
+ def set_filter2(self):
+ try:
+ self.filter2 = self.primhdr['FILTER2']
+ except (KeyError, TypeError):
+ self.filter2 = None
+
+ def set_vafactor(self):
+ try:
+ self.vafactor = self.exthdr['VAFACTOR']
+ except (KeyError, TypeError):
+ self.vafactor = 1
+
+ def set_naxis1(self):
+ try:
+ self.naxis1 = self.exthdr['naxis1']
+ except (KeyError, TypeError):
+ try:
+ self.naxis1 = self.exthdr['npix1']
+ except (KeyError, TypeError):
+ self.naxis1 = None
+
+ def set_naxis2(self):
+ try:
+ self.naxis2 = self.exthdr['naxis2']
+ except (KeyError, TypeError):
+ try:
+ self.naxis2 = self.exthdr['npix2']
+ except (KeyError, TypeError):
+ self.naxis2 = None
+
+ def set_ltv1(self):
+ try:
+ self.ltv1 = self.exthdr['LTV1']
+ except (KeyError, TypeError):
+ self.ltv1 = 0.0
+
+ def set_ltv2(self):
+ try:
+ self.ltv2 = self.exthdr['LTV2']
+ except (KeyError, TypeError):
+ self.ltv2 = 0.0
+
+ def set_binned(self):
+ try:
+ self.binned = self.exthdr['BINAXIS1']
+ except (KeyError, TypeError):
+ self.binned = 1
+
+ def set_chip(self):
+ try:
+ self.chip = self.exthdr['CCDCHIP']
+ except (KeyError, TypeError):
+ self.chip = 1
+
+ def set_parity(self):
+ self.parity = [[1.0,0.0],[0.0,-1.0]]
+
+ def set_detector(self):
+ # each instrument has a different kw for detector and it can be
+ # in a different header, so this is to be handled by the instrument classes
+ self.detector = 'DEFAULT'
+
+class ACSWCS(InstrWCS):
+ """
+ get instrument specific kw
+ """
+
+ def __init__(self, hdr0, hdr):
+ # InstrWCS.__init__ stores the headers and calls set_ins_spec_kw
+ InstrWCS.__init__(self, hdr0, hdr)
+
+ def set_detector(self):
+ try:
+ self.detector = self.primhdr['DETECTOR']
+ except KeyError:
+ print 'ERROR: Detector kw not found.\n'
+ raise
+
+ def set_parity(self):
+ parity = {'WFC':[[1.0,0.0],[0.0,-1.0]],
+ 'HRC':[[-1.0,0.0],[0.0,1.0]],
+ 'SBC':[[-1.0,0.0],[0.0,1.0]]}
+
+ if self.detector not in parity.keys():
+ parity = InstrWCS.set_parity(self)
+ else:
+ self.parity = parity[self.detector]
+
+
+class WFPC2WCS(InstrWCS):
+
+
+ def __init__(self, hdr0, hdr):
+ InstrWCS.__init__(self, hdr0, hdr)
+
+ def set_filter1(self):
+ self.filter1 = self.primhdr.get('FILTNAM1', None)
+ if self.filter1 == " " or self.filter1 == None:
+ self.filter1 = 'CLEAR1'
+
+ def set_filter2(self):
+ self.filter2 = self.primhdr.get('FILTNAM2', None)
+ if self.filter2 == " " or self.filter2 == None:
+ self.filter2 = 'CLEAR2'
+
+
+ def set_binned(self):
+ mode = self.primhdr.get('MODE', 1)
+ if mode == 'FULL':
+ self.binned = 1
+ elif mode == 'AREA':
+ self.binned = 2
+
+ def set_chip(self):
+ self.chip = self.exthdr.get('DETECTOR', 1)
+
+ def set_parity(self):
+ self.parity = [[-1.0,0.],[0.,1.0]]
+
+ def set_detector(self):
+ try:
+ self.detector = self.exthdr['DETECTOR']
+ except KeyError:
+ print 'ERROR: Detector kw not found.\n'
+ raise
+
+
+class WFC3WCS(InstrWCS):
+ """
+ Create a WFC3 detector specific class
+ """
+
+ def __init__(self, hdr0, hdr):
+ InstrWCS.__init__(self, hdr0, hdr)
+
+ def set_detector(self):
+ try:
+ self.detector = self.primhdr['DETECTOR']
+ except KeyError:
+ print 'ERROR: Detector kw not found.\n'
+ raise
+
+ def set_filter1(self):
+ self.filter1 = self.primhdr.get('FILTER', None)
+ if self.filter1 == " " or self.filter1 == None:
+ self.filter1 = 'CLEAR'
+
+ def set_filter2(self):
+ #WFC3 idc tables do not use a second filter.
+ self.filter2 = 'CLEAR'
+
+ def set_parity(self):
+ parity = {'UVIS':[[-1.0,0.0],[0.0,1.0]],
+ 'IR':[[-1.0,0.0],[0.0,1.0]]}
+
+ if self.detector not in parity.keys():
+ parity = InstrWCS.set_parity(self)
+ else:
+ self.parity = parity[self.detector]
+
+class NICMOSWCS(InstrWCS):
+ """
+ Create a NICMOS specific class
+ """
+
+ def __init__(self, hdr0, hdr):
+ InstrWCS.__init__(self, hdr0, hdr)
+
+ def set_parity(self):
+ self.parity = [[-1.0,0.],[0.,1.0]]
+
+ def set_filter1(self):
+ self.filter1 = self.primhdr.get('FILTER', None)
+ if self.filter1 == " " or self.filter1 == None:
+ self.filter1 = 'CLEAR'
+
+ def set_filter2(self):
+ #Nicmos idc tables do not allow 2 filters.
+ self.filter2 = 'CLEAR'
+
+ def set_chip(self):
+ self.chip = self.detector
+
+ def set_detector(self):
+ try:
+ self.detector = self.primhdr['CAMERA']
+ except KeyError:
+ print 'ERROR: Detector kw not found.\n'
+ raise
+
+class STISWCS(InstrWCS):
+ """
+ A STIS specific class
+ """
+
+ def __init__(self, hdr0, hdr):
+ InstrWCS.__init__(self, hdr0, hdr)
+
+ def set_parity(self):
+ self.parity = [[-1.0,0.],[0.,1.0]]
+
+ def set_filter1(self):
+ self.filter1 = self.exthdr.get('OPT_ELEM', None)
+ if self.filter1 == " " or self.filter1 == None:
+ self.filter1 = 'CLEAR1'
+
+ def set_filter2(self):
+ self.filter2 = self.exthdr.get('FILTER', None)
+ if self.filter2 == " " or self.filter2 == None:
+ self.filter2 = 'CLEAR2'
+
+ def set_detector(self):
+ try:
+ self.detector = self.primhdr['DETECTOR']
+ except KeyError:
+ print 'ERROR: Detector kw not found.\n'
+ raise
+
+ def set_date_obs(self):
+ try:
+ self.date_obs = self.exthdr['DATE-OBS']
+ except (KeyError, TypeError):
+ self.date_obs = None
\ No newline at end of file
diff --git a/lib/stwcs/wcsutil/mappings.py b/lib/stwcs/wcsutil/mappings.py
new file mode 100644
index 0000000..24038bf
--- /dev/null
+++ b/lib/stwcs/wcsutil/mappings.py
@@ -0,0 +1,29 @@
+from __future__ import division # confidence high
+
+# This dictionary maps an instrument into an instrument class
+# The instrument class handles instrument specific keywords
+
+inst_mappings={'WFPC2': 'WFPC2WCS',
+ 'ACS': 'ACSWCS',
+ 'NICMOS': 'NICMOSWCS',
+ 'STIS': 'STISWCS',
+ 'WFC3': 'WFC3WCS',
+ 'DEFAULT': 'InstrWCS'
+ }
+
+
+# A list of instrument specific keywords
+# Every instrument class must have methods which define each of these
+# as class attributes.
+ins_spec_kw = [ 'idctab', 'offtab', 'date_obs', 'ra_targ', 'dec_targ', 'pav3', \
+ 'detector', 'ltv1', 'ltv2', 'parity', 'binned','vafactor', \
+ 'chip', 'naxis1', 'naxis2', 'filter1', 'filter2']
+
+# A list of keywords defined in the primary header.
+# The HSTWCS class sets this as attributes
+prim_hdr_kw = ['detector', 'offtab', 'idctab', 'date-obs',
+ 'pa_v3', 'ra_targ', 'dec_targ']
+
+# These are the keywords which are archived before MakeWCS is run
+basic_wcs = ['CD1_', 'CD2_', 'CRVAL', 'CTYPE', 'CRPIX', 'CDELT', 'CUNIT']
+
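
A sketch of how these mappings are consumed (mirroring HSTWCS.setInstrSpecKw), with a hypothetical ACS file:

    import pyfits
    from stwcs.wcsutil import instruments, mappings
    fobj = pyfits.open('j94f05bgq_flt.fits')
    inst_class = instruments.__dict__[mappings.inst_mappings['ACS']]
    insobj = inst_class(fobj[0].header, fobj[('sci', 1)].header)
    print insobj.idctab, insobj.detector
    fobj.close()
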
diff --git a/lib/stwcs/wcsutil/mosaic.py b/lib/stwcs/wcsutil/mosaic.py
new file mode 100644
index 0000000..0c02265
--- /dev/null
+++ b/lib/stwcs/wcsutil/mosaic.py
@@ -0,0 +1,183 @@
+from __future__ import division
+import numpy as np
+try:
+ from matplotlib import pyplot as plt
+except ImportError:
+ plt = None # matplotlib is optional; only needed when plot=True
+import pyfits
+import string
+
+from stsci.tools import parseinput, irafglob
+from stwcs.distortion import utils
+from stwcs import updatewcs, wcsutil
+from stwcs.wcsutil import altwcs
+
+def vmosaic(fnames, outwcs=None, ref_wcs=None, ext=None, extname=None, undistort=True, wkey='V', wname='VirtualMosaic', plot=False, clobber=False):
+ """
+ Create a virtual mosaic using the WCS of the input images.
+
+ Parameters
+ ----------
+ fnames: a string or a list
+ a string or a list of filenames, or a list of wcsutil.HSTWCS objects
+ outwcs: an HSTWCS object
+ if given, represents the output tangent plane
+ if None, the output WCS is calculated from the input observations.
+ ref_wcs: an HSTWCS object
+ if output wcs is not given, this will be used as a reference for the
+ calculation of the output WCS. If ref_wcs is None and outwcs is None,
+ then the first observation in the input list is used as reference.
+ ext: an int, a tuple or a list
+ an int - represents a FITS extension, e.g. 0 is the primary HDU
+ a tuple - uses the notation (extname, extver), e.g. ('sci',1)
+ Can be a list of integers or tuples representing FITS extensions
+ extname: string
+ the value of the EXTNAME keyword for the extensions to be used in the mosaic
+ undistort: boolean (default: True)
+ undistort (or not) the output WCS
+ wkey: string
+ default: 'V'
+ one character A-Z to be used to record the virtual mosaic WCS as
+ an alternate WCS in the headers of the input files.
+ wname: string
+ default: 'VirtualMosaic'
+ a string to be used as a WCSNAME value for the alternate WCS representing
+ the virtual mosaic
+ plot: boolean
+ if True and matplotlib is installed will make a plot of the tangent plane
+ and the location of the input observations.
+ clobber: boolean
+ This covers the case when an alternate WCS with the requested key
+ already exists in the header of the input files.
+ if clobber is True, it will be overwritten;
+ if False, it will compute the new one but will not write it to the headers.
+
+ Notes
+ -----
+ The algorithm is:
+ 1. If output WCS is not given it is calculated from the input WCSes.
+ The first image is used as a reference, if no reference is given.
+ This represents the virtual mosaic WCS.
+ 2. For each input observation/chip, an HSTWCS object is created
+ and its footprint on the sky is calculated (using only the four corners).
+ 3. For each input observation the footprint is projected on the output
+ tangent plane and the virtual WCS is recorded in the header.
+ """
+ wcsobjects = readWCS(fnames, ext, extname)
+ if outwcs != None:
+ outwcs = outwcs.deepcopy()
+ else:
+ if ref_wcs != None:
+ outwcs = utils.output_wcs(wcsobjects, ref_wcs=ref_wcs, undistort=undistort)
+ else:
+ outwcs = utils.output_wcs(wcsobjects, undistort=undistort)
+ if plot:
+ outc=np.array([[0.,0], [outwcs.naxis1,0],
+ [outwcs.naxis1, outwcs.naxis2],
+ [0,outwcs.naxis2], [0,0]])
+ plt.plot(outc[:,0], outc[:,1])
+ for wobj in wcsobjects:
+ outcorners = outwcs.wcs_sky2pix(wobj.calcFootprint(),1)
+ if plot:
+ plt.plot(outcorners[:,0], outcorners[:,1])
+ objwcs = outwcs.deepcopy()
+ objwcs.wcs.crpix = objwcs.wcs.crpix - (outcorners[0])
+ updatehdr(wobj.filename, objwcs,wkey=wkey, wcsname=wname, ext=wobj.extname, clobber=clobber)
+ return outwcs
+
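
A minimal sketch, assuming two hypothetical dithered exposures with 'SCI' extensions:

    from stwcs.wcsutil import mosaic
    outwcs = mosaic.vmosaic(['j94f05bgq_flt.fits', 'j94f05bhq_flt.fits'],
                            extname='sci', wkey='V', wname='VirtualMosaic')
    print outwcs
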
+def updatehdr(fname, wcsobj, wkey, wcsname, ext=1, clobber=False):
+ hdr = pyfits.getheader(fname, ext=ext)
+ all_keys = list(string.ascii_uppercase)
+ if wkey.upper() not in all_keys:
+ raise KeyError, "wkey must be one character: A-Z"
+ if wkey not in altwcs.available_wcskeys(hdr):
+ if not clobber:
+ raise ValueError, "wkey %s is already in use. Use clobber=True to overwrite it or specify a different key." %wkey
+ else:
+ altwcs.deleteWCS(fname, ext=ext, wcskey=wkey)
+ f = pyfits.open(fname, mode='update')
+
+ hwcs = wcs2header(wcsobj)
+ wcsnamekey = 'WCSNAME' + wkey
+ f[ext].header.update(key=wcsnamekey, value=wcsname)
+ for k in hwcs.keys():
+ f[ext].header.update(key=k[:7]+wkey, value=hwcs[k])
+
+ f.close()
+
+def wcs2header(wcsobj):
+
+ h = wcsobj.to_header()
+
+ if wcsobj.wcs.has_cd():
+ h = altwcs.pc2cd(h)
+ h.update('CTYPE1', 'RA---TAN')
+ h.update('CTYPE2', 'DEC--TAN')
+ norient = np.rad2deg(np.arctan2(h['CD1_2'],h['CD2_2']))
+ #okey = 'ORIENT%s' % wkey
+ okey = 'ORIENT'
+ h.update(key=okey, value=norient)
+ return h
+
+def readWCS(input, exts=None, extname=None):
+ if isinstance(input, str):
+ if input[0] == '@':
+ # input is an @ file
+ filelist = irafglob.irafglob(input)
+ else:
+ filelist, output = parseinput.parseinput(input)
+ elif isinstance(input, list):
+ if isinstance(input[0], wcsutil.HSTWCS):
+ # a list of HSTWCS objects
+ return input
+ else:
+ filelist = input[:]
+ wcso = []
+    fomited = []    # files omitted because no valid WCS could be built
+ # figure out which FITS extension(s) to use
+    if exts is None and extname is None:
+        # Assume simple FITS with the data in the primary HDU
+        for f in filelist:
+            try:
+                wcso.append(wcsutil.HSTWCS(f))
+            except AttributeError:
+                fomited.append(f)
+                continue
+    elif exts is not None and validateExt(exts):
+        # wrap a single int or tuple so it can be iterated over
+        if not isinstance(exts, list):
+            exts = [exts]
+ for f in filelist:
+ try:
+ wcso.extend([wcsutil.HSTWCS(f, ext=e) for e in exts])
+ except KeyError:
+ fomited.append(f)
+ continue
+    elif extname is not None:
+ for f in filelist:
+ fobj = pyfits.open(f)
+ for i in range(len(fobj)):
+ try:
+ ename = fobj[i].header['EXTNAME']
+ except KeyError:
+ continue
+ if ename.lower() == extname.lower():
+ wcso.append(wcsutil.HSTWCS(f,ext=i))
+ fobj.close()
+    if fomited:
+        print "These files were skipped:"
+        for f in fomited:
+            print f
+ return wcso
+
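+# Illustrative readWCS calls (file names hypothetical):
+#
+#     readWCS('@mosaic.lst', extname='sci')    # @-file listing the inputs
+#     readWCS('*flt.fits', exts=('sci', 1))    # wildcard plus one extension
+#     readWCS([w1, w2])                        # a list of HSTWCS objects
+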
+
+def validateExt(ext):
+    if not isinstance(ext, (int, tuple, list)):
+        print "Ext must be an int, a tuple, a list of int extension numbers, " \
+              "or a list of tuples representing a FITS extension, e.g. ('sci', 1)."
+        return False
+    else:
+        return True
+
+
+
diff --git a/lib/stwcs/wcsutil/wcscorr.py b/lib/stwcs/wcsutil/wcscorr.py
new file mode 100644
index 0000000..a6b1f94
--- /dev/null
+++ b/lib/stwcs/wcsutil/wcscorr.py
@@ -0,0 +1,458 @@
+import os,copy
+import pyfits
+import numpy as np
+
+from stsci.tools import fileutil
+import stwcs
+from stwcs.wcsutil import altwcs
+import convertwcs
+
+
+DEFAULT_WCS_KEYS = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2',
+ 'CD1_1','CD1_2','CD2_1','CD2_2',
+ 'CTYPE1','CTYPE2']
+DEFAULT_PRI_KEYS = ['PA_V3']
+###
+### WCSEXT table related keyword archive functions
+###
+def init_wcscorr(input, force=False):
+ """
+ This function will initialize the WCSCORR table if it is not already
+ present. The initial rows are populated from WCS keywords with an 'O'
+ prefix (the original OPUS-generated WCS); if no 'O'-prefix keywords are
+ found, the current WCS keywords are used for the initial rows instead.
+
+ This function will NOT overwrite any rows already present.
+
+ This function works on all SCI extensions at one time.
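+
+ Examples
+ --------
+ A minimal sketch (the file name is hypothetical):
+
+ >>> init_wcscorr('j94f05bgq_flt.fits')              # add WCSCORR if missing
+ >>> init_wcscorr('j94f05bgq_flt.fits', force=True)  # rebuild the table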
+ """
+
+ # TODO: Create some sort of decorator or (for Python2.5) context for
+ # opening a FITS file and closing it when done, if necessary
+ if not isinstance(input, pyfits.HDUList):
+ # input must be a filename, so open as PyFITS object
+ fimg = pyfits.open(input, mode='update')
+ need_to_close = True
+ else:
+ fimg = input
+ need_to_close = False
+
+ # Do not try to generate a WCSCORR table for a simple FITS file
+ if len(fimg) == 1:
+ return
+
+ # Verify that a WCSCORR extension does not already exist...
+ for extn in fimg:
+ if extn.header.has_key('extname') and \
+ extn.header['extname'] == 'WCSCORR':
+ if not force:
+ return
+ else:
+ del fimg['WCSCORR']
+ # define the primary columns of the WCSEXT table with initial rows for each
+ # SCI extension for the original OPUS solution
+ numsci = fileutil.countExtn(fimg)
+
+ # create new table with more rows than needed initially to make it easier to
+ # add new rows later
+ wcsext = create_wcscorr(numrows=numsci, padding=numsci * 4)
+ # Assign the correct EXTNAME value to this table extension
+ wcsext.header.update('TROWS', numsci * 2,
+ comment='Number of updated rows in table')
+ wcsext.header.update('EXTNAME', 'WCSCORR',
+ comment='Table with WCS Update history')
+ wcsext.header.update('EXTVER', 1)
+
+ used_wcskeys = None
+ wcs1 = stwcs.wcsutil.HSTWCS(fimg,ext=('SCI',1))
+ idc2header = True
+ if wcs1.idcscale is None:
+ idc2header = False
+ wcs_keywords = wcs1.wcs2header(idc2hdr=idc2header).keys()
+
+ # Now copy original OPUS values into table
+ for extver in xrange(1, numsci + 1):
+ rowind = find_wcscorr_row(wcsext.data,
+ {'WCS_ID': 'OPUS', 'EXTVER': extver,
+ 'WCS_key':'O'})
+ # There should only EVER be a single row for each extension with OPUS values
+ rownum = np.where(rowind)[0][0]
+ #print 'Archiving OPUS WCS in row number ',rownum,' in WCSCORR table for SCI,',extver
+
+ hdr = fimg['SCI', extver].header
+ # define set of WCS keywords which need to be managed and copied to the table
+ if used_wcskeys is None:
+ used_wcskeys = altwcs.wcskeys(hdr)
+ # Check to see whether or not there is an OPUS alternate WCS present,
+ # if so, get its values directly, otherwise, archive the PRIMARY WCS
+ # as the OPUS values in the WCSCORR table
+ if 'O' not in used_wcskeys:
+ altwcs.archiveWCS(fimg,('SCI',extver),wcskey='O', wcsname='OPUS')
+ wkey = 'O'
+
+ wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey)
+ wcshdr = wcs.wcs2header(idc2hdr=idc2header)
+
+        # Gather the keywords that are only found in the PRIMARY header
+        prihdr_keys = []
+        for key in DEFAULT_PRI_KEYS:
+            if not hdr.has_key(key):
+                prihdr_keys.append(key)
+
+ if wcsext.data.field('CRVAL1')[rownum] != 0:
+ # If we find values for these keywords already in the table, do not
+ # overwrite them again
+ print 'WCS keywords already updated...'
+ break
+ for key in wcs_keywords:
+ if key in wcsext.data.names:
+ wcsext.data.field(key)[rownum] = wcshdr[(key+wkey)[:8]]
+ # Now get any keywords from PRIMARY header needed for WCS updates
+ for key in prihdr_keys:
+ wcsext.data.field(key)[rownum] = fimg[0].header[key]
+
+ # Now that we have archived the OPUS alternate WCS, remove it from the list
+ # of used_wcskeys
+ if 'O' in used_wcskeys:
+ used_wcskeys.remove('O')
+
+ # Now copy remaining alternate WCSs into table
+ # TODO: Much of this appears to be redundant with update_wcscorr; consider
+ # merging them...
+ for uwkey in used_wcskeys:
+        if uwkey == ' ':
+            # the primary (blank-key) WCS has already been handled above
+            break
+ for extver in xrange(1, numsci + 1):
+ hdr = fimg['SCI', extver].header
+ wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver),
+ wcskey=uwkey)
+ wcshdr = wcs.wcs2header()
+ if 'WCSNAME' + uwkey not in wcshdr:
+ idctab = fileutil.osfn(fimg[0].header['idctab'])
+ idcname = os.path.split(idctab)[-1]
+ idcname = idcname[:idcname.find('_')]
+ wcsid = 'IDC_' + idcname
+ else:
+ wcsid = wcshdr['WCSNAME' + uwkey]
+
+ # identify next empty row
+ rowind = find_wcscorr_row(wcsext.data, selections={'wcs_id': ''})
+ rows = np.where(rowind)
+            if len(rows[0]) > 0:
+                rownum = rows[0][0]
+            else:
+                print 'No available rows found for updating.'
+                # no empty rows are left in the table; skip this entry
+                continue
+ #print 'Archiving current WCS row number ',rownum,' in WCSCORR table for SCI,',extver
+
+ # Update selection columns for this row with relevant values
+ wcsext.data.field('WCS_ID')[rownum] = wcsid
+ wcsext.data.field('EXTVER')[rownum] = extver
+ wcsext.data.field('WCS_key')[rownum] = uwkey
+
+ # Look for standard WCS keyword values
+ for key in wcs_keywords:
+ if key in wcsext.data.names:
+ wcsext.data.field(key)[rownum] = wcshdr[key + uwkey]
+ # Now get any keywords from PRIMARY header needed for WCS updates
+ for key in prihdr_keys:
+ wcsext.data.field(key)[rownum] = fimg[0].header[key]
+
+ # Append this table to the image FITS file
+ fimg.append(wcsext)
+ # force an update now
+ # set the verify flag to 'warn' so that it will always succeed, but still
+ # tell the user if PyFITS detects any problems with the file as a whole
+ fimg.flush('warn')
+
+ if need_to_close:
+ fimg.close()
+
+
+def find_wcscorr_row(wcstab, selections):
+ """
+ Return a boolean mask array for the table (NOT HDU) 'wcstab' that is True
+ for the rows matching the selections specified by the user.
+
+ The row selection criteria must be specified as a dictionary with a
+ column name as the key and the desired value(s) for that column.
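+
+ For example, to find the OPUS rows for the second SCI extension (here
+ wcstab stands for the .data attribute of an existing WCSCORR HDU):
+
+ >>> rowind = find_wcscorr_row(wcstab, {'wcs_id': 'OPUS', 'extver': 2})
+ >>> rownum = np.where(rowind)[0]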
+ """
+
+ mask = None
+ for i in selections:
+ icol = wcstab.field(i)
+ if isinstance(icol,np.chararray): icol = icol.rstrip()
+ bmask = (icol == selections[i])
+ if mask is None:
+ mask = bmask.copy()
+ else:
+ mask = np.logical_and(mask,bmask)
+ del bmask
+ return mask
+
+
+def archive_wcs_file(image, wcs_id=None):
+ """
+ Update WCSCORR table with rows for each SCI extension to record the
+ newly updated WCS keyword values.
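+
+ A minimal sketch (the file name and WCS name are hypothetical):
+
+ >>> archive_wcs_file('j94f05bgq_flt.fits')                  # all WCSs
+ >>> archive_wcs_file('j94f05bgq_flt.fits', wcs_id='TWEAK')  # one by name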
+ """
+
+ if not isinstance(image, pyfits.HDUList):
+ fimg = pyfits.open(image, mode='update')
+ close_image = True
+ else:
+ fimg = image
+ close_image = False
+
+ update_wcscorr(fimg, wcs_id=wcs_id)
+
+ if close_image:
+ fimg.close()
+
+
+def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None):
+ """
+ Update WCSCORR table with a new row or rows for this extension header. It
+ copies the current set of WCS keywords as a new row of the table, based on
+ keyed WCSs (as per the FITS Paper I multiple-WCS standard).
+
+ Parameters
+ ----------
+ dest : HDUList
+ The HDU list whose WCSCORR table should be appended to (the WCSCORR HDU
+ must already exist)
+ source : HDUList, optional
+ The HDU list containing the extension from which to extract the WCS
+ keywords to add to the WCSCORR table. If None, the dest is also used
+ as the source.
+ extname : str, optional
+ The extension name from which to take new WCS keywords. If there are
+ multiple extensions with that name, rows are added for each extension
+ version.
+ wcs_id : str, optional
+ The name of the WCS to add, as in the WCSNAMEa keyword. If
+ unspecified, all the WCSs in the specified extensions are added.
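+
+ Examples
+ --------
+ An illustrative sketch; the file name and WCS name are hypothetical and
+ the file is assumed to already contain a WCSCORR extension:
+
+ >>> fobj = pyfits.open('j94f05bgq_flt.fits', mode='update')
+ >>> update_wcscorr(fobj, extname='SCI', wcs_id='IDC_qbu1641sj')
+ >>> fobj.close()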
+ """
+
+ if source is None:
+ source = dest
+
+ numext = fileutil.countExtn(source, extname)
+ if numext == 0:
+ raise ValueError('No %s extensions found in the source HDU list.'
+ % extname)
+
+ # Current implementation assumes the same WCS keywords are in each
+ # extension version; if this should not be assumed then this can be
+ # modified...
+ wcs_keys = altwcs.wcskeys(source[(extname, 1)].header)
+ wcs_keys = filter(None, wcs_keys)
+ wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header()
+ wcs_keywords = wcshdr.keys()
+
+ if 'O' in wcs_keys:
+ wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS
+
+ # If we're looking for a particular wcs_id, test ahead of time that it's
+ # actually present in the specified extension headers
+ if wcs_id:
+ wcs_key = ''
+ for wcs_key in wcs_keys:
+ wcsname = source[(extname, 1)].header['WCSNAME' + wcs_key]
+ if wcs_id == wcsname:
+ break
+ else:
+ raise ValueError('A WCS with name %s was not found in the %s '
+ 'extension headers in the source HDU list.'
+ % (wcs_id, extname))
+ wcs_keys = [wcs_key] # We're only interested in this one
+
+ # create new table for hdr and populate it with the newly updated values
+ new_table = create_wcscorr(numrows=0, padding=len(wcs_keys)*numext)
+ old_table = dest['WCSCORR']
+
+ idx = -1
+ for wcs_key in wcs_keys:
+ for extver in range(1, numext + 1):
+ extn = (extname, extver)
+ hdr = source[extn].header
+ wcsname = hdr['WCSNAME' + wcs_key]
+ selection = {'WCS_ID': wcsname, 'EXTVER': extver,
+ 'WCS_key': wcs_key}
+
+ # Ensure that an entry for this WCS is not already in the dest
+ # table; if so just skip it
+ rowind = find_wcscorr_row(old_table.data, selection)
+ if np.any(rowind):
+ continue
+
+ idx += 1
+
+ wcs = stwcs.wcsutil.HSTWCS(source, ext=extn, wcskey=wcs_key)
+ wcshdr = wcs.wcs2header()
+
+ # Update selection column values
+ for key, val in selection.iteritems():
+ new_table.data.field(key)[idx] = val
+
+ for key in wcs_keywords:
+ if key in new_table.data.names:
+ new_table.data.field(key)[idx] = wcshdr[key + wcs_key]
+
+ prihdr = source[0].header
+ for key in DEFAULT_PRI_KEYS:
+ if key in new_table.data.names and prihdr.has_key(key):
+ new_table.data.field(key)[idx] = prihdr[key]
+
+ # If idx was never incremented, no rows were added, so there's nothing else
+ # to do...
+ if idx < 0:
+ return
+
+ # Now, we need to merge this into the existing table
+ rowind = find_wcscorr_row(old_table.data, {'wcs_id':''})
+ old_nrows = np.where(rowind)[0][0]
+ new_nrows = new_table.data.shape[0]
+
+ # check to see if there is room for the new row
+ if (old_nrows + new_nrows) > old_table.data.shape[0]-1:
+ pad_rows = 2 * new_nrows
+ # if not, create a new table with 'pad_rows' new empty rows
+ upd_table = pyfits.new_table(old_table.columns,header=old_table.header,
+ nrows=old_table.data.shape[0]+pad_rows)
+ else:
+ upd_table = old_table
+ pad_rows = 0
+ # Now, add
+ for name in old_table.columns.names:
+ # reset the default values to ones specific to the row definitions
+ for i in range(pad_rows):
+ upd_table.data.field(name)[old_nrows+i] = old_table.data.field(name)[-1]
+ # Now populate with values from new table
+ upd_table.data.field(name)[old_nrows:old_nrows + new_nrows] = \
+ new_table.data.field(name)
+ upd_table.header.update('TROWS', old_nrows + new_nrows)
+
+ # replace old extension with newly updated table extension
+ dest['WCSCORR'] = upd_table
+
+
+def restore_file_from_wcscorr(image, id='OPUS', wcskey=''):
+ """ Copies the values of the WCS from the WCSCORR based on ID specified by user.
+ The default will be to restore the original OPUS-derived values to the Primary WCS.
+ If wcskey is specified, the WCS with that key will be updated instead.
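+
+ A minimal sketch (the file name is hypothetical):
+
+ >>> restore_file_from_wcscorr('j94f05bgq_flt.fits')              # OPUS -> primary
+ >>> restore_file_from_wcscorr('j94f05bgq_flt.fits', wcskey='R')  # into key 'R'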
+ """
+
+ if not isinstance(image, pyfits.HDUList):
+ fimg = pyfits.open(image, mode='update')
+ close_image = True
+ else:
+ fimg = image
+ close_image = False
+ numsci = fileutil.countExtn(fimg)
+ wcs_table = fimg['WCSCORR']
+    orig_rows = (wcs_table.data.field('WCS_ID') == id)
+ # create an HSTWCS object to figure out what WCS keywords need to be updated
+ wcsobj = stwcs.wcsutil.HSTWCS(fimg,ext=('sci',1))
+ wcshdr = wcsobj.wcs2header()
+ for extn in range(1,numsci+1):
+ # find corresponding row from table
+ ext_rows = (wcs_table.data.field('EXTVER') == extn)
+ erow = np.where(np.logical_and(ext_rows,orig_rows))[0][0]
+ for key in wcshdr:
+            if key in wcs_table.data.names: # ensure that the keyword is a column in the table
+ tkey = key
+
+ if 'orient' in key.lower():
+ key = 'ORIENT'
+ if wcskey == '':
+ skey = key
+ else:
+ skey = key[:7]+wcskey
+ fimg['sci',extn].header.update(skey,wcs_table.data.field(tkey)[erow])
+ for key in DEFAULT_PRI_KEYS:
+ if key in wcs_table.data.names:
+ if wcskey == '':
+ pkey = key
+ else:
+ pkey = key[:7]+wcskey
+ fimg[0].header.update(pkey,wcs_table.data.field(key)[erow])
+
+ # close the image now that the update has been completed.
+ if close_image:
+ fimg.close()
+
+
+def create_wcscorr(descrip=False, numrows=1, padding=0):
+ """
+ Return the basic definitions for a WCSCORR table.
+ The dtype definitions for the string columns are set to the maximum size
+ allowed, so that all new elements have the same maximum size; values are
+ automatically truncated to this limit upon updating (if needed).
+
+ The table is initialized with rows corresponding to the OPUS solution
+ for all the 'SCI' extensions.
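+
+ Examples
+ --------
+ A minimal sketch: build a table HDU sized for two SCI extensions plus
+ eight padding rows, then check its dimensions:
+
+ >>> wcsext = create_wcscorr(numrows=2, padding=8)
+ >>> wcsext.data.shape[0]
+ 10
+ >>> wcsext.name
+ 'WCSCORR'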
+ """
+
+ trows = numrows + padding
+ # define initialized arrays as placeholders for column data
+ # TODO: I'm certain there's an easier way to do this... for example, simply
+ # define the column names and formats, then create an empty array using
+ # them as a dtype, then create the new table from that array.
+ def_float64_zeros = np.array([0.0] * trows, dtype=np.float64)
+ def_float64_ones = def_float64_zeros + 1.0
+ def_float_col = {'format': 'D', 'array': def_float64_zeros.copy()}
+ def_float1_col = {'format': 'D', 'array':def_float64_ones.copy()}
+ def_str40_col = {'format': '40A',
+ 'array': np.array([''] * trows, dtype='S40')}
+ def_str24_col = {'format': '24A',
+ 'array': np.array([''] * trows, dtype='S24')}
+ def_int32_col = {'format': 'J',
+ 'array': np.array([0]*trows,dtype=np.int32)}
+
+ # If more columns are needed, simply add their definitions to this list
+ col_names = [('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
+ ('CRPIX1', def_float_col), ('CRPIX2', def_float_col),
+ ('CD1_1', def_float_col), ('CD1_2', def_float_col),
+ ('CD2_1', def_float_col), ('CD2_2', def_float_col),
+ ('CTYPE1', def_str24_col), ('CTYPE2', def_str24_col),
+ ('ORIENTAT', def_float_col), ('PA_V3', def_float_col),
+ ('Delta_RA', def_float_col), ('Delta_Dec', def_float_col),
+ ('RMS_RA', def_float_col), ('RMS_Dec', def_float_col),
+ ('Delta_Orientat', def_float_col),
+ ('Delta_Scale', def_float1_col),
+ ('NMatch', def_int32_col), ('Catalog', def_str40_col)]
+
+ # Define selector columns
+    id_col = pyfits.Column(name='WCS_ID', format='40A',
+                           array=np.array(['OPUS'] * numrows + [''] * padding,
+                                          dtype='S40'))
+ extver_col = pyfits.Column(name='EXTVER', format='I',
+ array=np.array(range(1, numrows + 1),
+ dtype=np.int16))
+ wcskey_col = pyfits.Column(name='WCS_key', format='A',
+ array=np.array(['O'] * numrows + [''] * padding,
+ dtype='S'))
+ # create list of remaining columns to be added to table
+ col_list = [id_col, extver_col, wcskey_col] # start with selector columns
+
+ for c in col_names:
+ cdef = copy.deepcopy(c[1])
+ col_list.append(pyfits.Column(name=c[0], format=cdef['format'],
+ array=cdef['array']))
+
+ if descrip:
+ col_list.append(
+ pyfits.Column(name='Descrip', format='128A',
+ array=np.array(
+ ['Original WCS computed by OPUS'] * numrows,
+ dtype='S128')))
+
+ # Now create the new table from the column definitions
+ newtab = pyfits.new_table(pyfits.ColDefs(col_list), nrows=trows)
+ # The fact that setting .name is necessary should be considered a bug in
+ # pyfits.
+ # TODO: Make sure this is fixed in pyfits, then remove this
+ newtab.name = 'WCSCORR'
+
+ return newtab
+