Diffstat (limited to 'stwcs/distortion')
-rw-r--r--  stwcs/distortion/__init__.py           0
-rw-r--r--  stwcs/distortion/coeff_converter.py  142
-rw-r--r--  stwcs/distortion/models.py           362
-rw-r--r--  stwcs/distortion/mutil.py            703
-rw-r--r--  stwcs/distortion/utils.py            270
5 files changed, 1477 insertions, 0 deletions
diff --git a/stwcs/distortion/__init__.py b/stwcs/distortion/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/stwcs/distortion/__init__.py
diff --git a/stwcs/distortion/coeff_converter.py b/stwcs/distortion/coeff_converter.py
new file mode 100644
index 0000000..415b512
--- /dev/null
+++ b/stwcs/distortion/coeff_converter.py
@@ -0,0 +1,142 @@
+from __future__ import division, print_function # confidence high
+
+import numpy as np
+from astropy.io import fits
+from astropy import wcs as pywcs
+
+def sip2idc(wcs):
+ """
+ Converts SIP style coefficients to IDCTAB coefficients.
+
+ Parameters
+ ----------
+ wcs : `astropy.io.fits.Header` or `astropy.wcs.WCS` object
+ """
+ if isinstance(wcs, fits.Header):
+ ocx10 = wcs.get('OCX10', None)
+ ocx11 = wcs.get('OCX11', None)
+ ocy10 = wcs.get('OCY10', None)
+ ocy11 = wcs.get('OCY11', None)
+ order = wcs.get('A_ORDER', None)
+ sipa, sipb = _read_sip_kw(wcs)
+        if (None in [ocx10, ocx11, ocy10, ocy11]) or sipa is None or sipb is None:
+ print('Cannot convert SIP to IDC coefficients.\n')
+ return None, None
+ elif isinstance(wcs, pywcs.WCS):
+ try:
+ ocx10 = wcs.ocx10
+ ocx11 = wcs.ocx11
+ ocy10 = wcs.ocy10
+ ocy11 = wcs.ocy11
+ except AttributeError:
+ print('First order IDCTAB coefficients are not available.\n')
+ print('Cannot convert SIP to IDC coefficients.\n')
+ return None, None
+ try:
+ sipa = wcs.sip.a
+ sipb = wcs.sip.b
+ except AttributeError:
+ print('SIP coefficients are not available.')
+ print('Cannot convert SIP to IDC coefficients.\n')
+ return None, None
+ try:
+ order = wcs.sip.a_order
+ except AttributeError:
+ print('SIP model order unknown, exiting ...\n')
+ return None, None
+
+ else:
+        print('Input to sip2idc must be an astropy.io.fits.Header or a wcsutil.HSTWCS object\n')
+        return None, None
+
+
+ if None in [ocx10, ocx11, ocy10, ocy11]:
+ print('First order IDC coefficients not found, exiting ...\n')
+ return None, None
+ idc_coeff = np.array([[ocx11, ocx10], [ocy11, ocy10]])
+ cx = np.zeros((order+1,order+1), dtype=np.double)
+ cy = np.zeros((order+1,order+1), dtype=np.double)
+ for n in range(order+1):
+ for m in range(order+1):
+ if n >= m and n>=2:
+ sipval = np.array([[sipa[m,n-m]],[sipb[m,n-m]]])
+ idcval = np.dot(idc_coeff, sipval)
+ cx[n,m] = idcval[0]
+ cy[n,m] = idcval[1]
+
+ cx[1,0] = ocx10
+ cx[1,1] = ocx11
+ cy[1,0] = ocy10
+ cy[1,1] = ocy11
+
+ return cx, cy
+
+def _read_sip_kw(header):
+ """
+ Reads SIP header keywords and returns an array of coefficients.
+
+ If no SIP header keywords are found, None is returned.
+ """
+ if "A_ORDER" in header:
+ if "B_ORDER" not in header:
+ raise ValueError(
+ "A_ORDER provided without corresponding B_ORDER "
+ "keyword for SIP distortion")
+
+ m = int(header["A_ORDER"])
+ a = np.zeros((m+1, m+1), np.double)
+ for i in range(m+1):
+ for j in range(m-i+1):
+ a[i, j] = header.get("A_%d_%d" % (i, j), 0.0)
+
+ m = int(header["B_ORDER"])
+ b = np.zeros((m+1, m+1), np.double)
+ for i in range(m+1):
+ for j in range(m-i+1):
+ b[i, j] = header.get("B_%d_%d" % (i, j), 0.0)
+ elif "B_ORDER" in header:
+ raise ValueError(
+ "B_ORDER provided without corresponding A_ORDER "
+ "keyword for SIP distortion")
+ else:
+ a = None
+ b = None
+
+    return a, b
+
+
+"""
+def idc2sip(wcsobj, idctab = None):
+ if isinstance(wcs,pywcs.WCS):
+ try:
+ cx10 = wcsobj.ocx10
+ cx11 = wcsobj.cx11
+ cy10 = wcsobj.cy10
+ cy11 = wcsobj.cy11
+ except AttributeError:
+ print
+ try:
+ order = wcs.sip.a_order
+ except AttributeError:
+ print 'SIP model order unknown, exiting ...\n'
+ return
+ else:
+ print 'Input to sip2idc must be a PyFITS header or a wcsutil.HSTWCS object\n'
+ return
+
+ if None in [ocx10, ocx11, ocy10, ocy11]:
+ print 'First order IDC coefficients not found, exiting ...\n'
+ return
+ idc_coeff = np.array([[wcsobj.cx11, wcsobj.cx10], [wcsobj.cy11, wcsobj.cy10]])
+ cx = numpy.zeros((order+1,order+1), dtype=numpy.double)
+ cy = numpy.zeros((order+1,order+1), dtype=numpy.double)
+ for n in range(order+1):
+ for m in range(order+1):
+ if n >= m and n>=2:
+ sipval = numpy.array([[wcsobj.sip.a[n,m]],[wcsobj.sip.b[n,m]]])
+ idcval = numpy.dot(idc_coeff, sipval)
+ cx[m,n-m] = idcval[0]
+ cy[m,n-m] = idcval[1]
+
+ return cx, cy
+"""
diff --git a/stwcs/distortion/models.py b/stwcs/distortion/models.py
new file mode 100644
index 0000000..231a9f1
--- /dev/null
+++ b/stwcs/distortion/models.py
@@ -0,0 +1,362 @@
+from __future__ import absolute_import, division, print_function # confidence high
+
+
+import numpy as np
+
+# Import PyDrizzle utility modules
+from . import mutil
+from .mutil import combin
+
+yes = True
+no = False
+
+#################
+#
+#
+# Geometry/Distortion Classes
+#
+#
+#################
+
+class GeometryModel:
+ """
+ Base class for Distortion model.
+ There will be a separate class for each type of
+ model/filetype used with drizzle, i.e., IDCModel and
+ DrizzleModel.
+
+ Each class will know how to apply the distortion to a
+ single point and how to convert coefficients to an input table
+ suitable for the drizzle task.
+
+ Coefficients will be stored in CX,CY arrays.
+ """
+ #
+ #
+ #
+ #
+ #
+ #
+ #
+ NORDER = 3
+
+ def __init__(self):
+ " This will open the given file and determine its type and norder."
+
+ # Method to read in coefficients from given table and
+ # populate the n arrays 'cx' and 'cy'.
+ # This will be different for each type of input file,
+ # IDCTAB vs. drizzle table.
+
+ # Set these up here for all sub-classes to use...
+ # But, calculate norder and cx,cy arrays in detector specific classes.
+ self.cx = None
+ self.cy = None
+ self.refpix = None
+ self.norder = self.NORDER
+ # Keep track of computed zero-point for distortion coeffs
+ self.x0 = None
+ self.y0 = None
+
+ # default values for these attributes
+ self.direction = 'forward'
+
+ self.pscale = 1.0
+
+ def shift(self, xs, ys):
+ """
+ Shift reference position of coefficients to new center
+ where (xs,ys) = old-reference-position - subarray/image center.
+ This will support creating coeffs files for drizzle which will
+ be applied relative to the center of the image, rather than relative
+ to the reference position of the chip.
+ """
+
+ _cxs = np.zeros(shape=self.cx.shape,dtype=self.cx.dtype)
+ _cys = np.zeros(shape=self.cy.shape,dtype=self.cy.dtype)
+ _k = self.norder + 1
+ # loop over each input coefficient
+ for m in range(_k):
+ for n in range(_k):
+ if m >= n:
+ # For this coefficient, shift by xs/ys.
+ _ilist = list(range(m, _k))
+ # sum from m to k
+ for i in _ilist:
+ _jlist = list(range(n, i - (m-n)+1))
+ # sum from n to i-(m-n)
+ for j in _jlist:
+ _cxs[m,n] += self.cx[i,j]*combin(j,n)*combin((i-j),(m-n))*pow(xs,(j-n))*pow(ys,((i-j)-(m-n)))
+ _cys[m,n] += self.cy[i,j]*combin(j,n)*combin((i-j),(m-n))*pow(xs,(j-n))*pow(ys,((i-j)-(m-n)))
+ self.cx = _cxs.copy()
+ self.cy = _cys.copy()
+
+ def convert(self, tmpname, xref=None,yref=None,delta=yes):
+ """
+ Open up an ASCII file, output coefficients in drizzle
+ format after converting them as necessary.
+ First, normalize these coefficients to what drizzle expects
+ Normalize the coefficients by the MODEL/output plate scale.
+
+ 16-May-2002:
+ Revised to work with higher order polynomials by John Blakeslee.
+ 27-June-2002:
+ Added ability to shift coefficients to new center for support
+ of subarrays.
+ """
+ cx = self.cx/self.pscale
+ cy = self.cy/self.pscale
+ x0 = self.refpix['XDELTA'] + cx[0,0]
+ y0 = self.refpix['YDELTA'] + cy[0,0]
+ #xr = self.refpix['XREF']
+ #yr = self.refpix['YREF']
+ xr = self.refpix['CHIP_XREF']
+ yr = self.refpix['CHIP_YREF']
+
+
+
+ '''
+ if xref != None:
+ # Shift coefficients for use with drizzle
+ _xs = xref - self.refpix['XREF'] + 1.0
+ _ys = yref - self.refpix['YREF'] + 1.0
+
+
+ if _xs != 0 or _ys != 0:
+ cxs,cys= self.shift(cx, cy, _xs, _ys)
+ cx = cxs
+ cy = cys
+
+ # We only want to apply this shift to coeffs
+ # for subarray images.
+ if delta == no:
+ cxs[0,0] = cxs[0,0] - _xs
+ cys[0,0] = cys[0,0] - _ys
+
+ # Now, apply only the difference introduced by the distortion..
+ # i.e., (undistorted - original) shift.
+ x0 += cxs[0,0]
+ y0 += cys[0,0]
+ '''
+ self.x0 = x0 #+ 1.0
+ self.y0 = y0 #+ 1.0
+
+ # Now, write out the coefficients into an ASCII
+ # file in 'drizzle' format.
+ lines = []
+
+
+ lines.append('# Polynomial distortion coefficients\n')
+ lines.append('# Extracted from "%s" \n'%self.name)
+ lines.append('refpix %f %f \n'%(xr,yr))
+ if self.norder==3:
+ lines.append('cubic\n')
+ elif self.norder==4:
+ lines.append('quartic\n')
+ elif self.norder==5:
+ lines.append('quintic\n')
+ else:
+ raise ValueError("Drizzle cannot handle poly distortions of order %d" % self.norder)
+
+        line = '%16.8f %16.8g %16.8g %16.8g %16.8g \n' % (x0, cx[1,1], cx[1,0], cx[2,2], cx[2,1])
+        lines.append(line)
+        line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cx[2,0], cx[3,3], cx[3,2], cx[3,1], cx[3,0])
+        lines.append(line)
+        if self.norder > 3:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cx[4,4], cx[4,3], cx[4,2], cx[4,1], cx[4,0])
+            lines.append(line)
+        if self.norder > 4:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cx[5,5], cx[5,4], cx[5,3], cx[5,2], cx[5,1], cx[5,0])
+            lines.append(line)
+        lines.append("\n")
+
+        line = '%16.8f %16.8g %16.8g %16.8g %16.8g \n' % (y0, cy[1,1], cy[1,0], cy[2,2], cy[2,1])
+        lines.append(line)
+        line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cy[2,0], cy[3,3], cy[3,2], cy[3,1], cy[3,0])
+        lines.append(line)
+        if self.norder > 3:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cy[4,4], cy[4,3], cy[4,2], cy[4,1], cy[4,0])
+            lines.append(line)
+        if self.norder > 4:
+            line = '%16.8g %16.8g %16.8g %16.8g %16.8g %16.8g \n' % (cy[5,5], cy[5,4], cy[5,3], cy[5,2], cy[5,1], cy[5,0])
+            lines.append(line)
+
+ output = open(tmpname,'w')
+ output.writelines(lines)
+ output.close()
+
+
+ def apply(self, pixpos,scale=1.0,order=None):
+ """
+ Apply coefficients to a pixel position or a list of positions.
+ This should be the same for all coefficients tables.
+ Return the geometrically-adjusted position
+ in arcseconds from the reference position as a tuple (x,y).
+
+        Compute the delta from the reference position.
+
+        Note: ``scale`` is the ratio of the image plate scale to this
+        model's plate scale (``self.pscale``).
+        """
+        if self.cx is None:
+            return pixpos[:, 0], pixpos[:, 1]
+
+ if order is None:
+ order = self.norder
+
+ # Apply in the same way that 'drizzle' would...
+ _cx = self.cx / (self.pscale * scale)
+ _cy = self.cy / (self.pscale * scale)
+ _convert = no
+ _p = pixpos
+
+ # Do NOT include any zero-point terms in CX,CY here
+ # as they should not be scaled by plate-scale like rest
+ # of coeffs... This makes the computations consistent
+ # with 'drizzle'. WJH 17-Feb-2004
+ _cx[0,0] = 0.
+ _cy[0,0] = 0.
+
+ if isinstance(_p, list) or isinstance(_p, tuple):
+ _p = np.array(_p,dtype=np.float64)
+ _convert = yes
+
+ dxy = _p - (self.refpix['XREF'],self.refpix['YREF'])
+ # Apply coefficients from distortion model here...
+ c = _p * 0.
+ for i in range(order+1):
+ for j in range(i+1):
+ c[:,0] = c[:,0] + _cx[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ c[:,1] = c[:,1] + _cy[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ xc = c[:,0]
+ yc = c[:,1]
+
+ # Convert results back to same form as original input
+ if _convert:
+ xc = xc.tolist()
+ yc = yc.tolist()
+ # If a single tuple was input, return just a single tuple
+ if len(xc) == 1:
+ xc = xc[0]
+ yc = yc[0]
+
+ return xc,yc
+
+ def setPScaleCoeffs(self,pscale):
+ self.cx[1,1] = pscale
+ self.cy[1,0] = pscale
+
+ self.refpix['PSCALE'] = pscale
+ self.pscale = pscale
+
+
+class IDCModel(GeometryModel):
+ """
+ This class will open the IDCTAB, select proper row based on
+ chip/direction and populate cx,cy arrays.
+ We also need to read in SCALE, XCOM,YCOM, XREF,YREF as well.
+ """
+ def __init__(self, idcfile, date=None, chip=1, direction='forward',
+ filter1='CLEAR1',filter2='CLEAR2',offtab=None, binned=1):
+ GeometryModel.__init__(self)
+ #
+ # Norder must be derived from the coeffs file itself,
+ # then the arrays can be setup. Thus, it needs to be
+ # done in the sub-class, not in the base class.
+ # Read in table.
+ # Populate cx,cy,scale, and other variables here.
+ #
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readIDCtab(idcfile,
+ chip=chip,direction=direction,filter1=filter1,filter2=filter2,
+ date=date, offtab=offtab)
+
+ if 'empty_model' in self.refpix and self.refpix['empty_model']:
+ pass
+ else:
+ self.refpix['PSCALE'] = self.refpix['PSCALE'] * binned
+ self.cx = self.cx * binned
+ self.cy = self.cy * binned
+ self.refpix['XREF'] = self.refpix['XREF'] / binned
+ self.refpix['YREF'] = self.refpix['YREF'] / binned
+ self.refpix['XSIZE'] = self.refpix['XSIZE'] / binned
+ self.refpix['YSIZE'] = self.refpix['YSIZE'] / binned
+
+ self.pscale = self.refpix['PSCALE']
+
+
+class WCSModel(GeometryModel):
+ """
+ This class sets up a distortion model based on coefficients
+ found in the image header.
+ """
+ def __init__(self,header,rootname):
+ GeometryModel.__init__(self)
+
+
+ if 'rootname' in header:
+ self.name = header['rootname']
+ else:
+ self.name = rootname
+ # Initialize all necessary distortion arrays with
+ # default model...
+ #self.cx,self.cy,self.refpix,self.order = mutil.defaultModel()
+
+ # Read in values from header, and update distortion arrays.
+ self.cx,self.cy,self.refpix,self.norder = mutil.readWCSCoeffs(header)
+
+ self.pscale = self.refpix['PSCALE']
+
+
+
+class DrizzleModel(GeometryModel):
+ """
+ This class will read in an ASCII Cubic
+ drizzle coeffs file and populate the cx,cy arrays.
+ """
+
+ def __init__(self, idcfile, scale = None):
+ GeometryModel.__init__(self)
+ #
+ # We now need to read in the file, populate cx,cy, and
+ # other variables as necessary.
+ #
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readCubicTable(idcfile)
+
+ # scale is the ratio wcs.pscale/model.pscale.
+ # model.pscale for WFPC2 is passed from REFDATA.
+ # This is needed for WFPC2 binned data.
+
+        if scale is not None:
+ self.pscale = scale
+ else:
+ self.pscale = self.refpix['PSCALE']
+
+ """
+ The above definition looks wrong.
+ In one case it's a ratio in the other it's pscale.
+
+ """
+
+class TraugerModel(GeometryModel):
+ """
+ This class will read in the ASCII Trauger coeffs
+ file, convert them to SIAF coefficients, then populate
+ the cx,cy arrays.
+ """
+ NORDER = 3
+
+ def __init__(self, idcfile,lam):
+ GeometryModel.__init__(self)
+ self.name = idcfile
+ self.cx,self.cy,self.refpix,self.norder = mutil.readTraugerTable(idcfile,lam)
+ self.pscale = self.refpix['PSCALE']
+ #
+ # Read in file here.
+ # Populate cx,cy, and other variables.
+ #
+
+
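Usage note (not part of the patch): the heart of GeometryModel.apply() is the
double polynomial sum over the CX/CY arrays. The sketch below evaluates that
sum for a made-up 2nd-order model with synthetic coefficients, so it runs
without an IDCTAB on disk.

    import numpy as np

    order = 2
    cx = np.zeros((order + 1, order + 1))
    cy = np.zeros((order + 1, order + 1))
    cx[1, 1] = 0.05        # linear terms carry the plate scale (arcsec/pixel)
    cy[1, 0] = 0.05
    cx[2, 2] = 1.0e-6      # small, invented quadratic distortion terms
    cy[2, 0] = -1.0e-6

    xref, yref = 2048.0, 1024.0
    pix = np.array([[2048.0, 1024.0], [2548.0, 1524.0]])   # (x, y) positions

    dxy = pix - (xref, yref)
    corr = np.zeros_like(pix)
    for i in range(order + 1):
        for j in range(i + 1):
            corr[:, 0] += cx[i, j] * dxy[:, 0]**j * dxy[:, 1]**(i - j)
            corr[:, 1] += cy[i, j] * dxy[:, 0]**j * dxy[:, 1]**(i - j)

    print(corr)            # offsets in arcsec from the reference position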
diff --git a/stwcs/distortion/mutil.py b/stwcs/distortion/mutil.py
new file mode 100644
index 0000000..ed6a1ea
--- /dev/null
+++ b/stwcs/distortion/mutil.py
@@ -0,0 +1,703 @@
+from __future__ import division, print_function # confidence high
+
+from stsci.tools import fileutil
+import numpy as np
+import calendar
+
+# Set up IRAF-compatible Boolean values
+yes = True
+no = False
+
+# This function reads the IDC table and generates the two matrices with
+# the geometric correction coefficients.
+#
+# INPUT: FITS object of open IDC table
+# OUTPUT: coefficient matrices for Fx and Fy
+#
+#### If 'tabname' == None: This should return a default, undistorted
+#### solution.
+#
+
+def readIDCtab (tabname, chip=1, date=None, direction='forward',
+ filter1=None,filter2=None, offtab=None):
+
+ """
+    Read IDCTAB, and optional OFFTAB if specified, and generate
+ the two matrices with the geometric correction coefficients.
+
+ If tabname == None, then return a default, undistorted solution.
+ If offtab is specified, dateobs also needs to be given.
+
+ """
+
+ # Return a default geometry model if no IDCTAB filename
+ # is given. This model will not distort the data in any way.
+    if tabname is None:
+ print('Warning: No IDCTAB specified! No distortion correction will be applied.')
+ return defaultModel()
+
+ # Implement default values for filters here to avoid the default
+ # being overwritten by values of None passed by user.
+    if filter1 is None or filter1.startswith('CLEAR') or filter1.strip() == '':
+        filter1 = 'CLEAR'
+    if filter2 is None or filter2.startswith('CLEAR') or filter2.strip() == '':
+        filter2 = 'CLEAR'
+
+    # Ensure that tabname is the full filename with fully expanded
+ # IRAF variables; i.e. 'jref$mc41442gj_idc.fits' should get
+ # expanded to '/data/cdbs7/jref/mc41442gj_idc.fits' before
+ # being used here.
+ # Open up IDC table now...
+ try:
+ ftab = fileutil.openImage(tabname)
+ except:
+ err_str = "------------------------------------------------------------------------ \n"
+ err_str += "WARNING: the IDCTAB geometric distortion file specified in the image \n"
+ err_str += "header was not found on disk. Please verify that your environment \n"
+ err_str += "variable ('jref'/'uref'/'oref'/'nref') has been correctly defined. If \n"
+ err_str += "you do not have the IDCTAB file, you may obtain the latest version \n"
+ err_str += "of it from the relevant instrument page on the STScI HST website: \n"
+ err_str += "http://www.stsci.edu/hst/ For WFPC2, STIS and NICMOS data, the \n"
+ err_str += "present run will continue using the old coefficients provided in \n"
+ err_str += "the Dither Package (ca. 1995-1998). \n"
+ err_str += "------------------------------------------------------------------------ \n"
+ raise IOError(err_str)
+
+    # First we need to read in the coefficients from the IDC
+    # table and populate the Fx and Fy matrices.
+
+ if 'DETECTOR' in ftab['PRIMARY'].header:
+ detector = ftab['PRIMARY'].header['DETECTOR']
+ else:
+ if 'CAMERA' in ftab['PRIMARY'].header:
+ detector = str(ftab['PRIMARY'].header['CAMERA'])
+ else:
+ detector = 1
+ # First, read in TDD coeffs if present
+ phdr = ftab['PRIMARY'].header
+ instrument = phdr['INSTRUME']
+ if instrument == 'ACS' and detector == 'WFC':
+ skew_coeffs = read_tdd_coeffs(phdr, chip=chip)
+ else:
+ skew_coeffs = None
+
+ # Set default filters for SBC
+ if detector == 'SBC':
+ if filter1 == 'CLEAR':
+ filter1 = 'F115LP'
+ filter2 = 'N/A'
+ if filter2 == 'CLEAR':
+ filter2 = 'N/A'
+
+ # Read FITS header to determine order of fit, i.e. k
+ norder = ftab['PRIMARY'].header['NORDER']
+ if norder < 3:
+ order = 3
+ else:
+ order = norder
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ #Determine row from which to get the coefficients.
+ # How many rows do we have in the table...
+ fshape = ftab[1].data.shape
+ colnames = ftab[1].data.names
+ row = -1
+
+ # Loop over all the rows looking for the one which corresponds
+ # to the value of CCDCHIP we are working on...
+ for i in range(fshape[0]):
+
+ try:
+ # Match FILTER combo to appropriate row,
+ #if there is a filter column in the IDCTAB...
+ if 'FILTER1' in colnames and 'FILTER2' in colnames:
+
+ filt1 = ftab[1].data.field('FILTER1')[i]
+ if filt1.find('CLEAR') > -1: filt1 = filt1[:5]
+
+ filt2 = ftab[1].data.field('FILTER2')[i]
+ if filt2.find('CLEAR') > -1: filt2 = filt2[:5]
+ else:
+ if 'OPT_ELEM' in colnames:
+ filt1 = ftab[1].data.field('OPT_ELEM')
+ if filt1.find('CLEAR') > -1: filt1 = filt1[:5]
+ else:
+ filt1 = filter1
+
+ if 'FILTER' in colnames:
+ _filt = ftab[1].data.field('FILTER')[i]
+ if _filt.find('CLEAR') > -1: _filt = _filt[:5]
+ if 'OPT_ELEM' in colnames:
+ filt2 = _filt
+ else:
+ filt1 = _filt
+ filt2 = 'CLEAR'
+ else:
+ filt2 = filter2
+ except:
+ # Otherwise assume all rows apply and compare to input filters...
+ filt1 = filter1
+ filt2 = filter2
+
+ if 'DETCHIP' in colnames:
+ detchip = ftab[1].data.field('DETCHIP')[i]
+ if not str(detchip).isdigit():
+ detchip = 1
+ else:
+ detchip = 1
+
+ if 'DIRECTION' in colnames:
+ direct = ftab[1].data.field('DIRECTION')[i].lower().strip()
+ else:
+ direct = 'forward'
+
+ if filt1 == filter1.strip() and filt2 == filter2.strip():
+ if direct == direction.strip():
+ if int(detchip) == int(chip) or int(detchip) == -999:
+ row = i
+ break
+
+ joinstr = ','
+ if 'CLEAR' in filter1:
+ f1str = ''
+ joinstr = ''
+ else:
+ f1str = filter1.strip()
+ if 'CLEAR' in filter2:
+ f2str = ''
+ joinstr = ''
+ else:
+ f2str = filter2.strip()
+ filtstr = (joinstr.join([f1str,f2str])).strip()
+ if row < 0:
+ err_str = '\nProblem finding row in IDCTAB! Could not find row matching:\n'
+ err_str += ' CHIP: '+str(detchip)+'\n'
+ err_str += ' FILTERS: '+filtstr+'\n'
+ ftab.close()
+ del ftab
+ raise LookupError(err_str)
+ else:
+ print('- IDCTAB: Distortion model from row',str(row+1),'for chip',detchip,':',filtstr)
+
+ # Read in V2REF and V3REF: this can either come from current table,
+ # or from an OFFTAB if time-dependent (i.e., for WFPC2)
+ theta = None
+ if 'V2REF' in colnames:
+ v2ref = ftab[1].data.field('V2REF')[row]
+ v3ref = ftab[1].data.field('V3REF')[row]
+ else:
+ # Read V2REF/V3REF from offset table (OFFTAB)
+ if offtab:
+ v2ref,v3ref,theta = readOfftab(offtab, date, chip=detchip)
+ else:
+ v2ref = 0.0
+ v3ref = 0.0
+
+ if theta == None:
+ if 'THETA' in colnames:
+ theta = ftab[1].data.field('THETA')[row]
+ else:
+ theta = 0.0
+
+ refpix = {}
+ refpix['XREF'] = ftab[1].data.field('XREF')[row]
+ refpix['YREF'] = ftab[1].data.field('YREF')[row]
+ refpix['XSIZE'] = ftab[1].data.field('XSIZE')[row]
+ refpix['YSIZE'] = ftab[1].data.field('YSIZE')[row]
+ refpix['PSCALE'] = round(ftab[1].data.field('SCALE')[row],8)
+ refpix['V2REF'] = v2ref
+ refpix['V3REF'] = v3ref
+ refpix['THETA'] = theta
+ refpix['XDELTA'] = 0.0
+ refpix['YDELTA'] = 0.0
+ refpix['DEFAULT_SCALE'] = yes
+ refpix['centered'] = no
+ refpix['skew_coeffs'] = skew_coeffs
+ # Now that we know which row to look at, read coefficients into the
+ # numeric arrays we have set up...
+ # Setup which column name convention the IDCTAB follows
+ # either: A,B or CX,CY
+ if 'CX10' in ftab[1].data.names:
+ cxstr = 'CX'
+ cystr = 'CY'
+ else:
+ cxstr = 'A'
+ cystr = 'B'
+
+ for i in range(norder+1):
+ if i > 0:
+ for j in range(i+1):
+ xcname = cxstr+str(i)+str(j)
+ ycname = cystr+str(i)+str(j)
+ fx[i,j] = ftab[1].data.field(xcname)[row]
+ fy[i,j] = ftab[1].data.field(ycname)[row]
+
+ ftab.close()
+ del ftab
+
+ # If CX11 is 1.0 and not equal to the PSCALE, then the
+ # coeffs need to be scaled
+
+ if fx[1,1] == 1.0 and abs(fx[1,1]) != refpix['PSCALE']:
+ fx *= refpix['PSCALE']
+ fy *= refpix['PSCALE']
+
+ # Return arrays and polynomial order read in from table.
+ # NOTE: XREF and YREF are stored in Fx,Fy arrays respectively.
+ return fx,fy,refpix,order
+#
+#
+# Time-dependent skew correction coefficients (only ACS/WFC)
+#
+#
+def read_tdd_coeffs(phdr, chip=1):
+ ''' Read in the TDD related keywords from the PRIMARY header of the IDCTAB
+ '''
+    # Ensure we have an integer form of chip
+ ic = int(chip)
+
+ skew_coeffs = {}
+ skew_coeffs['TDDORDER'] = 0
+ skew_coeffs['TDD_DATE'] = ""
+ skew_coeffs['TDD_A'] = None
+ skew_coeffs['TDD_B'] = None
+ skew_coeffs['TDD_CY_BETA'] = None
+ skew_coeffs['TDD_CY_ALPHA'] = None
+ skew_coeffs['TDD_CX_BETA'] = None
+ skew_coeffs['TDD_CX_ALPHA'] = None
+
+ # Skew-based TDD coefficients
+ skew_terms = ['TDD_CTB','TDD_CTA','TDD_CYA','TDD_CYB','TDD_CXA','TDD_CXB']
+ for s in skew_terms:
+ skew_coeffs[s] = None
+
+ if "TDD_CTB1" in phdr:
+ # We have the 2015-calibrated TDD correction to apply
+ # This correction is based on correcting the skew in the linear terms
+        # not just the polynomial terms
+ print("Using 2015-calibrated VAFACTOR-corrected TDD correction...")
+ skew_coeffs['TDD_DATE'] = phdr['TDD_DATE']
+ for s in skew_terms:
+ skew_coeffs[s] = phdr.get('{0}{1}'.format(s,ic),None)
+
+ elif "TDD_CYB1" in phdr:
+ # We have 2014-calibrated TDD correction to apply, not J.A.-derived values
+ print("Using 2014-calibrated TDD correction...")
+ skew_coeffs['TDD_DATE'] = phdr['TDD_DATE']
+ # Read coefficients for TDD Y coefficient
+ cyb_kw = 'TDD_CYB{0}'.format(int(chip))
+ skew_coeffs['TDD_CY_BETA'] = phdr.get(cyb_kw,None)
+ cya_kw = 'TDD_CYA{0}'.format(int(chip))
+ tdd_cya = phdr.get(cya_kw,None)
+ if tdd_cya == 0 or tdd_cya == 'N/A': tdd_cya = None
+ skew_coeffs['TDD_CY_ALPHA'] = tdd_cya
+
+ # Read coefficients for TDD X coefficient
+ cxb_kw = 'TDD_CXB{0}'.format(int(chip))
+ skew_coeffs['TDD_CX_BETA'] = phdr.get(cxb_kw,None)
+ cxa_kw = 'TDD_CXA{0}'.format(int(chip))
+ tdd_cxa = phdr.get(cxa_kw,None)
+ if tdd_cxa == 0 or tdd_cxa == 'N/A': tdd_cxa = None
+ skew_coeffs['TDD_CX_ALPHA'] = tdd_cxa
+
+ else:
+ if "TDDORDER" in phdr:
+ n = int(phdr["TDDORDER"])
+ else:
+ print('TDDORDER kw not present, using default TDD correction')
+ return None
+
+ a = np.zeros((n+1,), np.float64)
+ b = np.zeros((n+1,), np.float64)
+ for i in range(n+1):
+ a[i] = phdr.get(("TDD_A%d" % i), 0.0)
+ b[i] = phdr.get(("TDD_B%d" % i), 0.0)
+ if (a==0).all() and (b==0).all():
+            print('Warning: TDD_A and TDD_B coefficients have values of 0, \n \
+                  but TDDORDER is %d.' % n)
+
+ skew_coeffs['TDDORDER'] = n
+ skew_coeffs['TDD_DATE'] = phdr['TDD_DATE']
+ skew_coeffs['TDD_A'] = a
+ skew_coeffs['TDD_B'] = b
+
+ return skew_coeffs
+
+def readOfftab(offtab, date, chip=None):
+    """
+    Read V2REF, V3REF (and THETA) for the given observation date from a
+    specified offset table (OFFTAB). If no OFFTAB is given, return (0., 0.).
+    """
+    if offtab is None:
+        return 0., 0.
+
+ # Provide a default value for chip
+ if chip:
+ detchip = chip
+ else:
+ detchip = 1
+
+ # Open up IDC table now...
+ try:
+ ftab = fileutil.openImage(offtab)
+ except:
+ raise IOError("Offset table '%s' not valid as specified!" % offtab)
+
+ #Determine row from which to get the coefficients.
+ # How many rows do we have in the table...
+ fshape = ftab[1].data.shape
+ colnames = ftab[1].data.names
+ row = -1
+
+ row_start = None
+ row_end = None
+
+ v2end = None
+ v3end = None
+ date_end = None
+ theta_end = None
+
+ num_date = convertDate(date)
+ # Loop over all the rows looking for the one which corresponds
+ # to the value of CCDCHIP we are working on...
+ for ri in range(fshape[0]):
+ i = fshape[0] - ri - 1
+ if 'DETCHIP' in colnames:
+ detchip = ftab[1].data.field('DETCHIP')[i]
+ else:
+ detchip = 1
+
+ obsdate = convertDate(ftab[1].data.field('OBSDATE')[i])
+
+ # If the row is appropriate for the chip...
+ # Interpolate between dates
+ if int(detchip) == int(chip) or int(detchip) == -999:
+ if num_date <= obsdate:
+ date_end = obsdate
+ v2end = ftab[1].data.field('V2REF')[i]
+ v3end = ftab[1].data.field('V3REF')[i]
+ theta_end = ftab[1].data.field('THETA')[i]
+ row_end = i
+ continue
+
+ if row_end == None and (num_date > obsdate):
+ date_end = obsdate
+ v2end = ftab[1].data.field('V2REF')[i]
+ v3end = ftab[1].data.field('V3REF')[i]
+ theta_end = ftab[1].data.field('THETA')[i]
+ row_end = i
+ continue
+
+ if num_date > obsdate:
+ date_start = obsdate
+ v2start = ftab[1].data.field('V2REF')[i]
+ v3start = ftab[1].data.field('V3REF')[i]
+ theta_start = ftab[1].data.field('THETA')[i]
+ row_start = i
+ break
+
+ ftab.close()
+ del ftab
+
+ if row_start == None and row_end == None:
+ print('Row corresponding to DETCHIP of ',detchip,' was not found!')
+ raise LookupError
+ elif row_start == None:
+ print('- OFFTAB: Offset defined by row',str(row_end+1))
+ else:
+ print('- OFFTAB: Offset interpolated from rows',str(row_start+1),'and',str(row_end+1))
+
+ # Now, do the interpolation for v2ref, v3ref, and theta
+ if row_start == None or row_end == row_start:
+ # We are processing an observation taken after the last calibration
+ date_start = date_end
+ v2start = v2end
+ v3start = v3end
+ _fraction = 0.
+ theta_start = theta_end
+ else:
+ _fraction = float((num_date - date_start)) / float((date_end - date_start))
+
+ v2ref = _fraction * (v2end - v2start) + v2start
+ v3ref = _fraction * (v3end - v3start) + v3start
+ theta = _fraction * (theta_end - theta_start) + theta_start
+
+ return v2ref,v3ref,theta
+
+def readWCSCoeffs(header):
+
+ #Read distortion coeffs from WCS header keywords and
+ #populate distortion coeffs arrays.
+
+ # Read in order for polynomials
+ _xorder = header['a_order']
+ _yorder = header['b_order']
+ order = max(max(_xorder,_yorder),3)
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ # Read in CD matrix
+ _cd11 = header['cd1_1']
+ _cd12 = header['cd1_2']
+ _cd21 = header['cd2_1']
+ _cd22 = header['cd2_2']
+ _cdmat = np.array([[_cd11,_cd12],[_cd21,_cd22]])
+ _theta = np.arctan2(-_cd12,_cd22)
+ _rotmat = np.array([[np.cos(_theta),np.sin(_theta)],
+ [-np.sin(_theta),np.cos(_theta)]])
+ _rCD = np.dot(_rotmat,_cdmat)
+ _skew = np.arcsin(-_rCD[1][0] / _rCD[0][0])
+ _scale = _rCD[0][0] * np.cos(_skew) * 3600.
+ _scale2 = _rCD[1][1] * 3600.
+
+ # Set up refpix
+ refpix = {}
+ refpix['XREF'] = header['crpix1']
+ refpix['YREF'] = header['crpix2']
+ refpix['XSIZE'] = header['naxis1']
+ refpix['YSIZE'] = header['naxis2']
+ refpix['PSCALE'] = _scale
+ refpix['V2REF'] = 0.
+ refpix['V3REF'] = 0.
+ refpix['THETA'] = np.rad2deg(_theta)
+ refpix['XDELTA'] = 0.0
+ refpix['YDELTA'] = 0.0
+ refpix['DEFAULT_SCALE'] = yes
+ refpix['centered'] = yes
+
+
+ # Set up template for coeffs keyword names
+ cxstr = 'A_'
+ cystr = 'B_'
+ # Read coeffs into their own matrix
+ for i in range(_xorder+1):
+ for j in range(i+1):
+ xcname = cxstr+str(j)+'_'+str(i-j)
+ if xcname in header:
+ fx[i,j] = header[xcname]
+
+ # Extract Y coeffs separately as a different order may
+ # have been used to fit it.
+ for i in range(_yorder+1):
+ for j in range(i+1):
+ ycname = cystr+str(j)+'_'+str(i-j)
+ if ycname in header:
+ fy[i,j] = header[ycname]
+
+ # Now set the linear terms
+ fx[0][0] = 1.0
+ fy[0][0] = 1.0
+
+ return fx,fy,refpix,order
+
+
+def readTraugerTable(idcfile,wavelength):
+
+ # Return a default geometry model if no coefficients filename
+ # is given. This model will not distort the data in any way.
+    if idcfile is None:
+ return fileutil.defaultModel()
+
+ # Trauger coefficients only result in a cubic file...
+ order = 3
+ numco = 10
+ a_coeffs = [0] * numco
+ b_coeffs = [0] * numco
+ indx = _MgF2(wavelength)
+
+ ifile = open(idcfile,'r')
+ # Search for the first line of the coefficients
+ _line = fileutil.rAsciiLine(ifile)
+ while _line[:7].lower() != 'trauger':
+ _line = fileutil.rAsciiLine(ifile)
+ # Read in each row of coefficients,split them into their values,
+ # and convert them into cubic coefficients based on
+ # index of refraction value for the given wavelength
+ # Build X coefficients from first 10 rows of Trauger coefficients
+ j = 0
+ while j < 20:
+ _line = fileutil.rAsciiLine(ifile)
+ if _line == '': continue
+ _lc = _line.split()
+ if j < 10:
+ a_coeffs[j] = float(_lc[0])+float(_lc[1])*(indx-1.5)+float(_lc[2])*(indx-1.5)**2
+ else:
+ b_coeffs[j-10] = float(_lc[0])+float(_lc[1])*(indx-1.5)+float(_lc[2])*(indx-1.5)**2
+ j = j + 1
+
+ ifile.close()
+ del ifile
+
+ # Now, convert the coefficients into a Numeric array
+ # with the right coefficients in the right place.
+ # Populate output values now...
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ # Assign the coefficients to their array positions
+ fx[0,0] = 0.
+ fx[1] = np.array([a_coeffs[2],a_coeffs[1],0.,0.],dtype=np.float64)
+ fx[2] = np.array([a_coeffs[5],a_coeffs[4],a_coeffs[3],0.],dtype=np.float64)
+ fx[3] = np.array([a_coeffs[9],a_coeffs[8],a_coeffs[7],a_coeffs[6]],dtype=np.float64)
+ fy[0,0] = 0.
+ fy[1] = np.array([b_coeffs[2],b_coeffs[1],0.,0.],dtype=np.float64)
+ fy[2] = np.array([b_coeffs[5],b_coeffs[4],b_coeffs[3],0.],dtype=np.float64)
+ fy[3] = np.array([b_coeffs[9],b_coeffs[8],b_coeffs[7],b_coeffs[6]],dtype=np.float64)
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = None
+ refpix['V3REF'] = None
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['centered'] = yes
+
+ return fx,fy,refpix,order
+
+
+def readCubicTable(idcfile):
+ # Assumption: this will only be used for cubic file...
+ order = 3
+ # Also, this function does NOT perform any scaling on
+ # the coefficients, it simply passes along what is found
+ # in the file as is...
+
+ # Return a default geometry model if no coefficients filename
+ # is given. This model will not distort the data in any way.
+    if idcfile is None:
+ return fileutil.defaultModel()
+
+ ifile = open(idcfile,'r')
+ # Search for the first line of the coefficients
+ _line = fileutil.rAsciiLine(ifile)
+
+    _found = no
+    while _found == no:
+        if _line[:7] in ['cubic', 'quartic', 'quintic'] or _line[:4] == 'poly':
+            _found = yes
+            break
+        _line = fileutil.rAsciiLine(ifile)
+
+ # Read in each row of coefficients, without line breaks or newlines
+ # split them into their values, and create a list for A coefficients
+ # and another list for the B coefficients
+ _line = fileutil.rAsciiLine(ifile)
+ a_coeffs = _line.split()
+
+ x0 = float(a_coeffs[0])
+ _line = fileutil.rAsciiLine(ifile)
+ a_coeffs[len(a_coeffs):] = _line.split()
+ # Scale coefficients for use within PyDrizzle
+ for i in range(len(a_coeffs)):
+ a_coeffs[i] = float(a_coeffs[i])
+
+ _line = fileutil.rAsciiLine(ifile)
+ b_coeffs = _line.split()
+ y0 = float(b_coeffs[0])
+ _line = fileutil.rAsciiLine(ifile)
+ b_coeffs[len(b_coeffs):] = _line.split()
+ # Scale coefficients for use within PyDrizzle
+ for i in range(len(b_coeffs)):
+ b_coeffs[i] = float(b_coeffs[i])
+
+ ifile.close()
+ del ifile
+ # Now, convert the coefficients into a Numeric array
+ # with the right coefficients in the right place.
+ # Populate output values now...
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ # Assign the coefficients to their array positions
+ fx[0,0] = 0.
+ fx[1] = np.array([a_coeffs[2],a_coeffs[1],0.,0.],dtype=np.float64)
+ fx[2] = np.array([a_coeffs[5],a_coeffs[4],a_coeffs[3],0.],dtype=np.float64)
+ fx[3] = np.array([a_coeffs[9],a_coeffs[8],a_coeffs[7],a_coeffs[6]],dtype=np.float64)
+ fy[0,0] = 0.
+ fy[1] = np.array([b_coeffs[2],b_coeffs[1],0.,0.],dtype=np.float64)
+ fy[2] = np.array([b_coeffs[5],b_coeffs[4],b_coeffs[3],0.],dtype=np.float64)
+ fy[3] = np.array([b_coeffs[9],b_coeffs[8],b_coeffs[7],b_coeffs[6]],dtype=np.float64)
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = x0
+ refpix['V3REF'] = y0
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['centered'] = yes
+
+ return fx,fy,refpix,order
+
+def factorial(n):
+ """ Compute a factorial for integer n. """
+ m = 1
+ for i in range(int(n)):
+ m = m * (i+1)
+ return m
+
+def combin(j,n):
+ """ Return the combinatorial factor for j in n."""
+ return (factorial(j) / (factorial(n) * factorial( (j-n) ) ) )
+
+
+def defaultModel():
+ """ This function returns a default, non-distorting model
+ that can be used with the data.
+ """
+ order = 3
+
+ fx = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+ fy = np.zeros(shape=(order+1,order+1),dtype=np.float64)
+
+ fx[1,1] = 1.
+ fy[1,0] = 1.
+
+ # Used in Pattern.computeOffsets()
+ refpix = {}
+ refpix['empty_model'] = yes
+ refpix['XREF'] = None
+ refpix['YREF'] = None
+ refpix['V2REF'] = 0.
+ refpix['XSIZE'] = 0.
+ refpix['YSIZE'] = 0.
+ refpix['V3REF'] = 0.
+ refpix['XDELTA'] = 0.
+ refpix['YDELTA'] = 0.
+ refpix['PSCALE'] = None
+ refpix['DEFAULT_SCALE'] = no
+ refpix['THETA'] = 0.
+ refpix['centered'] = yes
+ return fx,fy,refpix,order
+
+# Function to compute the index of refraction for MgF2 at
+# the specified wavelength for use with Trauger coefficients
+def _MgF2(lam):
+ _sig = pow((1.0e7/lam),2)
+ return np.sqrt(1.0 + 2.590355e10/(5.312993e10-_sig) +
+ 4.4543708e9/(11.17083e9-_sig) + 4.0838897e5/(1.766361e5-_sig))
+
+
+def convertDate(date):
+ """ Converts the DATE-OBS date string into an integer of the
+ number of seconds since 1970.0 using calendar.timegm().
+
+ INPUT: DATE-OBS in format of 'YYYY-MM-DD'.
+ OUTPUT: Date (integer) in seconds.
+ """
+
+ _dates = date.split('-')
+ _date_tuple = (int(_dates[0]), int(_dates[1]), int(_dates[2]), 0, 0, 0, 0, 0, 0)
+
+ return calendar.timegm(_date_tuple)
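Usage note (not part of the patch): two of the small helpers above, written out
as standalone equivalents so they can be tried without the package installed.
convertDate() feeds the OBSDATE interpolation in readOfftab(), and combin() is
the binomial factor used by GeometryModel.shift() in models.py.

    import calendar
    from math import factorial

    def convert_date(date):
        # 'YYYY-MM-DD' -> seconds since 1970.0 (UTC), as in mutil.convertDate()
        y, m, d = (int(v) for v in date.split('-'))
        return calendar.timegm((y, m, d, 0, 0, 0, 0, 0, 0))

    def combin(j, n):
        # binomial coefficient C(j, n), as in mutil.combin()
        return factorial(j) // (factorial(n) * factorial(j - n))

    print(convert_date('1998-03-25'))    # 890784000
    print(combin(4, 2))                  # 6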
diff --git a/stwcs/distortion/utils.py b/stwcs/distortion/utils.py
new file mode 100644
index 0000000..4228f62
--- /dev/null
+++ b/stwcs/distortion/utils.py
@@ -0,0 +1,270 @@
+from __future__ import division, print_function # confidence high
+
+import os
+
+import numpy as np
+from numpy import linalg
+from astropy import wcs as pywcs
+
+from stwcs import wcsutil
+from stwcs import updatewcs
+from numpy import sqrt, arctan2
+from stsci.tools import fileutil
+
+def output_wcs(list_of_wcsobj, ref_wcs=None, owcs=None, undistort=True):
+ """
+ Create an output WCS.
+
+ Parameters
+ ----------
+    list_of_wcsobj : Python list
+        a list of HSTWCS objects
+    ref_wcs : an HSTWCS object
+        to be used as a reference WCS, in case owcs is None.
+        If ref_wcs is None (default), the first member of the list
+        is used as a reference.
+    owcs : an HSTWCS object
+        the tangent plane defined by this object is used as a reference
+    undistort : boolean (default: True)
+        whether to create an undistorted output WCS
+ """
+ fra_dec = np.vstack([w.calc_footprint() for w in list_of_wcsobj])
+ wcsname = list_of_wcsobj[0].wcs.name
+
+ # This new algorithm may not be strictly necessary, but it may be more
+ # robust in handling regions near the poles or at 0h RA.
+ crval1,crval2 = computeFootprintCenter(fra_dec)
+
+ crval = np.array([crval1,crval2], dtype=np.float64) # this value is now zero-based
+ if owcs is None:
+ if ref_wcs is None:
+ ref_wcs = list_of_wcsobj[0].deepcopy()
+ if undistort:
+ #outwcs = undistortWCS(ref_wcs)
+ outwcs = make_orthogonal_cd(ref_wcs)
+ else:
+ outwcs = ref_wcs.deepcopy()
+ outwcs.wcs.crval = crval
+ outwcs.wcs.set()
+ outwcs.pscale = sqrt(outwcs.wcs.cd[0,0]**2 + outwcs.wcs.cd[1,0]**2)*3600.
+ outwcs.orientat = arctan2(outwcs.wcs.cd[0,1],outwcs.wcs.cd[1,1]) * 180./np.pi
+ else:
+ outwcs = owcs.deepcopy()
+ outwcs.pscale = sqrt(outwcs.wcs.cd[0,0]**2 + outwcs.wcs.cd[1,0]**2)*3600.
+ outwcs.orientat = arctan2(outwcs.wcs.cd[0,1],outwcs.wcs.cd[1,1]) * 180./np.pi
+
+ tanpix = outwcs.wcs.s2p(fra_dec, 0)['pixcrd']
+
+ outwcs._naxis1 = int(np.ceil(tanpix[:,0].max() - tanpix[:,0].min()))
+ outwcs._naxis2 = int(np.ceil(tanpix[:,1].max() - tanpix[:,1].min()))
+ crpix = np.array([outwcs._naxis1/2., outwcs._naxis2/2.], dtype=np.float64)
+ outwcs.wcs.crpix = crpix
+ outwcs.wcs.set()
+ tanpix = outwcs.wcs.s2p(fra_dec, 0)['pixcrd']
+
+ # shift crpix to take into account (floating-point value of) position of
+ # corner pixel relative to output frame size: no rounding necessary...
+ newcrpix = np.array([crpix[0]+tanpix[:,0].min(), crpix[1]+
+ tanpix[:,1].min()])
+
+ newcrval = outwcs.wcs.p2s([newcrpix], 1)['world'][0]
+ outwcs.wcs.crval = newcrval
+ outwcs.wcs.set()
+ outwcs.wcs.name = wcsname # keep track of label for this solution
+ return outwcs
+
+def computeFootprintCenter(edges):
+ """ Geographic midpoint in spherical coords for points defined by footprints.
+ Algorithm derived from: http://www.geomidpoint.com/calculation.html
+
+ This algorithm should be more robust against discontinuities at the poles.
+ """
+ alpha = np.deg2rad(edges[:,0])
+ dec = np.deg2rad(edges[:,1])
+
+ xmean = np.mean(np.cos(dec)*np.cos(alpha))
+ ymean = np.mean(np.cos(dec)*np.sin(alpha))
+ zmean = np.mean(np.sin(dec))
+
+ crval1 = np.rad2deg(np.arctan2(ymean,xmean))%360.0
+ crval2 = np.rad2deg(np.arctan2(zmean,np.sqrt(xmean*xmean+ymean*ymean)))
+
+ return crval1,crval2
+
+def make_orthogonal_cd(wcs):
+ """ Create a perfect (square, orthogonal, undistorted) CD matrix from the
+ input WCS.
+ """
+ # get determinant of the CD matrix:
+ cd = wcs.celestial.pixel_scale_matrix
+
+
+ if hasattr(wcs, 'idcv2ref') and wcs.idcv2ref is not None:
+ # Convert the PA_V3 orientation to the orientation at the aperture
+ # This is for the reference chip only - we use this for the
+ # reference tangent plane definition
+ # It has the same orientation as the reference chip
+ pv = updatewcs.makewcs.troll(wcs.pav3,wcs.wcs.crval[1],wcs.idcv2ref,wcs.idcv3ref)
+ # Add the chip rotation angle
+ if wcs.idctheta:
+ pv += wcs.idctheta
+ cs = np.cos(np.deg2rad(pv))
+ sn = np.sin(np.deg2rad(pv))
+ pvmat = np.dot(np.array([[cs,sn],[-sn,cs]]),wcs.parity)
+ rot = np.arctan2(pvmat[0,1],pvmat[1,1])
+ scale = wcs.idcscale/3600.
+
+ det = linalg.det(wcs.parity)
+
+ else:
+
+ det = linalg.det(cd)
+
+ # find pixel scale:
+ if hasattr(wcs, 'idcscale'):
+ scale = (wcs.idcscale) / 3600. # HST pixel scale provided
+ else:
+ scale = np.sqrt(np.abs(det)) # find as sqrt(pixel area)
+
+ # find Y-axis orientation:
+    if hasattr(wcs, 'orientat'):
+ rot = np.deg2rad(wcs.orientat) # use HST ORIENTAT
+ else:
+ rot = np.arctan2(wcs.wcs.cd[0,1], wcs.wcs.cd[1,1]) # angle of the Y-axis
+
+ par = -1 if det < 0.0 else 1
+
+ # create a perfectly square, orthogonal WCS
+ sn = np.sin(rot)
+ cs = np.cos(rot)
+ orthogonal_cd = scale * np.array([[par*cs, sn], [-par*sn, cs]])
+
+ lin_wcsobj = pywcs.WCS()
+ lin_wcsobj.wcs.cd = orthogonal_cd
+ lin_wcsobj.wcs.set()
+ lin_wcsobj.orientat = arctan2(lin_wcsobj.wcs.cd[0,1],lin_wcsobj.wcs.cd[1,1]) * 180./np.pi
+ lin_wcsobj.pscale = sqrt(lin_wcsobj.wcs.cd[0,0]**2 + lin_wcsobj.wcs.cd[1,0]**2)*3600.
+ lin_wcsobj.wcs.crval = np.array([0.,0.])
+ lin_wcsobj.wcs.crpix = np.array([0.,0.])
+ lin_wcsobj.wcs.ctype = ['RA---TAN', 'DEC--TAN']
+ lin_wcsobj.wcs.set()
+
+ return lin_wcsobj
+
+def undistortWCS(wcsobj):
+ """
+ Creates an undistorted linear WCS by applying the IDCTAB distortion model
+ to a 3-point square. The new ORIENTAT angle is calculated as well as the
+ plate scale in the undistorted frame.
+ """
+ assert isinstance(wcsobj, pywcs.WCS)
+ from . import coeff_converter
+
+ cx, cy = coeff_converter.sip2idc(wcsobj)
+ # cx, cy can be None because either there is no model available
+ # or updatewcs was not run.
+ if cx is None or cy is None:
+ if foundIDCTAB(wcsobj.idctab):
+ m = """IDCTAB is present but distortion model is missing.
+ Run updatewcs() to update the headers or
+ pass 'undistort=False' keyword to output_wcs().\n
+ """
+ raise RuntimeError(m)
+ else:
+ print('Distortion model is not available, using input reference image for output WCS.\n')
+ return wcsobj.copy()
+ crpix1 = wcsobj.wcs.crpix[0]
+ crpix2 = wcsobj.wcs.crpix[1]
+ xy = np.array([(crpix1,crpix2),(crpix1+1.,crpix2),(crpix1,crpix2+1.)],dtype=np.double)
+ offsets = np.array([wcsobj.ltv1, wcsobj.ltv2])
+ px = xy + offsets
+ #order = wcsobj.sip.a_order
+ pscale = wcsobj.idcscale
+ #pixref = np.array([wcsobj.sip.SIPREF1, wcsobj.sip.SIPREF2])
+
+ tan_pix = apply_idc(px, cx, cy, wcsobj.wcs.crpix, pscale, order=1)
+ xc = tan_pix[:,0]
+ yc = tan_pix[:,1]
+ am = xc[1] - xc[0]
+ bm = xc[2] - xc[0]
+ cm = yc[1] - yc[0]
+ dm = yc[2] - yc[0]
+ cd_mat = np.array([[am,bm],[cm,dm]],dtype=np.double)
+
+ # Check the determinant for singularity
+ _det = (am * dm) - (bm * cm)
+ if ( _det == 0.0):
+ print('Singular matrix in updateWCS, aborting ...')
+ return
+
+ lin_wcsobj = pywcs.WCS()
+ cd_inv = np.linalg.inv(cd_mat)
+ cd = np.dot(wcsobj.wcs.cd, cd_inv).astype(np.float64)
+ lin_wcsobj.wcs.cd = cd
+ lin_wcsobj.wcs.set()
+ lin_wcsobj.orientat = arctan2(lin_wcsobj.wcs.cd[0,1],lin_wcsobj.wcs.cd[1,1]) * 180./np.pi
+ lin_wcsobj.pscale = sqrt(lin_wcsobj.wcs.cd[0,0]**2 + lin_wcsobj.wcs.cd[1,0]**2)*3600.
+ lin_wcsobj.wcs.crval = np.array([0.,0.])
+ lin_wcsobj.wcs.crpix = np.array([0.,0.])
+ lin_wcsobj.wcs.ctype = ['RA---TAN', 'DEC--TAN']
+ lin_wcsobj.wcs.set()
+ return lin_wcsobj
+
+def apply_idc(pixpos, cx, cy, pixref, pscale= None, order=None):
+ """
+ Apply the IDCTAB polynomial distortion model to pixel positions.
+ pixpos must be already corrected for ltv1/2.
+
+ Parameters
+ ----------
+    pixpos : a 2D numpy array of (x, y) pixel positions to be distortion corrected
+    cx, cy : IDC model distortion coefficients
+    pixref : reference pixel position
+
+ """
+ if cx is None:
+ return pixpos
+
+ if order is None:
+ print('Unknown order of distortion model \n')
+ return pixpos
+ if pscale is None:
+ print('Unknown model plate scale\n')
+ return pixpos
+
+ # Apply in the same way that 'drizzle' would...
+    _cx = cx / pscale
+    _cy = cy / pscale
+ _p = pixpos
+
+ # Do NOT include any zero-point terms in CX,CY here
+ # as they should not be scaled by plate-scale like rest
+ # of coeffs... This makes the computations consistent
+ # with 'drizzle'. WJH 17-Feb-2004
+ _cx[0,0] = 0.
+ _cy[0,0] = 0.
+
+ dxy = _p - pixref
+ # Apply coefficients from distortion model here...
+
+ c = _p * 0.
+ for i in range(order+1):
+ for j in range(i+1):
+ c[:,0] = c[:,0] + _cx[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+ c[:,1] = c[:,1] + _cy[i][j] * pow(dxy[:,0],j) * pow(dxy[:,1],(i-j))
+
+ return c
+
+def foundIDCTAB(idctab):
+ idctab_found = True
+ try:
+ idctab = fileutil.osfn(idctab)
+ if idctab == 'N/A' or idctab == "":
+ idctab_found = False
+ if os.path.exists(idctab):
+ idctab_found = True
+ else:
+ idctab_found = False
+ except KeyError:
+ idctab_found = False
+ return idctab_found
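Usage note (not part of the patch): the geographic-midpoint calculation used by
computeFootprintCenter() above, applied to a made-up footprint that straddles
0h RA. Averaging unit vectors avoids the 360/0 wrap that a naive mean of RA
values would hit.

    import numpy as np

    edges = np.array([[359.8, 10.0],
                      [0.2, 10.0],
                      [359.8, 10.2],
                      [0.2, 10.2]])      # (RA, Dec) footprint corners in degrees

    alpha = np.deg2rad(edges[:, 0])
    dec = np.deg2rad(edges[:, 1])
    xmean = np.mean(np.cos(dec) * np.cos(alpha))
    ymean = np.mean(np.cos(dec) * np.sin(alpha))
    zmean = np.mean(np.sin(dec))

    crval1 = np.rad2deg(np.arctan2(ymean, xmean)) % 360.0
    crval2 = np.rad2deg(np.arctan2(zmean, np.hypot(xmean, ymean)))
    print(crval1, crval2)                # ~0.0 and ~10.1, not the naive RA mean of 180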