summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--lib/stwcs/__init__.py15
-rw-r--r--lib/stwcs/distortion/mutil.py2
-rw-r--r--lib/stwcs/distortion/utils.py8
-rw-r--r--lib/stwcs/gui/__init__.py19
-rw-r--r--lib/stwcs/gui/apply_headerlet.help40
-rw-r--r--lib/stwcs/gui/apply_headerlet.py54
-rw-r--r--lib/stwcs/gui/archive_headerlet.py69
-rw-r--r--lib/stwcs/gui/attach_headerlet.py36
-rw-r--r--lib/stwcs/gui/delete_headerlet.py50
-rw-r--r--lib/stwcs/gui/extract_headerlet.py58
-rw-r--r--lib/stwcs/gui/headerlet_summary.py49
-rw-r--r--lib/stwcs/gui/pars/apply_headerlet.cfg10
-rw-r--r--lib/stwcs/gui/pars/apply_headerlet.cfgspc12
-rw-r--r--lib/stwcs/gui/pars/archive_headerlet.cfg13
-rw-r--r--lib/stwcs/gui/pars/archive_headerlet.cfgspc13
-rw-r--r--lib/stwcs/gui/pars/attach_headerlet.cfg3
-rw-r--r--lib/stwcs/gui/pars/attach_headerlet.cfgspc3
-rw-r--r--lib/stwcs/gui/pars/delete_headerlet.cfg5
-rw-r--r--lib/stwcs/gui/pars/delete_headerlet.cfgspc5
-rw-r--r--lib/stwcs/gui/pars/extract_headerlet.cfg7
-rw-r--r--lib/stwcs/gui/pars/extract_headerlet.cfgspc7
-rw-r--r--lib/stwcs/gui/pars/headerlet_summary.cfg8
-rw-r--r--lib/stwcs/gui/pars/headerlet_summary.cfgspc8
-rw-r--r--lib/stwcs/gui/pars/restore_headerlet.cfg9
-rw-r--r--lib/stwcs/gui/pars/restore_headerlet.cfgspc11
-rw-r--r--lib/stwcs/gui/pars/updatewcs.cfg8
-rw-r--r--lib/stwcs/gui/pars/updatewcs.cfgspc8
-rw-r--r--lib/stwcs/gui/pars/write_headerlet.cfg16
-rw-r--r--lib/stwcs/gui/pars/write_headerlet.cfgspc16
-rw-r--r--lib/stwcs/gui/restore_headerlet.help41
-rw-r--r--lib/stwcs/gui/restore_headerlet.py49
-rw-r--r--lib/stwcs/gui/updatewcs.py89
-rw-r--r--lib/stwcs/gui/write_headerlet.py75
-rw-r--r--lib/stwcs/updatewcs/__init__.py2
-rw-r--r--lib/stwcs/updatewcs/makewcs.py13
-rw-r--r--lib/stwcs/updatewcs/utils.py53
-rw-r--r--lib/stwcs/wcsutil/altwcs.py126
-rw-r--r--lib/stwcs/wcsutil/headerlet.py1934
-rw-r--r--lib/stwcs/wcsutil/hstwcs.py3
-rw-r--r--lib/stwcs/wcsutil/wcscorr.py268
40 files changed, 2855 insertions, 360 deletions
diff --git a/lib/stwcs/__init__.py b/lib/stwcs/__init__.py
index 5b3f106..3c8879d 100644
--- a/lib/stwcs/__init__.py
+++ b/lib/stwcs/__init__.py
@@ -15,17 +15,15 @@ descriptions in the headers.
"""
from __future__ import division # confidence high
+import os
import distortion
import pywcs
from stsci.tools import fileutil
+from stsci.tools import teal
__docformat__ = 'restructuredtext'
-DEGTORAD = fileutil.DEGTORAD
-RADTODEG = fileutil.RADTODEG
-
-
if False :
__version__ = ''
__svn_version = ''
@@ -37,7 +35,7 @@ if False :
except:
pass
else :
- __version__ = '0.9'
+ __version__ = '0.9.1'
try:
@@ -45,3 +43,10 @@ try:
__setup_datetime__)
except ImportError:
__svn_version__ = 'Unable to determine SVN revision'
+
+try:
+ import gui
+ teal.print_tasknames(gui.__name__, os.path.dirname(gui.__file__))
+ print '\n'
+except:
+ print 'No TEAL-based tasks available for this package!'
diff --git a/lib/stwcs/distortion/mutil.py b/lib/stwcs/distortion/mutil.py
index 8f80b67..4965bd1 100644
--- a/lib/stwcs/distortion/mutil.py
+++ b/lib/stwcs/distortion/mutil.py
@@ -424,7 +424,7 @@ def readWCSCoeffs(header):
refpix['PSCALE'] = _scale
refpix['V2REF'] = 0.
refpix['V3REF'] = 0.
- refpix['THETA'] = RADTODEG(_theta)
+ refpix['THETA'] = np.rad2deg(_theta)
refpix['XDELTA'] = 0.0
refpix['YDELTA'] = 0.0
refpix['DEFAULT_SCALE'] = yes
diff --git a/lib/stwcs/distortion/utils.py b/lib/stwcs/distortion/utils.py
index 7f937f7..1d395cb 100644
--- a/lib/stwcs/distortion/utils.py
+++ b/lib/stwcs/distortion/utils.py
@@ -72,15 +72,15 @@ def computeFootprintCenter(edges):
This algorithm should be more robust against discontinuities at the poles.
"""
- alpha = fileutil.DEGTORAD(edges[:,0])
- dec = fileutil.DEGTORAD(edges[:,1])
+ alpha = np.deg2rad(edges[:,0])
+ dec = np.deg2rad(edges[:,1])
xmean = np.mean(np.cos(dec)*np.cos(alpha))
ymean = np.mean(np.cos(dec)*np.sin(alpha))
zmean = np.mean(np.sin(dec))
- crval1 = fileutil.RADTODEG(np.arctan2(ymean,xmean))%360.0
- crval2 = fileutil.RADTODEG(np.arctan2(zmean,np.sqrt(xmean*xmean+ymean*ymean)))
+ crval1 = np.rad2deg(np.arctan2(ymean,xmean))%360.0
+ crval2 = np.rad2deg(np.arctan2(zmean,np.sqrt(xmean*xmean+ymean*ymean)))
return crval1,crval2
diff --git a/lib/stwcs/gui/__init__.py b/lib/stwcs/gui/__init__.py
new file mode 100644
index 0000000..559d361
--- /dev/null
+++ b/lib/stwcs/gui/__init__.py
@@ -0,0 +1,19 @@
+""" STWCS.GUI
+
+This package defines the TEAL interfaces for public, file-based operations
+provided by the STWCS package.
+
+"""
+from __future__ import division # confidence high
+__docformat__ = 'restructuredtext'
+
+# import modules which define the TEAL interfaces
+import write_headerlet
+import extract_headerlet
+import attach_headerlet
+import delete_headerlet
+import headerlet_summary
+import archive_headerlet
+import restore_headerlet
+import apply_headerlet
+import updatewcs
diff --git a/lib/stwcs/gui/apply_headerlet.help b/lib/stwcs/gui/apply_headerlet.help
new file mode 100644
index 0000000..2ddb2e4
--- /dev/null
+++ b/lib/stwcs/gui/apply_headerlet.help
@@ -0,0 +1,40 @@
+This task applies a headerlet to a science observation to update either the
+PRIMARY WCS or to add it as an alternate WCS.
+
+filename = ""
+hdrlet = ""
+attach = True
+primary = True
+archive = True
+force = False
+wcskey = ""
+wcsname = ""
+verbose = False
+
+Parameters
+----------
+filename: string
+ File name of science observation whose WCS solution will be updated
+hdrlet: string
+ Headerlet file
+attach: boolean
+ True (default): append headerlet to FITS file as a new extension.
+primary: boolean
+ Specify whether or not to replace PRIMARY WCS with WCS from headerlet.
+archive: boolean
+ True (default): before updating, create a headerlet with the
+    old WCS solution.
+force: boolean
+ If True, this will cause the headerlet to replace the current PRIMARY
+ WCS even if it has a different distortion model. [Default: False]
+wcskey: string
+ Key value (A-Z, except O) for this alternate WCS
+ If None, the next available key will be used
+wcsname: string
+ Name to be assigned to this alternate WCS
+ WCSNAME is a required keyword in a Headerlet but this allows the
+ user to change it as desired.
+
+verbose: False or a python logging level
+ (one of 'INFO', 'DEBUG' logging levels)
+ (an integer representing a logging level)
diff --git a/lib/stwcs/gui/apply_headerlet.py b/lib/stwcs/gui/apply_headerlet.py
new file mode 100644
index 0000000..0a93104
--- /dev/null
+++ b/lib/stwcs/gui/apply_headerlet.py
@@ -0,0 +1,54 @@
+import os
+import string
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ if configObj['primary']:
+ # Call function with properly interpreted input parameters
+ # Syntax: apply_headerlet_as_primary(filename, hdrlet, attach=True,
+ # archive=True, force=False, verbose=False)
+ headerlet.apply_headerlet_as_primary(configObj['filename'],
+ configObj['hdrlet'],attach=configObj['attach'],
+ archive=configObj['archive'],force=configObj['force'],
+ verbose=configObj['verbose'])
+ else:
+ wcsname = configObj['wcsname']
+ if wcsname in ['',' ','INDEF']: wcsname = None
+ wcskey = configObj['wcskey']
+ if wcskey == '': wcskey = None
+ # Call function with properly interpreted input parameters
+ # apply_headerlet_as_alternate(filename, hdrlet, attach=True,
+ # wcskey=None, wcsname=None, verbose=False)
+ headerlet.apply_headerlet_as_alternate(configObj['filename'],
+ configObj['hdrlet'], attach=configObj['attach'],
+ wcsname=wcsname, wcskey=wcskey,
+ verbose=configObj['verbose'])
+
diff --git a/lib/stwcs/gui/archive_headerlet.py b/lib/stwcs/gui/archive_headerlet.py
new file mode 100644
index 0000000..707d521
--- /dev/null
+++ b/lib/stwcs/gui/archive_headerlet.py
@@ -0,0 +1,69 @@
+import os
+import string
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += headerlet.archive_as_headerlet.__doc__
+
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ if configObj['hdrname'] in ['',' ','INDEF']:
+ print '='*60
+ print 'ERROR:'
+ print ' No valid "hdrname" parameter value provided!'
+ print ' Please restart this task and provide a value for this parameter.'
+ print '='*60
+ return
+
+ str_kw = ['wcsname','destim','sipname','npolfile','d2imfile',
+ 'descrip','history','author']
+
+ # create dictionary of remaining parameters, deleting extraneous ones
+ # such as those above
+ cdict = configObj.dict()
+ # remove any rules defined for the TEAL interface
+ if cdict.has_key("_RULES_"): del cdict['_RULES_']
+ del cdict['_task_name_']
+ del cdict['filename']
+ del cdict['hdrname']
+
+ # Convert blank string input as None
+ for kw in str_kw:
+ if cdict[kw] == '': cdict[kw] = None
+ if cdict['wcskey'].lower() == 'primary': cdict['wcskey'] = ' '
+
+ # Call function with properly interpreted input parameters
+ # Syntax: archive_as_headerlet(filename, sciext='SCI', wcsname=None, wcskey=None,
+ # hdrname=None, destim=None,
+ # sipname=None, npolfile=None, d2imfile=None,
+ # author=None, descrip=None, history=None,
+ # hdrlet=None, clobber=False)
+ headerlet.archive_as_headerlet(configObj['filename'], configObj['hdrname'],
+ **cdict) \ No newline at end of file
diff --git a/lib/stwcs/gui/attach_headerlet.py b/lib/stwcs/gui/attach_headerlet.py
new file mode 100644
index 0000000..c68bbfe
--- /dev/null
+++ b/lib/stwcs/gui/attach_headerlet.py
@@ -0,0 +1,36 @@
+import os
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += eval('.'.join([__package__,__taskname__,'__doc__']))
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ headerlet.attach_headerlet(configObj['filename'],configObj['hdrlet'])
+
diff --git a/lib/stwcs/gui/delete_headerlet.py b/lib/stwcs/gui/delete_headerlet.py
new file mode 100644
index 0000000..e33bb91
--- /dev/null
+++ b/lib/stwcs/gui/delete_headerlet.py
@@ -0,0 +1,50 @@
+import os
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += eval('.'.join([__package__,__taskname__,'__doc__']))
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ if configObj['hdrname'] == '' and configObj['hdrext'] is None and \
+ configObj['distname'] == '':
+ print '='*60
+ print 'ERROR:'
+ print ' No valid "hdrname", "hdrext" or "distname" parameter value provided!'
+ print ' Please restart this task and provide a value for one of these parameters.'
+ print '='*60
+ return
+
+ # Call function with properly interpreted input parameters
+ # Syntax: delete_headerlet(filename, hdrname=None, hdrext=None, distname=None)
+ headerlet.delete_headerlet(configObj['filename'],
+ hdrname = configObj['hdrname'],
+ hdrext = configObj['hdrext'],
+ distname = configObj['distname'])
+
diff --git a/lib/stwcs/gui/extract_headerlet.py b/lib/stwcs/gui/extract_headerlet.py
new file mode 100644
index 0000000..8751ff3
--- /dev/null
+++ b/lib/stwcs/gui/extract_headerlet.py
@@ -0,0 +1,58 @@
+import os
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += eval('.'.join([__package__,__taskname__,'__doc__']))
+
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ if configObj['output'] in ['',' ',None]:
+ print '='*60
+ print 'ERROR:'
+ print ' No valid "output" parameter value provided!'
+ print ' Please restart this task and provide a value for this parameter.'
+ print '='*60
+ return
+
+ # create dictionary of remaining parameters, deleting extraneous ones
+ # such as those above
+ cdict = configObj.dict()
+ # remove any rules defined for the TEAL interface
+ if cdict.has_key("_RULES_"): del cdict['_RULES_']
+ del cdict['_task_name_']
+ del cdict['filename']
+ del cdict['output']
+
+ # Call function with properly interpreted input parameters
+ # Syntax: extract_headerlet(filename, output, extnum=None, hdrname=None,
+ # clobber=False, verbose=100)
+ headerlet.extract_headerlet(configObj['filename'], configObj['output'],
+ **cdict)
+
diff --git a/lib/stwcs/gui/headerlet_summary.py b/lib/stwcs/gui/headerlet_summary.py
new file mode 100644
index 0000000..28c26c2
--- /dev/null
+++ b/lib/stwcs/gui/headerlet_summary.py
@@ -0,0 +1,49 @@
+import os
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += eval('.'.join([__package__,__taskname__,'__doc__']))
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ # create dictionary of remaining parameters, deleting extraneous ones
+ # such as those above
+ cdict = configObj.dict()
+ # remove any rules defined for the TEAL interface
+ if cdict.has_key("_RULES_"): del cdict['_RULES_']
+ del cdict['_task_name_']
+ del cdict['filename']
+ if headerlet.is_par_blank(cdict['columns']):
+ cdict['columns'] = None
+ # Call function with properly interpreted input parameters
+
+ # Syntax: headerlet_summary(filename,columns=None,pad=2,maxwidth=None,
+ # output=None,clobber=True,quiet=False)
+ headerlet.headerlet_summary(configObj['filename'],**cdict)
+
diff --git a/lib/stwcs/gui/pars/apply_headerlet.cfg b/lib/stwcs/gui/pars/apply_headerlet.cfg
new file mode 100644
index 0000000..ae97297
--- /dev/null
+++ b/lib/stwcs/gui/pars/apply_headerlet.cfg
@@ -0,0 +1,10 @@
+_task_name_ = apply_headerlet
+filename = ""
+hdrlet = ""
+attach = True
+primary = True
+archive = True
+force = False
+wcskey = ""
+wcsname = ""
+verbose = False
diff --git a/lib/stwcs/gui/pars/apply_headerlet.cfgspc b/lib/stwcs/gui/pars/apply_headerlet.cfgspc
new file mode 100644
index 0000000..240dcec
--- /dev/null
+++ b/lib/stwcs/gui/pars/apply_headerlet.cfgspc
@@ -0,0 +1,12 @@
+_task_name_ = string_kw(default="apply_headerlet")
+filename = string_kw(default="", comment="Input file name")
+hdrlet = string_kw(default="", comment="Headerlet FITS filename")
+attach = boolean_kw(default=True, comment= "Append headerlet to FITS file as new extension?")
+primary = boolean_kw(default=True, triggers="_rule1_", comment="Replace PRIMARY WCS with headerlet WCS?")
+archive = boolean_kw(default=True, active_if="_rule1_", comment="Save PRIMARY WCS as new headerlet extension?")
+force = boolean_kw(default=False, active_if="_rule1_", comment="If distortions do not match, force update anyway?")
+wcskey = option_kw("A","B","C","D","E","F","G","H","I","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z","", default="", inactive_if="_rule1_", comment="Apply headerlet as alternate WCS with this letter")
+wcsname = string_kw(default="", inactive_if="_rule1_", comment="Apply headerlet as alternate WCS with this name")
+verbose = boolean_kw(default=False, comment= "Print logging messages?")
+[ _RULES_ ]
+_rule1_ = string_kw(default=True, code='tyfn={"yes":True, "no":False}; OUT = tyfn[VAL]')
diff --git a/lib/stwcs/gui/pars/archive_headerlet.cfg b/lib/stwcs/gui/pars/archive_headerlet.cfg
new file mode 100644
index 0000000..815ce96
--- /dev/null
+++ b/lib/stwcs/gui/pars/archive_headerlet.cfg
@@ -0,0 +1,13 @@
+_task_name_ = archive_headerlet
+filename = ""
+hdrname = ""
+sciext = "SCI"
+wcsname = ""
+wcskey = "PRIMARY"
+destim = ""
+sipname = ""
+npolfile = ""
+d2imfile = ""
+author = ""
+descrip = ""
+history = ""
diff --git a/lib/stwcs/gui/pars/archive_headerlet.cfgspc b/lib/stwcs/gui/pars/archive_headerlet.cfgspc
new file mode 100644
index 0000000..d049769
--- /dev/null
+++ b/lib/stwcs/gui/pars/archive_headerlet.cfgspc
@@ -0,0 +1,13 @@
+_task_name_ = string_kw(default="archive_headerlet")
+filename = string_kw(default="", comment="Input file name")
+hdrname = string_kw(default="", comment="Unique name(HDRNAME) for headerlet")
+sciext = string_kw(default="SCI", comment="EXTNAME of extension with WCS")
+wcsname = string_kw(default="", comment="Name of WCS to be archived")
+wcskey = option_kw("A","B","C","D","E","F","G","H","I","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z","PRIMARY", default="PRIMARY", comment="Archive the WCS with this letter")
+destim = string_kw(default="", comment="Rootname of image to which this headerlet applies ")
+sipname = string_kw(default="", comment="Name for source of polynomial distortion keywords")
+npolfile = string_kw(default="", comment="Name for source of non-polynomial residuals")
+d2imfile = string_kw(default="", comment="Name for source of detector correction table")
+author = string_kw(default="", comment="Author name for creator of headerlet")
+descrip = string_kw(default="", comment="Short description of headerlet solution")
+history = string_kw(default="", comment="Name of ASCII file containing history for headerlet")
diff --git a/lib/stwcs/gui/pars/attach_headerlet.cfg b/lib/stwcs/gui/pars/attach_headerlet.cfg
new file mode 100644
index 0000000..772a66d
--- /dev/null
+++ b/lib/stwcs/gui/pars/attach_headerlet.cfg
@@ -0,0 +1,3 @@
+_task_name_ = attach_headerlet
+filename = ""
+hdrlet = ""
diff --git a/lib/stwcs/gui/pars/attach_headerlet.cfgspc b/lib/stwcs/gui/pars/attach_headerlet.cfgspc
new file mode 100644
index 0000000..03dcf0f
--- /dev/null
+++ b/lib/stwcs/gui/pars/attach_headerlet.cfgspc
@@ -0,0 +1,3 @@
+_task_name_ = string_kw(default="attach_headerlet")
+filename = string_kw(default="", comment="FITS image file name")
+hdrlet = string_kw(default="", comment="Headerlet FITS filename")
diff --git a/lib/stwcs/gui/pars/delete_headerlet.cfg b/lib/stwcs/gui/pars/delete_headerlet.cfg
new file mode 100644
index 0000000..f43befa
--- /dev/null
+++ b/lib/stwcs/gui/pars/delete_headerlet.cfg
@@ -0,0 +1,5 @@
+_task_name_ = delete_headerlet
+filename = ""
+hdrname = ""
+hdrext = None
+distname = ""
diff --git a/lib/stwcs/gui/pars/delete_headerlet.cfgspc b/lib/stwcs/gui/pars/delete_headerlet.cfgspc
new file mode 100644
index 0000000..5695130
--- /dev/null
+++ b/lib/stwcs/gui/pars/delete_headerlet.cfgspc
@@ -0,0 +1,5 @@
+_task_name_ = string_kw(default="delete_headerlet")
+filename = string_kw(default="", comment="FITS image file name")
+hdrname = string_kw(default="", comment="Delete headerlet with this HDRNAME")
+hdrext = integer_or_none_kw(default=None, comment="Delete headerlet from this extension")
+distname = string_kw(default="", comment="Delete *ALL* with this DISTNAME")
diff --git a/lib/stwcs/gui/pars/extract_headerlet.cfg b/lib/stwcs/gui/pars/extract_headerlet.cfg
new file mode 100644
index 0000000..eaf4eff
--- /dev/null
+++ b/lib/stwcs/gui/pars/extract_headerlet.cfg
@@ -0,0 +1,7 @@
+_task_name_ = extract_headerlet
+filename = ""
+output = ""
+extnum = None
+hdrname = ""
+clobber = True
+verbose = False
diff --git a/lib/stwcs/gui/pars/extract_headerlet.cfgspc b/lib/stwcs/gui/pars/extract_headerlet.cfgspc
new file mode 100644
index 0000000..3b173c2
--- /dev/null
+++ b/lib/stwcs/gui/pars/extract_headerlet.cfgspc
@@ -0,0 +1,7 @@
+_task_name_ = string_kw(default="extract_headerlet")
+filename = string_kw(default="", comment="Input file name")
+output = string_kw(default="", comment="Output headerlet FITS filename")
+extnum = integer_or_none_kw(default=None, comment="FITS extension number of headerlet")
+hdrname = string_kw(default="", comment="Unique name(HDRNAME) for headerlet")
+clobber = boolean_kw(default=True, comment= "Overwrite existing headerlet FITS file?")
+verbose = boolean_kw(default=False, comment= "Print logging messages?")
diff --git a/lib/stwcs/gui/pars/headerlet_summary.cfg b/lib/stwcs/gui/pars/headerlet_summary.cfg
new file mode 100644
index 0000000..7203552
--- /dev/null
+++ b/lib/stwcs/gui/pars/headerlet_summary.cfg
@@ -0,0 +1,8 @@
+_task_name_ = headerlet_summary
+filename = ""
+columns = None
+pad = 2
+maxwidth = None
+output = ""
+clobber = True
+quiet = False
diff --git a/lib/stwcs/gui/pars/headerlet_summary.cfgspc b/lib/stwcs/gui/pars/headerlet_summary.cfgspc
new file mode 100644
index 0000000..ce65930
--- /dev/null
+++ b/lib/stwcs/gui/pars/headerlet_summary.cfgspc
@@ -0,0 +1,8 @@
+_task_name_ = string_kw(default="headerlet_summary")
+filename = string_kw(default="", comment="FITS image file name")
+columns = string_kw(default="", comment="Headerlet keyword(s) to be reported")
+pad = integer_kw(default=2, comment="Number of spaces between output columns")
+maxwidth = integer_or_none_kw(default=None, comment="Max width for each column")
+output = string_kw(default="", comment="Name of output file for summary")
+clobber = boolean_kw(default=True, comment="Overwrite previously written summary?")
+quiet = boolean_kw(default=False, comment="Suppress output of summary to STDOUT?")
diff --git a/lib/stwcs/gui/pars/restore_headerlet.cfg b/lib/stwcs/gui/pars/restore_headerlet.cfg
new file mode 100644
index 0000000..a6230a4
--- /dev/null
+++ b/lib/stwcs/gui/pars/restore_headerlet.cfg
@@ -0,0 +1,9 @@
+_task_name_ = restore_headerlet
+filename = ""
+archive = True
+force = False
+distname = ""
+primary = None
+sciext = "SCI"
+hdrname = ""
+hdrext = None
diff --git a/lib/stwcs/gui/pars/restore_headerlet.cfgspc b/lib/stwcs/gui/pars/restore_headerlet.cfgspc
new file mode 100644
index 0000000..3e713ce
--- /dev/null
+++ b/lib/stwcs/gui/pars/restore_headerlet.cfgspc
@@ -0,0 +1,11 @@
+_task_name_ = string_kw(default="restore_headerlet")
+filename = string_kw(default="", comment="Input file name")
+archive = boolean_kw(default=True, comment= "Create headerlets from WCSs being replaced?")
+force = boolean_kw(default=False, comment="If distortions do not match, force update anyway?")
+distname = string_kw(default="", triggers="_rule1_", comment="Restore ALL headerlet extensions with this DISTNAME")
+primary = integer_or_none_kw(default=None, inactive_if="_rule1_", comment="Headerlet extension to restore as new primary WCS")
+sciext = string_kw(default="SCI", inactive_if="_rule1_", comment="EXTNAME of extension with WCS")
+hdrname = string_kw(default="", active_if="_rule1_", comment="HDRNAME of headerlet extension to be restored")
+hdrext = integer_or_none_kw(default=None, active_if="_rule1_", comment="Extension number for headerlet to be restored")
+[ _RULES_ ]
+_rule1_ = string_kw(default=True, code='from stwcs import wcsutil;from stwcs.wcsutil import headerlet;OUT = headerlet.is_par_blank(VAL)')
diff --git a/lib/stwcs/gui/pars/updatewcs.cfg b/lib/stwcs/gui/pars/updatewcs.cfg
new file mode 100644
index 0000000..35360f2
--- /dev/null
+++ b/lib/stwcs/gui/pars/updatewcs.cfg
@@ -0,0 +1,8 @@
+_task_name_ = updatewcs
+input = "*flt.fits"
+extname = "SCI"
+vacorr = True
+tddcorr = True
+npolcorr = True
+d2imcorr = True
+checkfiles = True
diff --git a/lib/stwcs/gui/pars/updatewcs.cfgspc b/lib/stwcs/gui/pars/updatewcs.cfgspc
new file mode 100644
index 0000000..1ce03bf
--- /dev/null
+++ b/lib/stwcs/gui/pars/updatewcs.cfgspc
@@ -0,0 +1,8 @@
+_task_name_ = string_kw(default="updatewcs")
+input = string_kw(default="", comment="Input files (name, suffix, or @list)")
+extname = string_kw(default="SCI", comment="EXTNAME of extensions to be archived")
+vacorr = boolean_kw(default=True, comment= "Apply velocity aberration correction?")
+tddcorr = boolean_kw(default=True, comment= "Apply time dependent distortion correction?")
+npolcorr = boolean_kw(default=True, comment= "Apply lookup table distortion?")
+d2imcorr = boolean_kw(default=True, comment= "Apply detector to image correction?")
+checkfiles = boolean_kw(default=True, comment= "Check format of input files?")
diff --git a/lib/stwcs/gui/pars/write_headerlet.cfg b/lib/stwcs/gui/pars/write_headerlet.cfg
new file mode 100644
index 0000000..7dfa3aa
--- /dev/null
+++ b/lib/stwcs/gui/pars/write_headerlet.cfg
@@ -0,0 +1,16 @@
+_task_name_ = write_headerlet
+filename = ""
+hdrname = ""
+wcskey = "PRIMARY"
+wcsname = ""
+author = ""
+descrip = ""
+history = ""
+output = ""
+clobber = True
+sciext = "SCI"
+destim = ""
+sipname = ""
+npolfile = ""
+d2imfile = ""
+attach = True
diff --git a/lib/stwcs/gui/pars/write_headerlet.cfgspc b/lib/stwcs/gui/pars/write_headerlet.cfgspc
new file mode 100644
index 0000000..41527ab
--- /dev/null
+++ b/lib/stwcs/gui/pars/write_headerlet.cfgspc
@@ -0,0 +1,16 @@
+_task_name_ = string_kw(default="write_headerlet")
+filename = string_kw(default="", comment="Input file name")
+hdrname = string_kw(default="", comment="Unique name(HDRNAME) for headerlet[REQUIRED]")
+wcskey = option_kw("A","B","C","D","E","F","G","H","I","J","K","L","M","N","P","Q","R","S","T","U","V","W","X","Y","Z","PRIMARY", default="PRIMARY", comment="Create headerlet from WCS with this letter")
+wcsname = string_kw(default="", comment="Create headerlet from WCS with this name")
+author = string_kw(default="", comment="Author name for creator of headerlet")
+descrip = string_kw(default="", comment="Short description of headerlet solution")
+history = string_kw(default="", comment="Name of ASCII file containing history for headerlet")
+output = string_kw(default="", comment="Filename for headerlet FITS file")
+clobber = boolean_kw(default=True, comment= "Overwrite existing headerlet FITS file?")
+sciext = string_kw(default="SCI", comment="EXTNAME of extension with WCS")
+destim = string_kw(default="", comment="Rootname of image to which this headerlet applies ")
+sipname = string_kw(default="", comment="Name for source of polynomial distortion keywords")
+npolfile = string_kw(default="", comment="Name for source of non-polynomial residuals")
+d2imfile = string_kw(default="", comment="Name for source of detector correction table")
+attach = boolean_kw(default=True, comment="Create output headerlet FITS file?")
diff --git a/lib/stwcs/gui/restore_headerlet.help b/lib/stwcs/gui/restore_headerlet.help
new file mode 100644
index 0000000..710d457
--- /dev/null
+++ b/lib/stwcs/gui/restore_headerlet.help
@@ -0,0 +1,41 @@
+Restore headerlet extension(s) as either a primary WCS or as alternate WCSs
+
+This task can restore a WCS solution stored in a headerlet extension or
+restore all WCS solutions from all headerlet extensions with the same
+distortion model.
+
+Parameters
+----------
+filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+
+archive: boolean (default True)
+ flag indicating if HeaderletHDUs should be created from the
+ primary and alternate WCSs in fname before restoring all matching
+ headerlet extensions
+
+force: boolean (default:False)
+ When the distortion models of the headerlet and the primary do
+ not match, and archive is False, this flag forces an update of
+ the primary.
+
+distname: string
+ distortion model as represented by a DISTNAME keyword
+
+primary: int or string or None
+ HeaderletHDU to be restored as primary
+ if int - a fits extension
+ if string - HDRNAME
+ if None - use first HeaderletHDU
+
+sciext: string (default: SCI)
+ EXTNAME value of FITS extensions with WCS keywords
+
+hdrname: string
+ HDRNAME keyword of HeaderletHDU
+
+hdrext: int or tuple
+    Headerlet extension number or tuple ('HDRLET',2)
+
diff --git a/lib/stwcs/gui/restore_headerlet.py b/lib/stwcs/gui/restore_headerlet.py
new file mode 100644
index 0000000..9de2d3b
--- /dev/null
+++ b/lib/stwcs/gui/restore_headerlet.py
@@ -0,0 +1,49 @@
+import os
+import string
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ if configObj['distname'] not in ['',' ','INDEF']:
+ # Call function with properly interpreted input parameters
+ # Syntax: restore_all_with_distname(filename, distname, primary,
+ # archive=True, sciext='SCI', verbose=False)
+ headerlet.restore_all_with_distname(configObj['filename'],
+ configObj['distname'],configObj['primary'],
+ archive=configObj['archive'],sciext=configObj['sciext'],
+ verbose=configObj['verbose'])
+ else:
+ # Call function with properly interpreted input parameters
+ # restore_from_headerlet(filename, hdrname=None, hdrext=None,
+ # archive=True, force=False)
+ headerlet.restore_from_headerlet(configObj['filename'],
+ hdrname=configObj['hdrname'],hdrext=configObj['hdrext'],
+ archive=configObj['archive'], force=configObj['force'])
+
diff --git a/lib/stwcs/gui/updatewcs.py b/lib/stwcs/gui/updatewcs.py
new file mode 100644
index 0000000..912bf16
--- /dev/null
+++ b/lib/stwcs/gui/updatewcs.py
@@ -0,0 +1,89 @@
+import os
+
+import pyfits
+from stsci.tools import parseinput
+from stsci.tools import fileutil
+from stsci.tools import teal
+import stwcs
+from stwcs import updatewcs
+from stwcs.wcsutil import convertwcs
+
+allowed_corr_dict = {'vacorr':'VACorr','tddcorr':'TDDCorr','npolcorr':'NPOLCorr','d2imcorr':'DET2IMCorr'}
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = updatewcs.__name__
+__version__ = stwcs.__version__
+
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += eval('.'.join([__package__,__taskname__,'__doc__']))
+
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+
+ # Interpret primary parameters from configObj instance
+ extname = configObj['extname']
+ input = configObj['input']
+
+ # create dictionary of remaining parameters, deleting extraneous ones
+ # such as those above
+ cdict = configObj.dict()
+ # remove any rules defined for the TEAL interface
+ if cdict.has_key("_RULES_"): del cdict['_RULES_']
+ del cdict['_task_name_']
+ del cdict['input']
+ del cdict['extname']
+
+ # parse input
+ input,altfiles = parseinput.parseinput(configObj['input'])
+
+ # Insure that all input files have a correctly archived
+ # set of OPUS WCS keywords
+ # Legacy files from OTFR, like all WFPC2 data from OTFR, will only
+ # have the OPUS WCS keywords archived using a prefix of 'O'
+ # These keywords need to be converted to the Paper I alternate WCS
+ # standard using a wcskey (suffix) of 'O'
+ # If an alternate WCS with wcskey='O' already exists, this will copy
+ # the values from the old prefix-'O' WCS keywords to insure the correct
+ # OPUS keyword values get archived for use with updatewcs.
+ #
+ for file in input:
+ # Check to insure that there is a valid reference file to be used
+ idctab = pyfits.getval(file,'idctab')
+ if not os.path.exists(fileutil.osfn(idctab)):
+ print 'No valid distortion reference file ',idctab,' found in ',file,'!'
+ raise ValueError
+
+ # Re-define 'cdict' to only have switches for steps supported by that instrument
+ # the set of supported steps are defined by the dictionary
+ # updatewcs.apply_corrections.allowed_corrections
+ #
+ for file in input:
+ # get instrument name from input file
+ instr = pyfits.getval(file,'INSTRUME')
+ # make copy of input parameters dict for this file
+ fdict = cdict.copy()
+ # Remove any parameter that is not part of this instrument's allowed corrections
+ for step in allowed_corr_dict:
+ if allowed_corr_dict[step] not in updatewcs.apply_corrections.allowed_corrections[instr]:
+            del fdict[step]
+ # Call 'updatewcs' on correctly archived file
+ updatewcs.updatewcs(file,**fdict)
+
diff --git a/lib/stwcs/gui/write_headerlet.py b/lib/stwcs/gui/write_headerlet.py
new file mode 100644
index 0000000..2072d2e
--- /dev/null
+++ b/lib/stwcs/gui/write_headerlet.py
@@ -0,0 +1,75 @@
+import os
+
+import pyfits
+from stsci.tools import teal
+
+import stwcs
+from stwcs.wcsutil import headerlet
+
+__taskname__ = __name__.split('.')[-1] # needed for help string
+__package__ = headerlet.__name__
+__version__ = stwcs.__version__
+#
+#### Interfaces used by TEAL
+#
+def getHelpAsString(docstring=False):
+ """
+ return useful help from a file in the script directory called __taskname__.help
+ """
+ install_dir = os.path.dirname(__file__)
+ htmlfile = os.path.join(install_dir,'htmlhelp',__taskname__+'.html')
+ helpfile = os.path.join(install_dir,__taskname__+'.help')
+ if docstring or (not docstring and not os.path.exists(htmlfile)):
+ helpString = __taskname__+' Version '+__version__+'\n\n'
+ if os.path.exists(helpfile):
+ helpString += teal.getHelpFileAsString(__taskname__,__file__)
+ else:
+ helpString += headerlet.write_headerlet.__doc__
+
+ else:
+ helpString = 'file://'+htmlfile
+
+ return helpString
+
+def run(configObj=None):
+ if not os.path.exists(configObj['filename']):
+ print '='*60
+ print 'ERROR:'
+ print ' No valid "filename" parameter value provided!'
+ print ' Please check the working directory and restart this task.'
+ print '='*60
+ return
+
+ if configObj['hdrname'] in ['',' ','INDEF']:
+ print '='*60
+ print 'ERROR:'
+ print ' No valid "hdrname" parameter value provided!'
+ print ' Please restart this task and provide a value for this parameter.'
+ print '='*60
+ return
+
+ str_kw = ['wcsname','destim','sipname','npolfile','d2imfile',
+ 'descrip','history','author','output']
+
+ # create dictionary of remaining parameters, deleting extraneous ones
+ # such as those above
+ cdict = configObj.dict()
+ # remove any rules defined for the TEAL interface
+ if cdict.has_key("_RULES_"): del cdict['_RULES_']
+ del cdict['_task_name_']
+ del cdict['filename']
+ del cdict['hdrname']
+
+ # Convert blank string input as None
+ for kw in str_kw:
+ if cdict[kw] == '': cdict[kw] = None
+ if cdict['wcskey'].lower() == 'primary': cdict['wcskey'] = ' '
+
+ # Call function with properly interpreted input parameters
+ # Syntax: write_headerlet(filename, hdrname, output, sciext='SCI',
+ # wcsname=None, wcskey=None, destim=None,
+ # sipname=None, npolfile=None, d2imfile=None,
+ # author=None, descrip=None, history=None,
+ # attach=True, clobber=False)
+ headerlet.write_headerlet(configObj['filename'], configObj['hdrname'],
+ **cdict) \ No newline at end of file
diff --git a/lib/stwcs/updatewcs/__init__.py b/lib/stwcs/updatewcs/__init__.py
index 9ab8ade..179612d 100644
--- a/lib/stwcs/updatewcs/__init__.py
+++ b/lib/stwcs/updatewcs/__init__.py
@@ -100,6 +100,7 @@ def updatewcs(input, vacorr=True, tddcorr=True, npolcorr=True, d2imcorr=True,
cleanWCS(f)
makecorr(f, acorr)
+
return files
def makecorr(fname, allowed_corr):
@@ -359,4 +360,3 @@ def getCorrections(instrument):
print "The following corrections will be performed for instrument %s\n" % instrument
for c in acorr: print c,': ' , apply_corrections.cnames[c]
-
diff --git a/lib/stwcs/updatewcs/makewcs.py b/lib/stwcs/updatewcs/makewcs.py
index e422a4a..c2e709d 100644
--- a/lib/stwcs/updatewcs/makewcs.py
+++ b/lib/stwcs/updatewcs/makewcs.py
@@ -1,6 +1,5 @@
from __future__ import division # confidence high
-from stwcs import DEGTORAD, RADTODEG
import numpy as np
from math import sin, sqrt, pow, cos, asin, atan2,pi
import utils
@@ -149,7 +148,7 @@ class MakeWCS(object):
ref_wcs.idcmodel.shift(offsetx,offsety)
rfx, rfy = ref_wcs.idcmodel.cx, ref_wcs.idcmodel.cy
-
+
ref_wcs.setPscale()
offshift = offsh
dec = ref_wcs.wcs.crval[1]
@@ -244,10 +243,10 @@ def troll(roll, dec, v2, v3):
Generic Conversion at STScI.
"""
# Convert all angles to radians
- _roll = DEGTORAD(roll)
- _dec = DEGTORAD(dec)
- _v2 = DEGTORAD(v2 / 3600.)
- _v3 = DEGTORAD(v3 / 3600.)
+ _roll = np.deg2rad(roll)
+ _dec = np.deg2rad(dec)
+ _v2 = np.deg2rad(v2 / 3600.)
+ _v3 = np.deg2rad(v3 / 3600.)
# compute components
sin_rho = sqrt((pow(sin(_v2),2)+pow(sin(_v3),2)) - (pow(sin(_v2),2)*pow(sin(_v3),2)))
@@ -260,7 +259,7 @@ def troll(roll, dec, v2, v3):
B = atan2( sin(A)*cos(_dec), (sin(_dec)*sin_rho - cos(_dec)*cos(rho)*cos(A)))
# compute final value
- troll = RADTODEG(pi - (gamma+B))
+ troll = np.rad2deg(pi - (gamma+B))
return troll
diff --git a/lib/stwcs/updatewcs/utils.py b/lib/stwcs/updatewcs/utils.py
index b7fe7c4..b761efc 100644
--- a/lib/stwcs/updatewcs/utils.py
+++ b/lib/stwcs/updatewcs/utils.py
@@ -57,7 +57,7 @@ def extract_rootname(kwvalue):
else:
rootname = fname
- return rootname
+ return rootname.strip()
def construct_distname(fobj,wcsobj):
"""
@@ -73,15 +73,24 @@ def construct_distname(fobj,wcsobj):
if idcname is None and wcsobj.sip is not None:
idcname = 'UNKNOWN'
- npolname = extract_rootname(fobj[0].header.get('NPOLFILE', "NONE"))
+ npolname = build_npolname(fobj)
if npolname is None and wcsobj.cpdis1 is not None:
npolname = 'UNKNOWN'
- d2imname = extract_rootname(fobj[0].header.get('D2IMFILE', "NONE"))
+ d2imname = build_d2imname(fobj)
if d2imname is None and wcsobj.det2im is not None:
d2imname = 'UNKNOWN'
- sipname = '%s_%s'%(fobj[0].header.get('rootname',""),idcname)
+ sipname = build_sipname(fobj)
+
+ distname = build_distname(sipname,npolname,d2imname)
+ return {'DISTNAME':distname,'SIPNAME':sipname}
+
+def build_distname(sipname,npolname,d2imname):
+ """
+ Core function to build DISTNAME keyword value without the HSTWCS input.
+ """
+
distname = sipname.strip()
if npolname != 'NONE' or d2imname != 'NONE':
if d2imname != 'NONE':
@@ -89,4 +98,38 @@ def construct_distname(fobj,wcsobj):
else:
distname+='-'+npolname.strip()
- return {'DISTNAME':distname,'SIPNAME':sipname} \ No newline at end of file
+ return distname
+
+def build_sipname(fobj):
+ try:
+ sipname = fobj[0].header["SIPNAME"]
+ except KeyError:
+ try:
+ sipname = fobj[0].header['rootname']+'_'+ \
+ extract_rootname(fobj[0].header["IDCTAB"])
+ except KeyError:
+ if 'A_ORDER' in fobj[1].header or 'B_ORDER' in fobj[1].header:
+ sipname = 'UNKNOWN'
+ else:
+ sipname = 'NOMODEL'
+ return sipname
+
+def build_npolname(fobj):
+ try:
+ npolfile = extract_rootname(fobj[0].header["NPOLFILE"])
+ except KeyError:
+        if countExtn(fobj, 'WCSDVARR'):
+ npolfile = 'UNKNOWN'
+ else:
+ npolfile = 'NOMODEL'
+ return npolfile
+
+def build_d2imname(fobj):
+ try:
+ d2imfile = extract_rootname(fobj[0].header["D2IMFILE"])
+ except KeyError:
+        if countExtn(fobj, 'D2IMARR'):
+ d2imfile = 'UNKNOWN'
+ else:
+ d2imfile = 'NOMODEL'
+ return d2imfile \ No newline at end of file
diff --git a/lib/stwcs/wcsutil/altwcs.py b/lib/stwcs/wcsutil/altwcs.py
index 1f6dc01..4f25abb 100644
--- a/lib/stwcs/wcsutil/altwcs.py
+++ b/lib/stwcs/wcsutil/altwcs.py
@@ -9,6 +9,7 @@ from stsci.tools import fileutil as fu
altwcskw = ['WCSAXES', 'CRVAL', 'CRPIX', 'PC', 'CDELT', 'CD', 'CTYPE', 'CUNIT',
'PV', 'PS']
+altwcskw_extra = ['LATPOLE','LONPOLE','RESTWAV','RESTFRQ']
# file operations
def archiveWCS(fname, ext=None, wcskey=" ", wcsname=" ", reusekey=False):
@@ -98,24 +99,23 @@ def archiveWCS(fname, ext=None, wcskey=" ", wcsname=" ", reusekey=False):
choose another wcsname." %wcsname)
else:
wkey = next_wcskey(f[wcsext].header)
- wname = wcsname
+ if wcsname.strip():
+ wname = wcsname
+ else:
+ # determine which WCSNAME needs to be replicated in archived WCS
+ wname = wcsnames(f[wcsext].header)[' ']
else:
wkey = wcskey
wname = wcsname
for e in ext:
- try:
- w = pywcs.WCS(f[e].header, fobj=f)
- except:
- # this should be revisited, should catch specific errors
- # KeyError - not a valid key, ValueError - not a valid extension, etc
+ hwcs = readAltWCS(f,e,wcskey=' ')
+ if hwcs is None:
continue
- hwcs = w.to_header()
+
wcsnamekey = 'WCSNAME' + wkey
f[e].header.update(key=wcsnamekey, value=wname)
-
- if w.wcs.has_cd():
- pc2cd(hwcs)
+
try:
old_wcsname=hwcs.pop('WCSNAME')
except:
@@ -276,17 +276,13 @@ def deleteWCS(fname, ext=None, wcskey=" ", wcsname=" "):
closefobj(fname, fobj)
raise KeyError("Could not find alternate WCS with key %s in this file" % wcskey)
wkey = wcskey
-
+
prexts = []
for i in ext:
hdr = fobj[i].header
- try:
- w = pywcs.WCS(hdr, fobj, key=wkey)
- except KeyError:
+        hwcs = readAltWCS(fobj,i,wcskey=wkey)
+ if hwcs is None:
continue
- hwcs = w.to_header()
- if w.wcs.has_cd():
- pc2cd(hwcs, key=wkey)
for k in hwcs.keys():
del hdr[k]
#del hdr['ORIENT'+wkey]
@@ -323,17 +319,11 @@ def _restore(fobj, ukey, fromextnum, toextnum=None, fromextnam=None, toextnam=No
toextension =toextnum
else:
toextension = fromextension
-
- try:
- nwcs = pywcs.WCS(fobj[fromextension].header, fobj=fobj, key=ukey)
- except KeyError:
- print 'restoreWCS: Could not read WCS with key %s' %ukey
- print ' Skipping %s[%s]' % (fobj.filename(), str(fromextension))
+
+ hwcs = readAltWCS(fobj,fromextension,wcskey=ukey,verbose=True)
+ if hwcs is None:
return
- hwcs = nwcs.to_header()
- if nwcs.wcs.has_cd():
- pc2cd(hwcs, key=ukey)
for k in hwcs.keys():
key = k[:-1]
if key in fobj[toextension].header.keys():
@@ -367,6 +357,77 @@ def _getheader(fobj, ext):
hdr = fobj[ext].header
return hdr
+def readAltWCS(fobj, ext, wcskey=' ',verbose=False):
+ """ Reads in alternate WCS from specified extension
+
+ Parameters
+ ----------
+ fobj: string, pyfits.HDUList
+ fits filename or pyfits file object
+ containing alternate/primary WCS(s) to be converted
+ wcskey: string [" ",A-Z]
+ alternate/primary WCS key that will be replaced by the new key
+ ext: int
+ extension number
+
+ Returns
+ -------
+ hdr: pyfits.Header
+ header object with ONLY the keywords for specified alternate WCS
+ """
+ if isinstance(fobj, str):
+ fobj = pyfits.open(fobj)
+
+ hdr = _getheader(fobj,ext)
+ try:
+ nwcs = pywcs.WCS(hdr, fobj=fobj, key=wcskey)
+ except KeyError:
+ if verbose:
+ print 'readAltWCS: Could not read WCS with key %s' %wcskey
+ print ' Skipping %s[%s]' % (fobj.filename(), str(ext))
+ return None
+ hwcs = nwcs.to_header()
+
+ if nwcs.wcs.has_cd():
+ hwcs = pc2cd(hwcs, key=wcskey)
+
+ return hwcs
+
+def convertAltWCS(fobj,ext,oldkey=" ",newkey=' '):
+ """
+ Translates the alternate/primary WCS with one key to an alternate/primary WCS with
+ another key.
+
+ Parameters
+ ----------
+ fobj: string, pyfits.HDUList, or pyfits.Header
+ fits filename, pyfits file object or pyfits header
+ containing alternate/primary WCS(s) to be converted
+ ext: int
+ extension number
+ oldkey: string [" ",A-Z]
+ alternate/primary WCS key that will be replaced by the new key
+ newkey: string [" ",A-Z]
+ new alternate/primary WCS key
+
+ Returns
+ -------
+ hdr: pyfits.Header
+ header object with keywords renamed from oldkey to newkey
+ """
+ hdr = readAltWCS(fobj,ext,wcskey=oldkey)
+ if hdr is None:
+ return None
+ # Converting WCS to new key
+ for card in hdr:
+ if oldkey == ' ' or oldkey == '':
+ cname = card
+ else:
+            cname = card[:-1]
+ hdr.rename_key(card,cname+newkey,force=True)
+
+ return hdr
+
def wcskeys(fobj, ext=None):
"""
Returns a list of characters used in the header for alternate
@@ -383,7 +444,12 @@ def wcskeys(fobj, ext=None):
_check_headerpars(fobj, ext)
hdr = _getheader(fobj, ext)
names = hdr["WCSNAME*"]
- return [key.split('WCSNAME')[1].upper() for key in names.keys()]
+ d = []
+ for key in names.keys():
+ wkey = key.replace('WCSNAME','')
+ if wkey == '': wkey = ' '
+ d.append(wkey)
+ return d
def wcsnames(fobj, ext=None):
"""
@@ -403,7 +469,9 @@ def wcsnames(fobj, ext=None):
names = hdr["WCSNAME*"]
d = {}
for c in names:
- d[c.key[-1]] = c.value
+ wkey = c.key.replace('WCSNAME','')
+ if wkey == '': wkey = ' '
+ d[wkey] = c.value
return d
def available_wcskeys(fobj, ext=None):
@@ -425,7 +493,7 @@ def available_wcskeys(fobj, ext=None):
all_keys = list(string.ascii_uppercase)
used_keys = wcskeys(hdr)
try:
- used_keys.remove("")
+ used_keys.remove(" ")
except ValueError:
pass
[all_keys.remove(key) for key in used_keys]
diff --git a/lib/stwcs/wcsutil/headerlet.py b/lib/stwcs/wcsutil/headerlet.py
index 4c29234..194e16b 100644
--- a/lib/stwcs/wcsutil/headerlet.py
+++ b/lib/stwcs/wcsutil/headerlet.py
@@ -1,6 +1,9 @@
from __future__ import division
import logging
import os
+import string
+import textwrap
+import copy
import tarfile
import tempfile
import time
@@ -14,14 +17,26 @@ import altwcs
import wcscorr
from hstwcs import HSTWCS
from mappings import basic_wcs
+from stwcs.updatewcs import utils
+
from stsci.tools.fileutil import countExtn
from stsci.tools import fileutil as fu
+#### Logging support functions
module_logger = logging.getLogger('headerlet')
import atexit
atexit.register(logging.shutdown)
+FITS_STD_KW = ['XTENSION', 'BITPIX', 'NAXIS', 'PCOUNT',
+ 'GCOUNT','EXTNAME', 'EXTVER', 'ORIGIN',
+ 'INHERIT', 'DATE', 'IRAF-TLM']
+
+DEFAULT_SUMMARY_COLS = ['HDRNAME','WCSNAME','DISTNAME','AUTHOR','DATE',
+ 'SIPNAME','NPOLFILE','D2IMFILE','DESCRIP']
+COLUMN_DICT = {'vals':[],'width':[]}
+COLUMN_FMT = '{:<{width}}'
+
def setLogger(logger, level, mode='w'):
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
log_filename = 'headerlet.log'
@@ -30,18 +45,212 @@ def setLogger(logger, level, mode='w'):
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.setLevel(level)
+
+def initLogging(function_name, logger = None, level=100, verbose=False, logmode='w'):
+ """ Initialize logging for a function
+
+ Parameters
+ ----------
+ function_name: string
+ Name of function which will be recorded in log
+ level: int
+ Logging level
+ verbose: bool
+ If True, will set logging to report more activity
+ """
+ if logger is None:
+ logger = module_logger
+
+ if verbose:
+ setLogger(logger, verbose, mode=logmode)
+ else:
+ logger.setLevel(level)
+
+ logger.info("Starting %s: %s" % (function_name, time.asctime()))
+
-def hdrnames(fobj):
+#### Utility functions
+def is_par_blank(par):
+ return par in ['',' ','INDEF',"None",None]
+
+def parseFilename(fname,mode='readonly'):
"""
- Returns a list of HDRNAME keywords from all HeaderletHDU
- extensions in a science file.
+ Interprets the input as either a filename of a file that needs to be opened
+ or a PyFITS object.
+
+ Parameters
+ ----------
+ fname: string, pyfits.HDUList
+ Input pointing to a file or PyFITS object. An input filename (str) will
+ be expanded as necessary to interpret any environmental variables
+ included in the filename.
+ mode: string
+ Specifies what PyFITS mode to use when opening the file, if it needs
+ to open the file at all [Default: 'readonly']
+
+ Returns
+ -------
+ fobj: pyfits.HDUList
+ PyFITS handle for input file
+ fname: string
+ Name of input file
+ close_fobj: bool
+ Flag specifying whether or not fobj needs to be closed since it was
+ opened by this function. This allows a program to know whether they
+ need to worry about closing the PyFITS object as opposed to letting
+ the higher level interface close the object.
+ """
+ close_fobj = False
+ if not isinstance(fname,list):
+ if isinstance(fname,str):
+ fname = fu.osfn(fname)
+ fobj = pyfits.open(fname,mode=mode)
+ close_fobj = True
+ else:
+ fobj = fname
+ if hasattr(fobj,'filename'):
+ fname = fobj.filename()
+ else:
+ fname = ''
+ return fobj,fname,close_fobj
+def getHeaderletKwNames(fobj,kw='HDRNAME'):
+ """
+ Returns a list of specified keywords from all HeaderletHDU
+ extensions in a science file.
+
Parameters
----------
fobj: string, pyfits.HDUList
+ kw: str
+ Name of keyword to be read and reported
"""
+ fobj,fname,open_fobj = parseFilename(fobj)
+
+ hdrnames = []
+ for ext in fobj:
+ if isinstance(ext,pyfits.hdu.base.NonstandardExtHDU):
+ hdrnames.append(ext.header[kw])
+ if open_fobj:
+ fobj.close()
+
+ return hdrnames
+
+
+
+def findHeaderletHDUs(fobj, hdrext=None, hdrname=None, distname=None, strict=True):
+ """
+ Returns all HeaderletHDU extensions in a science file that matches
+ the inputs specified by the user. If no hdrext, hdrname or distname are
+ specified, this function will return a list of all HeaderletHDU objects.
+
+ Parameters
+ ----------
+ fobj: string, pyfits.HDUList
+ Name of FITS file or open pyfits object (pyfits.HDUList instance)
+ hdrext: int, tuple or None
+ index number(EXTVER) or extension tuple of HeaderletHDU to be returned
+ hdrname: string
+ value of HDRNAME for HeaderletHDU to be returned
+ distname: string
+ value of DISTNAME for HeaderletHDUs to be returned
+ strict: bool [Default: True]
+ Specifies whether or not at least one parameter needs to be provided
+ If False, all extension indices returned if hdrext, hdrname and distname
+ are all None. If True and hdrext, hdrname, and distname are all None,
+ raise an Exception requiring one to be specified.
+
+ Returns
+ -------
+ hdrlets: list
+ A list of all matching HeaderletHDU extension indices (could be just one)
+ """
+ get_all = False
+ if hdrext is None and hdrname is None and distname is None:
+ if not strict:
+ get_all = True
+ else:
+ print '====================================================='
+ print 'No valid Headerlet extension specified.'
+ print ' Either "hdrname", "hdrext", or "distname" needs to be specified.'
+ print '====================================================='
+ raise ValueError
+
+ fobj,fname,open_fobj = parseFilename(fobj)
+
+ hdrlets = []
+ if hdrext is not None and isinstance(hdrext,int):
+ if hdrext in range(len(fobj)): # insure specified hdrext is in fobj
+ if isinstance(fobj[hdrext],pyfits.hdu.base.NonstandardExtHDU) and \
+ fobj[hdrext].header['EXTNAME'] == 'HDRLET':
+ hdrlets.append(hdrext)
+ else:
+ for ext in fobj:
+ if isinstance(ext,pyfits.hdu.base.NonstandardExtHDU):
+ if get_all:
+ hdrlets.append(fobj.index(ext))
+ else:
+ if hdrext is not None:
+ if isinstance(hdrext,tuple):
+ hdrextname = hdrext[0]
+ hdrextnum = hdrext[1]
+ else:
+ hdrextname = 'HDRLET'
+ hdrextnum = hdrext
+ hdrext_match = ((hdrext is not None) and
+ (hdrextnum == ext.header['EXTVER']) and
+ (hdrextname == ext.header['EXTNAME']))
+ hdrname_match = ((hdrname is not None) and
+ (hdrname == ext.header['HDRNAME']))
+ distname_match = ((distname is not None) and
+ (distname == ext.header['DISTNAME']))
+ if hdrext_match or hdrname_match or distname_match:
+ hdrlets.append(fobj.index(ext))
+
+ if open_fobj:
+ fobj.close()
+
+ if len(hdrlets) == 0:
+ if hdrname:
+ kwerr = 'hdrname'
+ kwval = hdrname
+ elif hdrext:
+ kwerr = 'hdrext'
+ kwval = hdrext
+ else:
+ kwerr = 'distname'
+ kwval = distname
+ print '====================================================='
+ print 'No valid Headerlet extension found!'
+ print ' "%s" = %s not found in %s.'%(kwerr,kwval,fname)
+ print '====================================================='
+ raise ValueError
+
+ return hdrlets
+
+def verifyHdrnameIsUnique(fobj,hdrname):
+ """
+ Verifies that no other HeaderletHDU extension has the specified hdrname.
+
+ Parameters
+ ----------
+ fobj: string, pyfits.HDUList
+ Name of FITS file or open pyfits object (pyfits.HDUList instance)
+ hdrname: string
+ value of HDRNAME for HeaderletHDU to be compared as unique
+
+ Returns
+ -------
+ unique: bool
+ If True, no other HeaderletHDU has the specified HDRNAME value
+ """
+ hdrnames_list = getHeaderletKwNames(fobj)
+ unique = not(hdrname in hdrnames_list)
+
+ return unique
+
def isWCSIdentical(scifile, file2, verbose=False):
"""
Compares the WCS solution of 2 files.
@@ -68,12 +277,7 @@ def isWCSIdentical(scifile, file2, verbose=False):
- Velocity aberation
"""
- if verbose:
- setLogger(module_logger, verbose)
- else:
- module_logger.setLevel(100)
-
- module_logger.info("Starting isWCSIdentical: %s" % time.asctime())
+ initLogging('isWCSIdentical', level=100, verbose=False)
result = True
numsci1 = max(countExtn(scifile), countExtn(scifile, 'SIPWCS'))
@@ -140,37 +344,447 @@ def isWCSIdentical(scifile, file2, verbose=False):
return result
+def updateRefFiles(source, dest, verbose=False):
+ """
+ Update the reference files name in the primary header of 'dest'
+ using values from 'source'
-def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ", wcsname=None,
- sipname=None, npolfile=None, d2imfile=None,
- verbose=100, logmode='w'):
+ Parameters
+ ----------
+ source: pyfits.Header.ascardlist
+ dest: pyfits.Header.ascardlist
+ """
+ module_logger.info("Updating reference files")
+ phdukw = {'IDCTAB': True,
+ 'NPOLFILE': True,
+ 'D2IMFILE': True}
+
+ try:
+ wind = dest.index_of('HISTORY')
+ except KeyError:
+ wind = len(dest)
+ for key in phdukw.keys():
+ try:
+ value = source[key]
+ dest.insert(wind, value)
+ except KeyError:
+ # TODO: I don't understand what the point of this is. Is it meant
+ # for logging purposes? Right now it isn't used.
+ phdukw[key] = False
+ return phdukw
+
+def getRootname(fname):
+ """
+ returns the value of ROOTNAME or DESTIM
+ """
+
+ try:
+ rootname = pyfits.getval(fname, 'ROOTNAME')
+ except KeyError:
+ rootname = pyfits.getval(fname, 'DESTIM')
+ return rootname
+
+def mapFitsExt2HDUListInd(fname, extname):
+ """
+ Map FITS extensions with 'EXTNAME' to HDUList indexes.
+ """
+
+ f,fname,close_fobj = parseFilename(fname)
+
+ d = {}
+ for hdu in f:
+ if 'EXTNAME' in hdu.header and hdu.header['EXTNAME'] == extname:
+ extver = hdu.header['EXTVER']
+ d[(extname, extver)] = f.index_of((extname, extver))
+ if close_fobj:
+ f.close()
+ return d
+
+def getHeaderletObj(fname, sciext='SCI', wcskey=' ',wcsname=None,hdrname=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ rms_ra = None, rms_dec = None, nmatch=None, catalog=None,
+ hdrletnum=1, verbose=100):
+ """
+ Generate and return a HeaderletHDU with EXTVER and HDRNAME set
+
+ """
+ fobj,fname,open_fobj = parseFilename(fname)
+
+ # Translate 'wcskey' value for PRIMARY WCS to valid altwcs value of ' '
+ if wcskey == 'PRIMARY': wcskey = ' '
+
+ hlt = create_headerlet(fobj, sciext=sciext,
+ wcskey=wcskey, wcsname=wcsname, hdrname=hdrname,
+ sipname=sipname, npolfile=npolfile, d2imfile=d2imfile,
+ author=author, descrip=descrip, history=history,
+ rms_ra=rms_ra, rms_dec=rms_dec, nmatch=nmatch,
+ catalog=catalog, verbose=verbose, logmode='a')
+ if open_fobj:
+ fobj.close()
+
+ return hlt
+
+def print_summary(summary_cols, summary_dict, pad=2, maxwidth=None, idcol=None,
+ output=None, clobber=True, quiet=False ):
+ """
+ Print out summary dictionary to STDOUT, and possibly an output file
+
+ """
+ nrows = None
+ if idcol:
+ nrows = len(idcol['vals'])
+
+ # Find max width of each column
+ column_widths = {}
+ for kw in summary_dict:
+ colwidth = np.array(summary_dict[kw]['width']).max()
+ if maxwidth:
+ colwidth = min(colwidth,maxwidth)
+ column_widths[kw] = colwidth + pad
+ if nrows is None:
+ nrows = len(summary_dict[kw]['vals'])
+
+ # print rows now
+ outstr = ''
+ # Start with column names
+ if idcol:
+ outstr += COLUMN_FMT.format(idcol['name'],width=idcol['width']+pad)
+ for kw in summary_cols:
+ outstr += COLUMN_FMT.format(kw,width=column_widths[kw])
+ outstr += '\n'
+ # Now, add a row for each headerlet
+ for row in range(nrows):
+ if idcol:
+ outstr += COLUMN_FMT.format(idcol['vals'][row],width=idcol['width']+pad)
+ for kw in summary_cols:
+ val = summary_dict[kw]['vals'][row][:(column_widths[kw]-pad)]
+ outstr += COLUMN_FMT.format(val, width=column_widths[kw])
+ outstr += '\n'
+ if not quiet:
+ print outstr
+
+ # If specified, write info to separate text file
+ write_file = False
+ if output:
+ output = fu.osfn(output) # Expand any environment variables in filename
+ write_file = True
+ if os.path.exists(output):
+ if clobber:
+ os.remove(output)
+ else:
+ print 'WARNING: Not writing results to file!'
+ print ' Output text file ',output,' already exists.'
+ print ' Set "clobber" to True or move file before trying again.'
+ write_file = False
+ if write_file:
+ fout = open(output,mode='w')
+ fout.write(outstr)
+ fout.close()
+
+#### Private utility functions
+def _createPrimaryHDU(destim, hdrname, distname, wcsname,
+ sipname, npolfile, d2imfile, upwcsver, pywcsver,
+ author, descrip, history):
+ if author is None: author = ''
+ if descrip is None: descrip = ''
+ if history is None: history = ''
+ phdu = pyfits.PrimaryHDU()
+ phdu.header.update('DESTIM', destim,
+ comment='Destination observation root name')
+ phdu.header.update('HDRNAME', hdrname, comment='Headerlet name')
+ fmt="%Y-%m-%dT%H:%M:%S"
+ phdu.header.update('DATE', time.strftime(fmt),
+ comment='Date FITS file was generated')
+ phdu.header.update('WCSNAME', wcsname, comment='WCS name')
+ phdu.header.update('DISTNAME', distname, comment='Distortion model name')
+ phdu.header.update('SIPNAME', sipname, comment='origin of SIP polynomial distortion model')
+ phdu.header.update('NPOLFILE', npolfile, comment='origin of non-polynmial distortion model')
+ phdu.header.update('D2IMFILE', d2imfile, comment='origin of detector to image correction')
+ phdu.header.update('AUTHOR', author, comment='headerlet created by this user')
+ phdu.header.update('DESCRIP', descrip, comment='Short description of headerlet solution')
+
+ # clean up history string in order to remove whitespace characters that
+ # would cause problems with FITS
+ if isinstance(history,list):
+ history_str = ''
+ for line in history:
+ history_str += line
+ else:
+ history_str = history
+ history_lines = textwrap.wrap(history_str,width=70)
+ for hline in history_lines:
+ phdu.header.add_history(hline)
+
+ phdu.header.ascard.append(upwcsver)
+ phdu.header.ascard.append(pywcsver)
+ return phdu
+
+
+#### Public Interface functions
+def extract_headerlet(filename, output, extnum=None, hdrname=None,
+ clobber=False, verbose=100):
"""
- Create a headerlet from a WCS in a science file
- If both wcskey and wcsname are given they should match, if not
+ Finds a headerlet extension in a science file and writes it out as
+ a headerlet FITS file.
+
+ If both hdrname and extnum are given they should match, if not
raise an Exception
+
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ output: string
+ Filename or just rootname of output headerlet FITS file
+ If string does not contain '.fits', it will create a filename with
+ '_hlet.fits' suffix
+ extnum: int
+ Extension number which contains the headerlet to be written out
+ hdrname: string
+ Unique name for headerlet, stored as the HDRNAME keyword
+ It stops if a value is not provided and no extnum has been specified
+ clobber: bool
+ If output file already exists, this parameter specifies whether or not
+ to overwrite that file [Default: False]
+ verbose: int
+ python logging level
+
+ """
+
+ initLogging('extract_headerlet', verbose=verbose)
+ fobj,fname,close_fobj = parseFilename(filename)
+
+ if hdrname in ['',' ',None, 'INDEF']:
+ if extnum is None:
+ print 'No valid headerlet specified! Quitting...'
+ if close_fobj: fobj.close()
+ else:
+ hdrhdu = fobj[extnum]
+ else:
+ extnum = findHeaderletHDUs(fobj,hdrname=hdrname)[0]
+ hdrhdu = fobj[extnum]
+ hdrlet = hdrhdu.headerlet
+
+ if '.fits' in output:
+ outname = output
+ else:
+ outname = '%s_hlet.fits'%(output)
+ hdrlet.write_to(outname)
+
+ if close_fobj:
+ fobj.close()
+
+def write_headerlet(filename, hdrname, output=None, sciext='SCI',
+ wcsname=None, wcskey=None, destim=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ rms_ra=None, rms_dec=None, nmatch=None, catalog=None,
+ attach=True, clobber=False):
+
+ """
+ Save a WCS as a headerlet FITS file.
+
+ This function will create a headerlet, write out the headerlet to a
+ separate headerlet file, then, optionally, attach it as an extension
+ to the science image (if it has not already been archived)
+
+ Either wcsname or wcskey must be provided; if both are given, they must
+ match a valid WCS.
+
+ Updates wcscorr if necessary.
Parameters
----------
- fname: string or HDUList
- science file
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ hdrname: string
+ Unique name for this headerlet, stored as HDRNAME keyword
+ output: string or None
+ Filename or just rootname of output headerlet FITS file
+ If string does not contain '.fits', it will create a filename with
+ '_hlet.fits' suffix
+ If None, a default filename based on the input filename will be
+ generated for the headerlet FITS filename
+ sciext: string
+ name (EXTNAME) of extension that contains WCS to be saved
+ wcsname: string
+ name of WCS to be archived, if " ": stop
+ wcskey: one of A...Z or " " or "PRIMARY"
+ if " " or "PRIMARY" - archive the primary WCS
+ destim: string
+ DESTIM keyword
+ if None, use ROOTNAME or science file name
+ sipname: string or None (default)
+ Name of unique file where the polynomial distortion coefficients were
+ read from. If None, the behavior is:
+ The code looks for a keyword 'SIPNAME' in the science header
+ If not found, for HST it defaults to 'IDCTAB'
+ If there is no SIP model the value is 'NOMODEL'
+ If there is a SIP model but no SIPNAME, it is set to 'UNKNOWN'
+ npolfile: string or None (default)
+ Name of a unique file where the non-polynomial distortion was stored.
+ If None:
+ The code looks for 'NPOLFILE' in science header.
+ If 'NPOLFILE' was not found and there is no npol model, it is set to 'NOMODEL'
+ If npol model exists, it is set to 'UNKNOWN'
+ d2imfile: string
+ Name of a unique file where the detector to image correction was
+ stored. If None:
+ The code looks for 'D2IMFILE' in the science header.
+ If 'D2IMFILE' is not found and there is no d2im correction,
+ it is set to 'NOMODEL'
+ If d2im correction exists, but 'D2IMFILE' is missing from science
+ header, it is set to 'UNKNOWN'
+ author: string
+ Name of user who created the headerlet, added as 'AUTHOR' keyword
+ to headerlet PRIMARY header
+ descrip: string
+ Short description of the solution provided by the headerlet
+ This description will be added as the single 'DESCRIP' keyword
+ to the headerlet PRIMARY header
+ history: filename, string or list of strings
+ Long (possibly multi-line) description of the solution provided
+ by the headerlet. These comments will be added as 'HISTORY' cards
+ to the headerlet PRIMARY header
+ If filename is specified, it will format and attach all text from
+ that file as the history.
+ attach: bool
+ Specify whether or not to attach this headerlet as a new extension
+ It will verify that no other headerlet extension has been created with
+ the same 'hdrname' value.
+ clobber: bool
+ If output file already exists, this parameter specifies whether or not
+ to overwrite that file [Default: False]
+ """
+ initLogging('write_headerlet')
+
+ if wcsname in [None,' ','','INDEF'] and wcskey is None:
+ print '='*60
+ print '[write_headerlet]'
+ print 'No valid WCS found in %s.'%filename
+ print ' A valid value for either "wcsname" or "wcskey" '
+ print ' needs to be specified. '
+ print '='*60
+ raise ValueError
+
+ if hdrname in [None, ' ','']:
+ print '='*60
+ print '[write_headerlet]'
+ print 'No valid name for this headerlet was provided for %s.'%filename
+ print ' A valid value for "hdrname" needs to be specified. '
+ print '='*60
+ raise ValueError
+
+ # Translate 'wcskey' value for PRIMARY WCS to valid altwcs value of ' '
+ if wcskey == 'PRIMARY': wcskey = ' '
+
+ if attach: umode = 'update'
+ else: umode='readonly'
+
+ fobj,fname,close_fobj = parseFilename(filename,mode=umode)
+
+ # Insure that WCSCORR table has been created with all original
+ # WCS's recorded prior to adding the headerlet WCS
+ wcscorr.init_wcscorr(fobj)
+
+ numhlt = countExtn(fobj, 'HDRLET')
+
+ if wcsname is None:
+ scihdr = fobj[sciext,1].header
+ wcsname = scihdr['wcsname'+wcskey]
+
+ hdrletobj = getHeaderletObj(fobj,sciext=sciext,
+ wcsname=wcsname, wcskey=wcskey,
+ hdrname=hdrname,
+ sipname=sipname, npolfile=npolfile,
+ d2imfile=d2imfile, author=author,
+ descrip=descrip, history=history,
+ rms_ra=rms_ra, rms_dec=rms_dec, nmatch=nmatch,
+ catalog=catalog,
+ hdrletnum=numhlt + 1, verbose=False)
+
+ if attach:
+ # Check to see whether or not a HeaderletHDU with this hdrname already
+ # exists
+ hdrnames = getHeaderletKwNames(fobj)
+ if hdrname not in hdrnames:
+ hdrlet_hdu = HeaderletHDU.fromheaderlet(hdrletobj)
+
+ if destim is not None:
+ hdrlet_hdu[0].header['destim'] = destim
+
+ fobj.append(hdrlet_hdu)
+
+ # Update the WCSCORR table with new rows from the headerlet's WCSs
+ wcscorr.update_wcscorr(fobj, hdrletobj, 'SIPWCS')
+
+ fobj.flush()
+ else:
+ print 'WARNING:'
+ print ' Headerlet with hdrname ',hdrname,' already archived for WCS ',wcsname
+ print ' No new headerlet appended to ',fname,'.'
+
+
+ if close_fobj:
+ fobj.close()
+
+ if output is None:
+ # Generate default filename for headerlet FITS file
+ output = fname[:fname.find('.fits')]
+ if '.fits' not in output:
+ output = output+'_hlet.fits'
+
+ # If user specifies an output filename for headerlet, write it out
+ if os.path.exists(output):
+ if clobber:
+ os.remove(output)
+ else:
+ print 'WARNING:'
+ print ' Headerlet file ',output,' already written out!'
+ print ' This headerlet file will not be created now.'
+ return
+
+ hdrletobj.writeto(output)
+ print 'Created Headerlet file: ',output
+
+def create_headerlet(filename, sciext=None, hdrname=None, destim=None, wcskey=" ", wcsname=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ rms_ra=None, rms_dec = None, nmatch=None, catalog=None,
+ verbose=100, logmode='w'):
+ """
+ Create a headerlet from a WCS in a science file
+ If both wcskey and wcsname are given they should match, if not
+ raise an Exception
+
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
sciext: string or python list
- Extension in which the science data is. The headerlet will be created
+ Extension in which the science data is. The headerlet will be created
from these extensions.
If string - a valid EXTNAME is expected
If list - a list of FITS extension numbers or extension tuples ('SCI', 1)
is expected.
- If None, loops over all extensions in the file, including the primary
+ If None, loops over all extensions in the file, including the primary [BROKEN-22Sept2011]
hdrname: string
value of HDRNAME keyword
Takes the value from the HDRNAME<wcskey> keyword, if not available from WCSNAME<wcskey>
- It stops if neither is found in the science file and a value is not provided
+ It stops if neither is found in the science file and a value is not provided
destim: string or None
name of file this headerlet can be applied to
if None, use ROOTNAME keyword
- wcskey: char (A...Z) or " " or None
+ wcskey: char (A...Z) or " " or "PRIMARY" or None
a char representing an alternate WCS to be used for the headerlet
- if " ", use the primary (default)
- if None use wcsname
+ if " ", use the primary (default)
+ if None use wcsname
wcsname: string or None
if wcskey is None use wcsname specified here to choose an alternate WCS for the headerlet
sipname: string or None (default)
@@ -194,33 +808,39 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
it is set to 'NOMODEL'
If d2im correction exists, but 'D2IMFILE' is missing from science
header, it is set to 'UNKNOWN'
+ author: string
+ Name of user who created the headerlet, added as 'AUTHOR' keyword
+ to headerlet PRIMARY header
+ descrip: string
+ Short description of the solution provided by the headerlet
+ This description will be added as the single 'DESCRIP' keyword
+ to the headerlet PRIMARY header
+ history: filename, string or list of strings
+ Long (possibly multi-line) description of the solution provided
+ by the headerlet. These comments will be added as 'HISTORY' cards
+ to the headerlet PRIMARY header
+ If filename is specified, it will format and attach all text from
+ that file as the history.
verbose: int
python logging level
logmode: 'w' or 'a'
log file open mode
-
+
Returns
-------
Headerlet object
"""
+
+ initLogging('createHeaderlet', verbose=verbose)
-
- if verbose:
- setLogger(module_logger, verbose, mode=logmode)
- else:
- module_logger.setLevel(100)
-
- module_logger.info("Starting createHeaderlet: %s" % time.asctime())
phdukw = {'IDCTAB': True,
'NPOLFILE': True,
'D2IMFILE': True}
- if not isinstance(fname, pyfits.HDUList):
- fobj = pyfits.open(fname)
- close_file = True
- else:
- fobj = fname
- close_file = False
+ fobj,fname,close_file = parseFilename(filename)
+ # Translate 'wcskey' value for PRIMARY WCS to valid altwcs value of ' '
+ if wcskey == 'PRIMARY': wcskey = ' '
+
# get all required keywords
if destim is None:
try:
@@ -230,7 +850,7 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
module_logger.info('DESTIM not provided')
module_logger.info('Keyword "ROOTNAME" not found')
module_logger.info('Using file name as DESTIM')
-
+
if not hdrname:
# check if HDRNAME<wcskey> is in header
hdrname = "".join(["HDRNAME",wcskey.upper()])
@@ -238,53 +858,34 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
hdrname = fobj[1].header['HDRNAME']
except KeyError:
try:
- hdrname = fobj[1].header['WCSNAME']
+ hdrname = fobj[1].header['WCSNAME'+wcskey]
+ print 'Using default value for HDRNAME of "%s"'%(hdrname),
+ print ' derived from WCSNAME%s.'%(wcskey)
except KeyError, detail:
- message = "Required keyword 'HDRNAME' not given"
+ message = "Required keywords 'HDRNAME' or 'WCSNAME' not found"
module_logger.critical(message)
print message, detail
-
+
if not wcsname:
wname = "".join(["WCSNAME",wcskey.upper()])
- try:
+ if wname in fobj[1].header:
wcsname = fobj[1].header[wname]
- except KeyError, detail:
+ else:
message = "Missing required keyword 'WCSNAME'."
module_logger.critical(message)
+ print message
-
+
if not sipname:
- try:
- sipname = fobj[0].header["SIPNAME"]
- except KeyError:
- try:
- sipname = fobj[0].header["IDCTAB"]
- except KeyError:
- if 'A_ORDER' in fobj[1].header or 'B_ORDER' in fobj[1].header:
- sipname = 'UNKNOWN'
- else:
- sipname = 'NOMODEL'
-
+ sipname = utils.build_sipname(fobj)
+
if not npolfile:
- try:
- npolfile = fobj[0].header["NPOLFILE"]
- except KeyError:
- if countExtn(f, 'WCSDVARR'):
- npolfile = 'UNKNOWN'
- else:
- npolfile = 'NOMODEL'
-
+ npolfile = utils.build_npolname(fobj)
+
if not d2imfile:
- try:
- d2imfile = fobj[0].header["D2IMFILE"]
- except KeyError:
- if countExtn(f, 'D2IMARR'):
- npolfile = 'UNKNOWN'
- else:
- npolfile = 'NOMODEL'
-
- distname = "_".join([sipname, npolfile, d2imfile])
-
+ d2imfile = utils.build_d2imname(fobj)
+
+ distname = utils.build_distname(sipname, npolfile, d2imfile)
+
# get the version of STWCS used to create the WCS of the science file.
try:
upwcsver = fobj[0].header.ascard['STWCSVER']
@@ -298,7 +899,7 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
"Version of PYWCS used to update the WCS")
if not sciext:
- sciext = range(fobj)
+ sciext = range(len(fobj))
elif isinstance(sciext, str):
numsciext = countExtn(fobj, sciext)
sciext = [(sciext, i) for i in range(1, numsciext+1)]
@@ -308,69 +909,107 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
raise ValueError("Expected sciext to be a list of FITS extensions with science data or a string of valid EXTNAME")
if wcskey is 'O':
- message = "Warning: 'O' is a reserved key for the original WCS"
+ message = "Warning: 'O' is a reserved key for the original WCS. Quitting..."
module_logger.info(message)
print message
+ return
+
+ # open file and parse comments
+ if history not in ['',' ',None,'INDEF'] and os.path.isfile(history):
+ f = open(fu.osfn(history))
+ history = f.readlines()
+ f.close()
module_logger.debug("Data extensions form which to create headerlet:\n\t %s"
% (str(sciext)))
hdul = pyfits.HDUList()
- phdu = _createPrimaryHDU(destim, hdrname, distname, wcsname,
- sipname, npolfile, d2imfile, upwcsver, pywcsver)
+ phdu = _createPrimaryHDU(destim, hdrname, distname, wcsname,
+ sipname, npolfile, d2imfile, upwcsver, pywcsver,
+ author, descrip, history)
hdul.append(phdu)
-
+ orient_comment = "position angle of image y axis (deg. e of n)"
+
if fu.isFits(fobj)[1] is not 'simple':
-
+
for e in sciext:
- hwcs = HSTWCS(fname, ext=e, wcskey=wcskey)
- h = hwcs.wcs2header(sip2hdr=True).ascard
+ wkeys = altwcs.wcskeys(fname,ext=e)
+ if wcskey != ' ' and wkeys > 0:
+ if wcskey not in wkeys:
+ if verbose > 100:
+ module_logger.debug('No WCS with wcskey=%s found in extension %s. Skipping...'%(wcskey,str(e)))
+ continue # skip any extension which does not have this wcskey
+
+ # This reads in full model: alternate WCS keywords plus SIP
+ hwcs = HSTWCS(fname,ext=e,wcskey=' ')
+ h = hwcs.wcs2header(sip2hdr=True)
+ h.update('ORIENTAT',hwcs.orientat, comment=orient_comment)
+ if wcskey != ' ':
+ # Now read in specified linear WCS terms from alternate WCS
+ try:
+ althdr = altwcs.convertAltWCS(fname,e,oldkey=wcskey,newkey=" ")
+ althdrwcs = HSTWCS(fname,e,wcskey=wcskey)
+ alt_orient = althdrwcs.orientat
+ except KeyError:
+ continue # Skip over any extension which does not have a WCS
+ althdr = althdr.ascard
+ # Update full WCS with values from alternate WCS
+ for card in althdr:
+ h.update(card.key,card.value)
+ h.update('ORIENTAT',alt_orient, comment=orient_comment)
+ h = h.ascard
h.append(pyfits.Card(key='VAFACTOR', value=hwcs.vafactor,
comment='Velocity aberration plate scale factor'))
h.insert(0, pyfits.Card(key='EXTNAME', value='SIPWCS',
comment='Extension name'))
- if isinstance(e, int): val = e
+ if isinstance(e, int):
+ if 'extver' in fobj[e].header:
+ val = fobj[e].header['extver']
+ else:
+ val = e
else: val = e[1]
h.insert(1, pyfits.Card(key='EXTVER', value=val,
comment='Extension version'))
- h.append(pyfits.Card("SCIEXT", str(e),
+ h.append(pyfits.Card("SCIEXT", str(e),
"Target science data extension"))
fhdr = fobj[e].header.ascard
if npolfile is not 'NOMODEL':
cpdis = fhdr['CPDIS*...']
for c in range(1, len(cpdis) + 1):
h.append(cpdis[c - 1])
- dp = fhdr['DP%s.*...' % c]
+ dp = fhdr['DP%s*' % c]
h.extend(dp)
-
+
try:
h.append(fhdr['CPERROR%s' % c])
except KeyError:
pass
-
+
try:
h.append(fhdr['NPOLEXT'])
except KeyError:
pass
-
+
if d2imfile is not 'NOMODEL':
try:
h.append(fhdr['D2IMEXT'])
except KeyError:
pass
-
+
try:
h.append(fhdr['AXISCORR'])
except KeyError:
+ print ("'D2IMFILE' kw exists but keyword 'AXISCORR' was not found in "
+ "%s['SCI',%d]" % (fname, val))
module_logger.exception("'D2IMFILE' kw exists but keyword 'AXISCORR' was not found in "
- "%s['SCI',%d]" % (fname, e))
+ "%s['SCI',%d]" % (fname, val))
raise
-
+
try:
h.append(fhdr['D2IMERR'])
except KeyError:
h.append(pyfits.Card(key='DPERROR', value=0,
comment='Maximum error of D2IMARR'))
-
+
hdu = pyfits.ImageHDU(header=pyfits.Header(h))
hdul.append(hdu)
numwdvarr = countExtn(fname, 'WCSDVARR')
@@ -386,116 +1025,557 @@ def create_headerlet(fname, sciext=None, hdrname=None, destim=None, wcskey=" ",
fobj.close()
return Headerlet(hdul,verbose=verbose, logmode='a')
-def _createPrimaryHDU(destim, hdrname, distname, wcsname,
- sipname, npolfile, d2imfile, upwcsver, pywcsver):
- phdu = pyfits.PrimaryHDU()
- phdu.header.update('DESTIM', destim,
- comment='Destination observation root name')
- phdu.header.update('HDRNAME', hdrname, comment='Headerlet name')
- fmt="%Y-%m-%dT%H:%M:%S"
- phdu.header.update('DATE', time.strftime(fmt),
- comment='Date FITS file was generated')
- phdu.header.update('WCSNAME', wcsname, comment='WCS name')
- phdu.header.update('DISTNAME', distname, comment='Distortion model name')
- phdu.header.update('SIPNAME', sipname, comment='origin of SIP polynomial distortion model')
- phdu.header.update('NPOLFILE', npolfile, comment='origin of non-polynmial distortion model')
- phdu.header.update('D2IMFILE', d2imfile, comment='origin of detector to image correction')
-
- phdu.header.ascard.append(upwcsver)
- phdu.header.ascard.append(pywcsver)
- return phdu
-def applyHeaderlet(hdrfile, destfile, createheaderlet=True, hdrname=None,
- verbose=False):
+def apply_headerlet_as_primary(filename, hdrlet, attach=True,archive=True,
+ force=False, verbose=False):
"""
- Apply headerlet 'hdrfile' to a science observation 'destfile'
+ Apply headerlet 'hdrfile' to a science observation 'destfile' as the primary WCS
Parameters
----------
- hdrfile: string
- Headerlet file
- destfile: string
+ filename: string
File name of science observation whose WCS solution will be updated
- createheaderlet: boolean
+ hdrlet: string
+ Headerlet file
+ attach: boolean
+ True (default): append headerlet to FITS file as a new extension.
+ archive: boolean
True (default): before updating, create a headerlet with the
WCS old solution.
- hdrname: string or None (default)
- will be the value of the HDRNAME keyword in the headerlet generated
- for the old WCS solution. If not specified, a sensible default
- will be used. Not required if createheaderlet is False
+ force: boolean
+ If True, this will cause the headerlet to replace the current PRIMARY
+ WCS even if it has a different distortion model. [Default: False]
verbose: False or a python logging level
(one of 'INFO', 'DEBUG' logging levels)
(an integer representing a logging level)
"""
- if verbose:
- setLogger(module_logger, verbose)
- else:
- module_logger.setLevel(100)
- module_logger.info("Starting applyHeaderlet: %s" % time.asctime())
- hlet = Headerlet(hdrfile, verbose=verbose, logmode='a')
- hlet.apply(destfile, createheaderlet=createheaderlet, hdrname=hdrname)
+ initLogging('apply_headerlet_as_primary', verbose=verbose)
+
+ hlet = Headerlet(hdrlet, verbose=verbose, logmode='a')
+ hlet.apply_as_primary(filename, attach=attach, archive=archive, force=force)
-def updateRefFiles(source, dest, verbose=False):
+def apply_headerlet_as_alternate(filename, hdrlet, attach=True,
+ wcskey=None, wcsname=None, verbose=False):
"""
- Update the reference files name in the primary header of 'dest'
- using values from 'source'
+ Apply headerlet to a science observation as an alternate WCS
Parameters
----------
- source: pyfits.Header.ascardlist
- dest: pyfits.Header.ascardlist
+ filename: string
+ File name of science observation whose WCS solution will be updated
+ hdrlet: string
+ Headerlet file
+ attach: boolean
+ flag indicating if the headerlet should be attached as a
+ HeaderletHDU to fobj. If True checks that HDRNAME is unique
+ in the fobj and stops if not.
+ wcskey: string
+ Key value (A-Z, except O) for this alternate WCS
+ If None, the next available key will be used
+ wcsname: string
+ Name to be assigned to this alternate WCS
+ WCSNAME is a required keyword in a Headerlet but this allows the
+ user to change it as desired.
+ verbose: False or a python logging level
+ (one of 'INFO', 'DEBUG' logging levels)
+ (an integer representing a logging level)
"""
- module_logger.info("Updating reference files")
- phdukw = {'IDCTAB': True,
- 'NPOLFILE': True,
- 'D2IMFILE': True}
+ initLogging('apply_headerlet_as_alternate', verbose=verbose)
+
+ hlet = Headerlet(hdrlet, verbose=verbose, logmode='a')
+ hlet.apply_as_alternate(filename, attach=attach, wcsname=wcsname, wcskey=wcskey)
- try:
- wind = dest.index_of('HISTORY')
- except KeyError:
- wind = len(dest)
- for key in phdukw.keys():
- try:
- value = source[key]
- dest.insert(wind, value)
- except KeyError:
- # TODO: I don't understand what the point of this is. Is it meant
- # for logging purposes? Right now it isn't used.
- phdukw[key] = False
- return phdukw
+def attach_headerlet(filename, hdrlet, verbose=False):
+ """
+ Attach Headerlet as an HeaderletHDU to a science file
-def getRootname(fname):
+ Parameters
+ ----------
+ filename: string, HDUList
+ science file to which the headerlet should be applied
+ hdrlet: string or Headerlet object
+ string representing a headerlet file
"""
- returns the value of ROOTNAME or DESTIM
+ initLogging('attach_headerlet', verbose=verbose)
+
+ hlet = Headerlet(hdrlet, verbose=verbose, logmode='a')
+ hlet.attach_to_file(filename)
+
+def delete_headerlet(filename, hdrname=None, hdrext=None, distname=None):
"""
+ Deletes HeaderletHDU(s) from a science file
- try:
- rootname = pyfits.getval(fname, 'ROOTNAME')
- except KeyError:
- rootname = pyfits.getval(fname, 'DESTIM')
- return rootname
+ Notes
+ -----
+ One of hdrname, hdrext or distname should be given.
+ If hdrname is given - delete a HeaderletHDU with a name HDRNAME from fobj.
+ If hdrext is given - delete HeaderletHDU in extension.
+ If distname is given - deletes all HeaderletHDUs with a specific distortion model from fobj.
+ Updates wcscorr
-def mapFitsExt2HDUListInd(fname, extname):
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ hdrname: string or None
+ HeaderletHDU primary header keyword HDRNAME
+ hdrext: int, tuple or None
+ HeaderletHDU FITS extension number
+ tuple has the form ('HDRLET', 1)
+ distname: string or None
+ distortion model as specified in the DISTNAME keyword
+ """
+ initLogging('delete_headerlet')
+ hdrlet_ind = findHeaderletHDUs(filename,hdrname=hdrname, hdrext=hdrext,
+ distname=distname)
+ if len(hdrlet_ind) == 0:
+ print 'ERROR: '
+ print 'No HDUs deleted... No Headerlet HDUs found with '
+ print ' hdrname = ',hdrname
+ print ' hdrext = ',hdrext
+ print ' distname = ',distname
+ print 'Please review input parameters and try again. '
+ return
+
+ fobj,fname,close_fobj = parseFilename(filename,mode='update')
+
+ # delete row(s) from WCSCORR table now...
+ #
+ #
+ if hdrname not in ['',' ',None,'INDEF']:
+ selections = {'hdrname':hdrname}
+ elif hdrname in ['',' ',None,'INDEF'] and hdrext is not None:
+ selections = {'hdrname':fobj[hdrext].header['hdrname']}
+ else:
+ selections = {'distname':distname}
+ wcscorr.delete_wcscorr_row(fobj['WCSCORR'].data,selections)
+
+ # delete the headerlet extension now
+ for hdrind in hdrlet_ind:
+ del fobj[hdrind]
+
+ # Update file object with changes
+ fobj.flush()
+ # close file, if was opened by this function
+ if close_fobj:
+ fobj.close()
+ print 'Deleted headerlet from extension(s): ',hdrlet_ind
+
+def headerlet_summary(filename,columns=None,pad=2,maxwidth=None,
+ output=None,clobber=True,quiet=False):
"""
- Map FITS extensions with 'EXTNAME' to HDUList indexes.
+ Print a summary of all HeaderletHDUs in a science file to STDOUT, and
+ optionally to a text file
+ The summary includes:
+ HDRLET_ext_number HDRNAME WCSNAME DISTNAME SIPNAME NPOLFILE D2IMFILE
+
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ columns: list
+ List of headerlet PRIMARY header keywords to report in summary
+ By default (set to None), it will use the default set of keywords
+ defined as the global list DEFAULT_SUMMARY_COLS
+ pad: int
+ Number of padding spaces to put between printed columns
+ [Default: 2]
+ maxwidth: int
+ Maximum column width(not counting padding) for any column in summary
+ By default (set to None), each column's full width will be used
+ output: string (optional)
+ Name of optional output file to record summary. This filename
+ can contain environment variables.
+ [Default: None]
+ clobber: bool
+ If True, will overwrite any previous output file of same name
+ quiet: bool
+ If True, will NOT report info to STDOUT
+ """
+ if columns is None:
+ summary_cols = DEFAULT_SUMMARY_COLS
+ else:
+ summary_cols = columns
+
+ summary_dict = {}
+ for kw in summary_cols:
+ summary_dict[kw] = copy.deepcopy(COLUMN_DICT)
+
+ # Define Extension number column
+ extnums_col = copy.deepcopy(COLUMN_DICT)
+ extnums_col['name'] = 'EXTN'
+ extnums_col['width'] = 6
+
+ fobj,fname,close_fobj = parseFilename(filename)
+ # find all HDRLET extensions and combine info into a single summary
+ for extn in fobj:
+ if 'extname' in extn.header and extn.header['extname'] == 'HDRLET':
+ hdrlet_indx = fobj.index_of(('hdrlet',extn.header['extver']))
+ try:
+ ext_cols, ext_summary = extn.headerlet.summary(columns=summary_cols)
+ extnums_col['vals'].append(hdrlet_indx)
+ for kw in summary_cols:
+ for key in COLUMN_DICT:
+ summary_dict[kw][key].extend(ext_summary[kw][key])
+ except:
+ print 'Skipping headerlet...Could not read Headerlet from extension ',hdrlet_indx
+
+ if close_fobj:
+ fobj.close()
+
+ # Print out the summary dictionary
+ print_summary(summary_cols, summary_dict, pad=pad, maxwidth=maxwidth,
+ idcol=extnums_col, output=output,
+ clobber=clobber, quiet=quiet)
+
+def restore_from_headerlet(filename, hdrname=None, hdrext=None, archive=True, force=False):
"""
+ Restores a headerlet as a primary WCS
+
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ hdrname: string
+ HDRNAME keyword of HeaderletHDU
+ hdrext: int or tuple
+ Headerlet extension number of tuple ('HDRLET',2)
+ archive: boolean (default: True)
+ When the distortion model in the headerlet is the same as the distortion model of
+ the science file, this flag indicates if the primary WCS should be saved as an alternate
+ and a headerlet extension.
+ When the distortion models do not match this flag indicates if the current primary and
+ alternate WCSs should be archived as headerlet extensions and alternate WCS.
+ force: boolean (default:False)
+ When the distortion models of the headerlet and the primary do not match, and archive
+ is False, this flag forces an update of the primary.
+ """
+ initLogging('restore_from_headerlet')
+
+ hdrlet_ind = findHeaderletHDUs(filename,hdrext=hdrext,hdrname=hdrname)
- if not isinstance(fname, pyfits.HDUList):
- f = pyfits.open(fname)
- close_file = True
+ fobj,fname,close_fobj = parseFilename(filename,mode='update')
+
+ if len(hdrlet_ind) > 1:
+ if hdrext:
+ kwerr = 'hdrext'
+ kwval = hdrext
+ else:
+ kwerr = 'hdrname'
+ kwval = hdrname
+ print '====================================================='
+ print '[restore_from_headerlet]'
+ print 'Multiple Headerlet extensions found with the same name.'
+ print ' %d Headerlets with "%s" = %s found in %s.'%(
+ len(hdrlet_ind),kwerr,kwval,fname)
+ print '====================================================='
+ if close_fobj:
+ fobj.close()
+ raise ValueError
+
+ hdrlet_indx = hdrlet_ind[0]
+
+ # read headerlet from HeaderletHDU into memory
+ if hasattr(fobj[hdrlet_ind[0]], 'hdulist'):
+ hdrlet = fobj[hdrlet_indx].hdulist
else:
- f = fname
- close_file = False
- d = {}
- for hdu in f:
- if 'EXTNAME' in hdu.header and hdu.header['EXTNAME'] == extname:
- extver = hdu.header['EXTVER']
- d[(extname, extver)] = f.index_of((extname, extver))
- if close_file:
- f.close()
- return d
+ hdrlet = fobj[hdrlet_indx].headerlet # older convention in PyFITS
+
+ # read in the names of the extensions which HeaderletHDU updates
+ extlist = []
+ for ext in hdrlet:
+ if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
+ # convert from string to tuple or int
+ sciext = eval(ext.header['sciext'])
+ extlist.append(fobj[sciext])
+ # determine whether distortion is the same
+ current_distname = hdrlet[0].header['distname']
+ same_dist = True
+ if current_distname != fobj[0].header['distname']:
+ same_dist = False
+ if not archive and not force:
+ if close_fobj:
+ fobj.close()
+ print '====================================================='
+ print '[restore_from_headerlet]'
+ print 'Headerlet does not have the same distortion as image!'
+ print ' Set "archive"=True to save old distortion model, or'
+ print ' set "force"=True to overwrite old model with new.'
+ print '====================================================='
+ raise ValueError
+
+ # check whether primary WCS has been archived already
+ # Use information from first 'SCI' extension
+ priwcs_name = None
+
+ scihdr = extlist[0].header
+ sci_wcsnames = altwcs.wcsnames(scihdr).values()
+ if 'hdrname' in scihdr:
+ priwcs_hdrname = scihdr['hdrname']
+ else:
+ if 'wcsname' in scihdr:
+ priwcs_hdrname = priwcs_name = scihdr['wcsname']
+ else:
+ if 'idctab' in scihdr:
+ priwcs_hdrname = ''.join(['IDC_',
+ utils.extract_rootname(scihdr['idctab'])])
+ else:
+ priwcs_hdrname = 'UNKNOWN'
+ priwcs_name = priwcs_hdrname
+ scihdr.update('WCSNAME',priwcs_name)
+
+ priwcs_unique = verifyHdrnameIsUnique(fobj,priwcs_hdrname)
+ if archive and priwcs_unique:
+ newhdrlet = create_headerlet(fobj,sciext=scihdr['extname'],
+ hdrname=priwcs_hdrname)
+ newhdrlet.attach_to_file(fobj)
+ #
+ # copy hdrlet as a primary
+ #
+ hdrlet.apply_as_primary(fobj,attach=False, archive=archive,force=force)
+
+ fobj.flush()
+ if close_fobj:
+ fobj.close()
+
+def restore_all_with_distname(filename, distname, primary, archive=True, sciext='SCI'):
+ """
+ Restores all HeaderletHDUs with a given distortion model as alternate WCSs and a primary
+
+ Parameters
+ --------------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ distname: string
+ distortion model as represented by a DISTNAME keyword
+ primary: int or string or None
+ HeaderletHDU to be restored as primary
+ if int - a fits extension
+ if string - HDRNAME
+ if None - use first HeaderletHDU
+ archive: boolean (default True)
+ flag indicating if HeaderletHDUs should be created from the
+ primary and alternate WCSs in fname before restoring all matching
+ headerlet extensions
+ """
+ initLogging('restore_all_with_distname')
+
+ fobj,fname,close_fobj = parseFilename(filename,mode='update')
+
+ hdrlet_ind = findHeaderletHDUs(fobj,distname=distname)
+ if len(hdrlet_ind) == 0:
+ print '====================================================='
+ print '[restore_all_with_distname]'
+ print 'No Headerlet extensions found with '
+ print ' "DISTNAME" = %s in %s.'%(distname,fname)
+ print 'Full list of DISTNAMEs found in all headerlet extensions: '
+ print getHeaderletKwNames(fobj,kw='DISTNAME')
+ print '====================================================='
+ if close_fobj:
+ fobj.close()
+ raise ValueError
+
+ # Interpret 'primary' parameter input into extension number
+ if primary is None:
+ primary_ind = hdrlet_ind[0]
+ elif isinstance(primary, int):
+ primary_ind = primary
+ else:
+ primary_ind = None
+ for ind in hdrlet_ind:
+ if fobj[ind].header['hdrname'] == primary:
+ primary_ind = ind
+ break
+ if primary_ind is None:
+ if close_fobj:
+ fobj.close()
+ print '====================================================='
+            print '[restore_all_with_distname]'
+ print 'No Headerlet extensions found with '
+ print ' "DISTNAME" = %s in %s.'%(primary,fname)
+ print '====================================================='
+ raise ValueError
+ # Check to see whether 'primary' HeaderletHDU has same distname as user
+ # specified on input
+
+ # read headerlet from HeaderletHDU into memory
+ if hasattr(fobj[primary_ind], 'hdulist'):
+ primary_hdrlet = fobj[primary_ind].hdulist
+ else:
+ primary_hdrlet = fobj[primary_ind].headerlet # older convention in PyFITS
+ pri_distname = primary_hdrlet[0].header['distname']
+ if pri_distname != distname:
+ if close_fobj:
+ fobj.close()
+ print '====================================================='
+        print '[restore_all_with_distname]'
+ print 'Headerlet extension to be used as PRIMARY WCS '
+        print '    has "DISTNAME" = %s while'%(pri_distname)
+ print ' "DISTNAME" = %s was specified on input.'%(distname)
+ print ' All updated WCSs must have same DISTNAME. Quitting...'
+ print '====================================================='
+ raise ValueError
+
+ # read in the names of the WCSs which the HeaderletHDUs will update
+ wnames = altwcs.wcsnames(fobj[sciext,1].header)
+
+ # work out how many HeaderletHDUs will be used to update the WCSs
+ numhlt = len(hdrlet_ind)
+ hdrnames = getHeaderletKwNames(fobj,kw='wcsname')
+
+ # read in headerletHDUs and update WCS keywords
+ for hlet in hdrlet_ind:
+ if fobj[hlet].header['distname'] == distname:
+ if hasattr(fobj[hlet], 'hdulist'):
+ hdrlet = fobj[hlet].hdulist
+ else:
+ hdrlet = fobj[hlet].headerlet # older convention in PyFITS
+ if hlet == primary_ind:
+ hdrlet.apply_as_primary(fobj,attach=False,archive=archive,force=True)
+ else:
+ hdrlet.apply_as_alternate(fobj,attach=False,wcsname=hdrlet[0].header['wcsname'])
+
+ fobj.flush()
+ if close_fobj:
+ fobj.close()
+def archive_as_headerlet(filename, hdrname, sciext='SCI',
+ wcsname=None, wcskey=None, destim=None,
+ sipname=None, npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ rms_ra=None, rms_dec=None, nmatch=None, catalog=None):
+ """
+ Save a WCS as a headerlet extension and write it out to a file.
+ This function will create a headerlet, attach it as an extension to the
+ science image (if it has not already been archived) then, optionally,
+ write out the headerlet to a separate headerlet file.
+
+ Either wcsname or wcskey must be provided, if both are given, they must match a valid WCS
+ Updates wcscorr if necessary.
+
+ Parameters
+ ----------
+ filename: string or HDUList
+ Either a filename or PyFITS HDUList object for the input science file
+ An input filename (str) will be expanded as necessary to interpret
+ any environmental variables included in the filename.
+ hdrname: string
+ Unique name for this headerlet, stored as HDRNAME keyword
+ sciext: string
+ name (EXTNAME) of extension that contains WCS to be saved
+ wcsname: string
+ name of WCS to be archived, if " ": stop
+ wcskey: one of A...Z or " " or "PRIMARY"
+ if " " or "PRIMARY" - archive the primary WCS
+ destim: string
+ DESTIM keyword
+        if None, use ROOTNAME or science file name
+ sipname: string or None (default)
+ Name of unique file where the polynomial distortion coefficients were
+ read from. If None, the behavior is:
+ The code looks for a keyword 'SIPNAME' in the science header
+ If not found, for HST it defaults to 'IDCTAB'
+ If there is no SIP model the value is 'NOMODEL'
+ If there is a SIP model but no SIPNAME, it is set to 'UNKNOWN'
+ npolfile: string or None (default)
+ Name of a unique file where the non-polynomial distortion was stored.
+ If None:
+ The code looks for 'NPOLFILE' in science header.
+ If 'NPOLFILE' was not found and there is no npol model, it is set to 'NOMODEL'
+ If npol model exists, it is set to 'UNKNOWN'
+ d2imfile: string
+ Name of a unique file where the detector to image correction was
+ stored. If None:
+ The code looks for 'D2IMFILE' in the science header.
+ If 'D2IMFILE' is not found and there is no d2im correction,
+ it is set to 'NOMODEL'
+ If d2im correction exists, but 'D2IMFILE' is missing from science
+ header, it is set to 'UNKNOWN'
+ author: string
+ Name of user who created the headerlet, added as 'AUTHOR' keyword
+ to headerlet PRIMARY header
+ descrip: string
+ Short description of the solution provided by the headerlet
+ This description will be added as the single 'DESCRIP' keyword
+ to the headerlet PRIMARY header
+ history: filename, string or list of strings
+ Long (possibly multi-line) description of the solution provided
+ by the headerlet. These comments will be added as 'HISTORY' cards
+ to the headerlet PRIMARY header
+ If filename is specified, it will format and attach all text from
+ that file as the history.
+ """
+ initLogging('archive_as_headerlet')
+
+ if wcsname in [None,' ','','INDEF'] and wcskey is None:
+ print '='*60
+ print '[archive_as_headerlet]'
+        print 'No valid WCS found in %s.'%filename
+ print ' A valid value for either "wcsname" or "wcskey" '
+ print ' needs to be specified. '
+ print '='*60
+ raise ValueError
+
+ if hdrname in [None, ' ','']:
+ print '='*60
+ print '[archive_as_headerlet]'
+        print 'No valid name for this headerlet was provided for %s.'%filename
+ print ' A valid value for "hdrname" needs to be specified. '
+ print '='*60
+ raise ValueError
+
+ # Translate 'wcskey' value for PRIMARY WCS to valid altwcs value of ' '
+ if wcskey == 'PRIMARY': wcskey = ' '
+
+ fobj,fname,close_fobj = parseFilename(filename,mode='update')
+
+ numhlt = countExtn(fobj, 'HDRLET')
+
+ if wcsname is None:
+ scihdr = fobj[sciext,1].header
+ wcsname = scihdr['wcsname'+wcskey]
+
+
+ # Check to see whether or not a HeaderletHDU with this hdrname already
+ # exists
+ hdrnames = getHeaderletKwNames(fobj)
+ if hdrname not in hdrnames:
+ hdrletobj = getHeaderletObj(fobj,sciext=sciext,
+ wcsname=wcsname, wcskey=wcskey,
+ hdrname=hdrname,
+ sipname=sipname, npolfile=npolfile,
+ d2imfile=d2imfile, author=author,
+ descrip=descrip, history=history,
+ rms_ra=rms_ra, rms_dec=rms_dec, nmatch=nmatch,
+ catalog=catalog,
+ hdrletnum=numhlt + 1, verbose=False)
+        hdrlet_hdu = HeaderletHDU.fromheaderlet(hdrletobj)
+
+ if destim is not None:
+ hdrlet_hdu[0].header['destim'] = destim
+
+ fobj.append(hdrlet_hdu)
+
+ fobj.flush()
+ else:
+ print 'WARNING:'
+ print ' Headerlet with hdrname ',hdrname,' already archived for WCS ',wcsname
+ print ' No new headerlet appended to ',fname,'.'
+
+ if close_fobj:
+ fobj.close()
+
+#### Headerlet Class definitions
class Headerlet(pyfits.HDUList):
"""
A Headerlet class
@@ -511,139 +1591,363 @@ class Headerlet(pyfits.HDUList):
instances, or an HDUList instance
mode: string, optional
Mode with which to open the given file object
- verbose: int
+ verbose: int
python logging level, higher numbers trigger more output
logmode: 'w' or 'a'
- for internal use only, indicates whether the log file
+ for internal use only, indicates whether the log file
should be open in attach or write mode
"""
self.verbose = verbose
self.hdr_logger = logging.getLogger('headerlet.Headerlet')
- if self.verbose:
- setLogger(self.hdr_logger, self.verbose, mode=logmode)
- else:
- self.hdr_logger.setLevel(100)
-
- if not isinstance(fobj, list):
- fobj = pyfits.open(fobj, mode=mode)
+ initLogging('class Headerlet', logger=self.hdr_logger, level=100,
+ verbose=self.verbose, logmode=logmode)
+ fobj,fname,close_file = parseFilename(fobj)
+
super(Headerlet, self).__init__(fobj)
self.fname = self.filename()
self.hdrname = self[0].header["HDRNAME"]
self.wcsname = self[0].header["WCSNAME"]
- self.stwcsver = self[0].header.get("STWCSVER", "")
- self.stwcsver = self[0].header.get("PYWCSVER", "")
+ self.upwcsver = self[0].header.get("UPWCSVER", "")
+ self.pywcsver = self[0].header.get("PYWCSVER", "")
self.destim = self[0].header["DESTIM"]
- self.idctab = self[0].header["SIPNAME"]
+ self.sipname = self[0].header["SIPNAME"]
self.npolfile = self[0].header["NPOLFILE"]
self.d2imfile = self[0].header["D2IMFILE"]
self.distname = self[0].header["DISTNAME"]
self.vafactor = self[1].header.get("VAFACTOR", 1) #None instead of 1?
+ self.author = self[0].header["AUTHOR"]
+ self.descrip = self[0].header["DESCRIP"]
+
+ self.history = ''
+ for card in self[0].header['HISTORY*']:
+ self.history += card.value+'\n'
+
self.d2imerr = 0
self.axiscorr = 1
- def apply_as_primary(fobj, attach=True, archive=True, force=False):
+ def apply_as_primary(self, fobj, attach=True, archive=True, force=False):
"""
Copy this headerlet as a primary WCS to fobj
-
+
Parameters
----------
fobj: string, HDUList
science file to which the headerlet should be applied
attach: boolean
- flag indicating if the headerlet should be attached as a
- HeaderletHDU to fobj. If True checks that HDRNAME is unique
+ flag indicating if the headerlet should be attached as a
+ HeaderletHDU to fobj. If True checks that HDRNAME is unique
in the fobj and stops if not.
archive: boolean (default is True)
- When the distortion model in the headerlet is the same as the
- distortion model of the science file, this flag indicates if
- the primary WCS should be saved as an alternate and a headerlet
+ When the distortion model in the headerlet is the same as the
+ distortion model of the science file, this flag indicates if
+ the primary WCS should be saved as an alternate and a headerlet
extension.
- When the distortion models do not match this flag indicates if
- the current primary and alternate WCSs should be archived as
+ When the distortion models do not match this flag indicates if
+ the current primary and alternate WCSs should be archived as
headerlet extensions and alternate WCS.
force: boolean (default is False)
- When the distortion models of the headerlet and the primary do
- not match, and archive is False this flag forces an update
+ When the distortion models of the headerlet and the primary do
+ not match, and archive is False this flag forces an update
of the primary
"""
self.hverify()
- if self.verify_dest(dest):
- if not isinstance(dest, pyfits.HDUList):
- fobj = pyfits.open(dest, mode='update')
+ if self.verify_dest(fobj):
+ if not isinstance(fobj, pyfits.HDUList):
+ fobj = pyfits.open(fobj, mode='update')
close_dest = True
else:
- fobj = dest
close_dest = False
- # Create the WCSCORR HDU/table from the existing WCS keywords if
- # necessary
- if createsummary:
- try:
- # TODO: in the pyfits refactoring branch if will be easier to
- # test whether an HDUList contains a certain extension HDU
- # without relying on try/except
- wcscorr_table = fobj['WCSCORR']
- except KeyError:
- # The WCSCORR table needs to be created
- wcscorr.init_wcscorr(fobj)
-
+ # Check to see whether the distortion model in the destination
+ # matches the distortion model in the headerlet being applied
+ dist_models_equal=True
+ if self[0].header['DISTNAME'] != fobj[0].header['DISTNAME']:
+ if self.verbose:
+ print 'Distortion model in headerlet not the same as destination model'
+ print ' Headerlet model : ',self[0].header['DISTNAME']
+ print ' Destination model: ',fobj[0].header['DISTNAME']
+ dist_models_equal = False
+
+ if not dist_models_equal and not force:
+ raise ValueError
+
orig_hlt_hdu = None
numhlt = countExtn(fobj, 'HDRLET')
- if createheaderlet:
- # Create a headerlet for the original WCS data in the file,
- # create an HDU from the original headerlet, and append it to
- # the file
- if not hdrname:
+ hdrlet_extnames = getHeaderletKwNames(fobj)
+
+ # Insure that WCSCORR table has been created with all original
+ # WCS's recorded prior to adding the headerlet WCS
+ wcscorr.init_wcscorr(fobj)
+
+ alt_hlethdu = []
+ # If archive has been specified
+ # regardless of whether or not the distortion models are equal...
+ if archive:
+ if 'wcsname' in fobj[('SCI',1)].header:
+ hdrname = fobj[('SCI',1)].header['WCSNAME']
+ wcsname = hdrname
+ else:
hdrname = fobj[0].header['ROOTNAME'] + '_orig'
- orig_hlt = createHeaderlet(fobj, hdrname, verbose=self.verbose, logmode='a')
- orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt)
- orig_hlt_hdu.update_ext_version(numhlt + 1)
- numhlt += 1
+ wcsname = None
+ wcskey = ' '
+ # Check the HDRNAME for all current headerlet extensions
+ # to see whether this PRIMARY WCS has already been appended
+ if hdrname not in hdrlet_extnames:
+ # - if WCS has not been saved, write out WCS as headerlet extension
+ # Create a headerlet for the original Primary WCS data in the file,
+ # create an HDU from the original headerlet, and append it to
+ # the file
+ orig_hlt = getHeaderletObj(fobj,sciext='SCI',
+ wcsname=wcsname, wcskey=wcskey,
+ hdrname=hdrname, sipname=None,
+ npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ hdrletnum=numhlt + 1, verbose=self.verbose)
+ orig_hlt_hdu = HeaderletHDU.fromheaderlet(orig_hlt)
+ numhlt += 1
+ orig_hlt_hdu.header.update('EXTVER',numhlt)
+
+ wcsextn = mapFitsExt2HDUListInd(fobj.filename(),"SCI")[('SCI',1)]
+ if dist_models_equal:
+ # Use the WCSNAME to determine whether or not to archive
+ # Primary WCS as altwcs
+ # wcsname = hwcs.wcs.name
+ scihdr = fobj[wcsextn].header
+ if 'hdrname' in scihdr:
+ priwcs_name = scihdr['hdrname']
+ else:
+ if 'wcsname' in scihdr:
+ priwcs_name = scihdr['wcsname']
+ else:
+ if 'idctab' in scihdr:
+ priwcs_name = ''.join(['IDC_',
+ utils.extract_rootname(scihdr['idctab'])])
+ else:
+ priwcs_name = 'UNKNOWN'
+ nextkey = altwcs.next_wcskey(fobj,ext=wcsextn)
+ numsci = countExtn(fobj,'SCI')
+ sciext_list = []
+ for i in range(1,numsci+1):
+ sciext_list.append(('SCI',i))
+ altwcs.archiveWCS(fobj,ext=sciext_list,wcskey=nextkey,wcsname=priwcs_name)
+ else:
+ for hname in altwcs.wcsnames(fobj,ext=wcsextn).values():
+ if hname != 'OPUS' and hname not in hdrlet_extnames:
+ # get HeaderletHDU for alternate WCS as well
+ alt_hlet = getHeaderletObj(fobj, sciext='SCI',
+ wcsname=hname, wcskey=wcskey,
+ hdrname=hname, sipname=None,
+ npolfile=None, d2imfile=None,
+ author=None, descrip=None, history=None,
+ hdrletnum=numhlt + 1, verbose=self.verbose)
+ numhlt += 1
+ alt_hlet_hdu = HeaderletHDU.fromheaderlet(alt_hlet)
+ alt_hlet_hdu.header.update('EXTVER',numhlt)
+ alt_hlethdu.append(alt_hlet_hdu)
+ hdrlet_extnames.append(hname)
+
+ if not dist_models_equal:
+ self._delDestWCS(fobj)
+ #! Always attach these extensions last.
+ # Otherwise their headers may get updated with the other WCS kw.
+ numwdvar = countExtn(self, 'WCSDVARR')
+ numd2im = countExtn(self, 'D2IMARR')
+ for idx in range(1, numwdvar + 1):
+ fobj.append(self[('WCSDVARR', idx)].copy())
+ for idx in range(1, numd2im + 1):
+ fobj.append(self[('D2IMARR', idx)].copy())
- self._delDestWCS(fobj)
refs = updateRefFiles(self[0].header.ascard, fobj[0].header.ascard, verbose=self.verbose)
numsip = countExtn(self, 'SIPWCS')
for idx in range(1, numsip + 1):
- fhdr = fobj[('SCI', idx)].header.ascard
+ fhdr = fobj[('SCI', idx)].header
+ siphdr = self[('SIPWCS', idx)].header.ascard
+
+ if dist_models_equal:
+ hwcs = HSTWCS(fobj,ext=('SCI',idx))
+ hwcshdr = hwcs.wcs2header(sip2hdr=not(dist_models_equal))
+
+ # a minimal attempt to get the position of the WCS keywords group
+ # in the header by looking for the PA_APER kw.
+ # at least make sure the WCS kw are written before the HISTORY kw
+ # if everything fails, append the kw to the header
+ akeywd = None
+ bkeywd = None
+ if 'PA_APER' in fhdr:
+ akeywd = 'PA_APER'
+ else:
+ if 'HISTORY' in fhdr:
+ bkeywd = 'HISTORY'
+ self.hdr_logger.debug(
+ "Updating WCS keywords after %s and/or before %s " %
+ (akeywd,bkeywd))
+ update_cpdis = False
+ for k in siphdr[-1::-1]:
+ # Replace or add WCS keyword from headerlet as PRIMARY WCS
+ # In the case that the distortion models are not equal,
+ # this will copy all keywords from headerlet into fobj
+ # When the distortion models are equal, though, it will
+ # only copy the primary WCS keywords (CRVAL,CRPIX,...)
+ if (dist_models_equal and (k.key in hwcshdr)) or \
+ (not dist_models_equal and k.key not in FITS_STD_KW):
+ if 'DP' not in k.key:
+ fhdr.update(k.key,k.value,comment=k.comment,
+ after=akeywd,before=bkeywd)
+ else:
+ update_cpdis = True
+ else:
+ pass
+ # Update WCS with HDRNAME as well
+ fhdr.update('HDRNAME',self[0].header['hdrname'],after='WCSNAME')
+
+ # Update header with record-valued keywords here
+ if update_cpdis:
+ numdp = len(siphdr['CPDIS*'])
+ for dpaxis in range(1,numdp+1):
+ cpdis_indx = fhdr.ascard.index_of('CPDIS%d'%(dpaxis))
+ for dpcard in siphdr['DP%d*'%(dpaxis)][-1::-1]:
+ fhdr.ascard.insert(cpdis_indx,dpcard)
+
+ # Update the WCSCORR table with new rows from the headerlet's WCSs
+ wcscorr.update_wcscorr(fobj, self, 'SIPWCS')
+
+ # Append the original headerlet
+ if archive and orig_hlt_hdu:
+ fobj.append(orig_hlt_hdu)
+ # Append any alternate WCS Headerlets
+ if len(alt_hlethdu) > 0:
+ for ahdu in alt_hlethdu:
+ fobj.append(ahdu)
+ if attach:
+ # Finally, append an HDU for this headerlet
+ new_hlt = HeaderletHDU.fromheaderlet(self)
+ new_hlt.update_ext_version(numhlt + 1)
+ fobj.append(new_hlt)
+
+ if close_dest:
+ fobj.close()
+ else:
+ self.hdr_logger.critical("Observation %s cannot be updated with headerlet "
+ "%s" % (fobj.filename(), self.hdrname))
+ print "Observation %s cannot be updated with headerlet %s" \
+ % (fobj.filename(), self.hdrname)
+
+ def apply_as_alternate(self, fobj, attach=True, wcskey=None, wcsname=None):
+ """
+ Copy this headerlet as an alternate WCS to fobj
+
+ Parameters
+ ----------
+ fobj: string, HDUList
+ science file/HDUList to which the headerlet should be applied
+ attach: boolean
+ flag indicating if the headerlet should be attached as a
+ HeaderletHDU to fobj. If True checks that HDRNAME is unique
+ in the fobj and stops if not.
+ wcskey: string
+ Key value (A-Z, except O) for this alternate WCS
+ If None, the next available key will be used
+ wcsname: string
+ Name to be assigned to this alternate WCS
+ WCSNAME is a required keyword in a Headerlet but this allows the
+ user to change it as desired.
+
+ """
+ self.hverify()
+ if self.verify_dest(fobj):
+ if not isinstance(fobj, pyfits.HDUList):
+ fobj = pyfits.open(fobj, mode='update')
+ close_dest = True
+ else:
+ close_dest = False
+ fname = fobj.filename()
+
+ # Verify whether this headerlet has the same distortion found in
+ # the image being updated
+ if 'DISTNAME' in fobj[0].header:
+ distname = fobj[0].header['DISTNAME']
+ else:
+ # perhaps call 'updatewcs.utils.construct_distname()' instead
+ distname = 'UNKNOWN'
+
+ if distname == 'UNKNOWN' or self.distname != distname:
+ self.hdr_logger.critical("Observation %s cannot be updated with headerlet %s"\
+ % (fname, self.hdrname))
+ print ("Observation %s cannot be updated with headerlet %s"\
+ % (fname, self.hdrname))
+ self.hdr_logger.critical(" Distortion in image: %s \n did not match \n headerlet distortion: %s"
+ % (distname, self.distname))
+ print ( " Distortion in image: %s \n did not match \n headerlet distortion: %s"
+ % (distname, self.distname))
+ print ("The method .attach_to_file() can be used to append this headerlet to %s"\
+ % (fname))
+ if close_dest: fobj.close()
+ raise ValueError
+
+ # Insure that WCSCORR table has been created with all original
+ # WCS's recorded prior to adding the headerlet WCS
+ wcscorr.init_wcscorr(fobj)
+
+ # determine value of WCSNAME to be used
+ if wcsname is not None:
+ wname = wcsname
+ else:
+ wname = self[0].header['WCSNAME']
+
+ numhlt = countExtn(fobj, 'HDRLET')
+ numsip = countExtn(self, 'SIPWCS')
+ for idx in range(1, numsip + 1):
+ fhdr = fobj[('SCI', idx)].header
siphdr = self[('SIPWCS', idx)].header.ascard
+
+ # determine what alternate WCS this headerlet will be assigned to
+ if wcskey is None:
+ wkey = altwcs.next_wcskey(fobj[('SCI',idx)].header)
+ else:
+ available_keys = altwcs.available_wcskeys(fobj[('SCI',idx)].header)
+ if wcskey in available_keys:
+ wkey = wcskey
+ else:
+ self.hdr_logger.critical("Observation %s already contains alternate WCS with key \
+ %s" % (fname, wcskey))
+ print ("Observation %s already contains alternate WCS with key \
+ %s" % (fname, wcskey))
+ if close_dest: fobj.close()
+ raise ValueError
+
# a minimal attempt to get the position of the WCS keywords group
# in the header by looking for the PA_APER kw.
- # at least make sure the WCS kw are written befir the HISTORY kw
+ # at least make sure the WCS kw are written before the HISTORY kw
# if everything fails, append the kw to the header
try:
- wind = fhdr.index_of('PA_APER')
+ wind = fhdr.ascard.index_of('HISTORY')
except KeyError:
- try:
- wind = fhdr.index_of('HISTORY')
- except KeyError:
- wind = len(fhdr)
+ wind = len(fhdr)
self.hdr_logger.debug("Inserting WCS keywords at index %s" % wind)
+
for k in siphdr:
+ """
if k.key not in ['XTENSION', 'BITPIX', 'NAXIS', 'PCOUNT',
'GCOUNT','EXTNAME', 'EXTVER', 'ORIGIN',
'INHERIT', 'DATE', 'IRAF-TLM']:
- fhdr.insert(wind, k)
+ """
+ for akw in altwcs.altwcskw:
+ if akw in k.key:
+ fhdr.ascard.insert(wind,pyfits.Card(
+ key=k.key[:7]+wkey,value=k.value,
+ comment=k.comment))
else:
pass
- #! Always attach these extensions last. Otherwise their headers may
- # get updated with the other WCS kw.
- numwdvar = countExtn(self, 'WCSDVARR')
- numd2im = countExtn(self, 'D2IMARR')
- for idx in range(1, numwdvar + 1):
- fobj.append(self[('WCSDVARR', idx)].copy())
- for idx in range(1, numd2im + 1):
- fobj.append(self[('D2IMARR', idx)].copy())
+ fhdr.ascard.insert(wind,pyfits.Card('WCSNAME'+wkey,wname))
+ # also update with HDRNAME (a non-WCS-standard kw)
+ fhdr.ascard.insert(wind,pyfits.Card('HDRNAME'+wkey,
+ self[0].header['hdrname']))
# Update the WCSCORR table with new rows from the headerlet's WCSs
- if createsummary:
- wcscorr.update_wcscorr(fobj, self, 'SIPWCS')
-
- # Append the original headerlet
- if createheaderlet and orig_hlt_hdu:
- fobj.append(orig_hlt_hdu)
-
+ wcscorr.update_wcscorr(fobj, self, 'SIPWCS')
+
if attach:
# Finally, append an HDU for this headerlet
new_hlt = HeaderletHDU.fromheaderlet(self)
@@ -654,10 +1958,123 @@ class Headerlet(pyfits.HDUList):
fobj.close()
else:
self.hdr_logger.critical("Observation %s cannot be updated with headerlet "
+ "%s" % (fname, self.hdrname))
+ print "Observation %s cannot be updated with headerlet %s" \
+ % (fname, self.hdrname)
+
+ def attach_to_file(self,fobj):
+ """
+ Attach Headerlet as an HeaderletHDU to a science file
+
+ Parameters
+ ----------
+ fobj: string, HDUList
+ science file/HDUList to which the headerlet should be applied
+
+ Notes
+ -----
+ The algorithm used by this method:
+ - verify headerlet can be applied to this file (based on DESTIM)
+ - verify that HDRNAME is unique for this file
+ - attach as HeaderletHDU to fobj
+ - update wcscorr
+ """
+ self.hverify()
+ if not isinstance(fobj, pyfits.HDUList):
+ fobj = pyfits.open(fobj, mode='update')
+ close_dest = True
+ else:
+ close_dest = False
+ if self.verify_dest(fobj) and self.verify_hdrname(fobj):
+
+ numhlt = countExtn(fobj, 'HDRLET')
+ new_hlt = HeaderletHDU.fromheaderlet(self)
+ new_hlt.header.update('extver',numhlt + 1)
+ fobj.append(new_hlt)
+
+ wcscorr.update_wcscorr(fobj, self, 'SIPWCS',active=False)
+
+ else:
+ self.hdr_logger.critical("Observation %s cannot be updated with headerlet "
"%s" % (fobj.filename(), self.hdrname))
print "Observation %s cannot be updated with headerlet %s" \
% (fobj.filename(), self.hdrname)
+ if close_dest:
+ fobj.close()
+
+ def info(self, columns=None, pad=2, maxwidth=None,
+ output=None, clobber=True, quiet=False):
+ """
+ Prints a summary of this headerlet
+ The summary includes:
+ HDRNAME WCSNAME DISTNAME SIPNAME NPOLFILE D2IMFILE
+
+ Parameters
+ ----------
+ columns: list
+ List of headerlet PRIMARY header keywords to report in summary
+ By default (set to None), it will use the default set of keywords
+ defined as the global list DEFAULT_SUMMARY_COLS
+ pad: int
+ Number of padding spaces to put between printed columns
+ [Default: 2]
+ maxwidth: int
+ Maximum column width(not counting padding) for any column in summary
+ By default (set to None), each column's full width will be used
+ output: string (optional)
+ Name of optional output file to record summary. This filename
+ can contain environment variables.
+ [Default: None]
+ clobber: bool
+ If True, will overwrite any previous output file of same name
+ quiet: bool
+ If True, will NOT report info to STDOUT
+
+ """
+ summary_cols, summary_dict = self.summary(columns=columns)
+ print_summary(summary_cols, summary_dict, pad=pad, maxwidth=maxwidth,
+ idcol=None, output=output, clobber=clobber, quiet=quiet)
+
+ def summary(self, columns=None):
+ """
+ Returns a summary of this headerlet as a dictionary
+
+ The summary includes a summary of the distortion model as :
+ HDRNAME WCSNAME DISTNAME SIPNAME NPOLFILE D2IMFILE
+
+ Parameters
+ ----------
+ columns: list
+ List of headerlet PRIMARY header keywords to report in summary
+ By default(set to None), it will use the default set of keywords
+ defined as the global list DEFAULT_SUMMARY_COLS
+
+ Returns
+ -------
+ summary: dict
+ Dictionary of values for summary
+ """
+ if columns is None:
+ summary_cols = DEFAULT_SUMMARY_COLS
+ else:
+ summary_cols = columns
+
+ # Initialize summary dict based on requested columns
+ summary = {}
+ for kw in summary_cols:
+ summary[kw] = copy.deepcopy(COLUMN_DICT)
+
+ # Populate the summary with headerlet values
+ for kw in summary_cols:
+ if kw in self[0].header:
+ val = self[0].header[kw]
+ else:
+ val = 'INDEF'
+ summary[kw]['vals'].append(val)
+            summary[kw]['width'].append(max(len(str(val)),len(kw)))
+
+ return summary_cols,summary
def hverify(self):
self.verify()
@@ -665,8 +2082,18 @@ class Headerlet(pyfits.HDUList):
assert('DESTIM' in header and header['DESTIM'].strip())
assert('HDRNAME' in header and header['HDRNAME'].strip())
assert('STWCSVER' in header)
-
-
+
+ def verify_hdrname(self,dest):
+ """
+ Verifies that the headerlet can be applied to the observation
+
+ Reports whether or not this file already has a headerlet with this
+ HDRNAME.
+ """
+ unique = verifyHdrnameIsUnique(dest,self.hdrname)
+ self.hdr_logger.debug("verify_hdrname() returned %s"%unique)
+ return unique
+
def verify_dest(self, dest):
"""
verifies that the headerlet can be applied to the observation
@@ -775,7 +2202,7 @@ class Headerlet(pyfits.HDUList):
return
try:
for c in range(1, len(cpdis) + 1):
- del ext.header['DP%s.*...' % c]
+ del ext.header['DP%s*...' % c]
del ext.header[cpdis[c - 1].key]
del ext.header['CPERR*']
del ext.header['NPOLFILE']
@@ -806,7 +2233,8 @@ class Headerlet(pyfits.HDUList):
self.hdr_logger.debug("Removing alternate WCSs with keys %s from %s"
% (dkeys, dest.filename()))
for k in dkeys:
- altwcs.deleteWCS(dest, ext=ext, wcskey=k)
+ if k not in ['O',' ','']: # Never delete WCS with wcskey='O'
+ altwcs.deleteWCS(dest, ext=ext, wcskey=k)
def _removePrimaryWCS(self, ext):
"""
@@ -893,10 +2321,11 @@ class HeaderletHDU(pyfits.hdu.base.NonstandardExtHDU):
'Assuming that the first file in the data is '
'headerlet file.' % hlt_name)
hlt_info = members[0]
+
hlt_file = t.extractfile(hlt_info)
# hlt_file is a file-like object
return Headerlet(hlt_file, mode='readonly')
-
+
@classmethod
def fromheaderlet(cls, headerlet, compress=False):
"""
@@ -935,6 +2364,11 @@ class HeaderletHDU(pyfits.hdu.base.NonstandardExtHDU):
finally:
os.remove(name)
+ if 'sipname' in headerlet[0].header:
+ sipname = headerlet[0].header['sipname']
+ else:
+ sipname = headerlet[0].header['wcsname']
+
cards = [
pyfits.Card('XTENSION', cls._extension, 'Headerlet extension'),
pyfits.Card('BITPIX', 8, 'array data type'),
@@ -946,8 +2380,12 @@ class HeaderletHDU(pyfits.hdu.base.NonstandardExtHDU):
'name of the headerlet extension'),
phdu.header.ascard['HDRNAME'],
phdu.header.ascard['DATE'],
- pyfits.Card('SIPNAME', headerlet['SIPWCS', 1].header['WCSNAMEA'],
+ pyfits.Card('SIPNAME', sipname,
'SIP distortion model name'),
+ pyfits.Card('WCSNAME', headerlet[0].header['WCSNAME'],
+ 'WCS name'),
+ pyfits.Card('DISTNAME', headerlet[0].header['DISTNAME'],
+ 'Distortion model name'),
phdu.header.ascard['NPOLFILE'],
phdu.header.ascard['D2IMFILE'],
pyfits.Card('COMPRESS', compress, 'Uses gzip compression')
diff --git a/lib/stwcs/wcsutil/hstwcs.py b/lib/stwcs/wcsutil/hstwcs.py
index 5aab087..d90b4a5 100644
--- a/lib/stwcs/wcsutil/hstwcs.py
+++ b/lib/stwcs/wcsutil/hstwcs.py
@@ -8,7 +8,6 @@ from stwcs.distortion import models, coeff_converter
import altwcs
import numpy as np
from stsci.tools import fileutil
-from stsci.tools.fileutil import DEGTORAD, RADTODEG
import getinput
import mappings
@@ -142,7 +141,7 @@ class HSTWCS(WCS):
try:
cd12 = self.wcs.cd[0][1]
cd22 = self.wcs.cd[1][1]
- self.orientat = RADTODEG(np.arctan2(cd12,cd22))
+ self.orientat = np.rad2deg(np.arctan2(cd12,cd22))
except AttributeError:
print "This file has a PC matrix. You may want to convert it \n \
to a CD matrix, if reasonable, by running pc2.cd() method.\n \
diff --git a/lib/stwcs/wcsutil/wcscorr.py b/lib/stwcs/wcsutil/wcscorr.py
index 50d359f..96883c5 100644
--- a/lib/stwcs/wcsutil/wcscorr.py
+++ b/lib/stwcs/wcsutil/wcscorr.py
@@ -5,13 +5,16 @@ import numpy as np
from stsci.tools import fileutil
import stwcs
from stwcs.wcsutil import altwcs
+from stwcs.updatewcs import utils
import convertwcs
-
DEFAULT_WCS_KEYS = ['CRVAL1','CRVAL2','CRPIX1','CRPIX2',
'CD1_1','CD1_2','CD2_1','CD2_2',
- 'CTYPE1','CTYPE2']
-DEFAULT_PRI_KEYS = ['PA_V3']
+ 'CTYPE1','CTYPE2','ORIENTAT']
+DEFAULT_PRI_KEYS = ['HDRNAME','SIPNAME','NPOLNAME','D2IMNAME','DESCRIP']
+COL_FITSKW_DICT = {'RMS_RA':'pri.rms_ra','RMS_DEC':'pri.rms_dec',
+ 'NMatch':'pri.nmatch','Catalog':'pri.catalog'}
+
###
### WCSEXT table related keyword archive functions
###
@@ -25,8 +28,7 @@ def init_wcscorr(input, force=False):
This function will NOT overwrite any rows already present.
This function works on all SCI extensions at one time.
- """
-
+ """
# TODO: Create some sort of decorator or (for Python2.5) context for
# opening a FITS file and closing it when done, if necessary
if not isinstance(input, pyfits.HDUList):
@@ -41,21 +43,22 @@ def init_wcscorr(input, force=False):
if len(fimg) == 1:
return
- # Verify that a WCSCORR extension does not already exist...
- for extn in fimg:
- if extn.header.has_key('extname') and \
- extn.header['extname'] == 'WCSCORR':
- if not force:
- return
- else:
- del fimg['WCSCORR']
+ enames = []
+ for e in fimg: enames.append(e.name)
+ if 'WCSCORR' in enames:
+ if not force:
+ return
+ else:
+ del fimg['wcscorr']
+ print 'Initializing new WCSCORR table for ',fimg.filename()
+
# define the primary columns of the WCSEXT table with initial rows for each
# SCI extension for the original OPUS solution
numsci = fileutil.countExtn(fimg)
# create new table with more rows than needed initially to make it easier to
# add new rows later
- wcsext = create_wcscorr(numrows=numsci, padding=numsci * 4)
+ wcsext = create_wcscorr(descrip=True,numrows=numsci, padding=numsci * 4)
# Assign the correct EXTNAME value to this table extension
wcsext.header.update('TROWS', numsci * 2,
comment='Number of updated rows in table')
@@ -63,13 +66,20 @@ def init_wcscorr(input, force=False):
comment='Table with WCS Update history')
wcsext.header.update('EXTVER', 1)
- used_wcskeys = None
+ # define set of WCS keywords which need to be managed and copied to the table
+ used_wcskeys = altwcs.wcskeys(fimg['SCI', 1].header)
wcs1 = stwcs.wcsutil.HSTWCS(fimg,ext=('SCI',1))
idc2header = True
if wcs1.idcscale is None:
idc2header = False
wcs_keywords = wcs1.wcs2header(idc2hdr=idc2header).keys()
+ prihdr = fimg[0].header
+ prihdr_keys = DEFAULT_PRI_KEYS
+ pri_funcs = {'SIPNAME':stwcs.updatewcs.utils.build_sipname,
+ 'NPOLNAME':stwcs.updatewcs.utils.build_npolname,
+ 'D2IMNAME':stwcs.updatewcs.utils.build_d2imname}
+
# Now copy original OPUS values into table
for extver in xrange(1, numsci + 1):
rowind = find_wcscorr_row(wcsext.data,
@@ -93,11 +103,6 @@ def init_wcscorr(input, force=False):
wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver), wcskey=wkey)
wcshdr = wcs.wcs2header(idc2hdr=idc2header)
- for key in DEFAULT_PRI_KEYS:
- prihdr_keys = []
- if not hdr.has_key(key):
- prihdr_keys.append(key)
-
if wcsext.data.field('CRVAL1')[rownum] != 0:
# If we find values for these keywords already in the table, do not
# overwrite them again
@@ -108,7 +113,11 @@ def init_wcscorr(input, force=False):
wcsext.data.field(key)[rownum] = wcshdr[(key+wkey)[:8]]
# Now get any keywords from PRIMARY header needed for WCS updates
for key in prihdr_keys:
- wcsext.data.field(key)[rownum] = fimg[0].header[key]
+ if key in prihdr:
+ val = prihdr[key]
+ else:
+ val = ''
+ wcsext.data.field(key)[rownum] = val
# Now that we have archived the OPUS alternate WCS, remove it from the list
# of used_wcskeys
@@ -119,9 +128,6 @@ def init_wcscorr(input, force=False):
# TODO: Much of this appears to be redundant with update_wcscorr; consider
# merging them...
for uwkey in used_wcskeys:
- if uwkey in [' ',''] :
- break
-
for extver in xrange(1, numsci + 1):
hdr = fimg['SCI', extver].header
wcs = stwcs.wcsutil.HSTWCS(fimg, ext=('SCI', extver),
@@ -142,7 +148,6 @@ def init_wcscorr(input, force=False):
rownum = np.where(rowind)[0][0]
else:
print 'No available rows found for updating. '
- #print 'Archiving current WCS row number ',rownum,' in WCSCORR table for SCI,',extver
# Update selection columns for this row with relevant values
wcsext.data.field('WCS_ID')[rownum] = wcsid
@@ -155,7 +160,14 @@ def init_wcscorr(input, force=False):
wcsext.data.field(key)[rownum] = wcshdr[key + uwkey]
# Now get any keywords from PRIMARY header needed for WCS updates
for key in prihdr_keys:
- wcsext.data.field(key)[rownum] = fimg[0].header[key]
+ if key in pri_funcs:
+ val = pri_funcs[key](fimg)
+ else:
+ if key in prihdr:
+ val = prihdr[key]
+ else:
+ val = ''
+ wcsext.data.field(key)[rownum] = val
# Append this table to the image FITS file
fimg.append(wcsext)
@@ -181,8 +193,10 @@ def find_wcscorr_row(wcstab, selections):
mask = None
for i in selections:
icol = wcstab.field(i)
- if isinstance(icol,np.chararray): icol = icol.rstrip()
- bmask = (icol == selections[i])
+ if isinstance(icol,np.chararray): icol = icol.rstrip()
+ selecti = selections[i]
+ if isinstance(selecti,str): selecti = selecti.rstrip()
+ bmask = (icol == selecti)
if mask is None:
mask = bmask.copy()
else:
@@ -210,7 +224,7 @@ def archive_wcs_file(image, wcs_id=None):
fimg.close()
-def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None):
+def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None, active=True):
"""
Update WCSCORR table with a new row or rows for this extension header. It
copies the current set of WCS keywords as a new row of the table based on
@@ -232,7 +246,14 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None):
wcs_id : str, optional
The name of the WCS to add, as in the WCSNAMEa keyword. If
unspecified, all the WCSs in the specified extensions are added.
+ active: bool, optional
+ When True, indicates that the update should reflect an update of the
+ active WCS information, not just appending the WCS to the file as a
+ headerlet
"""
+ if not isinstance(dest,pyfits.HDUList):
+ dest = pyfits.open(dest,mode='update')
+ fname = dest.filename()
if source is None:
source = dest
@@ -241,51 +262,56 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None):
if numext == 0:
raise ValueError('No %s extensions found in the source HDU list.'
% extname)
+ # Initialize the WCSCORR table extension in dest if not already present
+ init_wcscorr(dest)
# Current implementation assumes the same WCS keywords are in each
# extension version; if this should not be assumed then this can be
# modified...
wcs_keys = altwcs.wcskeys(source[(extname, 1)].header)
wcs_keys = filter(None, wcs_keys)
+ if ' ' not in wcs_keys: wcs_keys.append(' ') # Insure that primary WCS gets used
wcshdr = stwcs.wcsutil.HSTWCS(source, ext=(extname, 1)).wcs2header()
wcs_keywords = wcshdr.keys()
if 'O' in wcs_keys:
wcs_keys.remove('O') # 'O' is reserved for original OPUS WCS
-
- # If we're looking for a particular wcs_id, test ahead of time that it's
- # actually present in the specified extension headers
- if wcs_id:
- wcs_key = ''
- for wcs_key in wcs_keys:
- wcsname = source[(extname, 1)].header['WCSNAME' + wcs_key]
- if wcs_id == wcsname:
- break
- else:
- raise ValueError('A WCS with name %s was not found in the %s '
- 'extension headers in the source HDU list.'
- % (wcs_id, extname))
- wcs_keys = [wcs_key] # We're only interested in this one
-
+
# create new table for hdr and populate it with the newly updated values
- new_table = create_wcscorr(numrows=0, padding=len(wcs_keys)*numext)
+ new_table = create_wcscorr(descrip=True,numrows=0, padding=len(wcs_keys)*numext)
old_table = dest['WCSCORR']
-
+ prihdr = source[0].header
+
+ # Get headerlet related keywords here
+ sipname = utils.build_sipname(source)
+ npolname = utils.build_npolname(source)
+ d2imname = utils.build_d2imname(source)
+ if 'hdrname' in prihdr:
+ hdrname = prihdr['hdrname']
+ else:
+ hdrname = ''
+
idx = -1
for wcs_key in wcs_keys:
for extver in range(1, numext + 1):
extn = (extname, extver)
+ if 'SIPWCS' in extname and not active:
+ tab_extver = 0 # Since it has not been added to the SCI header yet
+ else:
+ tab_extver = extver
hdr = source[extn].header
wcsname = hdr['WCSNAME' + wcs_key]
- selection = {'WCS_ID': wcsname, 'EXTVER': extver,
- 'WCS_key': wcs_key}
-
+ selection = {'WCS_ID': wcsname, 'EXTVER': tab_extver,
+ 'SIPNAME':sipname, 'HDRNAME': hdrname,
+ 'NPOLNAME': npolname, 'D2IMNAME':d2imname
+ }
+
# Ensure that an entry for this WCS is not already in the dest
# table; if so just skip it
rowind = find_wcscorr_row(old_table.data, selection)
if np.any(rowind):
continue
-
+
idx += 1
wcs = stwcs.wcsutil.HSTWCS(source, ext=extn, wcskey=wcs_key)
@@ -293,16 +319,29 @@ def update_wcscorr(dest, source=None, extname='SCI', wcs_id=None):
# Update selection column values
for key, val in selection.iteritems():
- new_table.data.field(key)[idx] = val
+ if key in new_table.data.names:
+ new_table.data.field(key)[idx] = val
for key in wcs_keywords:
if key in new_table.data.names:
new_table.data.field(key)[idx] = wcshdr[key + wcs_key]
- prihdr = source[0].header
for key in DEFAULT_PRI_KEYS:
- if key in new_table.data.names and prihdr.has_key(key):
+ if key in new_table.data.names and key in prihdr:
new_table.data.field(key)[idx] = prihdr[key]
+ # Now look for additional, non-WCS-keyword table column data
+ for key in COL_FITSKW_DICT:
+ fitskw = COL_FITSKW_DICT[key]
+ # Interpret any 'pri.hdrname' or
+ # 'sci.crpix1' formatted keyword names
+ if '.' in fitskw:
+ srchdr,fitskw = fitskw.split('.')
+ if 'pri' in srchdr.lower(): srchdr = prihdr
+ else: srchdr = wcshdr
+ else:
+ srchdr = wcshdr
+ if key in srchdr:
+ new_table.data.field(key)[idx] = srchdr[fitskw]
# If idx was never incremented, no rows were added, so there's nothing else
# to do...
@@ -411,16 +450,15 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
'array': np.array([0]*trows,dtype=np.int32)}
# If more columns are needed, simply add their definitions to this list
- col_names = [('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
+ col_names = [('HDRNAME', def_str24_col), ('SIPNAME', def_str24_col),
+ ('NPOLNAME', def_str24_col), ('D2IMNAME', def_str24_col),
+ ('CRVAL1', def_float_col), ('CRVAL2', def_float_col),
('CRPIX1', def_float_col), ('CRPIX2', def_float_col),
('CD1_1', def_float_col), ('CD1_2', def_float_col),
('CD2_1', def_float_col), ('CD2_2', def_float_col),
('CTYPE1', def_str24_col), ('CTYPE2', def_str24_col),
('ORIENTAT', def_float_col), ('PA_V3', def_float_col),
- ('Delta_RA', def_float_col), ('Delta_Dec', def_float_col),
('RMS_RA', def_float_col), ('RMS_Dec', def_float_col),
- ('Delta_Orientat', def_float_col),
- ('Delta_Scale', def_float1_col),
('NMatch', def_int32_col), ('Catalog', def_str40_col)]
# Define selector columns
@@ -443,7 +481,7 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
if descrip:
col_list.append(
- pyfits.Column(name='Descrip', format='128A',
+ pyfits.Column(name='DESCRIP', format='128A',
array=np.array(
['Original WCS computed by OPUS'] * numrows,
dtype='S128')))
@@ -457,3 +495,117 @@ def create_wcscorr(descrip=False, numrows=1, padding=0):
return newtab
def delete_wcscorr_row(wcstab,selections=None,rows=None):
    """
    Sets all values in a specified row or set of rows to default values

    This function will essentially erase the specified row from the table
    without actually removing the row from the table. This avoids the problems
    with trying to resize the number of rows in the table while preserving the
    ability to update the table with new rows again without resizing the table.

    Parameters
    ----------
    wcstab: object
        PyFITS binTable object for WCSCORR table
    selections: dict
        Dictionary of wcscorr column names and values to be used to select
        the row or set of rows to erase
    rows: int, list
        If specified, will specify what rows from the table to erase regardless
        of the value of 'selections'
    """

    if selections is None and rows is None:
        print('ERROR: Some row selection information must be provided!')
        print('       Either row numbers or "selections" must be provided.')
        raise ValueError

    if rows is None:
        # Rows for the OPUS WCS must never be erased from the table
        if 'wcs_id' in selections and selections['wcs_id'] == 'OPUS':
            print('WARNING: OPUS WCS information can not be deleted from WCSCORR table.')
            print('         This row will not be deleted!')
            return
        rowind = find_wcscorr_row(wcstab, selections=selections)
        delete_rows = np.where(rowind)[0].tolist()
    else:
        if not isinstance(rows,list):
            rows = [rows]
        delete_rows = rows

    # Insure that rows pointing to OPUS WCS do not get deleted, even by
    # accident.  Build a new, filtered list instead of deleting entries from
    # the list being iterated, which would silently skip elements.
    delete_rows = [row for row in delete_rows
                   if wcstab['WCS_key'][row] != 'O' and
                      wcstab['WCS_ID'][row] != 'OPUS']

    if not delete_rows:
        # Nothing left to erase
        return

    # identify next empty row
    rowind = find_wcscorr_row(wcstab, selections={'wcs_id': ''})
    last_blank_row = np.where(rowind)[0][-1]

    # copy values from blank row into user-specified rows
    for colname in wcstab.names:
        wcstab[colname][delete_rows] = wcstab[colname][last_blank_row]
+
def update_wcscorr_column(wcstab, column, values, selections=None, rows=None):
    """
    Update the values in 'column' with 'values' for selected rows

    Parameters
    ----------
    wcstab: object
        PyFITS binTable object for WCSCORR table
    column: string
        Name of table column with values that need to be updated
    values: string, int, or list
        Value or set of values to copy into the selected rows for the column
    selections: dict
        Dictionary of wcscorr column names and values to be used to select
        the row or set of rows to update
    rows: int, list
        If specified, will specify what rows from the table to update
        regardless of the value of 'selections'
    """
    if selections is None and rows is None:
        print('ERROR: Some row selection information must be provided!')
        print('       Either row numbers or "selections" must be provided.')
        raise ValueError

    if not isinstance(values, list):
        values = [values]

    if rows is None:
        # Rows for the OPUS WCS must never be modified
        if 'wcs_id' in selections and selections['wcs_id'] == 'OPUS':
            print('WARNING: OPUS WCS information can not be updated in WCSCORR table.')
            print('         This row will not be updated!')
            return
        rowind = find_wcscorr_row(wcstab, selections=selections)
        update_rows = np.where(rowind)[0].tolist()
    else:
        if not isinstance(rows,list):
            rows = [rows]
        update_rows = rows

    if not update_rows:
        return

    # Expand single input value to apply to all selected rows; otherwise
    # the number of values must match the number of rows exactly (the old
    # check let 'values' longer than 'update_rows' slip through silently).
    if len(values) == 1 and len(update_rows) > 1:
        values = values * len(update_rows)
    elif len(values) != len(update_rows):
        print('ERROR: Number of new values %d' % len(values))
        print('       does not match number of rows %d to be updated!' % len(update_rows))
        print('       Please enter either 1 value or the same number of values')
        print('       as there are rows to be updated.')
        print('       Table will not be updated...')
        raise ValueError

    # Copy each new value into its selected row.  Index 'values' by position
    # in 'update_rows', NOT by table row number (values[row] was wrong and
    # raised IndexError whenever a row number exceeded len(values)).
    for i, row in enumerate(update_rows):
        wcstab[column][row] = values[i]