-rw-r--r--   lib/utils.py            | 23
-rw-r--r--   updatewcs/__init__.py   | 45
-rw-r--r--   wcsutil/__init__.py     |  3
3 files changed, 52 insertions, 19 deletions
diff --git a/lib/utils.py b/lib/utils.py
index 3d04195..7590b42 100644
--- a/lib/utils.py
+++ b/lib/utils.py
@@ -77,7 +77,7 @@ def restoreWCS(fnames, wcskey, clobber=False):
fobj[e].header.update(key=key, value = hwcs[k])
else:
continue
- if wcskey == 'O':
+ if wcskey == 'O' and fobj[e].header.has_key('TDDALPHA'):
fobj[e].header['TDDALPHA'] = 0.0
fobj[e].header['TDDBETA'] = 0.0
if fobj[e].header.has_key('ORIENTAT'):
@@ -103,22 +103,26 @@ def restoreWCS(fnames, wcskey, clobber=False):
fobj.writeto(name)
fobj.close()
-def archiveWCS(fname, ext, wcskey, wcsname=" "):
+def archiveWCS(fname, ext, wcskey, wcsname=" ", clobber=False):
"""
Copy the primary WCS to an alternate WCS
with wcskey and name WCSNAME.
"""
- f = pyfits.open(fname, mode='update')
- w = pywcs.WCS(f[ext].header, fobj=f)
assert len(wcskey) == 1
- if wcskey == " ":
+ if wcskey == " " and clobber==False:
print "Please provide a valid wcskey for this WCS."
print 'Use "utils.next_wcskey" to obtain a valid wcskey.'
- print 'Use utils.restoreWCS to write to the primary WCS.'
+    print 'Or use utils.restoreWCS to restore a specific WCS to the primary WCS.'
+ print 'WCS was NOT archived.'
return
- if wcskey not in available_wcskeys(f[ext].header):
+ if (wcskey not in available_wcskeys(f[ext].header)) and clobber==False:
print 'wcskey %s is already used in this header.' % wcskey
print 'Use "utils.next_wcskey" to obtain a valid wcskey'
+ print 'or use "clobber=True" to overwrite the values.'
+ return
+
+ f = pyfits.open(fname, mode='update')
+ w = pywcs.WCS(f[ext].header, fobj=f)
hwcs = w.to_header()
wkey = 'WCSNAME' + wcskey
f[ext].header.update(key=wkey, value=wcsname)
@@ -126,8 +130,9 @@ def archiveWCS(fname, ext, wcskey, wcsname=" "):
for c in ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2']:
del hwcs[c]
elif w.wcs.has_cd():
- for c in ['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2']:
- del hwcs[c]
+ for c in ['1_1', '1_2', '2_1', '2_2']:
+ hwcs.update(key='CD'+c, value=hwcs['PC'+c])
+ del hwcs['PC'+c]
for k in hwcs.keys():
key = k+wcskey
f[ext].header.update(key=key, value = hwcs[k])
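Illustrative usage of the patched archiveWCS above (not part of the commit): an occupied wcskey is only reused when clobber=True is passed, and the file is opened only after the key checks pass. A minimal sketch, assuming the module is importable as utils (as the printed messages suggest); the file name, extension, key and WCSNAME below are hypothetical.

    import utils

    fname = 'j9irw4b1q_flt.fits'   # hypothetical calibrated MEF file
    # Archive the primary WCS of extension 1 under alternate key 'B'.
    # If key 'B' is already used in the header, nothing is written and a
    # message is printed, unless clobber=True is given.
    utils.archiveWCS(fname, ext=1, wcskey='B', wcsname='BACKUP')

    # Rerun and overwrite the previously archived 'B' WCS:
    utils.archiveWCS(fname, ext=1, wcskey='B', wcsname='BACKUP', clobber=True)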
diff --git a/updatewcs/__init__.py b/updatewcs/__init__.py
index 95119f1..97c9a28 100644
--- a/updatewcs/__init__.py
+++ b/updatewcs/__init__.py
@@ -16,7 +16,8 @@ __docformat__ = 'restructuredtext'
__version__ = '0.5'
-def updatewcs(input, vacorr=True, tddcorr=True, dgeocorr=True, d2imcorr=True, checkfiles=True):
+def updatewcs(input, vacorr=True, tddcorr=True, dgeocorr=True, d2imcorr=True,
+ checkfiles=True, wcskey=" ", wcsname=" ", clobber=False):
"""
Purpose
=======
@@ -54,6 +55,16 @@ def updatewcs(input, vacorr=True, tddcorr=True, dgeocorr=True, d2imcorr=True, ch
If True, the format of the input files will be checked,
geis and waiver fits files will be converted to MEF format.
Default value is True for standalone mode.
+ `wcskey`: None, one character string A-Z or an empty string of length 1
+ If None - the primary WCS is not archived
+ If an empty string - the next available wcskey is used for the archive
+ A-Z - use this key to archive the WCS
+ `wcsname`: a string
+ The name under which the primary WCS is archived after it is updated.
+ If an empty string (default), the name of the idctable is used as
+ a base.
+ `clobber`: boolean
+ a flag for reusing the wcskey when archiving the primary WCS
"""
files = parseinput.parseinput(input)[0]
if checkfiles:
@@ -67,10 +78,10 @@ def updatewcs(input, vacorr=True, tddcorr=True, dgeocorr=True, d2imcorr=True, ch
#restore the original WCS keywords
utils.restoreWCS(f, wcskey='O', clobber=True)
- makecorr(f, acorr)
+ makecorr(f, acorr, wkey=wcskey, wname=wcsname, clobber=False)
return files
-def makecorr(fname, allowed_corr):
+def makecorr(fname, allowed_corr, wkey=" ", wname=" ", clobber=False):
"""
Purpose
=======
@@ -81,7 +92,16 @@ def makecorr(fname, allowed_corr):
file name
`acorr`: list
list of corrections to be applied
-
+ `wkey`: None, one character string A-Z or an empty string of length 1
+ If None - the primary WCS is not archived
+ If an empty string - the next available wcskey is used for the archive
+ A-Z - use this key to archive the WCS
+ `wname`: a string
+ The name under which the primary WCS is archived after it is updated.
+ If an empty string (default), the name of the idctable is used as
+ a base.
+ `clobber`: boolean
+ a flag for reusing the wcskey when archiving the primary WCS
"""
f = pyfits.open(fname, mode='update')
#Determine the reference chip and create the reference HSTWCS object
@@ -115,11 +135,18 @@ def makecorr(fname, allowed_corr):
for kw in kw2update:
hdr.update(kw, kw2update[kw])
-
- idcname = os.path.split(fileutil.osfn(ext_wcs.idctab))[1]
- wname = ''.join(['IDC_',idcname.split('_idc.fits')[0]])
- wkey = getKey(hdr, wname)
- ext_wcs.copyWCS(header=hdr, wcskey=wkey, wcsname=wname, clobber=True)
+ if wkey is not None:
+ # archive the updated primary WCS
+ if wkey == " " :
+ idcname = os.path.split(fileutil.osfn(ext_wcs.idctab))[1]
+ wname = ''.join(['IDC_',idcname.split('_idc.fits')[0]])
+ wkey = getKey(hdr, wname)
+                #in this case clobber=True, to allow updatewcs to be run repeatedly
+ ext_wcs.copyWCS(header=hdr, wcskey=wkey, wcsname=wname, clobber=True)
+ else:
+ #clobber is set to False as a warning to users
+ ext_wcs.copyWCS(header=hdr, wcskey=wkey, wcsname=wname, clobber=False)
+
elif extname in ['err', 'dq', 'sdq']:
cextver = extn.header['extver']
if cextver == sciextver:
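Illustrative calls of the extended updatewcs interface (not part of the commit), assuming the package is importable as updatewcs and using a hypothetical file name; per the docstring above, wcskey=None skips archiving, an empty string picks the next free key with an IDC-based WCSNAME, and an explicit A-Z key is used as given.

    import updatewcs

    # Default: archive the updated primary WCS under the next available key,
    # with a WCSNAME derived from the IDCTAB (e.g. 'IDC_<idcname>').
    updatewcs.updatewcs('j9irw4b1q_flt.fits')

    # Archive under an explicit key and name; with clobber=False (the default)
    # an already-used key is not overwritten.
    updatewcs.updatewcs('j9irw4b1q_flt.fits', wcskey='C', wcsname='AFTER_TDD')

    # Do not archive the primary WCS at all:
    updatewcs.updatewcs('j9irw4b1q_flt.fits', wcskey=None)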
diff --git a/wcsutil/__init__.py b/wcsutil/__init__.py
index 5bdbeb9..5344904 100644
--- a/wcsutil/__init__.py
+++ b/wcsutil/__init__.py
@@ -33,7 +33,8 @@ class HSTWCS(WCS):
`fobj`: string or PyFITS HDUList object or None
a file name, e.g j9irw4b1q_flt.fits
a fully qualified filename[EXTNAME,EXTNUM], e.g. j9irw4b1q_flt.fits[sci,1]
- a pyfits file object, e.g pyfits.open('j9irw4b1q_flt.fits')
+ a pyfits file object, e.g pyfits.open('j9irw4b1q_flt.fits'), in which case the
+ user is responsible for closing the file object.
`ext`: int or None
extension number
if ext==None, it is assumed the data is in the primary hdu
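A short sketch of the fobj usage the clarified docstring describes (not part of the commit): when an open HDUList is passed, HSTWCS does not close it, so the caller must. The file name is hypothetical; the import path follows this repository's layout.

    import pyfits
    from wcsutil import HSTWCS

    fobj = pyfits.open('j9irw4b1q_flt.fits')   # hypothetical file
    w = HSTWCS(fobj=fobj, ext=1)               # build the WCS from the open HDUList
    # ... use w ...
    fobj.close()                               # caller is responsible for closing fobj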