author     Joseph Hunkeler <jhunkeler@gmail.com>  2015-07-08 20:46:52 -0400
committer  Joseph Hunkeler <jhunkeler@gmail.com>  2015-07-08 20:46:52 -0400
commit     fa080de7afc95aa1c19a6e6fc0e0708ced2eadc4 (patch)
tree       bdda434976bc09c864f2e4fa6f16ba1952b1e555 /noao/imred
download   iraf-linux-fa080de7afc95aa1c19a6e6fc0e0708ced2eadc4.tar.gz
Initial commit
Diffstat (limited to 'noao/imred')
-rw-r--r--noao/imred/Revisions237
-rw-r--r--noao/imred/argus/Revisions60
-rw-r--r--noao/imred/argus/argus.cl82
-rw-r--r--noao/imred/argus/argus.dat48
-rw-r--r--noao/imred/argus/argus.hd7
-rw-r--r--noao/imred/argus/argus.men32
-rw-r--r--noao/imred/argus/argus.par13
-rw-r--r--noao/imred/argus/demos/demos.cl18
-rw-r--r--noao/imred/argus/demos/demos.men4
-rw-r--r--noao/imred/argus/demos/demos.par2
-rw-r--r--noao/imred/argus/demos/doargus.cl13
-rw-r--r--noao/imred/argus/demos/header.dat36
-rw-r--r--noao/imred/argus/demos/mkdoargus.cl22
-rw-r--r--noao/imred/argus/demos/mkdoargus.dat13
-rw-r--r--noao/imred/argus/demos/xgdoargus.dat76
-rw-r--r--noao/imred/argus/doargus.cl71
-rw-r--r--noao/imred/argus/doargus.par39
-rw-r--r--noao/imred/argus/doc/doargus.hlp1464
-rw-r--r--noao/imred/argus/doc/doargus.ms1725
-rw-r--r--noao/imred/argus/params.par67
-rw-r--r--noao/imred/bias/Revisions97
-rw-r--r--noao/imred/bias/bias.cl8
-rw-r--r--noao/imred/bias/bias.hd7
-rw-r--r--noao/imred/bias/bias.men2
-rw-r--r--noao/imred/bias/bias.par3
-rw-r--r--noao/imred/bias/colbias.par16
-rw-r--r--noao/imred/bias/colbias.x308
-rw-r--r--noao/imred/bias/doc/colbias.hlp113
-rw-r--r--noao/imred/bias/doc/linebias.hlp115
-rw-r--r--noao/imred/bias/linebias.par16
-rw-r--r--noao/imred/bias/linebias.x330
-rw-r--r--noao/imred/bias/mkpkg30
-rw-r--r--noao/imred/bias/x_bias.x2
-rw-r--r--noao/imred/ccdred/Revisions1236
-rw-r--r--noao/imred/ccdred/badpiximage.par5
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/ccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/cfccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/csccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/ech.dat19
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/epi5.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/epi5_badpix.dat22
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/fpccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/OLD/instruments.men5
-rw-r--r--noao/imred/ccdred/ccddb/ctio/cfccd_both.dat27
-rw-r--r--noao/imred/ccdred/ccddb/ctio/cfccd_f1.dat27
-rw-r--r--noao/imred/ccdred/ccddb/ctio/cfccd_f2.dat27
-rw-r--r--noao/imred/ccdred/ccddb/ctio/csccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/echccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/instruments.men9
-rw-r--r--noao/imred/ccdred/ccddb/ctio/nfccd.dat23
-rw-r--r--noao/imred/ccdred/ccddb/ctio/pfccd_both.dat27
-rw-r--r--noao/imred/ccdred/ccddb/ctio/pfccd_f1.dat27
-rw-r--r--noao/imred/ccdred/ccddb/ctio/pfccd_f2.dat27
-rw-r--r--noao/imred/ccdred/ccddb/kpno/Revisions35
-rw-r--r--noao/imred/ccdred/ccddb/kpno/camera.dat21
-rw-r--r--noao/imred/ccdred/ccddb/kpno/coude.cl4
-rw-r--r--noao/imred/ccdred/ccddb/kpno/coude.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/cryocam.cl3
-rw-r--r--noao/imred/ccdred/ccddb/kpno/cryocam.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/default.cl41
-rw-r--r--noao/imred/ccdred/ccddb/kpno/demo.cl72
-rw-r--r--noao/imred/ccdred/ccddb/kpno/demo.dat3
-rw-r--r--noao/imred/ccdred/ccddb/kpno/direct.cl4
-rw-r--r--noao/imred/ccdred/ccddb/kpno/direct.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/echelle.cl3
-rw-r--r--noao/imred/ccdred/ccddb/kpno/echelle.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/fibers.cl3
-rw-r--r--noao/imred/ccdred/ccddb/kpno/fibers.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/fits.dat21
-rw-r--r--noao/imred/ccdred/ccddb/kpno/foe.cl3
-rw-r--r--noao/imred/ccdred/ccddb/kpno/foe.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/hydra.cl12
-rw-r--r--noao/imred/ccdred/ccddb/kpno/hydra.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/instruments.men12
-rw-r--r--noao/imred/ccdred/ccddb/kpno/kpnoheaders.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/specphot.cl5
-rw-r--r--noao/imred/ccdred/ccddb/kpno/specphot.dat9
-rw-r--r--noao/imred/ccdred/ccddb/kpno/sunlink.cl4
-rw-r--r--noao/imred/ccdred/ccddb/kpno/sunlink.dat8
-rw-r--r--noao/imred/ccdred/ccddb/kpno/template.cl25
-rw-r--r--noao/imred/ccdred/ccdgroups.par5
-rw-r--r--noao/imred/ccdred/ccdhedit.par4
-rw-r--r--noao/imred/ccdred/ccdinstrument.par5
-rw-r--r--noao/imred/ccdred/ccdlist.par5
-rw-r--r--noao/imred/ccdred/ccdmask.par12
-rw-r--r--noao/imred/ccdred/ccdproc.par39
-rw-r--r--noao/imred/ccdred/ccdred.cl29
-rw-r--r--noao/imred/ccdred/ccdred.hd38
-rw-r--r--noao/imred/ccdred/ccdred.men28
-rw-r--r--noao/imred/ccdred/ccdred.par12
-rw-r--r--noao/imred/ccdred/ccdtest/artobs.cl109
-rw-r--r--noao/imred/ccdred/ccdtest/artobs.hlp127
-rw-r--r--noao/imred/ccdred/ccdtest/badpix.dat4
-rw-r--r--noao/imred/ccdred/ccdtest/ccdtest.cl10
-rw-r--r--noao/imred/ccdred/ccdtest/ccdtest.hd6
-rw-r--r--noao/imred/ccdred/ccdtest/ccdtest.men4
-rw-r--r--noao/imred/ccdred/ccdtest/demo.cl1
-rw-r--r--noao/imred/ccdred/ccdtest/demo.dat182
-rw-r--r--noao/imred/ccdred/ccdtest/demo.hlp27
-rw-r--r--noao/imred/ccdred/ccdtest/demo.par1
-rw-r--r--noao/imred/ccdred/ccdtest/mkimage.hlp87
-rw-r--r--noao/imred/ccdred/ccdtest/mkimage.par10
-rw-r--r--noao/imred/ccdred/ccdtest/mkpkg10
-rw-r--r--noao/imred/ccdred/ccdtest/subsection.cl53
-rw-r--r--noao/imred/ccdred/ccdtest/subsection.hlp73
-rw-r--r--noao/imred/ccdred/ccdtest/t_mkimage.x204
-rw-r--r--noao/imred/ccdred/combine.par40
-rw-r--r--noao/imred/ccdred/cosmicrays.par15
-rw-r--r--noao/imred/ccdred/darkcombine.cl48
-rw-r--r--noao/imred/ccdred/doc/Notes96
-rw-r--r--noao/imred/ccdred/doc/badpiximage.hlp51
-rw-r--r--noao/imred/ccdred/doc/ccdgeometry.hlp73
-rw-r--r--noao/imred/ccdred/doc/ccdgroups.hlp163
-rw-r--r--noao/imred/ccdred/doc/ccdhedit.hlp108
-rw-r--r--noao/imred/ccdred/doc/ccdinst.hlp391
-rw-r--r--noao/imred/ccdred/doc/ccdlist.hlp133
-rw-r--r--noao/imred/ccdred/doc/ccdmask.hlp138
-rw-r--r--noao/imred/ccdred/doc/ccdproc.hlp825
-rw-r--r--noao/imred/ccdred/doc/ccdred.hlp104
-rw-r--r--noao/imred/ccdred/doc/ccdred.ms787
-rw-r--r--noao/imred/ccdred/doc/ccdtypes.hlp124
-rw-r--r--noao/imred/ccdred/doc/combine.hlp1146
-rw-r--r--noao/imred/ccdred/doc/contents.ms34
-rw-r--r--noao/imred/ccdred/doc/darkcombine.hlp120
-rw-r--r--noao/imred/ccdred/doc/flatcombine.hlp133
-rw-r--r--noao/imred/ccdred/doc/flatfields.hlp177
-rw-r--r--noao/imred/ccdred/doc/guide.hlp717
-rw-r--r--noao/imred/ccdred/doc/guide.ms794
-rw-r--r--noao/imred/ccdred/doc/instruments.hlp256
-rw-r--r--noao/imred/ccdred/doc/mkfringecor.hlp90
-rw-r--r--noao/imred/ccdred/doc/mkillumcor.hlp92
-rw-r--r--noao/imred/ccdred/doc/mkillumflat.hlp101
-rw-r--r--noao/imred/ccdred/doc/mkskycor.hlp103
-rw-r--r--noao/imred/ccdred/doc/mkskyflat.hlp110
-rw-r--r--noao/imred/ccdred/doc/setinstrument.hlp97
-rw-r--r--noao/imred/ccdred/doc/subsets.hlp99
-rw-r--r--noao/imred/ccdred/doc/zerocombine.hlp121
-rw-r--r--noao/imred/ccdred/flatcombine.cl49
-rw-r--r--noao/imred/ccdred/mkfringecor.par11
-rw-r--r--noao/imred/ccdred/mkillumcor.par12
-rw-r--r--noao/imred/ccdred/mkillumflat.par12
-rw-r--r--noao/imred/ccdred/mkpkg29
-rw-r--r--noao/imred/ccdred/mkskycor.par11
-rw-r--r--noao/imred/ccdred/mkskyflat.par11
-rw-r--r--noao/imred/ccdred/setinstrument.cl57
-rw-r--r--noao/imred/ccdred/skyreplace.par3
-rw-r--r--noao/imred/ccdred/src/calimage.x367
-rw-r--r--noao/imred/ccdred/src/ccdcache.com10
-rw-r--r--noao/imred/ccdred/src/ccdcache.h10
-rw-r--r--noao/imred/ccdred/src/ccdcache.x381
-rw-r--r--noao/imred/ccdred/src/ccdcheck.x67
-rw-r--r--noao/imred/ccdred/src/ccdcmp.x23
-rw-r--r--noao/imred/ccdred/src/ccdcopy.x31
-rw-r--r--noao/imred/ccdred/src/ccddelete.x55
-rw-r--r--noao/imred/ccdred/src/ccdflag.x27
-rw-r--r--noao/imred/ccdred/src/ccdinst1.key27
-rw-r--r--noao/imred/ccdred/src/ccdinst2.key39
-rw-r--r--noao/imred/ccdred/src/ccdinst3.key62
-rw-r--r--noao/imred/ccdred/src/ccdlog.x46
-rw-r--r--noao/imred/ccdred/src/ccdmean.x50
-rw-r--r--noao/imred/ccdred/src/ccdnscan.x38
-rw-r--r--noao/imred/ccdred/src/ccdproc.x106
-rw-r--r--noao/imred/ccdred/src/ccdred.h150
-rw-r--r--noao/imred/ccdred/src/ccdsection.x100
-rw-r--r--noao/imred/ccdred/src/ccdsubsets.x93
-rw-r--r--noao/imred/ccdred/src/ccdtypes.h14
-rw-r--r--noao/imred/ccdred/src/ccdtypes.x72
-rw-r--r--noao/imred/ccdred/src/combine/generic/icaclip.x1102
-rw-r--r--noao/imred/ccdred/src/combine/generic/icaverage.x163
-rw-r--r--noao/imred/ccdred/src/combine/generic/iccclip.x898
-rw-r--r--noao/imred/ccdred/src/combine/generic/icgdata.x459
-rw-r--r--noao/imred/ccdred/src/combine/generic/icgrow.x148
-rw-r--r--noao/imred/ccdred/src/combine/generic/icmedian.x343
-rw-r--r--noao/imred/ccdred/src/combine/generic/icmm.x300
-rw-r--r--noao/imred/ccdred/src/combine/generic/icombine.x607
-rw-r--r--noao/imred/ccdred/src/combine/generic/icpclip.x442
-rw-r--r--noao/imred/ccdred/src/combine/generic/icsclip.x964
-rw-r--r--noao/imred/ccdred/src/combine/generic/icsigma.x205
-rw-r--r--noao/imred/ccdred/src/combine/generic/icsort.x550
-rw-r--r--noao/imred/ccdred/src/combine/generic/icstat.x444
-rw-r--r--noao/imred/ccdred/src/combine/generic/mkpkg23
-rw-r--r--noao/imred/ccdred/src/combine/icaclip.gx573
-rw-r--r--noao/imred/ccdred/src/combine/icaverage.gx93
-rw-r--r--noao/imred/ccdred/src/combine/iccclip.gx471
-rw-r--r--noao/imred/ccdred/src/combine/icgdata.gx233
-rw-r--r--noao/imred/ccdred/src/combine/icgrow.gx81
-rw-r--r--noao/imred/ccdred/src/combine/icimstack.x125
-rw-r--r--noao/imred/ccdred/src/combine/iclog.x378
-rw-r--r--noao/imred/ccdred/src/combine/icmask.com8
-rw-r--r--noao/imred/ccdred/src/combine/icmask.h7
-rw-r--r--noao/imred/ccdred/src/combine/icmask.x354
-rw-r--r--noao/imred/ccdred/src/combine/icmedian.gx228
-rw-r--r--noao/imred/ccdred/src/combine/icmm.gx177
-rw-r--r--noao/imred/ccdred/src/combine/icombine.com40
-rw-r--r--noao/imred/ccdred/src/combine/icombine.gx395
-rw-r--r--noao/imred/ccdred/src/combine/icombine.h52
-rw-r--r--noao/imred/ccdred/src/combine/icpclip.gx233
-rw-r--r--noao/imred/ccdred/src/combine/icscale.x376
-rw-r--r--noao/imred/ccdred/src/combine/icsclip.gx504
-rw-r--r--noao/imred/ccdred/src/combine/icsection.x94
-rw-r--r--noao/imred/ccdred/src/combine/icsetout.x193
-rw-r--r--noao/imred/ccdred/src/combine/icsigma.gx115
-rw-r--r--noao/imred/ccdred/src/combine/icsort.gx386
-rw-r--r--noao/imred/ccdred/src/combine/icstat.gx237
-rw-r--r--noao/imred/ccdred/src/combine/mkpkg51
-rw-r--r--noao/imred/ccdred/src/cor.gx362
-rw-r--r--noao/imred/ccdred/src/cosmic/cosmicrays.hlp338
-rw-r--r--noao/imred/ccdred/src/cosmic/crexamine.x486
-rw-r--r--noao/imred/ccdred/src/cosmic/crfind.x305
-rw-r--r--noao/imred/ccdred/src/cosmic/crlist.h17
-rw-r--r--noao/imred/ccdred/src/cosmic/crlist.x366
-rw-r--r--noao/imred/ccdred/src/cosmic/crsurface.x46
-rw-r--r--noao/imred/ccdred/src/cosmic/mkpkg16
-rw-r--r--noao/imred/ccdred/src/cosmic/t_cosmicrays.x348
-rw-r--r--noao/imred/ccdred/src/doproc.x29
-rw-r--r--noao/imred/ccdred/src/generic/ccdred.h150
-rw-r--r--noao/imred/ccdred/src/generic/cor.x694
-rw-r--r--noao/imred/ccdred/src/generic/icaclip.x1102
-rw-r--r--noao/imred/ccdred/src/generic/icaverage.x163
-rw-r--r--noao/imred/ccdred/src/generic/iccclip.x898
-rw-r--r--noao/imred/ccdred/src/generic/icgdata.x459
-rw-r--r--noao/imred/ccdred/src/generic/icgrow.x148
-rw-r--r--noao/imred/ccdred/src/generic/icmedian.x343
-rw-r--r--noao/imred/ccdred/src/generic/icmm.x300
-rw-r--r--noao/imred/ccdred/src/generic/icombine.x607
-rw-r--r--noao/imred/ccdred/src/generic/icpclip.x442
-rw-r--r--noao/imred/ccdred/src/generic/icsclip.x964
-rw-r--r--noao/imred/ccdred/src/generic/icsigma.x205
-rw-r--r--noao/imred/ccdred/src/generic/icsort.x550
-rw-r--r--noao/imred/ccdred/src/generic/icstat.x444
-rw-r--r--noao/imred/ccdred/src/generic/mkpkg11
-rw-r--r--noao/imred/ccdred/src/generic/proc.x735
-rw-r--r--noao/imred/ccdred/src/hdrmap.com4
-rw-r--r--noao/imred/ccdred/src/hdrmap.x544
-rw-r--r--noao/imred/ccdred/src/icaclip.gx573
-rw-r--r--noao/imred/ccdred/src/icaverage.gx93
-rw-r--r--noao/imred/ccdred/src/iccclip.gx471
-rw-r--r--noao/imred/ccdred/src/icgdata.gx233
-rw-r--r--noao/imred/ccdred/src/icgrow.gx81
-rw-r--r--noao/imred/ccdred/src/icimstack.x125
-rw-r--r--noao/imred/ccdred/src/iclog.x378
-rw-r--r--noao/imred/ccdred/src/icmask.com8
-rw-r--r--noao/imred/ccdred/src/icmask.h7
-rw-r--r--noao/imred/ccdred/src/icmask.x354
-rw-r--r--noao/imred/ccdred/src/icmedian.gx228
-rw-r--r--noao/imred/ccdred/src/icmm.gx177
-rw-r--r--noao/imred/ccdred/src/icombine.com40
-rw-r--r--noao/imred/ccdred/src/icombine.gx395
-rw-r--r--noao/imred/ccdred/src/icombine.h52
-rw-r--r--noao/imred/ccdred/src/icpclip.gx233
-rw-r--r--noao/imred/ccdred/src/icscale.x376
-rw-r--r--noao/imred/ccdred/src/icsclip.gx504
-rw-r--r--noao/imred/ccdred/src/icsection.x94
-rw-r--r--noao/imred/ccdred/src/icsetout.x193
-rw-r--r--noao/imred/ccdred/src/icsigma.gx115
-rw-r--r--noao/imred/ccdred/src/icsort.gx386
-rw-r--r--noao/imred/ccdred/src/icstat.gx237
-rw-r--r--noao/imred/ccdred/src/mkpkg75
-rw-r--r--noao/imred/ccdred/src/proc.gx408
-rw-r--r--noao/imred/ccdred/src/readcor.x138
-rw-r--r--noao/imred/ccdred/src/scancor.x340
-rw-r--r--noao/imred/ccdred/src/setdark.x160
-rw-r--r--noao/imred/ccdred/src/setfixpix.x74
-rw-r--r--noao/imred/ccdred/src/setflat.x146
-rw-r--r--noao/imred/ccdred/src/setfringe.x123
-rw-r--r--noao/imred/ccdred/src/setheader.x83
-rw-r--r--noao/imred/ccdred/src/setillum.x132
-rw-r--r--noao/imred/ccdred/src/setinput.x48
-rw-r--r--noao/imred/ccdred/src/setinteract.x31
-rw-r--r--noao/imred/ccdred/src/setoutput.x52
-rw-r--r--noao/imred/ccdred/src/setoverscan.x310
-rw-r--r--noao/imred/ccdred/src/setproc.x77
-rw-r--r--noao/imred/ccdred/src/setsections.x113
-rw-r--r--noao/imred/ccdred/src/settrim.x99
-rw-r--r--noao/imred/ccdred/src/setzero.x141
-rw-r--r--noao/imred/ccdred/src/sigma.gx89
-rw-r--r--noao/imred/ccdred/src/t_badpixim.x114
-rw-r--r--noao/imred/ccdred/src/t_ccdgroups.x258
-rw-r--r--noao/imred/ccdred/src/t_ccdhedit.x87
-rw-r--r--noao/imred/ccdred/src/t_ccdinst.x667
-rw-r--r--noao/imred/ccdred/src/t_ccdlist.x325
-rw-r--r--noao/imred/ccdred/src/t_ccdmask.x384
-rw-r--r--noao/imred/ccdred/src/t_ccdproc.x176
-rw-r--r--noao/imred/ccdred/src/t_combine.x653
-rw-r--r--noao/imred/ccdred/src/t_mkfringe.x191
-rw-r--r--noao/imred/ccdred/src/t_mkillumcor.x108
-rw-r--r--noao/imred/ccdred/src/t_mkillumft.x229
-rw-r--r--noao/imred/ccdred/src/t_mkskycor.x694
-rw-r--r--noao/imred/ccdred/src/t_mkskyflat.x215
-rw-r--r--noao/imred/ccdred/src/t_skyreplace.x301
-rw-r--r--noao/imred/ccdred/src/timelog.x29
-rw-r--r--noao/imred/ccdred/x_ccdred.x15
-rw-r--r--noao/imred/ccdred/zerocombine.cl48
-rw-r--r--noao/imred/crutil/crutil.cl18
-rw-r--r--noao/imred/crutil/crutil.hd17
-rw-r--r--noao/imred/crutil/crutil.men10
-rw-r--r--noao/imred/crutil/crutil.par3
-rw-r--r--noao/imred/crutil/doc/cosmicrays.hlp306
-rw-r--r--noao/imred/crutil/doc/craverage.hlp232
-rw-r--r--noao/imred/crutil/doc/crcombine.hlp35
-rw-r--r--noao/imred/crutil/doc/credit.hlp39
-rw-r--r--noao/imred/crutil/doc/crfix.hlp48
-rw-r--r--noao/imred/crutil/doc/crgrow.hlp55
-rw-r--r--noao/imred/crutil/doc/crmedian.hlp157
-rw-r--r--noao/imred/crutil/doc/crnebula.hlp139
-rw-r--r--noao/imred/crutil/doc/overview.hlp76
-rw-r--r--noao/imred/crutil/mkpkg8
-rw-r--r--noao/imred/crutil/src/Revisions151
-rw-r--r--noao/imred/crutil/src/cosmicrays.key43
-rw-r--r--noao/imred/crutil/src/cosmicrays.par17
-rw-r--r--noao/imred/crutil/src/craverage.par23
-rw-r--r--noao/imred/crutil/src/crcombine.cl17
-rw-r--r--noao/imred/crutil/src/crcombine.par45
-rw-r--r--noao/imred/crutil/src/credit.cl13
-rw-r--r--noao/imred/crutil/src/credit.par22
-rw-r--r--noao/imred/crutil/src/crexamine.x626
-rw-r--r--noao/imred/crutil/src/crfind.x305
-rw-r--r--noao/imred/crutil/src/crfix.cl20
-rw-r--r--noao/imred/crutil/src/crgrow.par7
-rw-r--r--noao/imred/crutil/src/crlist.h17
-rw-r--r--noao/imred/crutil/src/crlist.x417
-rw-r--r--noao/imred/crutil/src/crmedian.par15
-rw-r--r--noao/imred/crutil/src/crnebula.cl116
-rw-r--r--noao/imred/crutil/src/crsurface.x46
-rw-r--r--noao/imred/crutil/src/mkpkg38
-rw-r--r--noao/imred/crutil/src/t_cosmicrays.x329
-rw-r--r--noao/imred/crutil/src/t_craverage.x847
-rw-r--r--noao/imred/crutil/src/t_crgrow.x182
-rw-r--r--noao/imred/crutil/src/t_crmedian.x417
-rw-r--r--noao/imred/crutil/src/x_crutil.x4
-rw-r--r--noao/imred/crutil/src/xtmaskname.x125
-rw-r--r--noao/imred/ctioslit/Revisions26
-rw-r--r--noao/imred/ctioslit/calibrate.par13
-rw-r--r--noao/imred/ctioslit/ctioslit.cl69
-rw-r--r--noao/imred/ctioslit/ctioslit.hd1
-rw-r--r--noao/imred/ctioslit/ctioslit.men38
-rw-r--r--noao/imred/ctioslit/ctioslit.par15
-rw-r--r--noao/imred/ctioslit/demos/demoarc1.dat38
-rw-r--r--noao/imred/ctioslit/demos/demoarc2.dat38
-rw-r--r--noao/imred/ctioslit/demos/demoobj1.dat37
-rw-r--r--noao/imred/ctioslit/demos/demos.cl18
-rw-r--r--noao/imred/ctioslit/demos/demos.men4
-rw-r--r--noao/imred/ctioslit/demos/demos.par2
-rw-r--r--noao/imred/ctioslit/demos/demostd1.dat37
-rw-r--r--noao/imred/ctioslit/demos/doslit.cl14
-rw-r--r--noao/imred/ctioslit/demos/mkdoslit.cl25
-rw-r--r--noao/imred/ctioslit/demos/xgdoslit.dat71
-rw-r--r--noao/imred/ctioslit/sensfunc.par17
-rw-r--r--noao/imred/ctioslit/sparams.par65
-rw-r--r--noao/imred/ctioslit/standard.par21
-rw-r--r--noao/imred/doc/demos.hlp77
-rw-r--r--noao/imred/doc/revisions.v2.ms89
-rw-r--r--noao/imred/doc/tutor.hlp64
-rw-r--r--noao/imred/dtoi/README1
-rw-r--r--noao/imred/dtoi/Revisions144
-rw-r--r--noao/imred/dtoi/database.x611
-rw-r--r--noao/imred/dtoi/dematch.par8
-rw-r--r--noao/imred/dtoi/dematch.x160
-rw-r--r--noao/imred/dtoi/doc/dematch.hlp51
-rw-r--r--noao/imred/dtoi/doc/dtoi.ms576
-rw-r--r--noao/imred/dtoi/doc/dtoi.toc34
-rw-r--r--noao/imred/dtoi/doc/hdfit.hlp79
-rw-r--r--noao/imred/dtoi/doc/hdshift.hlp50
-rw-r--r--noao/imred/dtoi/doc/hdtoi.hlp88
-rw-r--r--noao/imred/dtoi/doc/selftest.hlp81
-rw-r--r--noao/imred/dtoi/doc/splotlist.hlp81
-rw-r--r--noao/imred/dtoi/dtoi.cl16
-rw-r--r--noao/imred/dtoi/dtoi.hd11
-rw-r--r--noao/imred/dtoi/dtoi.men6
-rw-r--r--noao/imred/dtoi/dtoi.par2
-rw-r--r--noao/imred/dtoi/hd_aravr.x50
-rw-r--r--noao/imred/dtoi/hdfit.par9
-rw-r--r--noao/imred/dtoi/hdfit.x364
-rw-r--r--noao/imred/dtoi/hdicfit/hdic.com6
-rw-r--r--noao/imred/dtoi/hdicfit/hdicadd.x47
-rw-r--r--noao/imred/dtoi/hdicfit/hdicclean.x94
-rw-r--r--noao/imred/dtoi/hdicfit/hdicdeviant.x116
-rw-r--r--noao/imred/dtoi/hdicfit/hdicdosetup.x104
-rw-r--r--noao/imred/dtoi/hdicfit/hdicebars.x217
-rw-r--r--noao/imred/dtoi/hdicfit/hdicerrors.x143
-rw-r--r--noao/imred/dtoi/hdicfit/hdicfit.h65
-rw-r--r--noao/imred/dtoi/hdicfit/hdicfit.x80
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgaxes.x101
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgcolon.x284
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgdelete.x81
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgfit.x402
-rw-r--r--noao/imred/dtoi/hdicfit/hdicggraph.x329
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgnearest.x72
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgparams.x94
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgredraw.x22
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgsample.x84
-rw-r--r--noao/imred/dtoi/hdicfit/hdicguaxes.x38
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgundel.x87
-rw-r--r--noao/imred/dtoi/hdicfit/hdicguser.x17
-rw-r--r--noao/imred/dtoi/hdicfit/hdicgvec.x74
-rw-r--r--noao/imred/dtoi/hdicfit/hdicinit.x60
-rw-r--r--noao/imred/dtoi/hdicfit/hdicparams.x323
-rw-r--r--noao/imred/dtoi/hdicfit/hdicreject.x39
-rw-r--r--noao/imred/dtoi/hdicfit/hdicshow.x52
-rw-r--r--noao/imred/dtoi/hdicfit/hdicsort.x38
-rw-r--r--noao/imred/dtoi/hdicfit/hdictrans.x155
-rw-r--r--noao/imred/dtoi/hdicfit/hdicvshow.x155
-rw-r--r--noao/imred/dtoi/hdicfit/mkpkg37
-rw-r--r--noao/imred/dtoi/hdicfit/userfcn.x37
-rw-r--r--noao/imred/dtoi/hdshift.par2
-rw-r--r--noao/imred/dtoi/hdshift.x184
-rw-r--r--noao/imred/dtoi/hdtoi.par11
-rw-r--r--noao/imred/dtoi/hdtoi.x407
-rw-r--r--noao/imred/dtoi/minmax.x73
-rw-r--r--noao/imred/dtoi/mkpkg40
-rw-r--r--noao/imred/dtoi/selftest.par8
-rw-r--r--noao/imred/dtoi/selftest.x290
-rw-r--r--noao/imred/dtoi/spotlist.par8
-rw-r--r--noao/imred/dtoi/spotlist.x395
-rw-r--r--noao/imred/dtoi/x_dtoi.x6
-rw-r--r--noao/imred/echelle/Revisions247
-rw-r--r--noao/imred/echelle/calibrate.par13
-rw-r--r--noao/imred/echelle/demos/demoarc.dat38
-rw-r--r--noao/imred/echelle/demos/demoobj.dat37
-rw-r--r--noao/imred/echelle/demos/demos.cl20
-rw-r--r--noao/imred/echelle/demos/demos.men7
-rw-r--r--noao/imred/echelle/demos/demos.par2
-rw-r--r--noao/imred/echelle/demos/demostd.dat36
-rw-r--r--noao/imred/echelle/demos/doecslit.cl21
-rw-r--r--noao/imred/echelle/demos/dofoe.cl13
-rw-r--r--noao/imred/echelle/demos/ecdofoe.dat33
-rw-r--r--noao/imred/echelle/demos/mkdoecslit.cl137
-rw-r--r--noao/imred/echelle/demos/mkdofoe.cl103
-rw-r--r--noao/imred/echelle/demos/xgdoecslit.dat105
-rw-r--r--noao/imred/echelle/demos/xgdofoe.dat50
-rw-r--r--noao/imred/echelle/doc/Tutorial.hlp184
-rw-r--r--noao/imred/echelle/doc/doecslit.hlp1230
-rw-r--r--noao/imred/echelle/doc/doecslit.ms1479
-rw-r--r--noao/imred/echelle/doc/dofoe.hlp1155
-rw-r--r--noao/imred/echelle/doc/dofoe.ms1371
-rw-r--r--noao/imred/echelle/doc/ecidentify.hlp770
-rw-r--r--noao/imred/echelle/doc/ecreidentify.hlp117
-rw-r--r--noao/imred/echelle/echelle.cl82
-rw-r--r--noao/imred/echelle/echelle.hd11
-rw-r--r--noao/imred/echelle/echelle.men40
-rw-r--r--noao/imred/echelle/echelle.par15
-rw-r--r--noao/imred/echelle/sensfunc.par17
-rw-r--r--noao/imred/echelle/standard.par21
-rw-r--r--noao/imred/generic/Revisions220
-rw-r--r--noao/imred/generic/background.cl6
-rw-r--r--noao/imred/generic/background.par16
-rw-r--r--noao/imred/generic/darksub.cl99
-rw-r--r--noao/imred/generic/doc/Spelldict51
-rw-r--r--noao/imred/generic/doc/background.hlp82
-rw-r--r--noao/imred/generic/doc/darksub.hlp60
-rw-r--r--noao/imred/generic/doc/flat1d.hlp157
-rw-r--r--noao/imred/generic/doc/flatten.hlp42
-rw-r--r--noao/imred/generic/doc/normalize.hlp45
-rw-r--r--noao/imred/generic/doc/normflat.hlp54
-rw-r--r--noao/imred/generic/flat1d.par17
-rw-r--r--noao/imred/generic/flat1d.x478
-rw-r--r--noao/imred/generic/flatten.cl64
-rw-r--r--noao/imred/generic/flatten.par13
-rw-r--r--noao/imred/generic/generic.cl17
-rw-r--r--noao/imred/generic/generic.hd11
-rw-r--r--noao/imred/generic/generic.men6
-rw-r--r--noao/imred/generic/generic.par5
-rw-r--r--noao/imred/generic/mkpkg54
-rw-r--r--noao/imred/generic/normalize.cl79
-rw-r--r--noao/imred/generic/normflat.cl69
-rw-r--r--noao/imred/generic/normflat.par15
-rw-r--r--noao/imred/generic/x_generic.x1
-rw-r--r--noao/imred/hydra/Revisions61
-rw-r--r--noao/imred/hydra/demos/big.cl13
-rw-r--r--noao/imred/hydra/demos/demos.cl18
-rw-r--r--noao/imred/hydra/demos/demos.men13
-rw-r--r--noao/imred/hydra/demos/demos.par2
-rw-r--r--noao/imred/hydra/demos/dohydra.cl12
-rw-r--r--noao/imred/hydra/demos/dohydra1.cl12
-rw-r--r--noao/imred/hydra/demos/dohydral.cl13
-rw-r--r--noao/imred/hydra/demos/dohydranl.cl14
-rw-r--r--noao/imred/hydra/demos/donessie.cl12
-rw-r--r--noao/imred/hydra/demos/fibers.dat44
-rw-r--r--noao/imred/hydra/demos/header.dat36
-rw-r--r--noao/imred/hydra/demos/mkbig.cl29
-rw-r--r--noao/imred/hydra/demos/mkbig.dat300
-rw-r--r--noao/imred/hydra/demos/mkdohydra.cl41
-rw-r--r--noao/imred/hydra/demos/mkdohydra1.dat12
-rw-r--r--noao/imred/hydra/demos/mkdohydra2.dat12
-rw-r--r--noao/imred/hydra/demos/mkdonessie.cl36
-rw-r--r--noao/imred/hydra/demos/mkdonessie.dat12
-rw-r--r--noao/imred/hydra/demos/mklist.cl27
-rw-r--r--noao/imred/hydra/demos/xgbig.dat81
-rw-r--r--noao/imred/hydra/demos/xgdohydra.dat93
-rw-r--r--noao/imred/hydra/demos/xgdohydra1.dat89
-rw-r--r--noao/imred/hydra/demos/xgdohydranl.dat91
-rw-r--r--noao/imred/hydra/demos/xgdonessie.dat94
-rw-r--r--noao/imred/hydra/doc/dohydra.hlp1588
-rw-r--r--noao/imred/hydra/doc/dohydra.ms1853
-rw-r--r--noao/imred/hydra/dohydra.cl75
-rw-r--r--noao/imred/hydra/dohydra.par43
-rw-r--r--noao/imred/hydra/hydra.cl82
-rw-r--r--noao/imred/hydra/hydra.hd7
-rw-r--r--noao/imred/hydra/hydra.men32
-rw-r--r--noao/imred/hydra/hydra.par13
-rw-r--r--noao/imred/hydra/params.par67
-rw-r--r--noao/imred/iids/Revisions131
-rw-r--r--noao/imred/iids/calibrate.par14
-rw-r--r--noao/imred/iids/dispcor.par19
-rw-r--r--noao/imred/iids/identify.par33
-rw-r--r--noao/imred/iids/iids.cl66
-rw-r--r--noao/imred/iids/iids.hd1
-rw-r--r--noao/imred/iids/iids.men37
-rw-r--r--noao/imred/iids/iids.par17
-rw-r--r--noao/imred/iids/irs.men5
-rw-r--r--noao/imred/iids/lcalib.par7
-rw-r--r--noao/imred/iids/refspectra.par17
-rw-r--r--noao/imred/iids/reidentify.par36
-rw-r--r--noao/imred/iids/sensfunc.par17
-rw-r--r--noao/imred/iids/standard.par22
-rw-r--r--noao/imred/imred.cl55
-rw-r--r--noao/imred/imred.hd119
-rw-r--r--noao/imred/imred.men17
-rw-r--r--noao/imred/imred.par5
-rw-r--r--noao/imred/irred/Revisions61
-rw-r--r--noao/imred/irred/center.par21
-rw-r--r--noao/imred/irred/centerpars.par14
-rw-r--r--noao/imred/irred/datapars.par25
-rw-r--r--noao/imred/irred/doc/center.hlp637
-rw-r--r--noao/imred/irred/doc/irlincor.hlp81
-rw-r--r--noao/imred/irred/doc/mosproc.hlp170
-rw-r--r--noao/imred/irred/imcombine11
-rw-r--r--noao/imred/irred/iralign.par20
-rw-r--r--noao/imred/irred/irlincor.par7
-rw-r--r--noao/imred/irred/irmatch1d.par21
-rw-r--r--noao/imred/irred/irmatch2d.par21
-rw-r--r--noao/imred/irred/irmosaic.par22
-rw-r--r--noao/imred/irred/irred.cl36
-rw-r--r--noao/imred/irred/irred.hd8
-rw-r--r--noao/imred/irred/irred.men11
-rw-r--r--noao/imred/irred/irred.par3
-rw-r--r--noao/imred/irred/mkpkg24
-rw-r--r--noao/imred/irred/mosproc.cl172
-rw-r--r--noao/imred/irred/t_irlincor.x254
-rw-r--r--noao/imred/irred/txdump.par8
-rw-r--r--noao/imred/irred/x_irred.x1
-rw-r--r--noao/imred/irs/Revisions111
-rw-r--r--noao/imred/irs/calibrate.par14
-rw-r--r--noao/imred/irs/dispcor.par19
-rw-r--r--noao/imred/irs/flatfit.par24
-rw-r--r--noao/imred/irs/identify.par33
-rw-r--r--noao/imred/irs/irs.cl64
-rw-r--r--noao/imred/irs/irs.hd1
-rw-r--r--noao/imred/irs/irs.men35
-rw-r--r--noao/imred/irs/irs.par17
-rw-r--r--noao/imred/irs/lcalib.par7
-rw-r--r--noao/imred/irs/refspectra.par17
-rw-r--r--noao/imred/irs/reidentify.par36
-rw-r--r--noao/imred/irs/sensfunc.par17
-rw-r--r--noao/imred/irs/standard.par22
-rw-r--r--noao/imred/kpnocoude/Revisions59
-rw-r--r--noao/imred/kpnocoude/calibrate.par13
-rw-r--r--noao/imred/kpnocoude/demos/demoarc1.dat38
-rw-r--r--noao/imred/kpnocoude/demos/demoarc2.dat38
-rw-r--r--noao/imred/kpnocoude/demos/demoobj1.dat37
-rw-r--r--noao/imred/kpnocoude/demos/demos.cl18
-rw-r--r--noao/imred/kpnocoude/demos/demos.men6
-rw-r--r--noao/imred/kpnocoude/demos/demos.par2
-rw-r--r--noao/imred/kpnocoude/demos/demostd1.dat37
-rw-r--r--noao/imred/kpnocoude/demos/do3fiber.cl14
-rw-r--r--noao/imred/kpnocoude/demos/doslit.cl15
-rw-r--r--noao/imred/kpnocoude/demos/mkdo3fiber.cl22
-rw-r--r--noao/imred/kpnocoude/demos/mkdo3fiber.dat3
-rw-r--r--noao/imred/kpnocoude/demos/mkdoslit.cl25
-rw-r--r--noao/imred/kpnocoude/demos/xgdo3fiber.dat60
-rw-r--r--noao/imred/kpnocoude/demos/xgdoslit.dat71
-rw-r--r--noao/imred/kpnocoude/do3fiber.cl60
-rw-r--r--noao/imred/kpnocoude/do3fiber.par30
-rw-r--r--noao/imred/kpnocoude/doc/do3fiber.hlp1146
-rw-r--r--noao/imred/kpnocoude/doc/do3fiber.ms1413
-rw-r--r--noao/imred/kpnocoude/identify.par33
-rw-r--r--noao/imred/kpnocoude/kpnocoude.cl98
-rw-r--r--noao/imred/kpnocoude/kpnocoude.hd5
-rw-r--r--noao/imred/kpnocoude/kpnocoude.men41
-rw-r--r--noao/imred/kpnocoude/kpnocoude.par15
-rw-r--r--noao/imred/kpnocoude/params.par61
-rw-r--r--noao/imred/kpnocoude/reidentify.par36
-rw-r--r--noao/imred/kpnocoude/sensfunc.par17
-rw-r--r--noao/imred/kpnocoude/sparams.par65
-rw-r--r--noao/imred/kpnocoude/standard.par21
-rw-r--r--noao/imred/kpnoslit/Revisions32
-rw-r--r--noao/imred/kpnoslit/calibrate.par13
-rw-r--r--noao/imred/kpnoslit/demos/demoarc1.dat38
-rw-r--r--noao/imred/kpnoslit/demos/demoarc2.dat38
-rw-r--r--noao/imred/kpnoslit/demos/demoflat.dat38
-rw-r--r--noao/imred/kpnoslit/demos/demoobj1.dat37
-rw-r--r--noao/imred/kpnoslit/demos/demos.cl18
-rw-r--r--noao/imred/kpnoslit/demos/demos.men4
-rw-r--r--noao/imred/kpnoslit/demos/demos.par2
-rw-r--r--noao/imred/kpnoslit/demos/demostd1.dat37
-rw-r--r--noao/imred/kpnoslit/demos/doslit.cl14
-rw-r--r--noao/imred/kpnoslit/demos/mkdoslit.cl28
-rw-r--r--noao/imred/kpnoslit/demos/xgdoslit.dat71
-rw-r--r--noao/imred/kpnoslit/kpnoslit.cl69
-rw-r--r--noao/imred/kpnoslit/kpnoslit.hd1
-rw-r--r--noao/imred/kpnoslit/kpnoslit.men38
-rw-r--r--noao/imred/kpnoslit/kpnoslit.par15
-rw-r--r--noao/imred/kpnoslit/sensfunc.par17
-rw-r--r--noao/imred/kpnoslit/sparams.par65
-rw-r--r--noao/imred/kpnoslit/standard.par21
-rw-r--r--noao/imred/mkpkg20
-rw-r--r--noao/imred/quadred/doc/package.hlp142
-rw-r--r--noao/imred/quadred/doc/qhistogram.hlp37
-rw-r--r--noao/imred/quadred/doc/qstatistics.hlp52
-rw-r--r--noao/imred/quadred/doc/quadformat.hlp392
-rw-r--r--noao/imred/quadred/doc/quadjoin.hlp43
-rw-r--r--noao/imred/quadred/doc/quadscale.hlp37
-rw-r--r--noao/imred/quadred/doc/quadsections.hlp81
-rw-r--r--noao/imred/quadred/doc/quadsplit.hlp49
-rw-r--r--noao/imred/quadred/mkpkg8
-rw-r--r--noao/imred/quadred/quadred.cl68
-rw-r--r--noao/imred/quadred/quadred.hd22
-rw-r--r--noao/imred/quadred/quadred.men61
-rw-r--r--noao/imred/quadred/quadred.par13
-rw-r--r--noao/imred/quadred/src/Revisions42
-rw-r--r--noao/imred/quadred/src/ccdproc/calimage.x367
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdcache.com10
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdcache.h10
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdcache.x381
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdcheck.x67
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdcmp.x23
-rw-r--r--noao/imred/quadred/src/ccdproc/ccddelete.x55
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdflag.x27
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdlog.x46
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdmean.x50
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdnscan.x38
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdproc.par43
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdproc.x106
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdred.h155
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdsection.x100
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdsubsets.x92
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdtypes.h14
-rw-r--r--noao/imred/quadred/src/ccdproc/ccdtypes.x72
-rw-r--r--noao/imred/quadred/src/ccdproc/cor.gx362
-rw-r--r--noao/imred/quadred/src/ccdproc/corinput.gx220
-rw-r--r--noao/imred/quadred/src/ccdproc/doc/ccdproc.hlp778
-rw-r--r--noao/imred/quadred/src/ccdproc/doproc.x29
-rw-r--r--noao/imred/quadred/src/ccdproc/generic/ccdred.h155
-rw-r--r--noao/imred/quadred/src/ccdproc/generic/cor.x695
-rw-r--r--noao/imred/quadred/src/ccdproc/generic/corinput.x436
-rw-r--r--noao/imred/quadred/src/ccdproc/generic/mkpkg12
-rw-r--r--noao/imred/quadred/src/ccdproc/generic/proc.x678
-rw-r--r--noao/imred/quadred/src/ccdproc/hdrmap.com4
-rw-r--r--noao/imred/quadred/src/ccdproc/hdrmap.x544
-rw-r--r--noao/imred/quadred/src/ccdproc/mkpkg78
-rw-r--r--noao/imred/quadred/src/ccdproc/proc.gx379
-rw-r--r--noao/imred/quadred/src/ccdproc/readcor.x138
-rw-r--r--noao/imred/quadred/src/ccdproc/scancor.x340
-rw-r--r--noao/imred/quadred/src/ccdproc/setdark.x155
-rw-r--r--noao/imred/quadred/src/ccdproc/setfixpix.x181
-rw-r--r--noao/imred/quadred/src/ccdproc/setflat.x146
-rw-r--r--noao/imred/quadred/src/ccdproc/setfringe.x123
-rw-r--r--noao/imred/quadred/src/ccdproc/setheader.x76
-rw-r--r--noao/imred/quadred/src/ccdproc/setillum.x132
-rw-r--r--noao/imred/quadred/src/ccdproc/setinput.x48
-rw-r--r--noao/imred/quadred/src/ccdproc/setinteract.x31
-rw-r--r--noao/imred/quadred/src/ccdproc/setoutput.x51
-rw-r--r--noao/imred/quadred/src/ccdproc/setoverscan.x344
-rw-r--r--noao/imred/quadred/src/ccdproc/setproc.x80
-rw-r--r--noao/imred/quadred/src/ccdproc/setsections.x327
-rw-r--r--noao/imred/quadred/src/ccdproc/settrim.x115
-rw-r--r--noao/imred/quadred/src/ccdproc/setzero.x141
-rw-r--r--noao/imred/quadred/src/ccdproc/t_ccdproc.x155
-rw-r--r--noao/imred/quadred/src/ccdproc/timelog.x29
-rw-r--r--noao/imred/quadred/src/ccdproc/x_quadred.x1
-rw-r--r--noao/imred/quadred/src/mkpkg4
-rw-r--r--noao/imred/quadred/src/quad/Revisions92
-rw-r--r--noao/imred/quadred/src/quad/ccd.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/cfccd_both.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f1.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f2.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/csccd.dat23
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/echccd.dat23
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/instruments.men9
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/nfccd.dat23
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/pfccd_both.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f1.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f2.dat27
-rw-r--r--noao/imred/quadred/src/quad/ccddelete.par1
-rw-r--r--noao/imred/quadred/src/quad/ccddelete.x65
-rw-r--r--noao/imred/quadred/src/quad/ccdgetparam.par2
-rw-r--r--noao/imred/quadred/src/quad/ccdgetparam.x48
-rw-r--r--noao/imred/quadred/src/quad/ccdlog.x42
-rw-r--r--noao/imred/quadred/src/quad/ccdprcselect.par4
-rw-r--r--noao/imred/quadred/src/quad/ccdprcselect.x90
-rw-r--r--noao/imred/quadred/src/quad/ccdproc.par43
-rw-r--r--noao/imred/quadred/src/quad/ccdsection.par1
-rw-r--r--noao/imred/quadred/src/quad/ccdsection.x119
-rw-r--r--noao/imred/quadred/src/quad/ccdssselect.par4
-rw-r--r--noao/imred/quadred/src/quad/ccdssselect.x73
-rw-r--r--noao/imred/quadred/src/quad/ccdsubsets.x92
-rw-r--r--noao/imred/quadred/src/quad/ccdtypes.h14
-rw-r--r--noao/imred/quadred/src/quad/ccdtypes.x72
-rw-r--r--noao/imred/quadred/src/quad/detpars.par6
-rw-r--r--noao/imred/quadred/src/quad/doc/Geometry.fig91
-rw-r--r--noao/imred/quadred/src/quad/doc/badpiximage.hlp51
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdgeometry.hlp70
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdgroups.hlp163
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdhedit.hlp108
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdinst.hlp389
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdlist.hlp133
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdproc.hlp720
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdred.hlp98
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdred.ms787
-rw-r--r--noao/imred/quadred/src/quad/doc/ccdtypes.hlp124
-rw-r--r--noao/imred/quadred/src/quad/doc/combine.hlp1030
-rw-r--r--noao/imred/quadred/src/quad/doc/contents.ms34
-rw-r--r--noao/imred/quadred/src/quad/doc/cosmicrays.hlp220
-rw-r--r--noao/imred/quadred/src/quad/doc/darkcombine.hlp125
-rw-r--r--noao/imred/quadred/src/quad/doc/flatcombine.hlp139
-rw-r--r--noao/imred/quadred/src/quad/doc/flatfields.hlp177
-rw-r--r--noao/imred/quadred/src/quad/doc/guide.hlp715
-rw-r--r--noao/imred/quadred/src/quad/doc/guide.ms794
-rw-r--r--noao/imred/quadred/src/quad/doc/instruments.hlp248
-rw-r--r--noao/imred/quadred/src/quad/doc/mkfringecor.hlp90
-rw-r--r--noao/imred/quadred/src/quad/doc/mkillumcor.hlp92
-rw-r--r--noao/imred/quadred/src/quad/doc/mkillumflat.hlp101
-rw-r--r--noao/imred/quadred/src/quad/doc/mkskycor.hlp103
-rw-r--r--noao/imred/quadred/src/quad/doc/mkskyflat.hlp111
-rw-r--r--noao/imred/quadred/src/quad/doc/quad.hlp121
-rw-r--r--noao/imred/quadred/src/quad/doc/quadman.hlp1330
-rw-r--r--noao/imred/quadred/src/quad/doc/quadproc.hlp672
-rw-r--r--noao/imred/quadred/src/quad/doc/quadreadout.hlp19
-rw-r--r--noao/imred/quadred/src/quad/doc/setinstrument.hlp97
-rw-r--r--noao/imred/quadred/src/quad/doc/subsets.hlp97
-rw-r--r--noao/imred/quadred/src/quad/doc/zerocombine.hlp127
-rw-r--r--noao/imred/quadred/src/quad/gainmeasure.par6
-rw-r--r--noao/imred/quadred/src/quad/gainmeasure.x170
-rw-r--r--noao/imred/quadred/src/quad/hdrmap.com4
-rw-r--r--noao/imred/quadred/src/quad/hdrmap.x544
-rw-r--r--noao/imred/quadred/src/quad/irlincor.par12
-rw-r--r--noao/imred/quadred/src/quad/mkpkg56
-rw-r--r--noao/imred/quadred/src/quad/new.par8
-rw-r--r--noao/imred/quadred/src/quad/old.par2
-rw-r--r--noao/imred/quadred/src/quad/qccdproc.par43
-rw-r--r--noao/imred/quadred/src/quad/qdarkcombine.cl48
-rw-r--r--noao/imred/quadred/src/quad/qflatcombine.cl49
-rw-r--r--noao/imred/quadred/src/quad/qghdr2.x216
-rw-r--r--noao/imred/quadred/src/quad/qguser.x126
-rw-r--r--noao/imred/quadred/src/quad/qhistogram.cl58
-rw-r--r--noao/imred/quadred/src/quad/qhistogram.par17
-rw-r--r--noao/imred/quadred/src/quad/qnoproc.cl77
-rw-r--r--noao/imred/quadred/src/quad/qnoproc.par15
-rw-r--r--noao/imred/quadred/src/quad/qpcalimage.par3
-rw-r--r--noao/imred/quadred/src/quad/qpcalimage.x525
-rw-r--r--noao/imred/quadred/src/quad/qproc.cl109
-rw-r--r--noao/imred/quadred/src/quad/qproc.par15
-rw-r--r--noao/imred/quadred/src/quad/qpselect.par4
-rw-r--r--noao/imred/quadred/src/quad/qpselect.x108
-rw-r--r--noao/imred/quadred/src/quad/qsplit.gx97
-rw-r--r--noao/imred/quadred/src/quad/qsplitd.x97
-rw-r--r--noao/imred/quadred/src/quad/qspliti.x97
-rw-r--r--noao/imred/quadred/src/quad/qsplitl.x97
-rw-r--r--noao/imred/quadred/src/quad/qsplitr.x97
-rw-r--r--noao/imred/quadred/src/quad/qsplits.x97
-rw-r--r--noao/imred/quadred/src/quad/qstatistics.cl19
-rw-r--r--noao/imred/quadred/src/quad/qstatistics.par7
-rw-r--r--noao/imred/quadred/src/quad/quad.cl64
-rw-r--r--noao/imred/quadred/src/quad/quad.hd33
-rw-r--r--noao/imred/quadred/src/quad/quad.men36
-rw-r--r--noao/imred/quadred/src/quad/quad.par12
-rw-r--r--noao/imred/quadred/src/quad/quadalloc.x165
-rw-r--r--noao/imred/quadred/src/quad/quaddelete.x39
-rw-r--r--noao/imred/quadred/src/quad/quadgeom.h99
-rw-r--r--noao/imred/quadred/src/quad/quadgeom.x304
-rw-r--r--noao/imred/quadred/src/quad/quadgeomred.x165
-rw-r--r--noao/imred/quadred/src/quad/quadjoin.par4
-rw-r--r--noao/imred/quadred/src/quad/quadjoin.x638
-rw-r--r--noao/imred/quadred/src/quad/quadmap.x297
-rw-r--r--noao/imred/quadred/src/quad/quadmerge.x122
-rw-r--r--noao/imred/quadred/src/quad/quadproc.cl173
-rw-r--r--noao/imred/quadred/src/quad/quadproc.par42
-rw-r--r--noao/imred/quadred/src/quad/quadscale.par7
-rw-r--r--noao/imred/quadred/src/quad/quadscale.x159
-rw-r--r--noao/imred/quadred/src/quad/quadsections.par14
-rw-r--r--noao/imred/quadred/src/quad/quadsections.x447
-rw-r--r--noao/imred/quadred/src/quad/quadsplit.par9
-rw-r--r--noao/imred/quadred/src/quad/quadsplit.x115
-rw-r--r--noao/imred/quadred/src/quad/quadtest/artobs.cl68
-rw-r--r--noao/imred/quadred/src/quad/quadtest/artobs.par5
-rw-r--r--noao/imred/quadred/src/quad/quadtest/ccdpars.par29
-rw-r--r--noao/imred/quadred/src/quad/quadtest/logfile1
-rw-r--r--noao/imred/quadred/src/quad/quadtest/mkamp.cl166
-rw-r--r--noao/imred/quadred/src/quad/quadtest/mkimage.par10
-rw-r--r--noao/imred/quadred/src/quad/quadtest/mkquad.cl222
-rw-r--r--noao/imred/quadred/src/quad/quadtest/mkquad.par4
-rw-r--r--noao/imred/quadred/src/quad/quadtest/quadtest.cl14
-rw-r--r--noao/imred/quadred/src/quad/quadtest/quadtest.par20
-rw-r--r--noao/imred/quadred/src/quad/qzerocombine.cl48
-rw-r--r--noao/imred/quadred/src/quad/setinstrument.cl58
-rw-r--r--noao/imred/quadred/src/quad/test.x71
-rw-r--r--noao/imred/quadred/src/quad/timelog.x29
-rw-r--r--noao/imred/quadred/src/quad/x_quad.x14
-rw-r--r--noao/imred/specred/Revisions167
-rw-r--r--noao/imred/specred/doc/dofibers.hlp1531
-rw-r--r--noao/imred/specred/doc/dofibers.ms1807
-rw-r--r--noao/imred/specred/doc/doslit.hlp1201
-rw-r--r--noao/imred/specred/doc/doslit.ms1401
-rw-r--r--noao/imred/specred/doc/msresp1d.hlp191
-rw-r--r--noao/imred/specred/doc/skysub.hlp98
-rw-r--r--noao/imred/specred/dofibers.cl74
-rw-r--r--noao/imred/specred/dofibers.par42
-rw-r--r--noao/imred/specred/msresp1d.cl234
-rw-r--r--noao/imred/specred/msresp1d.par13
-rw-r--r--noao/imred/specred/params.par67
-rw-r--r--noao/imred/specred/sparams.par65
-rw-r--r--noao/imred/specred/specred.cl103
-rw-r--r--noao/imred/specred/specred.hd11
-rw-r--r--noao/imred/specred/specred.men51
-rw-r--r--noao/imred/specred/specred.par15
-rw-r--r--noao/imred/src/doecslit/Revisions93
-rw-r--r--noao/imred/src/doecslit/apslitproc.par145
-rw-r--r--noao/imred/src/doecslit/doecslit.cl106
-rw-r--r--noao/imred/src/doecslit/doecslit.par28
-rw-r--r--noao/imred/src/doecslit/sarcrefs.cl77
-rw-r--r--noao/imred/src/doecslit/sarcrefs.par6
-rw-r--r--noao/imred/src/doecslit/sbatch.cl216
-rw-r--r--noao/imred/src/doecslit/sbatch.par24
-rw-r--r--noao/imred/src/doecslit/sdoarcs.cl102
-rw-r--r--noao/imred/src/doecslit/sdoarcs.par8
-rw-r--r--noao/imred/src/doecslit/sfluxcal.cl214
-rw-r--r--noao/imred/src/doecslit/sfluxcal.par16
-rw-r--r--noao/imred/src/doecslit/sgetspec.cl177
-rw-r--r--noao/imred/src/doecslit/sgetspec.par11
-rw-r--r--noao/imred/src/doecslit/slistonly.cl241
-rw-r--r--noao/imred/src/doecslit/slistonly.par13
-rw-r--r--noao/imred/src/doecslit/slittasks.cl19
-rw-r--r--noao/imred/src/doecslit/sparams.par65
-rw-r--r--noao/imred/src/doecslit/sproc.cl490
-rw-r--r--noao/imred/src/doecslit/sproc.par35
-rw-r--r--noao/imred/src/dofoe/Revisions47
-rw-r--r--noao/imred/src/dofoe/apscript.par145
-rw-r--r--noao/imred/src/dofoe/arcrefs.cl106
-rw-r--r--noao/imred/src/dofoe/arcrefs.par9
-rw-r--r--noao/imred/src/dofoe/batch.cl207
-rw-r--r--noao/imred/src/dofoe/batch.par25
-rw-r--r--noao/imred/src/dofoe/doarcs.cl167
-rw-r--r--noao/imred/src/dofoe/doarcs.par11
-rw-r--r--noao/imred/src/dofoe/dofoe.cl89
-rw-r--r--noao/imred/src/dofoe/dofoe.par24
-rw-r--r--noao/imred/src/dofoe/dofoetasks.cl19
-rw-r--r--noao/imred/src/dofoe/listonly.cl167
-rw-r--r--noao/imred/src/dofoe/listonly.par11
-rw-r--r--noao/imred/src/dofoe/params.par69
-rw-r--r--noao/imred/src/dofoe/proc.cl464
-rw-r--r--noao/imred/src/dofoe/proc.par36
-rw-r--r--noao/imred/src/dofoe/response.cl99
-rw-r--r--noao/imred/src/dofoe/response.par12
-rw-r--r--noao/imred/src/doslit/Revisions129
-rw-r--r--noao/imred/src/doslit/apslitproc.par145
-rw-r--r--noao/imred/src/doslit/demologfile1
-rw-r--r--noao/imred/src/doslit/doslit.cl64
-rw-r--r--noao/imred/src/doslit/doslit.par26
-rw-r--r--noao/imred/src/doslit/doslittasks.cl17
-rw-r--r--noao/imred/src/doslit/sarcrefs.cl118
-rw-r--r--noao/imred/src/doslit/sarcrefs.par9
-rw-r--r--noao/imred/src/doslit/sbatch.cl199
-rw-r--r--noao/imred/src/doslit/sbatch.par20
-rw-r--r--noao/imred/src/doslit/sdoarcs.cl101
-rw-r--r--noao/imred/src/doslit/sdoarcs.par7
-rw-r--r--noao/imred/src/doslit/sfluxcal.cl196
-rw-r--r--noao/imred/src/doslit/sfluxcal.par14
-rw-r--r--noao/imred/src/doslit/sgetspec.cl178
-rw-r--r--noao/imred/src/doslit/sgetspec.par11
-rw-r--r--noao/imred/src/doslit/slistonly.cl180
-rw-r--r--noao/imred/src/doslit/slistonly.par12
-rw-r--r--noao/imred/src/doslit/sparams.par65
-rw-r--r--noao/imred/src/doslit/sproc.cl404
-rw-r--r--noao/imred/src/doslit/sproc.par33
-rw-r--r--noao/imred/src/fibers/Revisions223
-rw-r--r--noao/imred/src/fibers/apscript.par145
-rw-r--r--noao/imred/src/fibers/arcrefs.cl326
-rw-r--r--noao/imred/src/fibers/arcrefs.par13
-rw-r--r--noao/imred/src/fibers/batch.cl297
-rw-r--r--noao/imred/src/fibers/batch.par38
-rw-r--r--noao/imred/src/fibers/doalign.cl78
-rw-r--r--noao/imred/src/fibers/doalign.par7
-rw-r--r--noao/imred/src/fibers/doarcs.cl264
-rw-r--r--noao/imred/src/fibers/doarcs.par17
-rw-r--r--noao/imred/src/fibers/fibresponse.cl261
-rw-r--r--noao/imred/src/fibers/fibresponse.par13
-rw-r--r--noao/imred/src/fibers/getspec.cl49
-rw-r--r--noao/imred/src/fibers/getspec.par5
-rw-r--r--noao/imred/src/fibers/listonly.cl237
-rw-r--r--noao/imred/src/fibers/listonly.par15
-rw-r--r--noao/imred/src/fibers/mkfibers.cl167
-rw-r--r--noao/imred/src/fibers/mkfibers.par11
-rw-r--r--noao/imred/src/fibers/params.par75
-rw-r--r--noao/imred/src/fibers/proc.cl707
-rw-r--r--noao/imred/src/fibers/proc.par52
-rw-r--r--noao/imred/src/fibers/skysub.cl145
-rw-r--r--noao/imred/src/fibers/skysub.par16
-rw-r--r--noao/imred/src/fibers/temp16
-rw-r--r--noao/imred/src/temp10
-rw-r--r--noao/imred/tutor.cl14
-rw-r--r--noao/imred/vtel/README81
-rw-r--r--noao/imred/vtel/Revisions209
-rw-r--r--noao/imred/vtel/asciilook.inc19
-rw-r--r--noao/imred/vtel/d1900.x15
-rw-r--r--noao/imred/vtel/decodeheader.x67
-rw-r--r--noao/imred/vtel/dephem.x139
-rw-r--r--noao/imred/vtel/destreak.par5
-rw-r--r--noao/imred/vtel/destreak.x432
-rw-r--r--noao/imred/vtel/destreak5.cl91
-rw-r--r--noao/imred/vtel/destreak5.par4
-rw-r--r--noao/imred/vtel/dicoplot.h35
-rw-r--r--noao/imred/vtel/dicoplot.par4
-rw-r--r--noao/imred/vtel/dicoplot.x522
-rw-r--r--noao/imred/vtel/doc/destreak.hlp50
-rw-r--r--noao/imred/vtel/doc/destreak5.hlp43
-rw-r--r--noao/imred/vtel/doc/dicoplot.hlp36
-rw-r--r--noao/imred/vtel/doc/fitslogr.hlp58
-rw-r--r--noao/imred/vtel/doc/getsqib.hlp33
-rw-r--r--noao/imred/vtel/doc/makehelium.hlp38
-rw-r--r--noao/imred/vtel/doc/makeimages.hlp64
-rw-r--r--noao/imred/vtel/doc/merge.hlp90
-rw-r--r--noao/imred/vtel/doc/mrotlogr.hlp63
-rw-r--r--noao/imred/vtel/doc/mscan.hlp86
-rw-r--r--noao/imred/vtel/doc/pimtext.hlp110
-rw-r--r--noao/imred/vtel/doc/putsqib.hlp38
-rw-r--r--noao/imred/vtel/doc/quickfit.hlp59
-rw-r--r--noao/imred/vtel/doc/readvt.hlp86
-rw-r--r--noao/imred/vtel/doc/rmap.hlp47
-rw-r--r--noao/imred/vtel/doc/syndico.hlp77
-rw-r--r--noao/imred/vtel/doc/tcopy.hlp56
-rw-r--r--noao/imred/vtel/doc/trim.hlp33
-rw-r--r--noao/imred/vtel/doc/unwrap.hlp95
-rw-r--r--noao/imred/vtel/doc/vtblink.hlp53
-rw-r--r--noao/imred/vtel/doc/vtexamine.hlp50
-rw-r--r--noao/imred/vtel/doc/writetape.hlp35
-rw-r--r--noao/imred/vtel/doc/writevt.hlp43
-rw-r--r--noao/imred/vtel/fitslogr.cl104
-rw-r--r--noao/imred/vtel/fitslogr.par6
-rw-r--r--noao/imred/vtel/gauss.x16
-rw-r--r--noao/imred/vtel/getsqib.par2
-rw-r--r--noao/imred/vtel/getsqib.x55
-rw-r--r--noao/imred/vtel/gryscl.inc52
-rw-r--r--noao/imred/vtel/imfglexr.x76
-rw-r--r--noao/imred/vtel/imfilt.x170
-rw-r--r--noao/imred/vtel/imratio.x29
-rw-r--r--noao/imred/vtel/lstsq.x85
-rw-r--r--noao/imred/vtel/makehelium.cl51
-rw-r--r--noao/imred/vtel/makehelium.par4
-rw-r--r--noao/imred/vtel/makeimages.cl66
-rw-r--r--noao/imred/vtel/makeimages.par4
-rw-r--r--noao/imred/vtel/merge.par9
-rw-r--r--noao/imred/vtel/merge.x762
-rw-r--r--noao/imred/vtel/mkpkg59
-rw-r--r--noao/imred/vtel/mrotlogr.cl68
-rw-r--r--noao/imred/vtel/mrotlogr.par5
-rw-r--r--noao/imred/vtel/mrqmin.x348
-rw-r--r--noao/imred/vtel/mscan.par8
-rw-r--r--noao/imred/vtel/mscan.x188
-rw-r--r--noao/imred/vtel/nsolcrypt.dat555
-rw-r--r--noao/imred/vtel/numeric.h12
-rw-r--r--noao/imred/vtel/numeric.x177
-rw-r--r--noao/imred/vtel/pimtext.par13
-rw-r--r--noao/imred/vtel/pimtext.x131
-rw-r--r--noao/imred/vtel/pixbit.x23
-rw-r--r--noao/imred/vtel/pixelfont.inc519
-rw-r--r--noao/imred/vtel/putsqib.par3
-rw-r--r--noao/imred/vtel/putsqib.x69
-rw-r--r--noao/imred/vtel/quickfit.par8
-rw-r--r--noao/imred/vtel/quickfit.x499
-rw-r--r--noao/imred/vtel/readheader.x59
-rw-r--r--noao/imred/vtel/readss1.x163
-rw-r--r--noao/imred/vtel/readss2.x174
-rw-r--r--noao/imred/vtel/readss3.x171
-rw-r--r--noao/imred/vtel/readss4.x85
-rw-r--r--noao/imred/vtel/readsubswath.x91
-rw-r--r--noao/imred/vtel/readvt.par6
-rw-r--r--noao/imred/vtel/readvt.x347
-rw-r--r--noao/imred/vtel/rmap.par5
-rw-r--r--noao/imred/vtel/rmap.x563
-rw-r--r--noao/imred/vtel/syndico.h13
-rw-r--r--noao/imred/vtel/syndico.par14
-rw-r--r--noao/imred/vtel/syndico.x416
-rw-r--r--noao/imred/vtel/tcopy.par5
-rw-r--r--noao/imred/vtel/tcopy.x190
-rw-r--r--noao/imred/vtel/textim.x114
-rw-r--r--noao/imred/vtel/trim.par2
-rw-r--r--noao/imred/vtel/trim.x75
-rw-r--r--noao/imred/vtel/trnsfrm.inc163
-rw-r--r--noao/imred/vtel/unwrap.par9
-rw-r--r--noao/imred/vtel/unwrap.x293
-rw-r--r--noao/imred/vtel/vt.h73
-rw-r--r--noao/imred/vtel/vtblink.cl150
-rw-r--r--noao/imred/vtel/vtblink.par4
-rw-r--r--noao/imred/vtel/vtel.cl38
-rw-r--r--noao/imred/vtel/vtel.hd29
-rw-r--r--noao/imred/vtel/vtel.men23
-rw-r--r--noao/imred/vtel/vtel.par1
-rw-r--r--noao/imred/vtel/vtexamine.par3
-rw-r--r--noao/imred/vtel/vtexamine.x195
-rw-r--r--noao/imred/vtel/writetape.cl45
-rw-r--r--noao/imred/vtel/writetape.par5
-rw-r--r--noao/imred/vtel/writevt.par4
-rw-r--r--noao/imred/vtel/writevt.x232
-rw-r--r--noao/imred/vtel/x_vtel.x16
1004 files changed, 140148 insertions, 0 deletions
diff --git a/noao/imred/Revisions b/noao/imred/Revisions
new file mode 100644
index 00000000..2d7bdc46
--- /dev/null
+++ b/noao/imred/Revisions
@@ -0,0 +1,237 @@
+.help revisions Jun88 noao.imred
+.nf
+
+imred$src/doecslit/apslitproc.par
+imred$src/dofoe/apscript.par
+imred$src/doslit/apslitproc.par
+imred$src/fibers/apscript.par
+imred$src/fibers/params.par
+ Changed the default maxsep from 1000 to 100000. Unless users reset
+ the default, their expectation is that marking apertures will not
+ skip an aperture number no matter how far apart the apertures are.
+ (2/17/09, Valdes)
+
+imred$src/fibers/skysub.cl
+imred$src/fibers/skysub.par
+ Added sum as an enumerated "combine" choice. (8/11/08, Valdes)
+
+=======
+V2.14.1
+=======
+
+imred$irs/dispcor.par
+imred$iids/dispcor.par
+ Changed "Conserve flux" to "Conserve total flux" per user request.
+ (6/13/08)
+
+=====
+V2.12
+=====
+
+imred$quadred/ +
+imred$imred.cl
+imred$imred.par
+imred$imred.hd
+imred$imred.men
+imred$mkpkg
+ Added new package QUADRED. (8/24/01, Valdes)
+
+imred$crutil/ +
+imred$imred.cl
+imred$imred.par
+imred$imred.hd
+imred$imred.men
+imred$mkpkg
+ Added new package CRUTIL. (8/22/01, Valdes)
+
+imred$argus/argus.cl
+imred$argus/hydra.cl
+imred$argus/echelle.cl
+imred$argus/kpnocoude.cl
+imred$argus/specred.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$hydra/demos/xgdonessie.dat
+ Fixed playback. (7/26/94, Valdes)
+
+imred$argus/argus.cl
+imred$argus/argus.men
+imred$ctioslit/ctioslit.cl
+imred$ctioslit/ctioslit.men
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+imred$hydra/hydra.cl
+imred$hydra/hydra.men
+imred$iids/iids.cl
+imred$iids/iids.men
+imred$irs/irs.cl
+imred$irs/irs.men
+imred$kpnocoude/kpnocoude.cl
+imred$kpnocoude/kpnocoude.men
+imred$kpnoslit/kpnoslit.cl
+imred$kpnoslit/kpnoslit.men
+imred$specred/specred.cl
+imred$specred/specred.men
+ Added new SFLIP task to packages. (7/18/94, Valdes)
+
+imred$doc/demos.hlp
+ Added noao.twodspec.longslit to the packages with demos. (7/24/92, Valdes)
+
+=======
+V2.10.0
+=======
+
+imred$imred.cl
+imred$imred.men
+imred$<spectroscopy packages>
+ Removed the SETAIRMASS task definition from IMRED and added it to
+ all the spectroscopy packages. Also added the new SETJD task to
+ the spectroscopy packages. (1/29/92, Valdes)
+
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+imred$ctioslit/* +
+imred$kpnocoude/* +
+imred$kpnoslit/* +
+imred$msred --> imred$specred
+imred$kpcoude/* -
+imred$foe/* -
+imred$echelle/*
+imred$src/slits/*
+ Reorganized the slit reduction packages and merged the fiber and
+ slit processing tasks into the kpnocoude package and the
+ echelle slit and FOE tasks into the echelle package. (1/15/92, Valdes)
+
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+imred$nessie/* -
+imred$hydra/* +
+ Changed package from NESSIE to HYDRA. (7/26/91, Valdes)
+
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+imred$foe/*
+ Added new package for Fiber Optic Echelle reductions.
+ (3/22/91, Valdes)
+
+imred$echelle
+imred$src/ecslits/*
+ Added new echelle slit reduction scripts and documentation.
+ (3/22/91, Valdes)
+
+imred$argus
+imred$echelle
+imred$goldcam
+imred$msred
+imred$nessie
+imred$specred
+imred$src
+ Miscellaneous minor updates. (3/22/91, Valdes)
+
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+imred$observatory.par -
+imred$observatory.cl -
+ Removed observatory from here and replaced it with a new version in
+ noao. (11/6/90, Valdes)
+
+imred$imred.cl
+ 1. Changed logical directory bias to biasdir to avoid incorrect
+ interpretation of the command:
+ cl> imrename foo bias
+ as a move to the logical directory. Bias is a likely name to
+ be used in CCDRED, which will always have bias defined.
+ (10/2/90, Valdes)
+
+imred$doc +
+imred$imred.hd
+imred$observatory.hlp -> noao$imred/doc
+imred$tutorial.hlp -> noao$imred/doc
+imred$demos.hlp +
+ 1. Created doc directory and moved above help to it.
+ 2. The help for the demos task, which appears in a number of packages, was
+ added to the IMRED help for lack of a more obvious place to put it.
+ (9/20/90, Valdes)
+
+imred$argus +
+imred$goldcam +
+imred$kpcoude +
+imred$nessie +
+imred$specred +
+imred$specphot -
+imred$coude -
+imred$echelle
+imred$msred
+imred$iids
+imred$irs
+imred$mkpkg
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+imred$imred.par
+ A major update of the IMRED spectroscopy packages was made. New packages
+ were added, one was removed, COUDE was renamed to KPCOUDE, new versions
+ of ECHELLE and MSRED based on major changes to APEXTRACT and ONEDSPEC
+ were installed, and minor changes were made.
+ (8-9/90, Valdes)
+
+imred$irred
+ Valdes, June 2, 1989
+ Added the SETAIRMASS task.
+
+imred$irred
+ Davis, April 1, 1989
+ 1. Installed the IRRED package. At present this is simply a collection
+ of tasks useful for IR observers.
+
+imred$cryomap -
+noao$mkpkg
+noao$imred.cl
+noao$imred.men
+noao$imred.hd
+ Valdes, February 27, 1987
+ 1. The CRYOMAP (Cryogenic Camera Multi-Aperture Plate) package was
+ archived (Tape 2977 - J. Barnes) and removed. It was never used
+ and data has not been taken with this system in many years. The
+ APEXTRACT package is better anyway. The MULTISPEC package remains.
+ 2. The IMRED package was modified to delete the CRYOMAP package.
+
+imred$dtoi
+ Hammond, February 13, 1987
+ 1. Installed the DTOI package.
+
+imred$observatory.cl
+ Valdes, October 6, 1986
+ 1. The OBSERVATORY task now calls EPARAM to edit the parameters.
+ 2. The help page was modified.
+
+imred$specphot/* +
+ Valdes/Barnes, October 6, 1986
+ 1. New IMRED package called SPECPHOT added. The focus of the package
+ is extraction of 1D spectra from 2D detectors and spectrophotometric
+ reduction of the spectra.
+
+imred$tutor.cl +
+imred$tutor.hlp +
+imred$imred.cl
+imred$imred.men
+imred$imred.hd
+ Valdes, August 18, 1986:
+ 1. Added experimental online TUTOR task.
+
+====================================
+Version 2.3 Release, August 18, 1986
+====================================
+
+imred$imred.cl: Valdes, April 30, 1986
+ 1. Removed the commands to load plot and images since this is now
+ done when the NOAO package is loaded.
+
+imred$observatory.cl: Valdes, April 7, 1986
+ 1. Task OBSERVATORY added to contain observatory parameters.
+.endhelp
diff --git a/noao/imred/argus/Revisions b/noao/imred/argus/Revisions
new file mode 100644
index 00000000..55828da5
--- /dev/null
+++ b/noao/imred/argus/Revisions
@@ -0,0 +1,60 @@
+.help revisions Jul91 noao.imred.argus
+.nf
+imred$argus/doc/doargus.hlp
+ Fixed minor formatting problem. (4/22/99, Valdes)
+
+imred$argus/doc/dohdydra.hlp
+imred$argus/doc/dohdydra.ms
+ Updated for the change where, if both crval and cdelt are INDEF, the
+ automatic identification is not done. (5/2/96, Valdes)
+
+imred$argus/demos/mkdoargus.cl
+ Made the ARTDATA package parameters explicit. (4/15/97, Valdes)
+
+imred$argus/argus.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$argus/doargus.cl
+imred$argus/doargus.par
+imred$argus/params.par
+imred$argus/doc/doargus.hlp
+imred$argus/doc/doargus.ms
+ Added crval/cdelt parameters used in new version with automatic arc
+ line identification. (4/5/96, Valdes)
+
+imred$argus/doc/doargus.hlp
+imred$argus/doc/doargus.ms
+ Describes the new header option for the aperture identification table.
+ (7/25/95, Valdes)
+
+imred$argus/argus.cl
+imred$argus/doargus.cl
+imred$argus/doargus.par
+imred$argus/doc/doargus.hlp
+imred$argus/doc/doargus.ms
+imred$argus/demos/xgdoargus.dat
+ Added sky alignment option. (7/19/95, Valdes)
+
+imred$argus/argus.cl
+imred$argus/argus.men
+ Added APSCATTER to the package. (6/30/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+imred$argus/argus.cl
+ Renamed the fiber response task to fibresponse. (12/31/94, Valdes)
+
+imred$argus/argus.cl
+ The task was incorrectly defined as being in the logical directory
+ msred instead of specred. (8/24/92, Valdes)
+
+=======
+V2.10.1
+=======
+
+imred/argus/*
+ Installed (7/24/91, Valdes)
+.endhelp
diff --git a/noao/imred/argus/argus.cl b/noao/imred/argus/argus.cl
new file mode 100644
index 00000000..66e8949e
--- /dev/null
+++ b/noao/imred/argus/argus.cl
@@ -0,0 +1,82 @@
+#{ ARGUS package definition
+
+proto # bscale
+
+s1 = envget ("min_lenuserarea")
+if (s1 == "")
+ reset min_lenuserarea = 100000
+else if (int (s1) < 100000)
+ reset min_lenuserarea = 100000
+
+# Define ARGUS package
+package argus
+
+# Package script tasks
+task doargus = "argus$doargus.cl"
+task params = "argus$params.par"
+
+# Fiber reduction script tasks
+task proc = "srcfibers$proc.cl"
+task fibresponse = "srcfibers$fibresponse.cl"
+task arcrefs = "srcfibers$arcrefs.cl"
+task doarcs = "srcfibers$doarcs.cl"
+task doalign = "srcfibers$doalign.cl"
+task skysub = "srcfibers$skysub.cl"
+task batch = "srcfibers$batch.cl"
+task listonly = "srcfibers$listonly.cl"
+task getspec = "srcfibers$getspec.cl"
+
+task msresp1d = "specred$msresp1d.cl"
+
+# Demos
+set demos = "argus$demos/"
+task demos = "demos$demos.cl"
+task mkfibers = "srcfibers$mkfibers.cl"
+
+# Onedspec tasks
+task autoidentify,
+ continuum,
+ dispcor,
+ dopcor,
+ identify,
+ refspectra,
+ reidentify,
+ sapertures,
+ sarith,
+ sflip,
+ slist,
+ specplot,
+ specshift,
+ splot = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ aprecenter,
+ apresize,
+ apscatter,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apdefault = "apextract$apdefault.par"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apscript = "srcfibers$x_apextract.e"
+task apscat1 = "apextract$apscat1.par"
+task apscat2 = "apextract$apscat2.par"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apscript, apscat1, apscat2, dispcor1, mkfibers
+hidetask params, proc, batch, arcrefs, doarcs, listonly, fibresponse, getspec
+hidetask doalign
+
+clbye()
diff --git a/noao/imred/argus/argus.dat b/noao/imred/argus/argus.dat
new file mode 100644
index 00000000..6caa8ccd
--- /dev/null
+++ b/noao/imred/argus/argus.dat
@@ -0,0 +1,48 @@
+1 1
+2 0
+3 1
+4 0
+5 1
+6 0
+7 1
+8 0
+9 1
+10 0
+11 1
+12 0
+13 1
+14 0
+15 1
+16 0
+17 1
+18 0
+19 1
+20 0
+21 1
+22 0
+23 1
+24 0
+25 1
+26 0
+27 1
+28 0
+29 1
+30 0
+31 1
+32 0
+33 1
+34 0
+35 1
+36 0
+37 1
+38 0
+39 1
+40 0
+41 1
+42 0
+43 1
+44 0
+45 1
+46 0
+47 1
+48 0
diff --git a/noao/imred/argus/argus.hd b/noao/imred/argus/argus.hd
new file mode 100644
index 00000000..a3fb1064
--- /dev/null
+++ b/noao/imred/argus/argus.hd
@@ -0,0 +1,7 @@
+# Help directory for the ARGUS package.
+
+$doc = "./doc/"
+
+doargus hlp=doc$doargus.hlp
+
+revisions sys=Revisions
diff --git a/noao/imred/argus/argus.men b/noao/imred/argus/argus.men
new file mode 100644
index 00000000..d5769d30
--- /dev/null
+++ b/noao/imred/argus/argus.men
@@ -0,0 +1,32 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and remove scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ continuum - Fit the continuum in spectra
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically identify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+
+ doargus - Process ARGUS spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/argus/argus.par b/noao/imred/argus/argus.par
new file mode 100644
index 00000000..c2c381a5
--- /dev/null
+++ b/noao/imred/argus/argus.par
@@ -0,0 +1,13 @@
+# ARGUS parameter file
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,""
+version,s,h,"ARGUS V1: January 1992"
diff --git a/noao/imred/argus/demos/demos.cl b/noao/imred/argus/demos/demos.cl
new file mode 100644
index 00000000..5b065c51
--- /dev/null
+++ b/noao/imred/argus/demos/demos.cl
@@ -0,0 +1,18 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile))
+ cl (< demofile)
+ else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/argus/demos/demos.men b/noao/imred/argus/demos/demos.men
new file mode 100644
index 00000000..ee467bdf
--- /dev/null
+++ b/noao/imred/argus/demos/demos.men
@@ -0,0 +1,4 @@
+ MENU of ARGUS Demonstrations
+
+ doargus - Quick test of DOARGUS (small images, no comments, no delays)
+ mkdoargus - Make DOARGUS test data (12 fibers, 100x256)
diff --git a/noao/imred/argus/demos/demos.par b/noao/imred/argus/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/argus/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/argus/demos/doargus.cl b/noao/imred/argus/demos/doargus.cl
new file mode 100644
index 00000000..51e7f0d9
--- /dev/null
+++ b/noao/imred/argus/demos/doargus.cl
@@ -0,0 +1,13 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdoargus.cl")
+
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+unlearn doargus params
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdoargus.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/argus/demos/header.dat b/noao/imred/argus/demos/header.dat
new file mode 100644
index 00000000..2e104ae7
--- /dev/null
+++ b/noao/imred/argus/demos/header.dat
@@ -0,0 +1,36 @@
+OBJECT = 'V640Mon 4500 ' / object name
+OBSERVAT= 'CTIO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 1200. / actual integration time
+DARKTIME= 1200. / total elapsed time
+IMAGETYP= 'object ' / object, dark, bias, etc.
+DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+UT = '12:19:55.00 ' / universal time
+ST = '09:13:15.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:08:52.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '44.580 ' / zenith distance
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'IRAF/ARTDATA ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/argus/demos/mkdoargus.cl b/noao/imred/argus/demos/mkdoargus.cl
new file mode 100644
index 00000000..39c94db0
--- /dev/null
+++ b/noao/imred/argus/demos/mkdoargus.cl
@@ -0,0 +1,22 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkfibers ("demoobj", type="object", fibers="demos$mkdoargus.dat",
+ title="Argus artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=1)
+mkfibers ("demoflat", type="flat", fibers="demos$mkdoargus.dat",
+ title="Argus artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=2)
+mkfibers ("demoarc", type="henear", fibers="demos$mkdoargus.dat",
+ title="Argus artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=3)
diff --git a/noao/imred/argus/demos/mkdoargus.dat b/noao/imred/argus/demos/mkdoargus.dat
new file mode 100644
index 00000000..8f9311c0
--- /dev/null
+++ b/noao/imred/argus/demos/mkdoargus.dat
@@ -0,0 +1,13 @@
+ 1 1 1.096679 gauss 2.7 0 86.801 0.002
+ 2 0 1.164292 gauss 2.7 0 81.093 0.002
+ 3 1 0.457727 gauss 2.7 0 74.824 0.002
+ 4 0 1.269284 gauss 2.7 0 68.719 0.002
+ 5 1 1.309297 gauss 2.7 0 62.536 0.002
+ 7 1 1.283618 gauss 2.7 0 50.218 0.002
+ 8 0 0.687173 gauss 2.7 0 43.963 0.002
+ 9 1 1.175850 gauss 2.7 0 38.0091 0.002
+10 0 0.757532 gauss 2.7 0 31.9606 0.002
+11 1 0.939866 gauss 2.7 0 25.1000 0.002
+12 0 1.015546 gauss 2.7 0 19.5097 0.002
+13 1 0.372036 gauss 2.7 0 13.5889 0.002
+14 0 1.065080 gauss 2.7 0 07.4535 0.002
diff --git a/noao/imred/argus/demos/xgdoargus.dat b/noao/imred/argus/demos/xgdoargus.dat
new file mode 100644
index 00000000..e3fb9dfd
--- /dev/null
+++ b/noao/imred/argus/demos/xgdoargus.dat
@@ -0,0 +1,76 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\sargus\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdoargus\n
+demoobj\r
+demoflat\r
+demoflat\r
+\r
+demoarc\r
+\r
+\r
+rdnoise\r
+gain\r
+\r
+13\r
+4\r
+5\r
+7\r
+\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+doargus\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+j/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+\n
+#/<-5\s\s\s\s/=(.\s=\r 3\r
+#/<-5\s\s\s\s/=(.\s=\r 14\r
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/argus/doargus.cl b/noao/imred/argus/doargus.cl
new file mode 100644
index 00000000..a34e61c8
--- /dev/null
+++ b/noao/imred/argus/doargus.cl
@@ -0,0 +1,71 @@
+# DOARGUS -- Process ARGUS spectra from 2D to wavelength calibrated 1D.
+#
+# The task PROC does all of the interactive work and BATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
+
+procedure doargus (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+file throughput = "" {prompt="Throughput file or image (optional)"}
+string arcs1 = "" {prompt="List of arc spectra"}
+string arcs2 = "" {prompt="List of shift arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)\n"}
+
+string readnoise = "0." {prompt="Read out noise sigma (photons)"}
+string gain = "1." {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int fibers = 48 {prompt="Number of fibers"}
+real width = 6. {prompt="Width of profiles (pixels)"}
+real minsep = 8. {prompt="Minimum separation between fibers (pixels)"}
+real maxsep = 10. {prompt="Maximum separation between fibers (pixels)"}
+file apidtable = "" {prompt="Aperture identifications"}
+string crval = "INDEF" {prompt="Approximate central wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion"}
+string objaps = "" {prompt="Object apertures"}
+string skyaps = "2x2" {prompt="Sky apertures"}
+string objbeams = "" {prompt="Object beam numbers"}
+string skybeams = "" {prompt="Sky beam numbers\n"}
+
+bool scattered = no {prompt="Subtract scattered light?"}
+bool fitflat = yes {prompt="Fit and ratio flat field spectrum?"}
+bool clean = yes {prompt="Detect and replace bad pixels?"}
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool skyalign = no {prompt="Align sky lines?"}
+bool skysubtract = yes {prompt="Subtract sky?"}
+bool skyedit = yes {prompt="Edit the sky spectra?"}
+bool saveskys = yes {prompt="Save sky spectra?"}
+bool splot = no {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = yes {prompt="Update spectra if cal data changes?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset params = "" {prompt="Algorithm parameters"}
+
+begin
+ apscript.readnoise = readnoise
+ apscript.gain = gain
+ apscript.nfind = fibers
+ apscript.width = width
+ apscript.t_width = width
+ apscript.minsep = minsep
+ apscript.maxsep = maxsep
+ apscript.radius = minsep
+ apscript.clean = clean
+ proc.datamax = datamax
+
+ proc (objects, apref, flat, throughput, arcs1, arcs2, "",
+ arctable, fibers, apidtable, crval, cdelt, objaps, skyaps, "",
+ objbeams, skybeams, "", scattered, fitflat, no, no, no, no,
+ clean, dispcor, no, skyalign, skysubtract, skyedit, saveskys,
+ splot, redo, update, batch, listonly)
+
+ if (proc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("batch&batch") | cl
+ }
+end
diff --git a/noao/imred/argus/doargus.par b/noao/imred/argus/doargus.par
new file mode 100644
index 00000000..fb55934c
--- /dev/null
+++ b/noao/imred/argus/doargus.par
@@ -0,0 +1,39 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+flat,f,h,"",,,"Flat field spectrum"
+throughput,f,h,"",,,"Throughput file or image (optional)"
+arcs1,s,h,"",,,"List of arc spectra"
+arcs2,s,h,"",,,"List of shift arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)
+"
+readnoise,s,h,"0.",,,"Read out noise sigma (photons)"
+gain,s,h,"1.",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+fibers,i,h,48,,,"Number of fibers"
+width,r,h,6.,,,"Width of profiles (pixels)"
+minsep,r,h,8.,,,"Minimum separation between fibers (pixels)"
+maxsep,r,h,10.,,,"Maximum separation between fibers (pixels)"
+apidtable,f,h,"",,,"Aperture identifications"
+crval,s,h,INDEF,,,"Approximate central wavelength"
+cdelt,s,h,INDEF,,,"Approximate dispersion"
+objaps,s,h,"",,,"Object apertures"
+skyaps,s,h,"2x2",,,"Sky apertures"
+objbeams,s,h,"",,,"Object beam numbers"
+skybeams,s,h,"",,,"Sky beam numbers
+"
+scattered,b,h,no,,,"Subtract scattered light?"
+fitflat,b,h,yes,,,"Fit and ratio flat field spectrum?"
+clean,b,h,yes,,,"Detect and replace bad pixels?"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+skyalign,b,h,no,,,"Align sky lines?"
+skysubtract,b,h,yes,,,"Subtract sky?"
+skyedit,b,h,yes,,,"Edit the sky spectra?"
+saveskys,b,h,yes,,,"Save sky spectra?"
+splot,b,h,no,,,"Plot the final spectrum?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,yes,,,"Update spectra if cal data changes?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+params,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/argus/doc/doargus.hlp b/noao/imred/argus/doc/doargus.hlp
new file mode 100644
index 00000000..0ffc4bb6
--- /dev/null
+++ b/noao/imred/argus/doc/doargus.hlp
@@ -0,0 +1,1464 @@
+.help doargus Jul95 noao.imred.argus
+.ih
+NAME
+doargus -- Argus data reduction task
+.ih
+USAGE
+doargus objects
+.ih
+SUMMARY
+The \fBdoargus\fR reduction task is specialized for scattered light
+subtraction, extraction, flat fielding, fiber throughput correction,
+wavelength calibration, and sky subtraction of \fIArgus\fR fiber spectra.
+It is a command language script which collects and combines the functions
+and parameters of many general purpose tasks to provide a single complete
+data reduction path. The task provides a degree of guidance, automation,
+and record keeping necessary when dealing with the large amount of data
+generated by this multifiber instrument.
+.ih
+PARAMETERS
+.ls objects
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.le
+.ls flat = "" (optional)
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.le
+.ls throughput = "" (optional)
+Throughput file or image. If an image is specified, typically a blank sky
+observation, the total flux through each fiber is used to correct for fiber
+throughput. If a file consisting of lines with the aperture number and
+relative throughput is specified then the fiber throughput will be
+corrected by those values. If neither is specified but a flat field image
+is given it is used to compute the throughput.
+.le
+.ls arcs1 = "" (at least one if dispersion correcting)
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.le
+.ls arcs2 = "" (optional)
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIparams.sort\fR, such as the observation time is made.
+.le
+
+.ls readnoise = "0." (apsum)
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.le
+.ls gain = "1." (apsum)
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or arc
+spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls fibers = 48 (apfind)
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image. Note that Argus fibers which are unassigned will still
+contain enough light for identification and the aperture identification
+table will be used to eliminate the unassigned fibers. The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.le
+.ls width = 6. (apedit)
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.le
+.ls minsep = 8. (apfind)
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.le
+.ls maxsep = 10. (apfind)
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.le
+.ls apidtable = "" (apfind)
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title. An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. Unassigned and broken fibers (beam of -1)
+should be included in this list since they will automatically be excluded.
+.le
+.ls crval = INDEF, cdelt = INDEF (autoidentify)
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.le
+.ls objaps = "", skyaps = "2x2"
+List of object and sky aperture numbers. These are used to identify
+object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Because the fibers typically alternate
+sky and object the default is to define the sky apertures by their
+aperture numbers and select both object and sky fibers for sky subtraction.
+.le
+.ls objbeams = "", skybeams = ""
+List of object and sky beam numbers.
+The beam numbers are typically the same as the aperture numbers unless
+set in the \fIapidtable\fR.
+.le
+
+.ls scattered = no (apscatter)
+Smooth and subtract scattered light from the object and flat field
+images. This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The fit to the
+initial flat field, or to the aperture reference image if none is given,
+is done interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.le
+.ls fitflat = yes (flat1d)
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.le
+.ls clean = yes (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameters
+can be useful.
+.le
+.ls dispcor = yes
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.le
+.ls skyalign = no
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.le
+.ls skysubtract = yes
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.le
+.ls skyedit = yes
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.le
+.ls saveskys = yes
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.le
+.ls splot = no
+Plot the final spectra with the task \fBsplot\fR?
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.le
+.ls update = yes
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.le
+.ls batch = no
+Process spectra as a background or batch job provided no interactive
+options (\fIskyedit\fR and \fIsplot\fR) are selected.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls params = "" (pset)
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.le
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all task in the
+package. This is also true of \fBdoargus\fR.
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.le
+.ls observatory = "observatory"
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For Argus data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls database = "database"
+Database (directory) used for storing aperture and dispersion information.
+.le
+.ls verbose = no
+Print verbose information available with various tasks.
+.le
+.ls logfile = "logfile", plotfile = ""
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.le
+.ls records = ""
+Dummy parameter to be ignored.
+.le
+.ls version = "ARGUS: ..."
+Version of the package.
+.le
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdoargus\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls order = "decreasing" (apfind)
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.le
+.ls extras = no (apsum)
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -3., upper = 3. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 3 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+.ls buffer = 1. (apscatter)
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.le
+.ls apscat1 = "" (apscatter)
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.le
+.ls apscat2 = "" (apscatter)
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum) (fit1d|fit2d)
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for Argus data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+.ls nsubaps = 1 (apsum)
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.le
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+.ls f_interactive = yes (fit1d)
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.le
+.ls f_function = "spline3", f_order = 10 (fit1d)
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (autoidentify/identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelists$ctiohenear.dat" (autoidentify/identify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.le
+.ls match = -3. (autoidentify/identify)
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (autoidentify/identify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 10. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "chebyshev", i_order = 3 (autoidentify/identify)
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.le
+.ls i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (reidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+.ls addfeatures = no (reidentify)
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd", group = "ljd" (refspectra)
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdoargus\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+.ls combine = "average" (scombine) (average|median)
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (scombine) (none|minmax|avsigclip)
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.fi
+
+.le
+.ls scale = "none" (none|mode|median|mean)
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
+.ih
+DESCRIPTION
+The \fBdoargus\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIArgus\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single, complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments.
+
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \fIredo\fR and \fIupdate\fR options, skips or
+repeats some or all of the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage for
+Argus since there are many variations possible. Because \fBdoargus\fR
+combines many separate, general purpose tasks the description given here
+refers to these tasks and leaves some of the details to their help
+documentation.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdoargus\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+.le
+.ls [2]
+Set the \fBdoargus\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Prepare and specify the aperture identification table if desired. If
+the image headers contain the fiber identification information with
+SLFIB keywords then specify an image for the aperture identification table.
+You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may change with different
+detector setups. The processing parameters are set for complete reductions
+but for quicklook you might not use the clean option or dispersion
+calibration and sky subtraction.
+
+The parameters are set for a particular Argus configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers may have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.le
+.ls [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.le
+.ls [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.le
+.ls [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.le
+.ls [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.le
+.ls [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.le
+.ls [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.le
+.ls [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.le
+.ls [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.le
+.ls [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.le
+.ls [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.le
+.ls [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra may
+also have part of the aperture identification table name added, if
+used, to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+.le
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of Argus object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects.
+This is generally done using the \fBccdred\fR package.
+The \fBdoargus\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+Flat fielding is
+generally not done at this stage but as part of \fBdoargus\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
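+
+As a sketch of adding a dummy CCDPROC keyword with \fBhedit\fR (the image
+template "obj*.imh" and the keyword value are only illustrative):
+
+.nf
+    cl> hedit obj*.imh ccdproc "Processed outside IRAF" add+ verify-
+.fi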
+
+The task \fBdoargus\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, and auxiliary
+mercury line (from the dome lights) or sky line spectra. The flat field,
+throughput image or file, and auxiliary emission line spectra are optional.
+If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+
+There are two types of dispersion calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the usual method with Argus.
+A second (uncommon) method is to use \fIauxiliary
+line spectra\fR such as lines in the dome lights or sky lines to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient.
+
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdoargus\fR.
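+A plausible sketch of such a table, with each line naming an object image
+followed by the arc exposure(s) to be assigned to it, is shown below; the
+image names are invented and the exact format is documented in
+\fBrefspectra\fR.
+
+.nf
+    obj0101  arc0100  arc0105
+    obj0102  arc0105
+.fi
+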
+
+The first step in the processing is identifying the spectra in the images.
+The default method is to use the fact that object and sky fibers alternate
+and assign sequential numbers to the fibers so that the sky fibers are the
+even aperture numbers and the object fibers are the odd aperture numbers.
+In this case the beam numbers are not used (and are the same as the
+aperture numbers) and there are no object identifications associated with the
+spectra.
+
+A very useful, optional, setup parameter is an \fIaperture identification
+table\fR. The table contains information about the fiber assignments
+including object titles. The table is either a text file or an image
+containing the keywords SLFIB. An aperture identification file contains
+lines consisting of an aperture number, a beam number, and an object
+identification. In an image the SLFIB keywords contain the aperture
+number, the beam numbers, optional right ascension and declination, and a
+title. The aperture identification information must be in the same order
+as the fibers in the image. The aperture number may be any unique number
+but it is recommended that the normal sequential fiber numbers be used.
+The beam number may be used to flag object or sky spectra or simply be the
+same as the aperture number. The object identifications are optional but
+it is good practice to include them so that the data will contain the
+object information independent of other records. Figure 1 shows an example
+of a file for a configuration called LMC124.
+
+.nf
+
+ Figure 1: Example Aperture Identification File
+
+ cl> type LMC124
+ 1 1 143
+ 2 0 sky
+ 3 1 121
+ .
+ .
+ .
+ 47 1 s92
+ 48 0 sky
+
+.fi
+Note the identification of the sky fibers with beam number 0 and the
+object fibers with 1. Any broken fibers should be included and
+identified by a different beam number, say beam number -1, to give the
+automatic spectrum finding operation the best chance to make the
+correct identifications. Naturally the identification table will vary
+for each configuration.
+Additional information about the aperture identification
+table may be found in the description of the task \fBapfind\fR.
+
+In more recent Argus data the fiber information is included in the
+image header under the keywords SLFIB. In this case you don't need
+to prepare a file; simply specify the name of an image, typically
+the same as the aperture reference image, for the aperture identification
+table.
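+
+One way to check whether an image already carries these assignments is to
+list its header and search for the SLFIB keywords (the image name here is
+only illustrative):
+
+.nf
+    cl> imheader demoobj l+ | match SLFIB
+.fi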
+
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \fIextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
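+
+For instance, a reduced spectrum might be examined or split into single
+aperture images as follows (the names "demoobj.ms" and "obj1d" are purely
+illustrative):
+
+.nf
+    cl> splot demoobj.ms
+    cl> scopy demoobj.ms obj1d format="onedspec"
+.fi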
+
+\fBPackage Parameters\fR
+
+The \fBargus\fR package parameters set parameters affecting all the
+tasks in the package.
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is only required
+for data taken with fiber instruments other than Argus.
+The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdoargus\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
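+
+For example, assuming the plot file was named "plotfile", the accumulated
+plots could be browsed with:
+
+.nf
+    cl> gkimosaic plotfile
+.fi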
+
+\fBProcessing Parameters\fR
+
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs are the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs are the auxiliary emission line exposures mentioned
+previously.
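+
+A common way to prepare such "@ files" is with the \fBfiles\fR task; the
+list and image names below are only examples:
+
+.nf
+    cl> files obj*.imh > objects.lis
+    cl> files arc*.imh > arcs.lis
+    cl> doargus @objects.lis arcs1=@arcs.lis
+.fi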
+
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction.
+The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra are to
+be correctly skipped. The number of fibers can be left at the default
+and the task will try to account for unassigned or missing fibers.
+However, this may lead to occasional incorrect
+identifications so it is recommended that only the true number of
+fibers be specified. The aperture identification table was described
+earlier.
+
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
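+
+For example, rough values might be set directly on the task parameters
+(the numbers below are purely illustrative):
+
+.nf
+    cl> doargus.crval = "6600"
+    cl> doargus.cdelt = "6.1"
+.fi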
+
+The task needs to know which fibers are object and which are sky
+if sky subtraction is to be done. One could explicitly
+give the aperture numbers but the recommended way is to use the default
+of selecting every second fiber as sky. If no list of aperture or beam
+numbers is given
+then all apertures or beam numbers are selected. Sky subtracted sky
+spectra are useful for evaluating the sky subtraction. Since only
+the spectra identified as objects are sky subtracted one can exclude
+fibers from the sky subtraction. For example, to eliminate the sky
+spectra from the final results the \fIobjaps\fR parameter could be
+set to "1x2". All other fibers will remain in the extracted spectra
+but will not be sky subtracted.
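+
+For example, to restrict the sky subtraction to the odd numbered
+(object) fibers, as described above, one would set
+.nf
+
+    cl> doargus.objaps = "1x2"
+
+.fi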
+
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \fIclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. It is also possible
+to subtract the sky and object fibers by pairs. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum (or individual skys if subtracting by
+pairs) may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR task. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference. If all input spectra are to be processed regardless of
+previous processing the \fIredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
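+
+For example, to force complete reprocessing of a previously reduced set
+of images (the @ file name is illustrative) one might use
+.nf
+
+    cl> doargus @objects.lis redo+
+
+.fi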
+
+The \fIbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
+
+The \fIlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
+
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way
+\fBdoargus\fR works this parameter need not have any value; the parameter
+set \fBparams\fR is always used. The algorithm parameters are discussed
+further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the \fBdoargus\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdoargus\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+Argus. The parameter values can be changed from the
+defaults by using the parameter editor,
+.nf
+
+ cl> epar params
+
+.fi
+or simply typing \fIparams\fR. The parameter editor can also be
+entered when editing the \fBdoargus\fR parameters by typing \fI:e
+params\fR or simply \fI:e\fR if positioned at the \fIparams\fR
+parameter.
+
+\fBExtraction\fR
+
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \fInsubaps\fR control the extractions.
+
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \fInsum\fR parameter.
+
+The order parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no table is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\fIextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
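+
+Although no other task uses the sigma information, it may be examined
+with standard image tools through image sections. For instance, the
+sigma band of a three dimensional extraction could be copied to its own
+image (the names are illustrative) with
+.nf
+
+    cl> imcopy demoobj.ms[*,*,3] demosigma
+
+.fi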
+
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \fIminsep\fR
+distance, and then keeping the specified \fInfibers\fR highest peaks. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \fIwidth\fR parameter.
+
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \fIlower\fR and
+\fIupper\fR parameters.
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \fIylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended
+and it is fundamentally important that the correct aperture/beam numbers
+be associated with the proper fibers;
+otherwise the spectrum identifications will not correspond to the actual
+objects observed. An important command in
+this regard is the 'o' key which allows reordering the identifications.
+This is required if the first
+fiber is actually missing since the initial assignment begins with the
+first spectrum found. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \fIt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
+
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \fIreadnoise\fR and \fIgain\fR detector
+parameters. Note that if the \fIclean\fR option is selected the variance
+weighted extraction is used regardless of the \fIweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
+
+The last parameter, \fInsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
+
+\fBScattered Light Subtraction\fR
+
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+
+\fBFlat Field and Fiber Throughput Corrections\fR
+
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdoargus\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdoargus\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
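+
+As a purely illustrative example, such a throughput file is a simple
+text file of aperture numbers and relative throughputs:
+.nf
+
+    cl> type throughput.dat
+    1  1.03
+    2  0.98
+    3  1.01
+
+.fi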
+
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \fIfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \fIf_function\fR and
+\fIf_order\fR. If the parameter \fIf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the sky flat field parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
+
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+
+\fBDispersion Correction\fR
+
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdoargus\fR task tries to simplify for you. There are three basic
+steps involved; determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether
+or not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
+
+The set of arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the widths of
+the lines at their base), the dispersion function type and order,
+parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
+
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+Argus spectra. Dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image.
+
+However, there is another calibration option which may be of interest.
+This option uses auxiliary line spectra, such as from dome lights or night
+sky lines, to monitor wavelength zero point shifts which are added to the
+basic dispersion function derived from a single reference arc. Initially
+one of the auxiliary fiber spectra is plotted interactively by
+\fBidentify\fR with the reference dispersion function for the appropriate
+fiber. The user marks one or more lines which will be used to compute zero
+point wavelength shifts in the dispersion functions automatically. In this
+case it is auxiliary arc images which are assigned to particular objects.
+
+The arc or auxiliary line image assignments may be done either explicitly with an arc assignment
+table (parameter \fIarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
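+
+A hypothetical arc assignment table, listing each object image followed
+by the one or two arcs to be assigned to it, might look like the
+following (consult \fBrefspectra\fR for the exact format):
+.nf
+
+    cl> type arctable.dat
+    obj0101  arc0100
+    obj0102  arc0100  arc0103
+    obj0104  arc0103
+
+.fi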
+
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \fIlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
+
+\fBSky Subtraction\fR
+
+Sky subtraction is selected with the \fIskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers.
+If the \fIskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+
+If the combining option is "none" then the sky and object fibers are
+paired and one sky is subtracted from one object and the saved sky will
+be the individual sky fiber spectra.
+
+However, the usual
+case is to combine the individual skys into a single master sky spectrum
+which is then subtracted from each object spectrum.
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this are to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual skys are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+though in that case the earlier throughput correction was probably
+inadequate. If the sky subtraction is done interactively, i.e. with the
+\fIskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
+
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra
+are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \fIsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is also the sequence performed
+by the test procedure "demos qtest".
+
+.nf
+ar> demos mkqdata
+Creating image demoobj ...
+Creating image demoflat ...
+Creating image demoarc ...
+hy> argus.verbose = yes
+hy> doargus demoobj apref=demoflat flat=demoflat arcs1=demoarc \
+>>> fib=13 width=4. minsep=5. maxsep=7. clean- splot+
+Set reference apertures for demoflat
+Resize apertures for demoflat? (yes):
+Edit apertures for demoflat? (yes):
+<Exit with 'q'>
+Fit curve to aperture 1 of demoflat interactively (yes):
+<Exit with 'q'>
+Fit curve to aperture 2 of demoflat interactively (yes): N
+Create response function demoflatnorm.ms
+Extract flat field demoflat
+Fit and ratio flat field demoflat
+<Exit with 'q'>
+Extract flat field demoflat
+Fit and ratio flat field demoflat
+Create the normalized response demoflatnorm.ms
+demoflatnorm.ms -> demoflatnorm.ms using bzero: 0.
+ and bscale: 1.000001
+ mean: 1.000001 median: 1.110622 mode: 1.331709
+ upper: INDEF lower: INDEF
+Average aperture response:
+1. 1.136281
+2. 1.208727
+3. 0.4720535
+4. 1.308195
+5. 1.344551
+6. 1.330406
+7. 0.7136359
+8. 1.218975
+9. 0.7845755
+10. 0.9705642
+11. 1.02654
+12. 0.3745525
+13. 1.110934
+Extract arc reference image demoarc
+Determine dispersion solution for demoarc
+<A dispersion solution is found automatically.>
+<Type 'f' to look at fit. Type 'q' to exit fit.>
+<Exit with 'q'>
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Tue 16:01:07 11-Feb-92
+ Reference image = d....ms.imh, New image = d....ms, Refit = yes
+ Image Data Found Fit Pix Shift User Shift Z Shift RMS
+d....ms - Ap 7 29/29 29/29 9.53E-4 0.00409 2.07E-7 0.273
+Fit dispersion function interactively? (no|yes|NO|YES) (yes): n
+d....ms - Ap 5 29/29 29/29 -0.0125 -0.0784 -1.2E-5 0.315
+Fit dispersion function interactively? (no|yes|NO|YES) (no): y
+d....ms - Ap 5 29/29 29/29 -0.0125 -0.0784 -1.2E-5 0.315
+d....ms - Ap 4 29/29 29/29 -0.0016 -0.0118 -2.7E-6 0.284
+Fit dispersion function interactively? (no|yes|NO|YES) (yes): N
+d....ms - Ap 4 29/29 29/29 -0.0016 -0.0118 -2.7E-6 0.284
+d....ms - Ap 3 29/29 29/29 -0.00112 -0.00865 -1.8E-6 0.282
+d....ms - Ap 2 29/29 29/29 -0.00429 -0.0282 -4.9E-6 0.288
+d....ms - Ap 1 29/29 28/29 0.00174 0.00883 6.63E-7 0.228
+d....ms - Ap 9 29/29 29/29 -0.00601 -0.0387 -6.5E-6 0.268
+d....ms - Ap 10 29/29 29/29 -9.26E-4 -0.00751 -1.7E-6 0.297
+d....ms - Ap 11 29/29 29/29 0.00215 0.0114 1.05E-6 0.263
+d....ms - Ap 12 29/29 29/29 -0.00222 -0.0154 -2.8E-6 0.293
+d....ms - Ap 13 29/29 29/29 -0.0138 -0.0865 -1.4E-5 0.29
+d....ms - Ap 14 29/29 29/29 -0.00584 -0.0378 -6.8E-6 0.281
+
+Dispersion correct demoarc
+demoarc.ms: w1 = 5785.8..., w2 = 7351.6..., dw = 6.14..., nw = 256
+ Change wavelength coordinate assignments? (yes|no|NO): n
+Extract object spectrum demoobj
+Assign arc spectra for demoobj
+[demoobj] refspec1='demoarc'
+Dispersion correct demoobj
+demoobj.ms.imh: w1 = 5785.833, w2 = 7351.63, dw = 6.140378, nw = 256
+Sky subtract demoobj: skybeams=0
+Edit the sky spectra? (yes):
+<Exit with 'q'>
+Sky rejection option (none|minmax|avsigclip) (avsigclip):
+demoobj.ms.imh:
+Splot spectrum? (no|yes|NO|YES) (yes):
+Image line/aperture to plot (1:) (1):
+<Look at spectra and change apertures with # key>
+<Exit with 'q'>
+.fi
+.ih
+REVISIONS
+.ls DOARGUS V2.11
+A sky alignment option was added.
+
+The aperture identification can now be taken from image header keywords.
+
+The initial arc line identification is done with the automatic line
+identification algorithm.
+.le
+.ls DOARGUS V2.10.3
+The usual output WCS format is "equispec". The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A scattered
+light subtraction processing option has been added.
+.le
+.ih
+SEE ALSO
+apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace, apvariance,
+ccdred, center1d, dispcor, fit1d, icfit, identify, msresp1d, observatory,
+onedspec.package, refspectra, reidentify, scombine, setairmass, setjd,
+specplot, splot
+.endhelp
diff --git a/noao/imred/argus/doc/doargus.ms b/noao/imred/argus/doc/doargus.ms
new file mode 100644
index 00000000..f68883c1
--- /dev/null
+++ b/noao/imred/argus/doc/doargus.ms
@@ -0,0 +1,1725 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND July 1995
+.TL
+Guide to the ARGUS Reduction Task DOARGUS
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+The \fBdoargus\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIArgus\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by this multifiber instrument. This guide describes what
+this task does, its usage, and its parameters.
+.AE
+.NH
+Introduction
+.LP
+The \fBdoargus\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIArgus\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single, complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments.
+.LP
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \f(CWredo\fR and \f(CWupdate\fR options, skips or
+repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage for
+Argus since there are many variations possible. Because \fBdoargus\fR
+combines many separate, general purpose tasks, the description given here
+refers to these tasks and leaves some of the details to their help
+documentation.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdoargus\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed
+outside of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword
+should be added to the image headers, say with \fBhedit\fR.
+.IP [2]
+Set the \fBdoargus\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Prepare and specify an aperture identification table if desired.
+You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may change with different
+detector setups. The processing parameters are set for complete reductions
+but for quicklook you might not use the clean option or dispersion
+calibration and sky subtraction.
+.IP
+The parameters are set for a particular Argus configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.IP [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers may have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.IP [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.IP [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.IP [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.IP [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.IP [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.IP [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.IP [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.IP [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.IP [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.IP [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.IP [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra may
+also have part of the aperture identification table name, if used, added to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of Argus object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This
+is generally done using the \fBccdred\fR package.
+The \fBdoargus\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed
+outside of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword
+should be added to the image headers, say with \fBhedit\fR as illustrated
+below. Flat fielding is
+generally not done at this stage but as part of \fBdoargus\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
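+.LP
+For data processed outside of \fBccdred\fR a dummy CCDPROC keyword can
+be added with \fBhedit\fR; the image list and keyword value below are
+only illustrative:
+.V1
+
+    cl> hedit obj*.imh CCDPROC "done" add+ ver-
+
+.V2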
+.LP
+The task \fBdoargus\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, and auxiliary
+mercury line (from the dome lights) or sky line spectra. The flat field,
+throughput image or file, and auxiliary emission line spectra are optional.
+If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+.LP
+There are two types of dispersion calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the usual method with Argus.
+A second (uncommon) method is to use \fIauxiliary
+line spectra\fR, such as lines in the dome lights or sky lines, to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient.
+.LP
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdoargus\fR.
+.LP
+The first step in the processing is identifying the spectra in the images.
+The default method is to use the fact that object and sky fibers alternate
+and assign sequential numbers to the fibers so the sky fibers are the
+even aperture numbers and the object fibers are the odd aperture numbers.
+In this case the beam numbers are not used and are the same as the
+aperture numbers and there are no object identifications associated with the
+spectra.
+.LP
+A very useful, optional, setup parameter is an \fIaperture identification
+table\fR. The table contains information about the fiber assignments
+including object titles. The table is either a text file or an image
+containing the keywords SLFIB. An aperture identification file contains
+lines consisting of an aperture number, a beam number, and an object
+identification. In an image the SLFIB keywords contain the aperture
+number, the beam numbers, optional right ascension and declination, and a
+title. The aperture identification information must be in the same order
+as the fibers in the image. The aperture number may be any unique number
+but it is recommended that the normal sequential fiber numbers be used.
+The beam number may be used to flag object or sky spectra or simply be the
+same as the aperture number. The object identifications are optional but
+it is good practice to include them so that the data will contain the
+object information independent of other records. Figure 1 shows an example
+of a file for a configuration called LMC124.
+
+.ce
+Figure 1: Example Aperture Identification File
+
+.V1
+ cl> type LMC124
+ 1 1 143
+ 2 0 sky
+ 3 1 121
+ .
+ .
+ .
+ 47 1 s92
+ 48 0 sky
+.V2
+
+Note the identification of the sky fibers with beam number 0 and the
+object fibers with 1. Any broken fibers should be included and
+identified by a different beam number, say beam number -1, to give the
+automatic spectrum finding operation the best chance to make the
+correct identifications. Naturally the identification table will vary
+for each configuration.
+Additional information about the aperture identification
+file may be found in the description of the task \fBapfind\fR.
+.LP
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \f(CWextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
+.NH
+Package Parameters
+.LP
+The \fBargus\fR package parameters, shown in Figure 1, set parameters
+affecting all the tasks in the package.
+.KS
+.V1
+
+.ce
+Figure 1: Package Parameter Set for ARGUS
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = argus
+
+(dispaxi= 2) Image axis for 2D images
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Log file
+(plotfil= ) Plot file
+
+(records= )
+(version= ARGUS V1: January 1992)
+
+.KE
+.V2
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is only required
+for data taken with fiber instruments other than Argus.
+The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdoargus\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+.NH
+Processing Parameters
+.LP
+The \fBdoargus\fR parameters are shown in Figure 2.
+.KS
+.V1
+
+.ce
+Figure 2: Parameter Set for DOARGUS
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = argus
+ TASK = doargus
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(flat = ) Flat field spectrum
+(through= ) Throughput file or image (optional)
+(arcs1 = ) List of arc spectra
+(arcs2 = ) List of shift arc spectra
+(arcrepl= ) Special aperture replacements
+(arctabl= ) Arc assignment table (optional)
+
+.KE
+.V1
+(readnoi= RDNOISE) Read out noise sigma (photons)
+(gain = GAIN) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(fibers = 97) Number of fibers
+(width = 12.) Width of profiles (pixels)
+(minsep = 8.) Minimum separation between fibers (pixels)
+(maxsep = 15.) Maximum separation between fibers (pixels)
+(apidtab= ) Aperture identifications
+(crval = INDEF) Approximate wavelength
+(cdelt = INDEF) Approximate dispersion
+(objaps = ) Object apertures
+(skyaps = ) Sky apertures
+(arcaps = ) Arc apertures
+(objbeam= 0,1) Object beam numbers
+(skybeam= 0) Sky beam numbers
+(arcbeam= ) Arc beam numbers
+
+(scatter= no) Subtract scattered light?
+(fitflat= yes) Fit and ratio flat field spectrum?
+(clean = yes) Detect and replace bad pixels?
+(dispcor= yes) Dispersion correct spectra?
+(savearc= yes) Save simultaneous arc apertures?
+(skysubt= yes) Subtract sky?
+(skyedit= yes) Edit the sky spectra?
+(savesky= yes) Save sky spectra?
+(splot = no) Plot the final spectrum?
+(redo = no) Redo operations if previously done?
+(update = yes) Update spectra if cal data changes?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(params = ) Algorithm parameters
+
+.V2
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs are the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs are the auxiliary emission line exposures mentioned
+previously.
+.LP
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays, ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR parameter is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
+The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra are to
+be correctly skipped. The number of fibers can be left at the default
+and the task will try to account for unassigned or missing fibers.
+However, this may lead to occasional incorrect
+identifications so it is recommended that only the true number of
+fibers be specified. The aperture identification table was described
+earlier.
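+.LP
+For example, if the strongest real data are known to stay below roughly
+60000 (a purely illustrative value) the cosmic ray threshold could be
+set with
+.V1
+
+    cl> doargus.datamax = 60000
+
+.V2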
+.LP
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
+.LP
+The task needs to know which fibers are object and which are sky
+if sky subtraction is to be done. One could explicitly
+give the aperture numbers but the recommended way is to use the default
+of selecting every second fiber as sky. If no list of aperture or beam
+numbers is given
+then all apertures or beam numbers are selected. Sky subtracted sky
+spectra are useful for evaluating the sky subtraction. Since only
+the spectra identified as objects are sky subtracted one can exclude
+fibers from the sky subtraction. For example, to eliminate the sky
+spectra from the final results the \fIobjaps\fR parameter could be
+set to "1x2". All other fibers will remain in the extracted spectra
+but will not be sky subtracted.
+.LP
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \f(CWclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+.LP
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+.LP
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+.LP
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. It is also possible
+to subtract the sky and object fibers by pairs. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+.LP
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum (or individual skys if subtracting by
+pairs) may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+.LP
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR task. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference. If all input spectra are to be processed regardless of
+previous processing the \f(CWredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
+.LP
+The \f(CWbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
+.LP
+The \f(CWlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to another
+parameter set containing the algorithm parameters. Because of the way
+\fBdoargus\fR works this parameter may not be given any other value; the
+parameter set \fBparams\fR is always used. The algorithm parameters are
+discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the \fBdoargus\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdoargus\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+Argus. The parameter values can be changed from the
+defaults by using the parameter editor,
+.V1
+
+ cl> epar params
+
+.V2
+or simply typing \f(CWparams\fR. The parameter editor can also be
+entered when editing the \fBdoargus\fR parameters by typing \f(CW:e
+params\fR or simply \f(CW:e\fR if positioned at the \f(CWparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = argus
+ TASK = params
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(order = decreasing) Order of apertures
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -5.) Lower aperture limit relative to center
+(upper = 5.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 3) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- SCATTERED LIGHT PARAMETERS --
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+(nsubaps= 1) Number of subapertures
+
+.KE
+.KS
+.V1
+ -- FLAT FIELD FUNCTION FITTING PARAMETERS --
+(f_inter= yes) Fit flat field interactively?
+(f_funct= spline3) Fitting function
+(f_order= 10) Fitting function order
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli=linelists$idhenear.dat) Line list
+(match = 10.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 10.) Centering radius in pixels
+(i_funct= spline3) Coordinate function
+(i_order= 3) Order of dispersion function
+(i_niter= 2) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+(addfeat= no) Add features when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.KS
+.V1
+ -- SKY SUBTRACTION PARAMETERS --
+(combine= average) Type of combine operation
+(reject = avsigclip) Sky rejection option
+(scale = none) Sky scaling option
+
+.KE
+.V2
+.NH 2
+Extraction
+.LP
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \f(CWnsubaps\fR control the extractions.
+.LP
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \f(CWnsum\fR parameter.
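+.LP
+For example (the values here are purely illustrative), a different dispersion
+line and summing width could be set in the parameter set with
+.V1
+
+ cl> params.line = 400
+ cl> params.nsum = 20
+
+.V2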
+.LP
+The order parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no file is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+.LP
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\f(CWextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
+.LP
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \f(CWminsep\fR
+distance, and then keeping the specified \f(CWnfibers\fR highest peaks. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \f(CWwidth\fR parameter.
+.LP
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \f(CWlower\fR and
+\f(CWupper\fR parameters.
+.LP
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \f(CWylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+.LP
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended, and it is fundamentally
+important that the correct aperture/beam numbers be associated with the
+proper fibers; otherwise the spectrum identifications will not correspond
+to the actual objects. An important command in
+this regard is the 'o' key which allows reordering the identifications.
+This is required if the first
+fiber is actually missing since the initial assignment begins with the
+first spectrum found. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+.LP
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \f(CWt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
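+.LP
+As an illustrative sketch only, the trace fitting could be changed to a
+higher order legendre polynomial with
+.V1
+
+ cl> params.t_function = "legendre"
+ cl> params.t_order = 5
+
+.V2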
+.LP
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \f(CWreadnoise\fR and \f(CWgain\fR detector
+parameters. Note that if the \f(CWclean\fR option is selected the variance
+weighted extraction is used regardless of the \f(CWweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
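+.LP
+For example, cleaning might be enabled with the noise model taken from image
+header keywords (the keyword names below are hypothetical and depend on the
+data):
+.V1
+
+ cl> doargus.readnoise = "RDNOISE"
+ cl> doargus.gain = "GAIN"
+ cl> doargus.clean = yes
+
+.V2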
+.LP
+The last parameter, \f(CWnsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
+.NH 2
+Scattered Light Subtraction
+.LP
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column, a second set of one dimensional functions is fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column, though they may be different from the
+parameters used to fit across the dispersion.
+.LP
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+.LP
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+.LP
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
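+.LP
+As a sketch, scattered light subtraction is enabled with the
+\f(CWscattered\fR parameter and the hidden fitting parameter sets may be
+reviewed beforehand if desired:
+.V1
+
+ cl> doargus.scattered = yes
+ cl> epar apscat1
+ cl> epar apscat2
+
+.V2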
+.NH 2
+Flat Field and Fiber Throughput Corrections
+.LP
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdoargus\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdoargus\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+.LP
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
+.LP
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \f(CWfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \f(CWf_function\fR and
+\f(CWf_order\fR. If the parameter \f(CWf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
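+.LP
+As an illustrative example only, the flat field fit could be made
+non-interactive with a lower order function by setting
+.V1
+
+ cl> params.f_interactive = no
+ cl> params.f_order = 6
+
+.V2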
+.LP
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+.LP
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the throughput image parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+.LP
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
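+.LP
+A hypothetical throughput file, with one line per aperture giving the
+aperture number and a relative throughput value (the numbers below are
+invented for illustration), might look like
+.V1
+
+ 1 0.98
+ 2 1.03
+ 3 1.00
+
+.V2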
+.LP
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+.LP
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+.NH 2
+Dispersion Correction
+.LP
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdoargus\fR task tries to simplify for you. There are three basic
+steps involved: determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+.LP
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether or
+not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
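+.LP
+For example (the wavelength and dispersion values here are hypothetical),
+the automatic line identification could be seeded with
+.V1
+
+ cl> doargus.crval = 5500.
+ cl> doargus.cdelt = 1.5
+
+.V2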
+.LP
+The set of arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the full base widths
+of the arc lines), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
+.LP
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+Argus spectra. Dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image.
+.LP
+However, there is another calibration option which may be of interest.
+This option uses auxiliary line spectra, such as from dome lights or night
+sky lines, to monitor wavelength zero point shifts which are added to the
+basic dispersion function derived from a single reference arc. Initially
+one of the auxiliary fiber spectra is plotted interactively by
+\fBidentify\fR with the reference dispersion function for the appropriate
+fiber. The user marks one or more lines which will be used to compute zero
+point wavelength shifts in the dispersion functions automatically. In this
+case it is auxiliary arc images which are assigned to particular objects.
+.LP
+The arc or auxiliary line image assignments may be done either explicitly with
+an arc assignment
+table (parameter \f(CWarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
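+.LP
+As a sketch, the default assignment based on the Julian date could instead
+use the middle universal time by setting
+.V1
+
+ cl> params.sort = "utmiddle"
+ cl> params.time = yes
+
+.V2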
+.LP
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+.LP
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \f(CWlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
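+.LP
+For radial velocity work, for instance, one might select logarithmic
+wavelength bins (an illustrative setting only):
+.V1
+
+ cl> params.log = yes
+
+.V2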
+.NH 2
+Sky Subtraction
+.LP
+Sky subtraction is selected with the \f(CWskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers.
+If the \f(CWskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+.LP
+If the combining option is "none" then the sky and object fibers are
+paired and one sky is subtracted from one object and the saved sky will
+be the individual sky fiber spectra.
+.LP
+However, the usual
+case is to combine the individual skys into a single master sky spectrum
+which is then subtracted from each object spectrum.
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this are to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual skys are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+though an uncertain throughput usually indicates a problem in the
+throughput correction itself. If the sky subtraction is done interactively, i.e. with the
+\f(CWskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
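+.LP
+For example, with only a few sky fibers one might switch to a median
+combine without rejection (illustrative values):
+.V1
+
+ cl> params.combine = "median"
+ cl> params.reject = "none"
+
+.V2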
+.LP
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra
+are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \f(CWsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+Each task in the \fBargus\fR package and tasks used by \fBdoargus\fR have
+help pages describing the parameters and task in some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
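+For example, to print the help for \fBdoargus\fR one might type
+.V1
+
+ cl> help doargus | lprint
+
+.V2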
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ continuum - Fit the continuum in spectra
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically identify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+
+ doargus - Process ARGUS spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+.V2
+.SH
+Appendix A: DOARGUS Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.LE
+flat = "" (optional)
+.LS
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.LE
+throughput = "" (optional)
+.LS
+Throughput file or image. If an image is specified, typically a blank sky
+observation, the total flux through each fiber is used to correct for fiber
+throughput. If a file consisting of lines with the aperture number and
+relative throughput is specified then the fiber throughput will be
+corrected by those values. If neither is specified but a flat field image
+is given it is used to compute the throughput.
+.LE
+arcs1 = "" (at least one if dispersion correcting)
+.LS
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.LE
+arcs2 = "" (optional)
+.LS
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \f(CWparams.sort\fR, such as the observation time is made.
+.LE
+
+readnoise = "0." (apsum)
+.LS
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.LE
+gain = "1." (apsum)
+.LS
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+fibers = 48 (apfind)
+.LS
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image. Note that Argus fibers which are unassigned will still
+contain enough light for identification and the aperture identification
+table will be used to eliminate the unassigned fibers. The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.LE
+width = 6. (apedit)
+.LS
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.LE
+minsep = 8. (apfind)
+.LS
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.LE
+maxsep = 10. (apfind)
+.LS
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.LE
+apidtable = "" (apfind)
+.LS
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title (a hypothetical
+example is sketched after this entry). An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. Unassigned and broken fibers (beam of -1)
+should be included in this list since they will automatically be excluded.
+.LE
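+.LP
+A hypothetical aperture identification text file (fiber number, beam
+number, and title; the entries below are invented for illustration) might
+contain
+.V1
+
+ 1 1 star1
+ 2 0 sky
+ 3 2 arc
+ 4 -1 broken
+
+.V2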
+crval = INDEF, cdelt = INDEF (autoidentify)
+.LS
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.LE
+objaps = "", skyaps = "2x2"
+.LS
+List of object and sky aperture numbers. These are used to
+identify object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Because the fibers typically alternate
+sky and object the default is to define the sky apertures by their
+aperture numbers and select both object and sky fibers for sky subtraction.
+.LE
+objbeams = "", skybeams = ""
+.LS
+List of object and sky beam numbers.
+The beam numbers are typically the same as the aperture numbers unless
+set in the \fIapidtable\fR.
+.LE
+
+scattered = no (apscatter)
+.LS
+Smooth and subtract scattered light from the object and flat field
+images? This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The fitting for the
+initial flat field, or for the aperture reference image if no flat field
+is given, is done interactively to allow setting the fitting parameters.
+All subsequent subtractions use the same fitting parameters.
+.LE
+fitflat = yes (flat1d)
+.LS
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.LE
+clean = yes (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameters
+can be useful.
+.LE
+dispcor = yes
+.LS
+Dispersion correct spectra? Depending on the \f(CWparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.LE
+skyalign = no
+.LS
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.LE
+skysubtract = yes
+.LS
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.LE
+skyedit = yes
+.LS
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.LE
+saveskys = yes
+.LS
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.LE
+splot = no
+.LS
+Plot the final spectra with the task \fBsplot\fR?
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.LE
+update = yes
+.LS
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job provided no interactive
+options (\f(CWskyedit\fR and \f(CWsplot\fR) are selected.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+params = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.LE
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. They also apply to \fBdoargus\fR.
+
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.LE
+observatory = "observatory"
+.LS
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For Argus data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
+.LE
+database = "database"
+.LS
+Database (directory) used for storing aperture and dispersion information.
+.LE
+verbose = no
+.LS
+Print verbose information available with various tasks.
+.LE
+logfile = "logfile", plotfile = ""
+.LS
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.LE
+records = ""
+.LS
+Dummy parameter to be ignored.
+.LE
+version = "ARGUS: ..."
+.LS
+Version of the package.
+.LE
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdoargus\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+order = "decreasing" (apfind)
+.LS
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.LE
+extras = no (apsum)
+.LS
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -3., upper = 3. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 3 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.LE
+apscat1 = "" (apscatter)
+.LS
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.LE
+apscat2 = "" (apscatter)
+.LS
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum) (fit1d|fit2d)
+.LS
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for Argus data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+nsubaps = 1 (apsum)
+.LS
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.LE
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+
+f_interactive = yes (fit1d)
+.LS
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \f(CWfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.LE
+f_function = "spline3", f_order = 10 (fit1d)
+.LS
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (autoidentify/identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelists$ctiohenear.dat" (autoidentify/identify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.LE
+match = -3. (autoidentify/identify)
+.LS
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (autoidentify/identify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 10. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "chebyshev", i_order = 3 (autoidentify/identify)
+.LS
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.LE
+i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (reidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+addfeatures = no (reidentify)
+.LS
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd", group = "ljd" (refspectra)
+.LS
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdoargus\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+
+combine = "average" (scombine) (average|median)
+.LS
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.LE
+reject = "none" (scombine) (none|minmax|avsigclip)
+.LS
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.V1
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.V2
+
+.LE
+scale = "none" (none|mode|median|mean)
+.LS
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
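+.LP
+As a sketch (the extension value is only an example), the image type might
+be set with something like
+.V1
+
+ cl> set imtype=imh
+
+.V2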
diff --git a/noao/imred/argus/params.par b/noao/imred/argus/params.par
new file mode 100644
index 00000000..72e01ace
--- /dev/null
+++ b/noao/imred/argus/params.par
@@ -0,0 +1,67 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+order,s,h,"decreasing","increasing|decreasing",,"Order of apertures"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-3.,,,"Lower aperture limit relative to center"
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,2,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- SCATTERED LIGHT PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,Upper rejection threshold
+nsubaps,i,h,1,1,,"Number of subapertures
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,yes,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,20,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"chebyshev","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,2,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SKY SUBTRACTION PARAMETERS --"
+combine,s,h,"average","average|median",,Type of combine operation
+reject,s,h,"avsigclip","none|minmax|avsigclip",,"Sky rejection option"
+scale,s,h,"none","none|mode|median|mean",,"Sky scaling option"
diff --git a/noao/imred/bias/Revisions b/noao/imred/bias/Revisions
new file mode 100644
index 00000000..6c67e622
--- /dev/null
+++ b/noao/imred/bias/Revisions
@@ -0,0 +1,97 @@
+.help revisions Jun88 noao.imred.bias
+.nf
+noao$imred/bias/colbias.x
+noao$imred/bias/linebias.x
+noao$imred/bias/doc/colbias.hlp
+noao$imred/bias/doc/linebias.hlp
+ The output pixel type is now changed to real rather than preserving
+ the input pixel type. (3/2/93, Valdes)
+
+=======
+V2.10.2
+=======
+
+noao$imred/bias/colbias.x
+ Valdes, January 6, 1990
+ The graphics device parameter was being ignored and "stdgraph" always
+ opened.
+
+===
+V2.8
+====
+
+noao$imred/bias/colbias.x,linebias.x,colbias.par,linebias.par
+ Davis, October 4, 1988
+ 1. The parameters low_reject, high_reject and niterate have been added
+ to the linebias and colbias tasks which then pass them to the icfit
+ package.
+
+noao$imred/bias/linebias.x
+ Valdes, May, 1, 1987
+ 1. The fix of December 3 was slightly wrong.
+ 296: ...Memr[buf2+(k-1)*n+i-1] ==> ...Memr[buf2+k*n+i-j]
+ 2. Increased the maximum buffer MAXPIX from 10000 to 100000.
+
+noao$imred/bias/colbias.x
+noao$imred/bias/linebias.x
+ Valdes, January 15, 1986
+ 1. When the bias section consists of just one column or line the
+ dimensionality of the mapped bias decreases. The code had to be
+ modified to recognize and work properly in this case.
+
+noao$imred/bias/linebias.x
+ Valdes, December, 3, 1986
+ 1. There was a bug in using the median option which gave incorrect
+ results for the bias vector.
+ 291: n = min (nx - j - 1, maxn) ==> n = min (nx - j + 1, maxn)
+ 296: ...Memr[buf2+(k-1)*n+i-1] ==> ...Memr[buf2+(k-1)*n+i-j]
+
+noao$imred/bias/linebias.x
+ Valdes, October 29, 1986
+ 1. The log information for LINEBIAS identified itself as COLBIAS.
+
+====================================
+Version 2.3 Release, August 18, 1986
+====================================
+
+noao$imred/bias/colbias.x
+noao$imred/bias/linebias.x
+ Valdes, July 3, 1986
+ 1. COLBIAS and LINEBIAS modified to use new ICFIT package.
+
+=========================================
+STScI Pre-release and 1st SUN 2.3 Release
+=========================================
+
+===========
+Release 2.2
+===========
+
+From Valdes Dec 12, 1985:
+
+1. COLBIAS and LINEBIAS changes to allow taking a median instead of an
+average when generating the 1D bias data.
+------
+From Valdes Dec 11, 1985:
+
+1. COLBIAS and LINEBIAS changed to use image templates instead of filename
+templates. This allows use of the concatenation function even though the
+images should not have images sections.
+
+2. REVS task removed.
+------
+From Valdes Oct 4, 1985:
+
+1. Colbais and linebias recompiled because of the change in the icfit package
+for low and high rejection and rejection iteration.
+------
+From Valdes Aug 7, 1985:
+
+1. The task revisions has been added to page package revisions.
+To get the system revisions type system.revisions.
+
+2. The parameters to linebias and colbias now include control over
+the graphics output device, the graphics input cursor, and multiple logfiles.
+
+3. Changes have been made to use the "improved" icfit and gtools packages.
+.endhelp
diff --git a/noao/imred/bias/bias.cl b/noao/imred/bias/bias.cl
new file mode 100644
index 00000000..18624a2c
--- /dev/null
+++ b/noao/imred/bias/bias.cl
@@ -0,0 +1,8 @@
+#{ BIAS -- Bias Package
+
+package bias
+
+task colbias,
+ linebias = biasdir$x_bias.e
+
+clbye
diff --git a/noao/imred/bias/bias.hd b/noao/imred/bias/bias.hd
new file mode 100644
index 00000000..b55e1b6c
--- /dev/null
+++ b/noao/imred/bias/bias.hd
@@ -0,0 +1,7 @@
+# Help directory for the BIAS package.
+
+$doc = "./doc/"
+
+colbias hlp=doc$colbias.hlp, src=colbias.x
+linebias hlp=doc$linebias.hlp, src=linebias.x
+revisions sys=Revisions
diff --git a/noao/imred/bias/bias.men b/noao/imred/bias/bias.men
new file mode 100644
index 00000000..7bf225d4
--- /dev/null
+++ b/noao/imred/bias/bias.men
@@ -0,0 +1,2 @@
+ colbias - Fit and subtract an average column bias
+ linebias - Fit and subtract an average line bias
diff --git a/noao/imred/bias/bias.par b/noao/imred/bias/bias.par
new file mode 100644
index 00000000..4bdb4e20
--- /dev/null
+++ b/noao/imred/bias/bias.par
@@ -0,0 +1,3 @@
+# BIAS package parameter file.
+
+version,s,h,"May 1985"
diff --git a/noao/imred/bias/colbias.par b/noao/imred/bias/colbias.par
new file mode 100644
index 00000000..8c3ba4b3
--- /dev/null
+++ b/noao/imred/bias/colbias.par
@@ -0,0 +1,16 @@
+# COLBIAS -- Subtract a column bias
+
+input,s,a,,,,Input images
+output,s,a,,,,Output images
+bias,s,h,"[]",,,Bias section
+trim,s,h,"[]",,,Trim section
+median,b,h,no,,,Use median instead of average in column bias?
+interactive,b,h,yes,,,Interactive?
+function,s,h,"spline3","spline3|legendre|chebyshev|spline1",,Fitting function
+order,i,h,1,1,,Order of fitting function
+low_reject,r,h,3.0,,,Low sigma rejection factor
+high_reject,r,h,3.0,,,High sigma rejection factor
+niterate,i,h,1,,,Number of rejection iterations
+logfiles,s,h,"",,,Log files
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
diff --git a/noao/imred/bias/colbias.x b/noao/imred/bias/colbias.x
new file mode 100644
index 00000000..f7aac840
--- /dev/null
+++ b/noao/imred/bias/colbias.x
@@ -0,0 +1,308 @@
+include <imhdr.h>
+include <imio.h>
+include <pkg/gtools.h>
+include <pkg/xtanswer.h>
+
+# COLBIAS -- Remove line by line bias from images.
+#
+# A one dimensional bias vector is extracted from the bias columns.
+# A function is fit to the bias vector and the function is subtracted
+# from the image lines. A trim section may be specified to output
+# only a part of the bias subtracted image.
+
+# Control procedure for mapping the images.
+#
+# The input and output images are given by image templates. The
+# number of output images must match the number of input images. Image
+# sections are not allowed. The output image may be the same as the input
+# image.
+
+procedure colbias ()
+
+int listin # List of input images
+int listout # List of output images
+int logfiles # List of log files
+char biassec[SZ_FNAME] # Bias section
+char trimsec[SZ_FNAME] # Trim section
+int median # Use median of bias section?
+int interactive # Interactive?
+
+char function[SZ_LINE] # Curve fitting function
+int order # Order of curve fitting function
+
+char image[SZ_FNAME]
+char input[SZ_FNAME]
+char biasimage[SZ_FNAME]
+char output[SZ_FNAME]
+char logfile[SZ_FNAME]
+char original[SZ_FNAME]
+char title[SZ_LINE]
+
+int logfd
+pointer in, bias, out, ic, gt
+
+int clgeti(), clpopnu(), clgfil(), open(), gt_init(), nowhite()
+int imtopen(), imtlen(), imtgetim(), btoi()
+bool clgetb()
+long clktime()
+pointer immap()
+real clgetr()
+
+begin
+ # Get input and output lists and check that the number of images
+ # are the same.
+
+ call clgstr ("input", title, SZ_LINE)
+ listin = imtopen (title)
+ call clgstr ("output", title, SZ_LINE)
+ listout = imtopen (title)
+ if (imtlen (listin) != imtlen (listout)) {
+ call imtclose (listin)
+ call imtclose (listout)
+ call error (0, "Input and output image lists do not match")
+ }
+
+ # Get the bias and trim sections.
+
+ call clgstr ("bias", biassec, SZ_FNAME)
+ call clgstr ("trim", trimsec, SZ_FNAME)
+ if (nowhite (biassec, biassec, SZ_FNAME) == 0)
+ ;
+ if (nowhite (trimsec, trimsec, SZ_FNAME) == 0)
+ ;
+ median = btoi (clgetb ("median"))
+
+ # Determine if the task is interactive. If not set the interactive
+ # flag to always no.
+
+ if (clgetb ("interactive"))
+ interactive = YES
+ else
+ interactive = ALWAYSNO
+
+ # Initialize the curve fitting package.
+
+ call ic_open (ic)
+ call clgstr ("function", function, SZ_LINE)
+ call ic_pstr (ic, "function", function)
+ order = clgeti ("order")
+ call ic_puti (ic, "order", order)
+ call ic_putr (ic, "low", clgetr ("low_reject"))
+ call ic_putr (ic, "high", clgetr ("high_reject"))
+ call ic_puti (ic, "niterate", clgeti ("niterate"))
+ call ic_pstr (ic, "xlabel", "Line")
+ call ic_pstr (ic, "ylabel", "Bias")
+
+ gt = gt_init ()
+ call gt_sets (gt, GTTYPE, "line")
+
+ # Get the log files.
+
+ logfiles = clpopnu ("logfiles")
+
+ # For each input and output image map the bias image, the
+ # trimmed input image, and the output image. Use a temporary
+	# image header for overwriting the input image.
+
+ while ((imtgetim (listin, image, SZ_FNAME) != EOF) &&
+ (imtgetim (listout, output, SZ_FNAME) != EOF)) {
+
+ call sprintf (biasimage, SZ_FNAME, "%s%s")
+ call pargstr (image)
+ call pargstr (biassec)
+ call sprintf (input, SZ_FNAME, "%s%s")
+ call pargstr (image)
+ call pargstr (trimsec)
+
+ in = immap (input, READ_ONLY, 0)
+ bias = immap (biasimage, READ_ONLY, 0)
+ call xt_mkimtemp (image, output, original, SZ_FNAME)
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+
+ call sprintf (title, SZ_LINE, "colbias %s")
+ call pargstr (image)
+ call xt_answer (title, interactive)
+ call gt_sets (gt, GTTITLE, title)
+
+ # Enter log header.
+
+ while (clgfil (logfiles, logfile, SZ_FNAME) != EOF) {
+ logfd = open (logfile, APPEND, TEXT_FILE)
+ call cnvtime (clktime (0), title, SZ_LINE)
+ call fprintf (logfd, "\nCOLBIAS: %s\n")
+ call pargstr (title)
+ call fprintf (logfd, "input = %s\noutput = %s\nbias = %s\n")
+ call pargstr (input)
+ call pargstr (output)
+ call pargstr (biasimage)
+ if (median == YES)
+ call fprintf (logfd, "Median of bias section used.\n")
+ call close (logfd)
+ }
+ call clprew (logfiles)
+
+ call cb_colbias (in, bias, out, ic, gt, median, logfiles,
+ interactive)
+
+ call imunmap (in)
+ call imunmap (bias)
+ call imunmap (out)
+ call xt_delimtemp (output, original)
+ }
+
+ call ic_closer (ic)
+ call gt_free (gt)
+ call clpcls (logfiles)
+ call imtclose (listin)
+ call imtclose (listout)
+end
+
+
+# CB_COLBIAS -- Get an average column bias vector from the bias image.
+# Fit a function to the bias vector and subtract it from the input image
+# to form the output image. Line coordinates are in terms of the full
+# input image.
+
+procedure cb_colbias (in, bias, out, ic, gt, median, logfiles, interactive)
+
+pointer in # Input image pointer
+pointer bias # Bias image pointer
+pointer out # Output image pointer
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+int median # Median of bias section?
+int logfiles # List of log files
+int interactive # Interactive curve fitting?
+
+char graphics[SZ_FNAME] # Graphics output device
+char logfile[SZ_FNAME]
+int i, nbias, nx, ny, ydim, yoff, ystep, ylen
+real y, z
+pointer cv, gp, sp, ybias, zbias, wts
+
+int clgfil()
+real cveval()
+pointer gopen(), imgl2r(), impl2r()
+
+begin
+ # The bias coordinates are in terms of the full input image because
+ # the input and bias images may have different sections.
+
+ nx = IM_LEN(in, 1)
+ ny = IM_LEN(in, 2)
+
+ ydim = IM_VMAP(in, 2)
+ yoff = IM_VOFF(in, ydim)
+ ystep = IM_VSTEP(in, ydim)
+ ylen = IM_SVLEN(in, ydim)
+
+ # Get the bias vector and set the weights.
+
+ call cb_getcolbias (bias, ybias, zbias, nbias, median)
+ call smark (sp)
+ call salloc (wts, nbias, TY_REAL)
+ call amovkr (1., Memr[wts], nbias)
+
+ # Do the curve fitting using the interactive curve fitting package.
+ # Free memory when the fit is complete.
+
+ call ic_putr (ic, "xmin", 1.)
+ call ic_putr (ic, "xmax", real (ylen))
+ if ((interactive == YES) || (interactive == ALWAYSYES)) {
+ call clgstr ("graphics", graphics, SZ_FNAME)
+ gp = gopen (graphics, NEW_FILE, STDGRAPH)
+ call gt_setr (gt, GTXMIN, 1.)
+ call gt_setr (gt, GTXMAX, real (ylen))
+ call icg_fit (ic, gp, "cursor", gt, cv, Memr[ybias], Memr[zbias],
+ Memr[wts], nbias)
+ call gclose (gp)
+ } else {
+ call ic_fit (ic, cv, Memr[ybias], Memr[zbias], Memr[wts], nbias,
+ YES, YES, YES, YES)
+ }
+
+ # Log the fitting information.
+
+ while (clgfil (logfiles, logfile, SZ_FNAME) != EOF) {
+ call ic_show (ic, logfile, gt)
+ call ic_errors (ic, logfile, cv, Memr[ybias], Memr[zbias],
+ Memr[wts], nbias)
+ }
+ call clprew (logfiles)
+
+ call mfree (ybias, TY_REAL)
+ call mfree (zbias, TY_REAL)
+ call sfree (sp)
+
+ # Subtract the bias function from the input image.
+
+ do i = 1, ny {
+ y = yoff + i * ystep
+ z = cveval (cv, y)
+ call asubkr (Memr[imgl2r(in,i)], z, Memr[impl2r(out,i)], nx)
+ }
+
+ # Free curve fitting memory.
+
+ call cvfree (cv)
+end
+
+
+# CB_GETCOLBIAS -- Get the column bias vector.
+# The ybias line values are in terms of the full image.
+
+procedure cb_getcolbias (bias, ybias, zbias, nbias, median)
+
+pointer bias # Bias image pointer
+pointer ybias, zbias # Bias vector
+int nbias # Number of bias points
+int median # Median of bias section?
+
+int i, nx, ny, ydim, yoff, ystep
+
+real amedr(), asumr()
+pointer imgl1r(), imgl2r()
+
+begin
+ # Check for a bias consisting of a single column which is turned
+ # into a 1D image by IMIO.
+ if (IM_NDIM(bias) == 1) {
+ ny = IM_LEN(bias, 1)
+ ydim = IM_VMAP(bias, 1)
+ yoff = IM_VOFF(bias, ydim)
+ ystep = IM_VSTEP(bias, ydim)
+
+ nbias = ny
+ call malloc (ybias, nbias, TY_REAL)
+ call malloc (zbias, nbias, TY_REAL)
+
+ do i = 1, nbias
+ Memr[ybias+i-1] = yoff + i * ystep
+ call amovr (Memr[imgl1r(bias)], Memr[zbias], nbias)
+
+ return
+ }
+
+ nx = IM_LEN(bias, 1)
+ ny = IM_LEN(bias, 2)
+ ydim = IM_VMAP(bias, 2)
+ yoff = IM_VOFF(bias, ydim)
+ ystep = IM_VSTEP(bias, ydim)
+
+ nbias = ny
+ call malloc (ybias, nbias, TY_REAL)
+ call malloc (zbias, nbias, TY_REAL)
+
+ if (median == YES) {
+ do i = 1, ny {
+ Memr[ybias+i-1] = yoff + i * ystep
+ Memr[zbias+i-1] = amedr (Memr[imgl2r(bias,i)], nx)
+ }
+ } else {
+ do i = 1, ny {
+ Memr[ybias+i-1] = yoff + i * ystep
+ Memr[zbias+i-1] = asumr (Memr[imgl2r(bias,i)], nx) / nx
+ }
+ }
+end
diff --git a/noao/imred/bias/doc/colbias.hlp b/noao/imred/bias/doc/colbias.hlp
new file mode 100644
index 00000000..a95e4694
--- /dev/null
+++ b/noao/imred/bias/doc/colbias.hlp
@@ -0,0 +1,113 @@
+.help colbias Mar93 noao.imred.bias
+.ih
+NAME
+colbias -- Fit and subtract an average column bias
+.ih
+USAGE
+.nf
+colbias input output
+.fi
+.ih
+PARAMETERS
+.ls input
+Images to be bias subtracted. The images may not contain image sections.
+.le
+.ls output
+Output bias subtracted images.  An output image may be the same as its
+matching input image. The output pixel type will be real regardless
+of the input pixel type.
+.le
+.ls bias = "[]"
+Bias section appended to the input image to define the bias region.
+The default section or an empty string will use the full image.
+.le
+.ls trim = "[]"
+Trim section appended to the input image to define the region to be
+bias subtracted and output. The default section or an empty string
+will use the full image.
+.le
+.ls median = no
+Take the median of the bias columns? If no then the bias
+columns are averaged.
+.le
+.ls function = "spline3"
+The function fit to the average bias column.  The functions are "legendre",
+"chebyshev", "spline1", or "spline3". Abbreviations are allowed.
+.le
+.ls order
+The order (number of terms or number of spline pieces) in the function.
+.le
+.ls low_reject = 3.0
+The low sigma rejection factor.
+.le
+.ls high_reject = 3.0
+The high sigma rejection factor.
+.le
+.ls niterate = 1
+The maximum number of rejection iterations.
+.le
+.ls interactive = yes
+Fit the average bias column interactively?
+.le
+.ls logfiles = ""
+List of log files. If no file name is given then no log file is kept.
+.le
+.ls graphics = "stdgraph"
+Graphics output device for interactive graphics.
+.le
+.ls cursor = ""
+Graphics cursor input.
+.le
+.ih
+DESCRIPTION
+For each input image in the input image list an average or median bias
+column is determined from the bias region. The bias region is defined by
+the bias section applied to the input image. A function of the image lines
+is fit to the average bias column. This function is subtracted from each
+image column in the trim region. The trim region is defined by the trim
+section applied to the input image. The bias subtracted and trimmed image
+is output to the output image. The input and output images may not contain
+sections and the number of images in each list must be the same.
+
+If the interactive flag is set then the user may interactively examine
+and fit the average bias column. The interactive fitting is done using the
+interactive curve fitting routine (see icfit). Before each image is
+processed a prompt of the form "colbias image (yes)? " is given.
+A response of yes allows interactive fitting for the specified image
+while a response of no uses the last defined fitting parameters.
+The default value is accepted with a carriage return. The possible
+responses are "yes", "no", "YES", or "NO". The capitalized responses
+permanently set the response to yes or no and the prompt is not
+issued again for the remaining images. Thus, a response of NO processes
+the remaining images non-interactively while a response of YES processes
+the remaining images interactively without prompting.
+.ih
+EXAMPLES
+The bias region for a set of images occupies columns 801 to 832 and lines
+1 to 800. To subtract the bias and remove the bias region:
+
+.nf
+ cl> colbias.bias = "[801:832,*]"
+ cl> colbias.trim = "[1:800,*]"
+ cl> colbias ccd* ccd*
+ colbias ccd001 (yes)? yes
+ colbias ccd002 (yes)?
+ colbias ccd003 (no)? NO
+.fi
+
+The first two lines set the bias and trim parameters. These parameters
+could be temporarily set on the command line but generally these parameters
+are only changed when new instruments are used. The first image
+is interactively fit and the fitting order is changed to 2.  The
+second image is examined and the fit found to be acceptable. All remaining
+images are then fit non-interactively using the same fitting parameters.
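+
+The same reduction could also be run as a single command by overriding
+the hidden parameters on the command line.  The following is only a
+sketch assuming the usual CL convention of "param=value" arguments;
+the parameter names are those listed above.
+
+.nf
+	cl> colbias ccd* ccd* bias="[801:832,*]" trim="[1:800,*]"
+.fi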
+.ih
+REVISIONS
+.ls COLBIAS V2.10.3
+The output pixel type is now real instead of preserving the pixel type
+of the input image.
+.le
+.ih
+SEE ALSO
+icfit
+.endhelp
diff --git a/noao/imred/bias/doc/linebias.hlp b/noao/imred/bias/doc/linebias.hlp
new file mode 100644
index 00000000..6c5d73a5
--- /dev/null
+++ b/noao/imred/bias/doc/linebias.hlp
@@ -0,0 +1,115 @@
+.help linebias Mar93 noao.imred.bias
+.ih
+NAME
+linebias -- Fit and subtract an average line bias
+.ih
+USAGE
+.nf
+linebias input output
+.fi
+.ih
+PARAMETERS
+.ls input
+Images to be bias subtracted. The images may not contain image sections.
+.le
+.ls output
+Output bias subtracted images.  An output image may be the same as its
+matching input image.  The output image pixel type will be real regardless
+of the input image pixel type.
+.le
+.ls bias = "[]"
+Bias section appended to the input image to define the bias region.
+The default section or an empty string will use the full image.
+.le
+.ls trim = "[]"
+Trim section appended to the input image to define the region to be
+bias subtracted and output. The default section or an empty string
+will use the full image.
+.le
+.ls median = no
+Take the median of the bias lines? If no then the bias lines are averaged.
+.le
+.ls function = "spline3"
+The function fit to the average bias line. The functions are "legendre",
+"chebyshev", "spline1", or "spline3". Abbreviations are allowed.
+.le
+.ls order
+The order (number of terms or number of spline pieces) in the function.
+.le
+.ls low_reject = 3.0
+The low sigma rejection factor.
+.le
+.ls high_reject = 3.0
+The high sigma rejection factor.
+.le
+.ls niterate = 1
+The maximum number of rejection iterations.
+.le
+.ls interactive = yes
+Fit the average bias line interactively?
+.le
+.ls logfiles = ""
+List of log files. If no file name is given then no log file is kept.
+.le
+.ls graphics = "stdgraph"
+Graphics output device for interactive graphics.
+.le
+.ls cursor = ""
+Graphics cursor input.
+.le
+.ih
+DESCRIPTION
+For each input image in the input image list an average or median bias line
+is determined from the bias region. The bias region
+is defined by the bias section applied to the input image. A function of
+the image columns is fit to the average bias line. This function is subtracted
+from each image line in the trim region. The trim region is defined by the
+trim section applied to the input image. The bias subtracted and trimmed
+image is output to the output image. The input and output images may not
+contain sections and the number of images in each list must be the same.
+
+If the interactive flag is set then the user may interactively examine
+and fit the average bias line. The interactive fitting is done using the
+interactive curve fitting routine (see icfit). Before each image is
+processed a prompt of the form "linebias image (yes)? " is given.
+A response of yes allows interactive fitting for the specified image
+while a response of no uses the last defined fitting parameters.
+The default value is accepted with a carriage return. The possible
+responses are "yes", "no", "YES", or "NO". The capitalized responses
+permanently set the response to yes or no and the prompt is not
+issued again for the remaining images. Thus, a response of NO processes
+the remaining images non-interactively while a response of YES processes
+the remaining images interactively without prompting.
+.ih
+EXAMPLES
+The bias region for a set of images occupies columns 1 to 800 and lines
+801 to 832. To subtract the bias and remove the bias region:
+
+.nf
+ cl> linebias.bias = "[*, 801:832]"
+ cl> linebias.trim = "[*, 1:800]"
+ cl> linebias ccd* ccd*
+ linebias ccd001 (yes)? yes
+ linebias ccd002 (yes)?
+ linebias ccd003 (no)? NO
+.fi
+
+The first two lines set the bias and trim parameters. These parameters
+could be temporarily set on the command line but generally these parameters
+are only changed when new instruments are used. The first image
+is interactively fit and the fitting order is changed to 2.  The
+second image is examined and the fit found to be acceptable. All remaining
+images are then fit non-interactively using the same fitting parameters.
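+
+As a further sketch, again assuming the usual CL "param=value" convention
+on the command line and using an arbitrary log file name, the fits could
+be recorded and the per-image prompts suppressed with:
+
+.nf
+	cl> linebias ccd* ccd* interactive=no logfiles="linebias.log"
+.fi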
+.ih
+REVISIONS
+.ls LINEBIAS V2.10.3
+The output pixel type is now real instead of preserving the pixel type
+of the input image.
+.le
+.ih
+SEE ALSO
+icfit
+.endhelp
diff --git a/noao/imred/bias/linebias.par b/noao/imred/bias/linebias.par
new file mode 100644
index 00000000..a056f02d
--- /dev/null
+++ b/noao/imred/bias/linebias.par
@@ -0,0 +1,16 @@
+# LINEBIAS -- Subtract a line bias
+
+input,s,a,,,,Input images
+output,s,a,,,,Output images
+bias,s,h,"[]",,,Bias section
+trim,s,h,"[]",,,Trim section
+median,b,h,no,,,Use median instead of average in line bias?
+interactive,b,h,yes,,,Interactive?
+function,s,h,"spline3","spline3|legendre|chebyshev|spline1",,Fitting function
+order,i,h,1,1,,Order of fitting function
+low_reject,r,h,3.0,,,Low sigma rejection factor
+high_reject,r,h,3.0,,,High sigma rejection factor
+niterate,i,h,1,,,Number of rejection iterations
+logfiles,s,h,"",,,Log files
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
diff --git a/noao/imred/bias/linebias.x b/noao/imred/bias/linebias.x
new file mode 100644
index 00000000..91d789af
--- /dev/null
+++ b/noao/imred/bias/linebias.x
@@ -0,0 +1,330 @@
+include <imhdr.h>
+include <imio.h>
+include <pkg/gtools.h>
+include <pkg/xtanswer.h>
+
+# LINEBIAS -- Remove column by column bias from images.
+#
+# A one dimensional bias vector is extracted from the bias lines.
+# A function is fit to the bias vector and the function is subtracted
+# from the image columns. A trim section may be specified to output
+# only a part of the bias subtracted image.
+
+# Control procedure for mapping the images.
+#
+# The input and output images are given by image templates. The
+# number of output images must match the number of input images. Image
+# sections are not allowed. The output image may be the same as the input
+# image.
+
+procedure linebias ()
+
+int listin # List of input images
+int listout # List of output images
+int logfiles # List of log files
+char biassec[SZ_FNAME] # Bias section
+char trimsec[SZ_FNAME] # Trim section
+int median # Median of bias section?
+int interactive # Interactive?
+
+char function[SZ_LINE] # Curve fitting function
+int order # Order of curve fitting function
+
+char image[SZ_FNAME]
+char input[SZ_FNAME]
+char biasimage[SZ_FNAME]
+char output[SZ_FNAME]
+char logfile[SZ_FNAME]
+char original[SZ_FNAME]
+char title[SZ_LINE]
+
+int logfd
+pointer in, bias, out, ic, gt
+
+int clgeti(), clpopnu(), clgfil(), open(), gt_init(), nowhite()
+int imtopen(), imtlen(), imtgetim(), btoi()
+bool clgetb()
+long clktime()
+pointer immap()
+real clgetr()
+
+begin
+ # Get input and output lists and check that the number of images
+ # are the same.
+
+ call clgstr ("input", title, SZ_LINE)
+ listin = imtopen (title)
+ call clgstr ("output", title, SZ_LINE)
+ listout = imtopen (title)
+ if (imtlen (listin) != imtlen (listout)) {
+ call imtclose (listin)
+ call imtclose (listout)
+ call error (0, "Input and output image lists do not match")
+ }
+
+ # Get the bias and trim sections.
+
+ call clgstr ("bias", biassec, SZ_FNAME)
+ call clgstr ("trim", trimsec, SZ_FNAME)
+ if (nowhite (biassec, biassec, SZ_FNAME) == 0)
+ ;
+ if (nowhite (trimsec, trimsec, SZ_FNAME) == 0)
+ ;
+ median = btoi (clgetb ("median"))
+
+ # Determine if the task is interactive. If not set the interactive
+ # flag to always no.
+
+ if (clgetb ("interactive"))
+ interactive = YES
+ else
+ interactive = ALWAYSNO
+
+ # Initialize the curve fitting package.
+
+ call ic_open (ic)
+ call clgstr ("function", function, SZ_LINE)
+ call ic_pstr (ic, "function", function)
+ order = clgeti ("order")
+ call ic_puti (ic, "order", order)
+ call ic_putr (ic, "low", clgetr ("low_reject"))
+ call ic_putr (ic, "high", clgetr ("high_reject"))
+ call ic_puti (ic, "niterate", clgeti ("niterate"))
+ call ic_pstr (ic, "xlabel", "Column")
+ call ic_pstr (ic, "ylabel", "Bias")
+
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "line")
+
+ # Get the list of log files.
+
+ logfiles = clpopnu ("logfiles")
+
+ # For each input and output image map the bias image, the
+ # trimmed input image, and the output image. Use a temporary
+	# image header for overwriting the input image.
+
+ while ((imtgetim (listin, image, SZ_FNAME) != EOF) &&
+ (imtgetim (listout, output, SZ_FNAME) != EOF)) {
+
+ call sprintf (biasimage, SZ_FNAME, "%s%s")
+ call pargstr (image)
+ call pargstr (biassec)
+ call sprintf (input, SZ_FNAME, "%s%s")
+ call pargstr (image)
+ call pargstr (trimsec)
+
+ in = immap (input, READ_ONLY, 0)
+ bias = immap (biasimage, READ_ONLY, 0)
+ call xt_mkimtemp (image, output, original, SZ_FNAME)
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+
+ call sprintf (title, SZ_LINE, "linebias %s")
+ call pargstr (image)
+ call xt_answer (title, interactive)
+ call gt_sets (gt, GTTITLE, title)
+
+ # Enter a header in the log file.
+
+ while (clgfil (logfiles, logfile, SZ_FNAME) != EOF) {
+ logfd = open (logfile, APPEND, TEXT_FILE)
+ call cnvtime (clktime (0), title, SZ_LINE)
+ call fprintf (logfd, "\nLINEBIAS: %s\n")
+ call pargstr (title)
+ call fprintf (logfd, "input = %s\noutput = %s\nbias = %s\n")
+ call pargstr (input)
+ call pargstr (output)
+ call pargstr (biasimage)
+ if (median == YES)
+ call fprintf (logfd, "Median of bias section used.\n")
+ call close (logfd)
+ }
+ call clprew (logfiles)
+
+ call lb_linebias (in, bias, out, ic, gt, median, logfiles,
+ interactive)
+
+ call imunmap (in)
+ call imunmap (bias)
+ call imunmap (out)
+ call xt_delimtemp (output, original)
+ }
+
+ call ic_closer (ic)
+ call gt_free (gt)
+ call clpcls (logfiles)
+ call imtclose (listin)
+ call imtclose (listout)
+end
+
+
+# LB_LINEBIAS -- Get an average line bias vector from the bias image.
+# Fit a function to the bias vector and subtract it from the input image
+# to form the output image. Column coordinates are in terms of the full
+# input image.
+
+procedure lb_linebias (in, bias, out, ic, gt, median, logfiles, interactive)
+
+pointer in # Input image pointer
+pointer bias # Bias image pointer
+pointer out # Output image pointer
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+int median # Median of bias section?
+int logfiles # List of log files
+int interactive # Interactive curve fitting?
+
+char graphics[SZ_FNAME] # Graphics output device
+char logfile[SZ_FNAME]
+int i, nbias, nx, ny, xdim, xoff, xstep, xlen
+real x
+pointer cv, gp, sp, xbias, zbias, wts, z
+
+int clgfil()
+real cveval()
+pointer gopen(), imgl2r(), impl2r()
+
+begin
+ # The bias coordinates are in terms of the full input image because
+ # the input and bias images may have different sections.
+
+ nx = IM_LEN(in, 1)
+ ny = IM_LEN(in, 2)
+
+ xdim = IM_VMAP(in, 1)
+ xoff = IM_VOFF(in, xdim)
+ xstep = IM_VSTEP(in, xdim)
+ xlen = IM_SVLEN(in, xdim)
+
+ # Get the bias vector and set the weights.
+
+ call lb_getlinebias (bias, xbias, zbias, nbias, median)
+ call smark (sp)
+ call salloc (wts, nbias, TY_REAL)
+ call amovkr (1., Memr[wts], nbias)
+
+ # Do the curve fitting using the interactive curve fitting package.
+ # Free memory when the fit is complete.
+
+ call ic_putr (ic, "xmin", 1.)
+ call ic_putr (ic, "xmax", real (xlen))
+ if ((interactive == YES) || (interactive == ALWAYSYES)) {
+ call clgstr ("graphics", graphics, SZ_FNAME)
+ gp = gopen (graphics, NEW_FILE, STDGRAPH)
+ call gt_setr (gt, GTXMIN, 1.)
+ call gt_setr (gt, GTXMAX, real (xlen))
+ call icg_fit (ic, gp, "cursor", gt, cv, Memr[xbias], Memr[zbias],
+ Memr[wts], nbias)
+ call gclose (gp)
+ } else {
+ call ic_fit (ic, cv, Memr[xbias], Memr[zbias], Memr[wts], nbias,
+ YES, YES, YES, YES)
+ }
+
+ # Log the fitting information.
+
+ while (clgfil (logfiles, logfile, SZ_FNAME) != EOF) {
+ call ic_show (ic, logfile, gt)
+ call ic_errors (ic, logfile, cv, Memr[xbias], Memr[zbias],
+ Memr[wts], nbias)
+ }
+ call clprew (logfiles)
+
+ call mfree (xbias, TY_REAL)
+ call mfree (zbias, TY_REAL)
+ call sfree (sp)
+
+	# Subtract the bias function from the input image.
+
+ call smark (sp)
+ call salloc (z, nx, TY_REAL)
+ do i = 1, nx {
+ x = xoff + i * xstep
+ Memr[z+i-1] = cveval (cv, x)
+ }
+
+ do i = 1, ny
+ call asubr (Memr[imgl2r(in,i)], Memr[z], Memr[impl2r(out,i)], nx)
+
+ # Free allocated memory.
+
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# LB_GETLINEBIAS -- Get the line bias vector.
+# The xbias column values are in terms of the full image.
+
+define MAXPIX 100000 # Maximum number of pixels to buffer
+
+procedure lb_getlinebias (bias, xbias, zbias, nbias, median)
+
+pointer bias # Bias image pointer
+pointer xbias, zbias # Bias vector
+int nbias # Number of bias points
+int median # Median of bias section?
+
+int i, j, k, n, nx, ny, xdim, xoff, xstep, maxn
+pointer buf1, buf2
+
+real amedr()
+pointer imgl1r(), imgl2r(), imgs2r()
+
+begin
+ # Check for a bias consisting of a single line which is turned
+ # into a 1D image by IMIO.
+ if (IM_NDIM(bias) == 1) {
+ nx = IM_LEN(bias, 1)
+ xdim = IM_VMAP(bias, 1)
+ xoff = IM_VOFF(bias, xdim)
+ xstep = IM_VSTEP(bias, xdim)
+
+ nbias = nx
+ call malloc (xbias, nbias, TY_REAL)
+ call malloc (zbias, nbias, TY_REAL)
+
+ do i = 1, nbias
+ Memr[xbias+i-1] = xoff + i * xstep
+ call amovr (Memr[imgl1r(bias)], Memr[zbias], nbias)
+
+ return
+ }
+
+ nx = IM_LEN(bias, 1)
+ ny = IM_LEN(bias, 2)
+ xdim = IM_VMAP(bias, 1)
+ xoff = IM_VOFF(bias, xdim)
+ xstep = IM_VSTEP(bias, xdim)
+
+ nbias = nx
+ call malloc (xbias, nbias, TY_REAL)
+ call calloc (zbias, nbias, TY_REAL)
+
+ if (median == NO) {
+ do i = 1, ny
+ call aaddr (Memr[imgl2r(bias,i)], Memr[zbias], Memr[zbias], nx)
+ call adivkr (Memr[zbias], real (ny), Memr[zbias], nx)
+ } else {
+ call malloc (buf1, ny, TY_REAL)
+
+ maxn = MAXPIX / ny
+ j = 1
+ do i = 1, nx {
+ if (i == j) {
+ n = min (nx - j + 1, maxn)
+ buf2 = imgs2r (bias, j, j + n - 1, 1, ny)
+ j = j + n
+ }
+ do k = 1, ny
+ Memr[buf1+k-1] = Memr[buf2+k*n+i-j]
+ Memr[zbias+i-1] = amedr (Memr[buf1], ny)
+ }
+
+ call mfree (buf1, TY_REAL)
+ }
+
+ do i = 1, nx
+ Memr[xbias+i-1] = xoff + i * xstep
+end
diff --git a/noao/imred/bias/mkpkg b/noao/imred/bias/mkpkg
new file mode 100644
index 00000000..fefd744a
--- /dev/null
+++ b/noao/imred/bias/mkpkg
@@ -0,0 +1,30 @@
+# Make the BIAS package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $call bias
+ ;
+
+install:
+ $move x_bias1.e noaobin$x_bias.e
+ ;
+
+bias:
+ $omake x_bias.x
+ $link x_bias.o libpkg.a -lxtools -lcurfit -o x_bias1.e
+ ;
+
+libpkg.a:
+ colbias.x <imhdr.h> <imio.h> <pkg/xtanswer.h>\
+ <pkg/gtools.h>
+ linebias.x <imhdr.h> <imio.h> <pkg/xtanswer.h>\
+ <pkg/gtools.h>
+ ;
diff --git a/noao/imred/bias/x_bias.x b/noao/imred/bias/x_bias.x
new file mode 100644
index 00000000..e0ade131
--- /dev/null
+++ b/noao/imred/bias/x_bias.x
@@ -0,0 +1,2 @@
+task colbias,
+ linebias
diff --git a/noao/imred/ccdred/Revisions b/noao/imred/ccdred/Revisions
new file mode 100644
index 00000000..982ed391
--- /dev/null
+++ b/noao/imred/ccdred/Revisions
@@ -0,0 +1,1236 @@
+.help revisions Jun88 noao.imred.ccdred
+.nf
+t_ccdgroups.x
+t_ccdhedit.x
+t_ccdinst.x
+t_ccdlist.x
+t_ccdproc.x
+t_combine.x
+t_mkfringe.x
+t_mkillumcor.x
+t_mkillumft.x
+t_mkskycor.x
+t_mkskyflat.x
+ Added a check that the filename given to the hdmopen() procedure wasn't
+	empty.  This provides a more informative error message than the "floating
+	invalid operation" one otherwise gets when, e.g., no 'instrument' file is
+ specified (10/12/13, MJF)
+
+src/ccdcache.x
+ The 'bufs' pointer was declared as TY_REAL instead of TY_SHORT (5/4/13)
+
+t_cosmicrays.x
+	A pointer to an array of pointers was used in one place as a real.  This
+ is an error when integer and real arrays are not of the same size; i.e.
+ on 64-bit architectures. (8/2/12, Valdes)
+
+=======
+V2.16.1
+=======
+
+various
+ Separated the generic combine code to a subdirectory as is done
+ for imcombine, mscred, etc. This is only a partial step towards
+ sharing the standard imcombine code. Because this is really old,
+	working code that has diverged significantly, it will take some time
+ to update/merge the new imcombine code. (1/6/11, Valdes)
+
+=====
+V2.15
+=====
+
+src/icstat.gx
+ Fixed type declarations for the asum() procedures (8/25/09, MJF)
+
+doc/ccdproc.hlp
+ Removed the statements that calibration images are not reprocessed
+ if they have CCDPROC even if they lack the keywords for specific
+ operations. I looked at the code and did not see much dependence
+ on CCDPROC though there could be something I'm missing. For now,
+ since a user reported this, I will assume the behavior reported by
+ the user is correct and the documentation is wrong for some historical
+ reason. (5/27/08, Valdes)
+
+x_ccdred.x
+ Added the alias qccdproc for use in the quadred.quadproc task.
+ (3/12/08, Valdes)
+
+=====
+V2.14
+=====
+
+=======
+V2.12.2
+=======
+
+ccdred/ccdred.hd
+ Hooked up help pages for ccdtest package. (2/14/04, Valdes)
+
+ccdred/ccdtest/t_mkimage.x
+ Removed unused variable. (8/8/02, Valdes)
+
+ccdred/src/icscale.x
+ Error dereferencing a string pointer. (8/8/02, Valdes)
+
+ccdred/src/t_mkfringe.x
+ccdred/src/t_mkillumcor.x
+ccdred/src/t_mkillumft.x
+ccdred/src/t_mkskycor.x
+ccdred/src/t_mkskyflat.x
+ There was a confusion with the "output" parameter which is also in
+ the ccdproc pset. Each task now explicitly calls its own output
+ parameter. (7/31/02, Valdes)
+
+=======
+V2.12.1
+=======
+
+=====
+V2.12
+=====
+
+ccdred/src/icsetout.x
+ When computing offsets the registration point was the reference pixel
+ returned by mw_gwterm for the first image. The code then went on to
+ assume this was a logical pixel when comparing with the other images,
+ which is not true when there is a physical coordinate system. The
+ algorithm was fixed by converting the reference point to logical
+ coordinates. (4/18/02, Valdes)
+
+ccdred/src/t_ccdmask.x
+	Fixed bug where if the last line or last column had a bad pixel
+ without a neighboring interior pixel then the mask value would be
+ some number corresponding to the number of pixels in that last line
+ or column. (2/28/02, Valdes)
+
+ccdred/ccdred.cl
+ccdred/ccdred.men
+ccdred/ccdred.hd
+ccdred/src/mkpkg
+ccdred/x_ccdred.x
+	Removed COSMICRAYS from the package tasks.  The source has not yet
+	been removed.  (8/22/01, Valdes)
+
+ccdred/src/setdark.x
+ Added a check for a zero divide in calculating the dark time scaling
+ which results in an appropriate error message. (7/5/01, Valdes)
+
+========
+V2.11.3b
+========
+
+t_combine.x
+ Modified the conversion of pclip from a fraction to a number of images
+	because for an even number of images the number above/below the median
+ is one too small. (9/26/00, Valdes)
+
+ccdred/src/icmedian.gx
+ Replaced with faster Wirth algorithm. (5/16/00, Valdes)
+
+ccdred/src/icgdata.gx
+ccdred/src/iclog.x
+ccdred/src/icmask.x
+ccdred/src/icombine.gx
+ccdred/src/icscale.x
+ccdred/src/icsetout.x
+ Changed declarations for the array "out" to be ARB rather than 3 in
+ some places (because it was not changed when another element was added)
+ or 4. This will insure that any future output elements added will
+	not require changing these arguments for the sake of cosmetic correctness.
+ (1/13/99, Valdes)
+
+ccdred/src/t_combine.x
+ Added workaround for error recovery problem that loses the error
+ message. (10/21/99, Valdes)
+
+ccdred$doc/ccdproc.hlp
+ The overscan type name was incorrectly given as "average" instead of
+ "mean". This was corrected in the documentation. (10/15/99, Valdes)
+
+ccdred$src/generic/mkpkg
+ccdred$src/cosmic/mkpkg
+ccdred$src/mkpkg
+ Added missing dependencies. (10/11/99, Valdes)
+
+=======
+V2.11.2
+=======
+
+ccdred$src/t_ccdlist.x
+ Date accidentally changed. File not modified. (5/13/99, Valdes)
+
+ccdred$doc/ccdproc.hlp
+ccdred$doc/mkskyflat.hlp
+	Fixed minor formatting problems.  (4/22/99, Valdes)
+
+ccdred$src/imcombine/icsetout.x
+ The updating of the WCS for offset images was not being done correctly.
+ (10/6/98, Valdes)
+
+ccdred$src/t_ccdmask.x
+ The overlapping of groups of columns was not quite working because
+ you can't overlap imp... calls. (9/10/98, Valdes)
+
+ccdred$src/t_ccdproc.x
+ccdred$ccdproc.par
+ccdred$doc/ccdproc.hlp
+ccdred$darkcombine.cl
+ccdred$flatcombine.cl
+ccdred$zerocombine.cl
+ 1. Added output image option to CCDPROC.
+ 2. The combine scripts all still do in place processing.
+ (6/19/98, Valdes)
+
+ccdred$doc/ccdproc.hlp
+ Fixed font change typo in Revisions section. (6/16/98, Valdes)
+
+ccdred$src/t_ccdmask.x
+ The test for a bad pixel used && instead of ||. (4/24/98, Valdes)
+
+=======
+V2.11.1
+=======
+
+ccdred$src/icscale.x
+ccdred$doc/combine.hlp
+ When zero offsets or weights are specified in a file the weights
+ are not modified for zero offsets. (10/3/97, Valdes)
+
+ccdred$src/setoutput.x
+ It is now allowed to go from ushort input to short output.
+ (9/29/97, Valdes)
+
+ccdred$src/t_combine.x
+ Fixed a segmentation violation caused by attempting to close the
+ mask data structures during error recovery when the error occurs
+ before the data structures are defined. (8/14/97, Valdes)
+
+ccdred$src/cosmic/crfind.x
+ccdred$src/cosmic/crlist.x
+ Changed arguments with adjustable arrays to use ARB. (8/6/97, Valdes)
+
+ccdred$src/setsections.
+ Generalized the LTERM update to work with arbitrary WCSDIM.
+ (7/24/97, Valdes)
+
+ccdred$src/ccdcheck.x
+ No change except date modified.
+ (7/17/97, Valdes)
+
+=====
+V2.11
+=====
+
+ccdred$src/setoverscan.x
+ccdred$src/proc.gx
+ccdred$src/ccdred.h
+ccdred$doc/ccdproc.hlp
+ The overscan fitting function now allows "average", "median", and "minmax"
+ for line-by-line overscan determination.
+ (2/21/97, Valdes)
+
+ccdred$src/setfixpix.x
+ccdred$src/setproc.x
+ccdred$src/proc.gx
+ccdred$src/setsections.x
+ccdred$src/setheader.x
+ccdred$src/ccdred.h
+ccdred$src/corinput.gx -
+ccdred$src/generic/corinput.x -
+ccdred$src/mkpkg
+ccdred$src/generic/mkpkg
+ccdred$doc/ccdproc.hlp
+ The bad pixel fixing is now done with the new fixpix routines from xtools.
+ As part of this the physical coordinate system is set to be that of
+ the CCD.
+ (2/21/97, Valdes)
+
+ccdred$src/t_ccdmask.x +
+ccdred$ccdmask.par +
+ccdred$doc/ccdmask.hlp +
+ccdred$src/mkpkg
+ccdred$ccdred.cl
+ccdred$ccdred.hd
+ccdred$ccdred.men
+ccdred$x_ccdred.x
+ A new task, CCDMASK, has been added. This task finds deviant pixels
+ in CCD data and creates a pixel mask. (2/21/97, Valdes)
+
+ccdred$src/icscale.x
+	The ccdmean keyword is now updated rather than deleted.  However,
+	the ccdmeant keyword is deleted to force a later computation if needed.
+ (1/7/97, Valdes)
+
+ccdred$src/icsetout.x
+ccdred$doc/combine.hlp
+ A new option for computing offsets from the image WCS has been added.
+ (1/7/97, Valdes)
+
+ccdred$src/icmask.x
+ccdred$src/iclog.x
+ccdred$src/icombine.com
+ccdred$src/icmask.h +
+ccdred$src/icmask.com -
+ Changed to use a mask structure. (1/7/97, Valdes)
+
+ccdred$src/t_combine.x
+ccdred$src/icombine.gx
+ccdred$src/icimstack.x +
+ccdred$src/iclog.x
+ccdred$src/mkpkg
+ccdred$doc/combine.hlp
+ The limit on the maximum number of images that can be combined, set by
+ the maximum number of logical file descriptors, has been removed. If
+ the condition of too many files is detected the task now automatically
+ stacks all the images in a temporary image and then combines them with
+ the project option.
+
+	The project option probably did not work previously.  It may still
+	not work.
+ (1/7/97, Valdes)
+
+ccdred$src/icsort.gx
+ There was an error in the ic_2sort routine when there are exactly
+	three images; one of the explicit cases did not properly keep
+ the image identifications. See buglog 344. (1/17/97, Valdes)
+
+ccdred$src/calimage.x
+ The use of SZ_SUBSET-1 can cause problems because the names are
+	unique to SZ_SUBSET characters, but if the unique part is the
+	SZ_SUBSET character this causes problems.  (1/17/97, Valdes)
+
+==========
+V2.10.4-p2
+==========
+
+ccdred$src/icpclip.gx
+ Fixed a bug where a variable was improperly used for two different
+ purposes causing the algorithm to fail (bug 316). (10/19/95, Valdes)
+
+ccdred$src/cosmic/crlist.x
+ The output bad pixel data accidentally included some extra fields
+ making it incorrect to use the file directly with BADPIXIMAGE.
+ The extra diagnostic fields were removed. (9/25/95, Valdes)
+
+ccdred$src/cosmic/t_cosmicrays.x
+ Added a test for interactive mode before opening the graphics
+ stream and whether to call the training routine. This change
+ was needed to allow the task to run non-interactively on
+ dumb, non-graphics terminals. (7/24/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+ccdred$src/t_combine.x
+ If an error occurs while opening an input image header the error
+ recovery will close all open images and then propagate the error.
+ For the case of running out of file descriptors with STF format
+ images this will allow the error message to be printed rather
+ than the error code. (4/3/95, Valdes)
+
+ccdred$src/icscale.x
+ccdred$doc/combine.hlp
+ The behavior of the weights when using both multiplicative and zero
+ point scaling was incorrect; the zero levels have to account for
+ the scaling. (3/27/95, Valdes)
+
+ccdred$src/cosmic/t_cosmicrays.x
+ There was an error in setting the x,y coordinates of the window
+ such that it left some of the coordinates undefined. This causes
+ an FPE on the Alpha. (2/17/94, Valdes)
+
+ctype.h
+ccdred$src/ccdsubsets.x
+	Changed the test for non-filename characters to map all characters
+	except alphabetic characters, numbers, and periods to '_'.  (2/17/95, Valdes)
+
+ccdred$src/proc.gx
+ The asum$t function was not properly declared. (9/13/94, Valdes)
+
+ccdred$src/t_mkfringe.x
+ccdred$src/t_mkillumcor.x
+ccdred$src/t_mkillumft.x
+ccdred$src/t_mkskycor.x
+ccdred$src/t_mkskyflat.x
+ Added calls to ccd_open/ccd_close in order to initialize the image
+ caching even if images are not actually cached. (9/13/94, Valdes)
+
+ccdred$src/cosmic/t_cosmicrays.x
+ccdred$src/cosmic/crexamine.x
+ccdred$doc/cosmicrays.hlp
+ 1. A new parameter was added to the crexamine subroutine in the
+ previous modification for "training" the program. In the
+	   subroutine the parameter was used as a modifiable parameter but it
+	   was being called with a fixed constant.  The effect was that the
+	   constant value was no longer correct after the first execution and the
+ program would act as if a 'q' was typed after the first interactive
+ execution. This was fixed to treat the input argument as input
+ only.
+ 2. The help page now emphasizes that the "answer" parameter is not
+ to be used on the command line and if it is then the task will
+	   ignore the value and act as if the user always responds with
+ "yes".
+ (8/17/94, Valdes)
+
+ccdred/src/cosmic/t_cosmicrays.x
+ccdred/src/cosmic/crfind.x
+ccdred/src/cosmic/crexamine.x
+ccdred/src/cosmic/crlist.x
+ccdred/src/cosmic/crlist.h
+ccdred/cosmicrays.par
+ccdred/doc/cosmicrays.hlp
+noao$lib/scr/cosmicrays.key
+ Added some new parameters and a new functionality to allow setting
+ the flux ratio threshold by training with respect to a user supplied
+ list of classifications. Normally the list would be the image
+ display cursor. (6/29/94, Valdes)
+
+ccdred/src/cosmic/t_cosmicrays.x
+ Added an imflush() and imseti() after the initial copy of the input
+ image to the output is done and before the random access to replace
+ the detected cosmic rays. The imseti sets the image I/O advice to
+ RANDOM. (6/24/94, Valdes)
+
+ccdred/src/ccdcheck.x
+ccdred/src/ccdmean.x
+ccdred/src/setheader.x
+ccdred/src/scancor.x
+ccdred/src/setillum.x
+ccdred/src/t_mkillumcor.x
+ccdred/src/t_mkfringe.x
+ccdred/src/t_mkskycor.x
+ccdred/src/t_mkillumft.x
+ccdred/src/t_mkskyflat.x
+ccdred/doc/ccdproc.hlp
+ccdred/doc/ccdinst.hlp
+ Added a CCDMEANT keyword giving the time when the CCDMEAN value was
+ calculated. Routines that later access this keyword check this time
+ against the image modify time to determine whether to invalidate
+ the value and recompute it. This solves the problem of people
+ modifying the image outside the CCDRED package and possibly using
+	an incorrect scaling value.  For backwards compatibility, if the
+	new keyword is missing it is assumed to be the same as the modify time;
+ i.e. the CCDMEAN keyword is valid. (6/22/94, Valdes)
+
+ccdred/src/t_mkillumcor.x
+ccdred/src/t_mkillumft.x
+ccdred/src/t_mkskycor.x
+ccdred/src/t_mkskyflat.x
+ Added an extra argument to the millumination subroutine to specify
+	whether to print log information.  This is because when this procedure
+ is used as an intermediate step in things like the fringe correction
+ the message is confusing to users. (6/21/94, Valdes)
+
+
+ccdred/src/icaclip.gx
+ccdred/src/iccclip.gx
+ccdred/src/icpclip.gx
+ccdred/src/icsclip.gx
+ 1. The restoration of deleted pixels to satisfy the nkeep parameter
+	   was being done inside the iteration loop causing the possibility
+	   of a non-terminating loop; i.e. pixels are rejected, they are
+	   restored, and the number left then does not satisfy the termination
+	   condition.  The restoration step was moved to follow the iterative
+	   rejection.
+	2. The restoration was also done incorrectly when mclip=no and could
+ lead to a segmentation violation.
+ (6/13/94, Valdes)
+
+ccdred/src/iccclip.gx
+ccdred/src/icsclip.gx
+ Found and fixed another typo bug. (6/7/94, Valdes/Zhang)
+
+ccdred/src/t_combine.x
+ For some reason the clget for the nkeep parameter was deleted
+ (it was in V2.10.2 but was gone in the version as of this date).
+ It was added again. (6/6/94, Valdes)
+
+ccdred/src/icscale.x
+ The sigma scaling flag, doscale1, would not be set in the case of
+ a mean offset of zero though the scale factors could be different.
+ (5/25/94, Valdes/Zhang)
+
+ccdred/src/icsclip.gx
+ There was a missing line: l = Memi[mp1]. (5/25/94, Valdes/Zhang)
+
+pkg/images/imarith/icaclip.gx
+ccdred/src/icaclip.gx
+ccdred/src/iccclip.gx
+ccdred/src/icpclip.gx
+ccdred/src/icsclip.gx
+ The reordering step when a central median is used during rejection
+ but the final combining is average was incorrect if the number
+	of rejected low pixels was greater than the number of pixels
+	not rejected.  (5/25/94, Valdes)
+
+ccdred/src/t_combine.x
+	Added a workaround for an image header copy problem which leaves part
+ of the TEMPNAME keyword in the output image headers. For an output
+ pixel list file this could cause the file to be screwed up.
+ (5/6/94, Valdes)
+
+ccdred/src/icscale.x
+ccdred/src/t_combine.x
+ 1. There is now a warning error if the scale, zero, or weight type
+ is unknown.
+ 2. An sfree was being called before the allocated memory was finished
+ being used.
+ (5/2/94, Valdes)
+
+ccdred/src/iclog.x
+ Changed the mean, median, mode, and zero formats from 6g to 7.5g to
+ insure 5 significant digits regardless of signs and decimal points.
+ (4/13/94, Valdes)
+
+ccdred/src/icaclip.gx
+ccdred/src/iccclip.gx
+ccdred/src/icsclip.gx
+ The image sigma was incorrectly computed when an offset scaling is used.
+ (3/8/94, Valdes)
+
+ccdred/src/setoverscan.x
+ccdred/doc/ccdproc.hlp
+ It is an error if no bias section is given or if the whole image is
+ given. (1/3/94, Valdes)
+
+ccdred/src/t_ccdinst.x
+ There was an error causing reentrant formats which was fixed.
+ (12/16/93, Valdes)
+
+ccdred/src/ccdnscan.x +
+ccdred/src/scancor.x
+ccdred/src/setzero.x
+ccdred/src/setdark.x
+ccdred/src/setflat.x
+ccdred/src/calimage.x
+ccdred/src/proc.gx
+
+ccdred/src/t_ccdinst.x
+ccdred/src/t_mkskyflat.x
+ccdred/src/t_ccdproc.x
+ccdred/src/ccdproc.x
+ccdred/src/setfringe.x
+ccdred/src/setillum.x
+ccdred/src/mkpkg
+
+ccdred/doc/ccdproc.hlp
+ccdred/doc/ccdinst.hlp
+ccdred/doc/instruments.hlp
+ For short scan data the task now looks for the number of scan lines
+ in the image header. Also when a calibration image is software
+ scanned a new image is created. This allows processing objects with
+ different numbers of scan lines and preserving the unscanned
+ calibration image. (12/15/93, Valdes)
+
+ccdred/src/setoutput.x
+ccdred/doc/ccdproc.hlp
+ccdred/doc/ccdred.hlp
+ 1. The output datatypes were extended from just short and real to
+ include ushort, integer, long, and double. The calculation types
+ are still only short or real.
+ 2. The output datatype is no longer allowed to be of lower precision
+ than the input datatype.
+ (12/4/93, Valdes)
+
+ccdred/src/t_combine.x
+ccdred/combine.par
+ccdred/doc/combine.hlp
+ccdred/doc/darkcombine.hlp
+ccdred/doc/flatcombine.hlp
+ccdred/doc/zerocombine.hlp
+ 1. The "outtype" parameter was being ignored and the package "pixeltype"
+ parameter was used instead. This was fixed to use the "outtype"
+ parameter.
+ 2. The output pixel datatypes now include unsigned short.
+ 3. The DARKCOMBINE, FLATCOMBINE, and ZEROCOMBINE scripts specified
+	   that the output datatype be "real" but because of the bug noted
+ above the output type was being determined by the package
+ "pixeltype" parameter. The change above fixes this so that
+ the combined output will always be real. The help pages did
+	   not state what the output datatype would be so a sentence
+ was added specifying the output datatype is real.
+ (12/4/93, Valdes)
+
+ccdred/icgrow.gx
+ccdred/icpclip.gx
+ccdred/icsclip.gx
+ccdred/icaclip.gx
+ccdred/iccclip.gx
+ccdred/t_combine.x
+ccdred/doc/combine.hlp
+ If there were fewer initial pixels than specified by nkeep then the
+ task would attempt to add garbage data to achieve nkeep pixels. This
+ could occur when using offsets, bad pixel masks, or thresholds. The
+ code was changed to check against the initial number of pixels rather
+ than the number of images. Also a negative nkeep is no longer
+ converted to a positive value based on the number of images. Instead
+ it specifies the maximum number of pixels to reject from the initial
+ set of pixels. (11/8/93, Valdes)
+
+ccdred/doc/ccdproc.hlp
+ Added a sentence explicitly saying the fixpix option provides
+ the same algorithm as FIXPIX. (11/1/93, Valdes)
+
+ccdred/src/icscale.x
+ccdred/doc/combine.hlp
+ The help indicated that user input scale or zero level factors
+	task was using them as divisive and subtractive.  This was
+	corrected to agree with the intent of the documentation.
+ corrected to agree with the intend of the documentation.
+ Also the factors are no longer normalized. (9/24/93, Valdes)
+
+ccdred/src/icsetout.x
+ The case in which absolute offsets are specified but the offsets are
+ all the same did not work correctly. (9/24/93, Valdes)
+
+ccdred/doc/geometry.hlp
+ccdred/doc/ccdproc.hlp
+ccdred/doc/guide.hlp
+ The help was modified to say that the overscan region length is
+	determined from trimsec and is ignored in biassec.  (9/23/93, Valdes)
+
+ccdred/doc/instruments.hlp
+ccdred/doc/subsets.hlp
+ Added notes that comments are allowed. Also if there is more than
+ one translation for the same CCDRED parameter the last one takes
+ effect. (9/20/93, Valdes)
+
+ccdred/doc/combine.hlp
+ Clarified how bad pixel masks work with the "project" option.
+ (9/13/93, Valdes)
+
+ccdred/src/t_combine.x
+ The algorithm for making sure there are enough file descriptors failed
+ to account for the need to reopen the output image header for an
+ update. Thus when the number of input images + output images + logfile
+ was exactly 60 the task would fail. The update occurs when the output
+ image is unmapped so the solution was to close the input images first
+ except for the first image whose pointer is used in the new copy of the
+ output image. (8/4/93, Valdes)
+
+============
+V2.10.3 beta
+============
+
+ccdred/src/icgdata.gx
+ There was an indexing error in setting up the ID array when using
+ the grow option. This caused the CRREJECT/CCDCLIP algorithm to
+ fail with a floating divide by zero error when there were non-zero
+ shifts. (5/26/93, Valdes)
+
+ccdred/src/icmedian.gx
+ The median calculation is now done so that the original input data
+ is not lost. This slightly greater inefficiency is required so
+ that an output sigma image may be computed if desired. (5/10/93, Valdes)
+
+ccdred/darkcombine.cl
+ccdred/doc/darkcombine.hlp
+ccdred/doc/flatcombine.hlp
+ccddb/kpno/direct.cl
+ccddb/kpno/coude.cl
+ccddb/kpno/cryocam.cl
+ccddb/kpno/echelle.cl
+ccddb/kpno/foe.cl
+ccddb/kpno/specphot.cl
+ccddb/kpno/sunlink.cl
+ 1. Updated FLATCOMBINE defaults for KPNO data.
+ 2. Changed package defaults for DARKCOMBINE to use "minmax" rejection.
+ (4/19/93, Valdes)
+
+ccdred/src/icombine.gx
+ There was no error checking when writing to the output image. If
+	an error occurred (the example being when an inaccessible imdir was
+ set) obscure messages would result. Errchks were added.
+ (4/16/93, Valdes)
+
+ccdred/src/setfpix.x
+ccdred/src/ccdproc.x
+ccdred/src/t_ccdproc.x
+ccdred/doc/ccdproc.hlp
+ccdred/doc/instrument.hlp
+ If a specified bad pixel file is not found an abort now occurs. Also
+ the FIXPIX processing header flag is set even if there are no
+ bad pixels. The documentation was revised to stress that an "untrimmed"
+ bad pixel file refers to the original CCD coordinates which is
+ especially important with subraster readouts. (2/23/93, Valdes)
+
+ccdred/src/icaclip.gx
+ccdred/src/iccclip.gx
+ccdred/src/icpclip.gx
+ccdred/src/icsclip.gx
+ When using mclip=yes and when more pixels are rejected than allowed by
+ the nkeep parameter there was a subtle bug in how the pixels are added
+ back which can result in a segmentation violation.
+ if (nh == n2) ==> if (nh == n[i])
+ (1/20/93, Valdes)
+
+ccdred/zerocombine.cl
+ccdred/darkcombine.cl
+ccdred/flatcombine.cl
+ Explicitly set ccdproc.noproc to no. (11/23/92, Valdes)
+
+=======
+V2.10.2
+=======
+
+ccdred/src/calimage.x
+ Added test on the requested ccdtype when setting up the calibration images
+ to avoid mapping a calibration type image which is not going to be
+ used. (11/17/92, Valdes)
+
+ccdred/darkcombine.cl
+	Fixed typo in output parameter prompt string referring to a flat field.
+ (11/10/92, Valdes)
+
+ccdred/src/ccdred.h
+ccdred/src/t_ccdproc.x
+ccdred/src/proc.gx
+ Separated the minreplace operation from the findmean operation. It
+ is now a separate operation only applied to flat images.
+ (10/26/92, Valdes)
+
+ccdred/ccdtest/demo.dat
+ Removed display commands. Because DISPLAY is always loaded in V2.10
+ there was no way to escape the displaying.
+ (9/30/92, Valdes)
+
+ccdred$darkcombine.cl
+ccdred$flatcombine.cl
+ccdred$zerocombine.cl
+ccdred$doc/darkcombine.hlp
+ccdred$doc/flatcombine.hlp
+ccdred$doc/zerocombine.hlp
+ Added "blank", "nkeep", and "snoise" parameters.
+ (9/30/92, Valdes)
+
+ccdred$src/t_combine.x
+ccdred$src/icaclip.gx
+ccdred$src/iccclip.gx
+ccdred$src/icgrow.gx
+ccdred$src/iclog.x
+ccdred$src/icombine.com
+ccdred$src/icombine.gx
+ccdred$src/icombine.h
+ccdred$src/icpclip.gx
+ccdred$src/icscale.x
+ccdred$src/icsclip.gx
+ccdred$src/icsetout.x
+ccdred$combine.par
+ccdred$doc/combine.hlp
+ The weighting was changed from using the square root of the exposure time
+ or image statistics to using the values directly. This corresponds
+ to variance weighting. Other options for specifying the scaling and
+ weighting factors were added; namely from a file or from a different
+ image header keyword. The \fInkeep\fR parameter was added to allow
+ controlling the maximum number of pixels to be rejected by the clipping
+ algorithms. The \fIsnoise\fR parameter was added to include a sensitivity
+ or scale noise component to the noise model. Errors will now delete
+ the output image.
+ (9/30/92, Valdes)
+
+ccdred$src/t_combine.x
+ccdred$src/iclog.x
+ The log now prints the final image name rather than the temp name when
+ using the clobber option. (8/25/92, Valdes)
+
+ccdred$src/icaclip.gx
+ccdred$src/iccclip.gx
+ccdred$src/icpclip.gx
+ccdred$src/icsclip.gx
+ There was a very unlikely possibility that if all the input pixels had
+ exactly the same number of rejected pixels the weighted average would
+ be done incorrectly because the dflag would not be set. (8/11/92, Valdes)
+
+ccdred$src/icmm.gx
+ This procedure failed to set the dflag resulting in the weighted average
+	being computed incorrectly.  (8/11/92, Valdes)
+
+ccdred$src/icscale.x
+	When scaling and zero offsetting the zero level factors were incorrectly
+ computed. (8/10/92, Valdes)
+
+ccdred$src/ic[acs]clip.gx
+ccdred$src/icstat.gx
+ Corrected type mismatches in intrinsic functions. (8/10/92, Valdes)
+
+=======
+V2.10.1
+=======
+
+=======
+V2.10.0
+=======
+
+=====
+V2.10
+=====
+
+ccdred$src/icombine.gx
+ Needed to clear buffers returned by impl1 during the memory check
+ to avoid possible invalid values. (4/27/92, Valdes)
+
+ccdred$src/t_ccdproc.x
+ccdred$src/calimage.x
+ Made it an error if an explicit calibration image is specified but cannot
+ be opened. Previously it would then look in the input list for the
+ appropriate type. (4/24/92, Valdes)
+
+ccdred$ccdproc.x
+ccdred$t_ccdproc.x
+	Made the COMP type be processed like an OBJECT rather than the
+ default case. The only effect of this is to not have CCDMEAN
+ calculated. (4/8/92, Valdes)
+
+ccdred$src/icalip.gx
+ccdred$src/icclip.gx
+ccdred$src/ipslip.gx
+ccdred$src/icslip.gx
+ccdred$src/icmedian.gx
+ The median calculation with an even number of points for short data
+ could overflow (addition of two short values) and be incorrect.
+ (3/16/92, Valdes)
+
+ccdred$src/iclog.x
+ Added listing of read noise and gain. (2/10/92, Valdes)
+
+ccdred$src/icpclip.gx
+ Reduced the minimum number of images allowed for PCLIP to 3.
+ (1/7/92, Valdes)
+
+ccdred$darkcombine.cl
+ccdred$flatcombine.cl
+ Set default parameters as requested by the support people.
+ (12/12/91, Valdes)
+
+ccdred$src/icgrow.gx
+ The first pixel to be checked was incorrectly set to 0 instead of 1
+ resulting in a segvio when using the grow option. (12/6/91, Valdes)
+
+ccdred$src/proc.gx
+ccdred$src/icgdata.gx
+ccdred$src/icscale.x
+ccdred$src/setfixpix.x
+ccdred$src/t_combine.x
+ Fixed argument mismatch errors found by SPPLINT. (11/22/91, Valdes)
+
+ccdred$src
+ Replaced COMBINE with new version. (9/1/91, Valdes)
+
+ccdred$ccdtest/observe.cl -> artobs.cl
+ccdred$ccdtest/observe.hlp -> artobs.hlp
+ccdred$ccdtest/subsection.cl
+ccdred$ccdtest/subsection.hlp
+ccdred$ccdtest/mkimage.hlp
+ccdred$ccdtest/demo.dat
+ccdred$ccdtest/ccdtest.men
+ccdred$ccdtest/ccdtest.hd
+ccdred$ccdtest/ccdtest.cl
+ccdred$ccddb/kpno/demo.dat
+ Renamed OBSERVE to ARTOBS to avoid conflict with the CCDACQ task of
+ the same name. (8/29/91, Valdes)
+
+ccdred$src/setoutput.x
+ccdred$src/setproc.x
+ccdred$src/setdark.x
+ccdred$src/setzero.x
+ccdred$src/setflat.x
+ccdred$src/setfringe.x
+ccdred$doc/ccdred.hlp
+ The default output pixel type and computation type are now real.
+ The computation type may be separately specified. (5/29/91, Valdes)
+
+ccdred$src/t_mkskycor.x
+	The computation of CCDMEAN failed to accumulate the last few lines causing
+ the mean to be underestimated. (4/16/91, Valdes)
+
+ccdred$src/t_ccdinst.x +
+ccdred$src/ccdinst1.key +
+ccdred$src/ccdinst2.key +
+ccdred$src/ccdinst3.key +
+ccdred$src/hdrmap.x
+ccdred$src/mkpkg
+ccdred$ccdinstrument.par +
+ccdred$ccdred.cl
+ccdred$ccdred.hd
+ccdred$ccdred.men
+ccdred$x_ccdred.x
+ Added the new task CCDINSTRUMENT. This also involved some changes to
+ the header translation package hdrmap.x. (10/23/90, Valdes)
+
+ccdred$src/imcscales.x
+ccdred$src/imcmode.gx
+ccdred$src/mkpkg
+ Added error check for incorrect mode section specification.
+ (10/3/90, Valdes)
+
+ccdred$src/ccdred.h
+ccdred$src/proc.gx
+ccdred$src/setproc.x
+ccdred$ccdproc.par
+	Added a minreplace parameter to replace flat field values below this
+	threshold with the threshold value.  This prevents division by zero
+	without requiring specific flat field checking.
+ (10/3/90, Valdes)
+
+ccdred$src/t_ccdproc.x
+ccdred$src/ccdproc.x
+ccdred$src/scancor.x
+ 1. The scan correction now computes the CCDMEAN to account for the
+ ramp down.
+ 2. Did a simple move of the ccdmean call from before scancor to
+ after scancor. Since CCDMEAN is now computed in SCANCOR this
+	   has no real effect and is just cosmetic.  If CCDMEAN were not
+ computed in SCANCOR then the new placement would have computed
+ the right value at the expense of another pass through the image.
+ (9/21/90, Valdes)
+
+ccdred$src/t_badpixim.x
+ The template image cannot be closed immediately after opening the NEW_COPY
+ mask image because the STF kernel doesn't make the header copy until
+ pixel I/O occurs. This only affects STF images. (6/19/90, Valdes)
+
+====
+V2.9
+====
+
+ccdred$src/t_combine.x
+ Changed:
+ char images[SZ_FNAME-1,nimages] --> char images[SZ_FNAME,nimages-1]
+	The incorrect declaration results in each successive image name having
+	additional leading characters.  Apparently, since this has not been
+ found previously, the leading characters have generally been blanks.
+ (3/30/90, Valdes)
+
+ccdred$doc/combine.hlp
+ Clarified and documented definitions of the scale, offset, and weights.
+ (11/30/89, Valdes)
+
+ccdred$ccdproc.par
+ 1. All parameters now have default values. (10/31/89, Valdes)
+
+ccdred$src/cosmic/mkpkg
+ccdred$src/gtascale.x -
+ccdred$t_cosmicrays.x
+ 1. Removed duplicate of gtools procedure.
+ 2. Fixed transfer out of IFERR block message when input image was wrong.
+ 3. The badpixel file was not initialized to null if the user did not
+ want a badpixel file output. (9/21/89, Valdes)
+
+====
+V2.8
+====
+
+ccdred$src/imcmode.gx
+ Fixed bug causing infinite loop when computing mode of constant value
+ section. (8/14/89, Valdes)
+
+ccdred$src/ccdproc.x
+ccdred$src/ccddelete.x
+ccdred$src/t_ccdproc.x
+ccdred$src/t_mkfringe.x
+ccdred$src/t_mkskyflat.x
+ccdred$src/t_mkskycor.x
+ccdred$src/t_mkillumft.x
+ccdred$src/t_mkillumcor.x
+ccdred$src/t_combine.x
+ccdred$src/scancor.x
+ccdred$src/readcor.x
+ 1. Added error checking for procedure ccddelete.
+ 2. Made workaround for error handling problem with procedure imrename
+ so that specifying a bad backup prefix would result in an abort
+ with an error message. (6/16/89, Valdes)
+
+ccdred$src/imcombine.gx
+	Made the same changes made to image.imcombine to recover from the
+	"too many VOS file descriptors" error. (6/14/89, Valdes)
+
+ccdred$setinstrument.cl
+ccdred$setinstrument.hlp
+ Incorrect instrument names are now reported to the user, a menu is
+ printed if there is one, and a second opportunity is given.
+ (6/14/89, Valdes)
+
+ccdred$ccdred.par
+	Added an enumerated subset for the output datatype. (5/12/89, Valdes)
+
+ccdred$src/imcombine.gx
+ Because a file descriptor was not reserved for string buffer operations
+ and a call to stropen in cnvdate was not error checked the task would
+ hang when more than 115 images were combined. Better error checking
+ was added and now an error message is printed when the maximum number
+ of images that can be combined is exceeded. (5/9/89, Valdes)
+
+ccdred$src/sigma.gx
+ccdred$src/imcaverage.gx
+	1. Weighted sigma was being computed incorrectly.
+ 2. Added errchk to imcaverage.gx.
+ (5/6/89, Valdes)
+
+ccdred$src/setdark.x
+ccdred$src/setflat.x
+ccdred$src/setfringe.x
+ccdred$src/setillum.x
+ccdred$src/setoverscan.x
+ccdred$src/settrim.x
+ccdred$src/setzero.x
+ Made the trimsec, biassec, datasec, and ccdsec error messages more
+ informative. (3/13/89, Valdes)
+
+ccdred$src/imcmode.gx
+	For short data a short variable was wrapping around when there were
+ a significant number of saturated pixels leading to an infinite loop.
+ The variables were made real regardless of the image datatype.
+ (3/1/89, Valdes)
+
+ccdred$src/t_mkskyflat.x
+ccdred$src/t_mkskycor.x
+ 1. Added warning if images have not been flat fielded.
+ 2. Allowed flat field image to be found even if flatcor=no.
+ (2/24/89, Valdes)
+
+ccdred$src/imcthresh.gx
+ccdred$combine.par
+ccdred$doc/combine.hlp
+ccdred$src/imcscales.x
+ 1. Added provision for blank value when all pixels are rejected by the
+ threshold.
+ 2. Fixed a bug that improperly scaled images in the threshold option.
+ 3. The offset printed in the log now has the opposite sign so that it
+ is the value "added" to bring images to a common level.
+ (2/16/89, Valdes)
+
+ccdred$src/proc.gx
+	When the data section had fewer lines than the output image (which occurs
+	when not trimming and the overscan is along lines) pixel out of
+	bounds errors occurred.  This bug was due to a sign error when reading
+ the non-trimmed overscan lines. (2/13/89, Valdes)
+
+ccdred$src/setoverscan.gx
+ The overscan buffer for readaxis=column was not initialized yielding
+ unpredictable and incorrect overscan data.
+ (3/13/89, Valdes)
+
+ccdred$src/imcmode.gx
+ Added test for nx=1. (2/8/89, Valdes)
+
+ccdred$darkcombine.cl
+ccdred$flatcombine.cl
+ Changed the default parameters to use "avsigclip" combining and
+ no scaling or weighting. (1/27/89, Valdes)
+
+ccdred$src/ccdcheck.x
+ccdred$src/setillum.x
+ccdred$src/t_ccdproc.x
+ 1. If the illumination image does not have CCDMEAN in its header
+ it is calculated.
+ 2. If an error occurs in setting up for illumination or fringe
+ correction during processing a warning is issued and these
+ processing steps are skipped. They can be done later if
+ desired. Previously this caused an abort.
+ (1/27/89, Valdes)
+
+ccdred$ccdgroups.par
+ccdred$src/t_ccdgroups.x
+ccdred$doc/ccdgroups.hlp
+	Added two new group types: ccdtype and subset. (1/26/89, Valdes)
+
+ccdred$src/t_ccdlist.x
+ccdred$doc/ccdlist.hlp
+ The exposure time and dark time are now printed in long format. This
+	is useful for verifying that the header translation is working
+ correctly. (1/26/89, Valdes)
+
+ccdred$src/setfixpix.x
+ccdred$src/t_badpixim.x
+ The magic word "untrimmed" no longer needs whitespace preceding it.
+ (1/24/89, Valdes)
+
+imred$ccdred/src/imcscales.x
+ Valdes, Dec 8, 1988
+ 1. COMBINE now prints the scale as a multiplicative quantity.
+ 2. The combined exposure time was not being scaled by the scaling
+ factors resulting in a final exposure time inconsistent with the
+ data.
+
+imred$ccdred/src/t_mkskyflat.x
+imred$ccdred/src/t_mkillumft.x
+imred$ccdred/src/t_mkskycor.x
+imred$ccdred/src/t_mkskyflat.x
+imred$ccdred/src/t_mkfringe.x
+imred$ccdred/doc/mkillumcor.hlp
+imred$ccdred/doc/mkillumflat.hlp
+imred$ccdred/mkillumflat.par
+imred$ccdred/mkillumflat.par
+ 1. Minor typo in declaration (calimage.x) which had no effect.
+ 2. Missing include file (t_mkskyflat.x) caused "Cannot open image"
+ when using MKSKYFLAT.
+ 3. Added checks for division by zero which are reported at the end as
+ the number of divisions by zero and the replacement value.
+ The replacement value was added as a parameter value in MKILLUMCOR
+ and MKILLUMFLAT.
+ 4. Updated the help pages to reflect the new division by zero parameter.
+ 5. Modified the log strings to be more informative about what
+ was done and which images were used.
+ (10/20/88 Valdes)
+
+imred$ccdred/src/imcombine.gx
+ A vops clear routine was not called generically causing a crash with
+ double images. (10/19/88 Valdes)
+
+imred$ccdred/src/t_mkskycor.x
+	Replaced calls to the reciprocal vops procedure with one that checks for zero.
+ (10/13/88 Valdes)
+
+imred$ccdred/src/imcscales.x
+ It is now an error if the mode is not positive for mode scaling or
+ weighting. (9/28/88 Valdes)
+
+imred$ccdred/ccdred.par
+imred$ccdred/doc/ccdred.hlp
+	The plotfile parameter was changed to make the null string ("")
+	the new default. (9/23/88 jvb)
+
+imred$ccdred/src/imcmedian.gx
+ The median option was selecting the n/2 value instead of (n+1)/2. Thus,
+ for an odd number of images the wrong value was being determined for the
+ median. (8/16/88 Valdes)
+
+imred$ccdred/src/scancor.x
+imred$ccdred/src/calimage.x
+imred$ccdred/src/ccdcmp.x +
+imred$ccdred/src/mkpkg
+ 1. The shortscan correction was incorrectly writing to the input image
+	   rather than the output image, causing a "cannot write to file" error.
+ 2. It is now a trapped error if the input image is the same as a
+ calibration image. (4/18/88 Valdes)
+
+imred$ccdred/src/imcmode.gx
+	The use of mode sections was handled incorrectly. (4/11/88 Valdes)
+
+noao$imred/ccdred/src/setoverscan.x
+ Minor bug fix:
+ gt_setr (gt, GTXMIN, 1.) -> gt_setr (gt, GTXMIN, x[1])
+ gt_setr (gt, GTXMAX, real(npts)) -> gt_setr (gt, GTXMAX, x[npts])
+ (2/11/88 Valdes)
+
+noao$imred/ccdred/src/t_mkillumflat.x -> t_mkillumft.x
+noao$imred/ccdred/src/t_mkfringecor.x -> t_mkfringe.x
+noao$imred/ccdred/src/t_badpiximage.x -> t_badpixim.x
+noao$imred/ccdred/src/imcthreshold.gx -> imcthresh.gx
+noao$imred/ccdred/src/generic/imcthresh.x -> imcthresh.x
+noao$imred/ccdred/src/mkpkg
+noao$imred/ccdred/src/generic/mkpkg
+ Shortened long names. (2/10/88 Valdes)
+
+noao$imred/ccdred/src/t_mkskycor.x
+noao$imred/ccdred/doc/mkskycor.hlp
+noao$imred/ccdred/doc/mkillumcor.hlp
+noao$imred/ccdred/doc/mkskyflat.hlp
+noao$imred/ccdred/doc/mkillumflat.hlp
+noao$imred/ccdred/doc/mkfringecor.hlp
+ 1. When not clipping the first 3 lines of the illumination were always
+ zero.
+ 2. The clipping algorithm had several errors.
+ 3. It was unclear what a box size of 1. meant and whether one could
+ specify the entire image as the size of the box.
+	4. The smoothing box has been generalized to let the user choose the minimum
+	   and maximum box size.  This lets the user do straight box smoothing
+	   as well as growing box smoothing. (2/2/88 Valdes)
+
+noao$imred/ccdred/src/ccdtypes.h
+ Added the comparison CCD image type. (1/21/88 Valdes)
+
+noao$imred/ccdred/src/t_mkskycor.x
+noao$imred/ccdred/src/t_mkillumcor.x
+noao$imred/ccdred/src/t_mkskyflat.x
+noao$imred/ccdred/src/t_mkillumflat.x
+noao$imred/ccdred/src/t_mkfringecor.x
+ Calling sequences to the set_ procedures were wrong. (1/20/88 Valdes)
+
+noao$imred/ccdred/src/imcscales.x
+ The exposure time is now read as real. (1/15/88 Valdes)
+
+noao$imred/ccdred/src/corinput.gx
+ Discovered an initialization bug which caused the fixing of bad lines
+ to fail after the first image. (11/12/87 Valdes)
+
+noao$imred/ccdred/ccdtest/observe.cl
+noao$imred/ccdred/ccdtest/subsection.cl
+noao$imred/ccdred/ccdtest/demo.dat
+ Made modification to allow the demo to work with STF format images.
+ The change was in being more explicit with image extensions; i.e.
+ obs* --> obs*.??h. (11/12/87 Valdes)
+
+noao$imred/ccdred/src/mkpkg
+noao$imred/ccdred/src/ccdmean.x +
+noao$imred/ccdred/src/ccdcache.h +
+noao$imred/ccdred/src/ccdcache.com
+noao$imred/ccdred/src/ccdcache.x
+noao$imred/ccdred/src/t_ccdproc.x
+noao$imred/ccdred/src/ccdproc.x
+noao$imred/ccdred/src/ccdcheck.x
+noao$imred/ccdred/src/setflat.x
+noao$imred/ccdred/src/setdark.x
+noao$imred/ccdred/src/setzero.x
+noao$imred/ccdred/src/setfixpix.x
+noao$imred/ccdred/src/setillum.x
+noao$imred/ccdred/src/setfringe.x
+noao$imred/ccdred/src/t_ccdlist.x
+ 1. There was a recursion problem caused by the absence of the CCDPROC
+ flag in a zero level image which did not need any processing
+ because there was no trimming, overscan subtraction, or bad
+ pixel correction. The procedure CCDPROC left the image
+ unmodified (no CCDPROC flag) which meant that later another unprocessed
+ calibration image would again try to process it leading to
+ recursion. Since I was uncomfortable with relying on the
+ CCDPROC flag I added the routine CCDCHECK to actually check
+ each processing flag against the defined operations. This will
+ also allow additional automatic processing of calibration
+	       images if the user sets new flags after an initial pass
+ through the data. The CCDPROC flag is still set in the data
+ but it is not used.
+	    2. In data which has no object types it is possible for the flat
+	       field image never to have its mean computed for later scaling.
+ There were two modifications to address this problem. If an
+ image is processed without a ccdtype then the mean will be
+ computed at a very small cost in time. If the image is later
+ used as a flat field this information will then be present.
+ Second, if a flat field calibration image does not have the
+ mean value, even if it has been processed, the mean value
+ will still be calculated.
+ 3. In looking at the recursion problem I realized that some of
+ the calibration images could be opened more than once, though
+ READ_ONLY, once for the image being processed and later if the
+	       task has to backtrack to process another calibration frame.  I
+	       was surprised that this was not found on VMS until I realized
+ that for OIF format images the image header is read and the
+ file is then closed. No file is actually left open until pixel
+ I/O is done. However, this should cause STF images to fail on
+ VMS because VMS does not allow a file to be open more than once
+ and the STF image header is kept open. I rewrote the image
+ caching interface to cache the IMIO pointer even if the pixel
+	       data was not cached.  This will ensure that any calibration image
+ is only opened once even if it is accessed independently from
+ different parts of the program.
+ 4. The error message when using fringe and illumination correction
+ images which have not been processed by MKFRINGECOR and
+	       MKILLUMCOR was misleading when referring to the absence of the
+ MKFRINGE and MKILLUM flag. A user thought that the missing
+ flag was FRINGCOR which refers to an image being fringe corrected.
+ The message was made a little more clear.
+ 5. The CCDLIST listing for fringe correction in long format was wrong.
+ (11/12/87 Valdes)
+
+noao$imred/ccdred/src/t_combine.x
+noao$imred/ccdred/src/t_ccdhedit.x
+noao$imred/ccdred/src/setoverscan.x
+noao$imred/ccdred/src/setinput.x
+noao$imred/ccdred/src/imcscales.x
+noao$imred/ccdred/src/imclogsum.x
+noao$imred/ccdred/src/ccdlog.x
+noao$imred/ccdred/src/ccddelete.x
+	Added calls to XT_STRIPWHITE so that null strings containing whitespace
+	are recognized.  It should probably use NOWHITE but this would make
+ it incompatible with V2.5. (11/6/87 Valdes)
+.endhelp
diff --git a/noao/imred/ccdred/badpiximage.par b/noao/imred/ccdred/badpiximage.par
new file mode 100644
index 00000000..9a964701
--- /dev/null
+++ b/noao/imred/ccdred/badpiximage.par
@@ -0,0 +1,5 @@
+fixfile,f,a,,,,Bad pixel file
+template,f,a,,,,Template image
+image,f,a,,,,Bad pixel image to be created
+goodvalue,i,h,1,,,Value assigned to the good pixels
+badvalue,i,h,0,,,Value assigned to the bad pixels
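
A hedged usage sketch based only on the parameter list above; the bad
pixel file, template image, and output mask names (badpix.dat, obs001,
badmask) are hypothetical:

    cl> badpiximage badpix.dat obs001 badmask goodvalue=1 badvalue=0
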
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/ccd.dat b/noao/imred/ccdred/ccddb/ctio/OLD/ccd.dat
new file mode 100644
index 00000000..45e38898
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/ccd.dat
@@ -0,0 +1,23 @@
+exptime itime
+darktime itime
+imagetyp data-typ
+subset none
+biassec biassec [405:425,7:572]
+datasec datasec [35:340,4:570]
+fixfile fixfile home$badpix
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+BIAS zero
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/cfccd.dat b/noao/imred/ccdred/ccddb/ctio/OLD/cfccd.dat
new file mode 100644
index 00000000..35af13e9
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/cfccd.dat
@@ -0,0 +1,23 @@
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+subset filters
+biassec biassec
+datasec datasec
+fixfile fixfile
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+BIAS zero
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/csccd.dat b/noao/imred/ccdred/ccddb/ctio/OLD/csccd.dat
new file mode 100644
index 00000000..d46f11c0
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/csccd.dat
@@ -0,0 +1,23 @@
+exptime exptime
+darktime darktime
+imagetyp data-typ
+subset none
+biassec biassec
+datasec datasec
+fixfile fixfile
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+BIAS zero
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/ech.dat b/noao/imred/ccdred/ccddb/ctio/OLD/ech.dat
new file mode 100644
index 00000000..32cf5ee1
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/ech.dat
@@ -0,0 +1,19 @@
+exptime exptime
+darktime darktime
+subset none
+biassec biassec
+trimsec datasec
+imagetyp imagetyp
+
+'OBJECT' object
+'COMPARISON' other
+'BIAS' zero
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/epi5.dat b/noao/imred/ccdred/ccddb/ctio/OLD/epi5.dat
new file mode 100644
index 00000000..7b7613de
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/epi5.dat
@@ -0,0 +1,23 @@
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+subset none
+biassec biassec [420:431,10:576]
+trimsec trimsec [15:393,10:576]
+fixfile fixfile home$ccds/epi5_badpix.dat
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+BIAS zero
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/epi5_badpix.dat b/noao/imred/ccdred/ccddb/ctio/OLD/epi5_badpix.dat
new file mode 100644
index 00000000..d4ccc345
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/epi5_badpix.dat
@@ -0,0 +1,22 @@
+# EPI5_BADPIX.DAT - GEC EPI5 Blue Air Schmidt untrimmed coordinates
+#
+# Map includes columns which bleed due to very poor charge transfer at low
+# light levels.
+#
+# SRH 8 December 87
+#
+ 37 37 396 313
+ 37 37 510 528
+ 46 46 482 307
+ 77 77 148 490
+129 129 21 48
+154 154 346 446
+262 262 199 450
+284 284 493 549
+307 308 196 210
+307 309 395 576
+312 312 480 496
+347 348 88 111
+347 347 112 468
+352 352 127 438
+378 378 515 529
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/fpccd.dat b/noao/imred/ccdred/ccddb/ctio/OLD/fpccd.dat
new file mode 100644
index 00000000..a56c56c0
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/fpccd.dat
@@ -0,0 +1,23 @@
+EXPTIME exptime
+DARKTIME darktime
+IMAGETYP imagetyp
+subset FPZ
+biassec biassec
+datasec datasec
+fixfile fixfile
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+BIAS zero
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/OLD/instruments.men b/noao/imred/ccdred/ccddb/ctio/OLD/instruments.men
new file mode 100644
index 00000000..8fe97635
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/OLD/instruments.men
@@ -0,0 +1,5 @@
+ccd		CTIO generic CCD
+ech CTIO generic Echelle/CCD
+cfccd CTIO generic CF/CCD
+csccd CTIO generic CS/CCD
+fpccd CTIO generic FP/CCD
diff --git a/noao/imred/ccdred/ccddb/ctio/cfccd_both.dat b/noao/imred/ccdred/ccddb/ctio/cfccd_both.dat
new file mode 100644
index 00000000..37991738
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/cfccd_both.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+subset filters
+#subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/cfccd_f1.dat b/noao/imred/ccdred/ccddb/ctio/cfccd_f1.dat
new file mode 100644
index 00000000..68cd2063
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/cfccd_f1.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/cfccd_f2.dat b/noao/imred/ccdred/ccddb/ctio/cfccd_f2.dat
new file mode 100644
index 00000000..c4d03cb8
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/cfccd_f2.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+#subset filter1
+subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/csccd.dat b/noao/imred/ccdred/ccddb/ctio/csccd.dat
new file mode 100644
index 00000000..000f8c07
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/csccd.dat
@@ -0,0 +1,23 @@
+# CSCCD.DAT -- Instrument file to be used with ccdred when reducing spectroscopic
+# data obtained with ArCon.
+
+subset none
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON object
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/echccd.dat b/noao/imred/ccdred/ccddb/ctio/echccd.dat
new file mode 100644
index 00000000..90d08173
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/echccd.dat
@@ -0,0 +1,23 @@
+# ECHCCD.DAT -- Instrument file to be used with ccdred when reducing echelle
+# spectroscopic data obtained with ArCon.
+
+subset none
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
+FOCUS object
diff --git a/noao/imred/ccdred/ccddb/ctio/instruments.men b/noao/imred/ccdred/ccddb/ctio/instruments.men
new file mode 100644
index 00000000..144c41d5
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/instruments.men
@@ -0,0 +1,9 @@
+cfccd_f1 - Cassegrain focus CCD direct subset=filter1
+cfccd_f2 - Cassegrain focus CCD direct subset=filter2
+cfccd_both - Cassegrain focus CCD direct subset=filters
+csccd - Cassegrain focus spectroscopy
+echccd - Echelle spectroscopy
+nfccd - Newtonian focus CCD direct (Schmidt)
+pfccd_f1 - Prime focus CCD direct subset=filter1
+pfccd_f2 - Prime focus CCD direct subset=filter2
+pfccd_both - Prime focus CCD direct subset=filters
diff --git a/noao/imred/ccdred/ccddb/ctio/nfccd.dat b/noao/imred/ccdred/ccddb/ctio/nfccd.dat
new file mode 100644
index 00000000..06a173cf
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/nfccd.dat
@@ -0,0 +1,23 @@
+# NFCCD.DAT -- Instrument file to be used with ccdred when reducing direct
+# imaging data obtained with ArCon.
+
+subset filter1
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/pfccd_both.dat b/noao/imred/ccdred/ccddb/ctio/pfccd_both.dat
new file mode 100644
index 00000000..ac8e03a6
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/pfccd_both.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+subset filters
+#subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/pfccd_f1.dat b/noao/imred/ccdred/ccddb/ctio/pfccd_f1.dat
new file mode 100644
index 00000000..9893d7f1
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/pfccd_f1.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/ctio/pfccd_f2.dat b/noao/imred/ccdred/ccddb/ctio/pfccd_f2.dat
new file mode 100644
index 00000000..89028468
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/ctio/pfccd_f2.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+#subset filter1
+subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/ccdred/ccddb/kpno/Revisions b/noao/imred/ccdred/ccddb/kpno/Revisions
new file mode 100644
index 00000000..47195a53
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/Revisions
@@ -0,0 +1,35 @@
+.help revisions Dec91 ccddb$kpno
+.nf
+hydra.dat +
+hydra.cl +
+direct.cl +
+coude.cl
+cryocam.cl
+default.cl
+echelle.cl
+fibers.cl
+foe.cl
+specphot.cl
+sunlink.cl
+instruments.men
+ 1. Added hydra entry.
+ 2. Linked all the entries to the new "default.cl" so that each
+ setup script only contains the differences from the default.
+ (9/8/97, Valdes)
+
+*.cl
+ 1. (all) ccdred.plotfile = "".
+ 2. (all) ccdred.pixeltype = "real real".
+ 3. (direct,fibers) ccdproc.interactive = yes
+ 4. (coude, specphot) ccdproc.ccdtype = ""
+ ccdproc.flatcor = no
+ ccdproc.trimsec = ""
+ (12/12/91, Valdes)
+
+instruments.men
+ Removed sunlink from the instrument menu. (12/12/91, Valdes)
+
+coude.dat
+ Changed the subset parameter from FILTER to GRATPOS. (12/11/91, Valdes)
+
+.endhelp
diff --git a/noao/imred/ccdred/ccddb/kpno/camera.dat b/noao/imred/ccdred/ccddb/kpno/camera.dat
new file mode 100644
index 00000000..841a37b9
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/camera.dat
@@ -0,0 +1,21 @@
+exptime otime
+darktime ttime
+imagetyp data-typ
+subset f1pos
+biassec biassec []
+datasec datasec []
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+'OBJECT (0)' object
+'DARK (1)' dark
+'PROJECTOR FLAT (2)' flat
+'SKY FLAT (3)' other
+'COMPARISON LAMP (4)' other
+'BIAS (5)' zero
+'DOME FLAT (6)' flat
diff --git a/noao/imred/ccdred/ccddb/kpno/coude.cl b/noao/imred/ccdred/ccddb/kpno/coude.cl
new file mode 100644
index 00000000..1eb1a73e
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/coude.cl
@@ -0,0 +1,4 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/coude.dat"
+ccdproc.trimsec = ""
diff --git a/noao/imred/ccdred/ccddb/kpno/coude.dat b/noao/imred/ccdred/ccddb/kpno/coude.dat
new file mode 100644
index 00000000..f32350aa
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/coude.dat
@@ -0,0 +1,9 @@
+subset gratpos
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/cryocam.cl b/noao/imred/ccdred/ccddb/kpno/cryocam.cl
new file mode 100644
index 00000000..1e917ff2
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/cryocam.cl
@@ -0,0 +1,3 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/cryocam.dat"
diff --git a/noao/imred/ccdred/ccddb/kpno/cryocam.dat b/noao/imred/ccdred/ccddb/kpno/cryocam.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/cryocam.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/default.cl b/noao/imred/ccdred/ccddb/kpno/default.cl
new file mode 100644
index 00000000..df16c7b6
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/default.cl
@@ -0,0 +1,41 @@
+# Default KPNO parameters.
+
+ccdred.pixeltype = "real real"
+ccdred.verbose = yes
+ccdred.logfile = "logfile"
+ccdred.plotfile = ""
+ccdred.backup = ""
+ccdred.instrument = "ccddb$kpno/default.dat"
+ccdred.ssfile = "subsets"
+ccdred.graphics = "stdgraph"
+ccdred.cursor = ""
+
+ccdproc.ccdtype = ""
+ccdproc.fixpix = no
+ccdproc.overscan = yes
+ccdproc.trim = yes
+ccdproc.zerocor = yes
+ccdproc.darkcor = no
+ccdproc.flatcor = no
+ccdproc.readcor = no
+ccdproc.scancor = no
+ccdproc.readaxis = "line"
+ccdproc.biassec = "image"
+ccdproc.trimsec = "image"
+ccdproc.interactive = yes
+ccdproc.function = "chebyshev"
+ccdproc.order = 1
+ccdproc.sample = "*"
+ccdproc.naverage = 1
+ccdproc.niterate = 1
+ccdproc.low_reject = 3
+ccdproc.high_reject = 3
+ccdproc.grow = 0
+
+combine.rdnoise= "rdnoise"
+combine.gain="gain"
+zerocombine.rdnoise= "rdnoise"
+zerocombine.gain="gain"
+flatcombine.rdnoise= "rdnoise"
+flatcombine.gain="gain"
+flatcombine.reject = "crreject"
diff --git a/noao/imred/ccdred/ccddb/kpno/demo.cl b/noao/imred/ccdred/ccddb/kpno/demo.cl
new file mode 100644
index 00000000..51c54909
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/demo.cl
@@ -0,0 +1,72 @@
+# Demonstration parameter setting script.
+
+# Set package parameters:
+ccdred.pixeltype = "real real"
+ccdred.verbose = yes
+ccdred.logfile = "Demo.log"
+ccdred.plotfile = "Demo.plots"
+ccdred.backup = "B"
+ccdred.ssfile = "Demo.subsets"
+
+# Set processing parameters:
+ccdproc.fixpix = yes
+ccdproc.overscan = yes
+ccdproc.trim = yes
+ccdproc.zerocor = yes
+ccdproc.darkcor = yes
+ccdproc.flatcor = yes
+ccdproc.illumcor = no
+ccdproc.fringecor = no
+ccdproc.readcor = no
+ccdproc.scancor = no
+ccdproc.readaxis = "line"
+ccdproc.fixfile = "ccdtest$badpix.dat"
+ccdproc.biassec = "image"
+ccdproc.trimsec = "image"
+ccdproc.zero = ""
+ccdproc.dark = ""
+ccdproc.flat = ""
+ccdproc.illum = ""
+ccdproc.fringe = ""
+ccdproc.scantype = "shortscan"
+ccdproc.nscan = 1
+ccdproc.interactive = yes
+ccdproc.function = "legendre"
+ccdproc.order = 1
+ccdproc.sample = "*"
+ccdproc.naverage = 1
+ccdproc.niterate = 1
+ccdproc.low_reject = 3.
+ccdproc.high_reject = 3.
+ccdproc.grow = 0.
+flatcombine.process = no
+
+# Set demonstration observation parameters:
+artobs.ncols = 132
+artobs.nlines = 100
+artobs.filter = ""
+artobs.datasec = "[1:100,1:100]"
+artobs.trimsec = "[3:98,3:98]"
+artobs.biassec = "[103:130,*]"
+artobs.imdata = ""
+artobs.skyrate = 0.
+artobs.badpix = "ccdtest$badpix.dat"
+artobs.biasval = 500.
+artobs.badval = 500.
+artobs.zeroval = 100.
+artobs.darkrate = 1.
+artobs.zeroslope = 0.01
+artobs.darkslope = 0.002
+artobs.flatslope = 3.0000000000000E-4
+artobs.sigma = 5.
+artobs.seed = 0
+artobs.overwrite = no
+
+# Set demonstration subsection readout parameters:
+subsection.ncols = 82
+subsection.nlines = 50
+subsection.ccdsec = "[26:75,26:75]"
+subsection.datasec = "[1:50,1:50]"
+subsection.trimsec = ""
+subsection.biassec = "[51:82,1:50]"
+subsection.overwrite = no
diff --git a/noao/imred/ccdred/ccddb/kpno/demo.dat b/noao/imred/ccdred/ccddb/kpno/demo.dat
new file mode 100644
index 00000000..72697f58
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/demo.dat
@@ -0,0 +1,3 @@
+imagetyp ccdtype
+exptime integ
+subset filter
diff --git a/noao/imred/ccdred/ccddb/kpno/direct.cl b/noao/imred/ccdred/ccddb/kpno/direct.cl
new file mode 100644
index 00000000..dfa9bc51
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/direct.cl
@@ -0,0 +1,4 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/direct.dat"
+ccdproc.flatcor = yes
diff --git a/noao/imred/ccdred/ccddb/kpno/direct.dat b/noao/imred/ccdred/ccddb/kpno/direct.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/direct.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/echelle.cl b/noao/imred/ccdred/ccddb/kpno/echelle.cl
new file mode 100644
index 00000000..a011cc8f
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/echelle.cl
@@ -0,0 +1,3 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/echelle.dat"
diff --git a/noao/imred/ccdred/ccddb/kpno/echelle.dat b/noao/imred/ccdred/ccddb/kpno/echelle.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/echelle.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/fibers.cl b/noao/imred/ccdred/ccddb/kpno/fibers.cl
new file mode 100644
index 00000000..bb1e0398
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/fibers.cl
@@ -0,0 +1,3 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/fibers.dat"
diff --git a/noao/imred/ccdred/ccddb/kpno/fibers.dat b/noao/imred/ccdred/ccddb/kpno/fibers.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/fibers.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/fits.dat b/noao/imred/ccdred/ccddb/kpno/fits.dat
new file mode 100644
index 00000000..f47abf8d
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/fits.dat
@@ -0,0 +1,21 @@
+exptime itime
+darktime itime
+imagetyp data-typ
+subset f1pos
+biassec biassec []
+datasec datasec []
+
+fixpix bp-flag 0
+overscan bt-flag 0
+zerocor bi-flag 0
+darkcor dk-flag 0
+flatcor ff-flag 0
+fringcor fr-flag 0
+
+'object ( 0 )' object
+'dark ( 1 )' dark
+'proj flat ( 2 )' flat
+'sky flat ( 3 )' other
+'comp ( 4 )' other
+'bias ( 5 )' zero
+'dome flat ( 6 )' flat
diff --git a/noao/imred/ccdred/ccddb/kpno/foe.cl b/noao/imred/ccdred/ccddb/kpno/foe.cl
new file mode 100644
index 00000000..da4081cb
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/foe.cl
@@ -0,0 +1,3 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/foe.dat"
diff --git a/noao/imred/ccdred/ccddb/kpno/foe.dat b/noao/imred/ccdred/ccddb/kpno/foe.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/foe.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/hydra.cl b/noao/imred/ccdred/ccddb/kpno/hydra.cl
new file mode 100644
index 00000000..b24dc05e
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/hydra.cl
@@ -0,0 +1,12 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/hydra.dat"
+
+combine.gain = "gain_12"
+combine.rdnoise = "noise_12"
+zerocombine.gain = "gain_12"
+zerocombine.rdnoise = "noise_12"
+darkcombine.gain = "gain_12"
+darkcombine.rdnoise = "noise_12"
+flatcombine.gain = "gain_12"
+flatcombine.rdnoise = "noise_12"
diff --git a/noao/imred/ccdred/ccddb/kpno/hydra.dat b/noao/imred/ccdred/ccddb/kpno/hydra.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/hydra.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/instruments.men b/noao/imred/ccdred/ccddb/kpno/instruments.men
new file mode 100644
index 00000000..5dea4af6
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/instruments.men
@@ -0,0 +1,12 @@
+direct Current headers for Sun plus CCDPROC setup for direct CCD
+specphot Current headers for Sun plus CCDPROC setup for spectropho-
+		tometry, i.e. GoldCam, barefoot CCD
+hydra WIYN Hydra with Arcon
+foe Current headers for Sun plus CCDPROC setup for FOE
+fibers Current headers for Sun plus CCDPROC setup for fiber array
+coude Current headers for Sun plus CCDPROC setup for Coude
+cryocam	Current headers for Sun plus CCDPROC setup for Cryo Cam
+echelle Current headers for Sun plus CCDPROC setup for Echelle
+kpnoheaders Current headers with no changes to CCDPROC parameters
+fits Mountain FITS header prior to Aug. 87 (?)
+camera Mountain CAMERA header for IRAF Version 2.6 and earlier
diff --git a/noao/imred/ccdred/ccddb/kpno/kpnoheaders.dat b/noao/imred/ccdred/ccddb/kpno/kpnoheaders.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/kpnoheaders.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/specphot.cl b/noao/imred/ccdred/ccddb/kpno/specphot.cl
new file mode 100644
index 00000000..4359279d
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/specphot.cl
@@ -0,0 +1,5 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/specphot.dat"
+ccdproc.trimsec = ""
+ccdproc.grow = 1
diff --git a/noao/imred/ccdred/ccddb/kpno/specphot.dat b/noao/imred/ccdred/ccddb/kpno/specphot.dat
new file mode 100644
index 00000000..f0a6134b
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/specphot.dat
@@ -0,0 +1,9 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
+'SKY FLAT' object
diff --git a/noao/imred/ccdred/ccddb/kpno/sunlink.cl b/noao/imred/ccdred/ccddb/kpno/sunlink.cl
new file mode 100644
index 00000000..1f5fe7fe
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/sunlink.cl
@@ -0,0 +1,4 @@
+cl < "ccddb$kpno/default.cl"
+
+ccdred.instrument = "ccddb$kpno/sunlink.dat"
+ccdproc.flatcor = yes
diff --git a/noao/imred/ccdred/ccddb/kpno/sunlink.dat b/noao/imred/ccdred/ccddb/kpno/sunlink.dat
new file mode 100644
index 00000000..44d237d6
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/sunlink.dat
@@ -0,0 +1,8 @@
+subset filters
+
+DARK dark
+BIAS zero
+OBJECT object
+'DOME FLAT' flat
+'PROJECTOR FLAT' flat
+'COMPARISON' comp
diff --git a/noao/imred/ccdred/ccddb/kpno/template.cl b/noao/imred/ccdred/ccddb/kpno/template.cl
new file mode 100644
index 00000000..b5284029
--- /dev/null
+++ b/noao/imred/ccdred/ccddb/kpno/template.cl
@@ -0,0 +1,25 @@
+# Template parameter setting script. These parameters should be
+# set for a particular instrument.
+
+ccdproc.fixpix =
+ccdproc.overscan =
+ccdproc.trim =
+ccdproc.zerocor =
+ccdproc.darkcor =
+ccdproc.flatcor =
+ccdproc.readcor =
+ccdproc.scancor =
+ccdproc.readaxis =
+ccdproc.fixfile =
+ccdproc.biassec =
+ccdproc.datasec =
+ccdproc.scantype =
+ccdproc.interactive =
+ccdproc.function =
+ccdproc.order =
+ccdproc.sample =
+ccdproc.naverage =
+ccdproc.niterate =
+ccdproc.low_reject =
+ccdproc.high_reject =
+ccdproc.grow =
diff --git a/noao/imred/ccdred/ccdgroups.par b/noao/imred/ccdred/ccdgroups.par
new file mode 100644
index 00000000..4b8d5007
--- /dev/null
+++ b/noao/imred/ccdred/ccdgroups.par
@@ -0,0 +1,5 @@
+images,s,a,,,,CCD images to group
+output,s,a,,,,Output root group filename
+group,s,h,"ccdtype","position|title|date|ccdtype|subset",,Group type
+radius,r,h,"60",,,Group position radius (arc sec)
+ccdtype,s,h,"",,,CCD image types to select
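
A hedged usage sketch based only on the parameter list above; the image
list and output root name (obs*.imh, grp) are hypothetical:

    cl> ccdgroups obs*.imh grp group=subset ccdtype="object"
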
diff --git a/noao/imred/ccdred/ccdhedit.par b/noao/imred/ccdred/ccdhedit.par
new file mode 100644
index 00000000..5695dffa
--- /dev/null
+++ b/noao/imred/ccdred/ccdhedit.par
@@ -0,0 +1,4 @@
+images,s,a,,,,CCD images
+parameter,s,a,,,,Image header parameter
+value,s,a,,,,Parameter value
+type,s,h,"string","string|real|integer",,Parameter type (string|real|integer)
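
A hedged command-line sketch; the same task is also called from
ccdtest$artobs.cl further below, and the image name obs001 is
hypothetical:

    cl> ccdhedit obs001 exptime 10. type="real"
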
diff --git a/noao/imred/ccdred/ccdinstrument.par b/noao/imred/ccdred/ccdinstrument.par
new file mode 100644
index 00000000..99bec801
--- /dev/null
+++ b/noao/imred/ccdred/ccdinstrument.par
@@ -0,0 +1,5 @@
+images,s,a,,,,List of images
+instrument,s,h,)_.instrument,,,CCD instrument file
+ssfile,s,h,)_.ssfile,,,Subset translation file
+edit,b,h,yes,,,Edit instrument translation file?
+parameters,s,h,"basic","basic|common|all",,Parameters to be displayed
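
A hedged usage sketch based only on the parameter list above; the image
list obs*.imh is hypothetical:

    cl> ccdinstrument obs*.imh parameters="basic" edit+
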
diff --git a/noao/imred/ccdred/ccdlist.par b/noao/imred/ccdred/ccdlist.par
new file mode 100644
index 00000000..3eb82917
--- /dev/null
+++ b/noao/imred/ccdred/ccdlist.par
@@ -0,0 +1,5 @@
+images,s,a,,,,CCD images to be listed
+ccdtype,s,h,"",,,CCD image type to be listed
+names,b,h,no,,,List image names only?
+long,b,h,no,,,Long format listing?
+ccdproc,pset,h,,,,CCD processing parameters
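
A hedged usage sketch based only on the parameter list above; the image
list obs*.imh is hypothetical:

    cl> ccdlist obs*.imh ccdtype="" long+
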
diff --git a/noao/imred/ccdred/ccdmask.par b/noao/imred/ccdred/ccdmask.par
new file mode 100644
index 00000000..8127f4dc
--- /dev/null
+++ b/noao/imred/ccdred/ccdmask.par
@@ -0,0 +1,12 @@
+image,f,a,,,,Input image
+mask,f,a,,,,Output pixel mask
+ncmed,i,h,7,1,,Column box size for median level calculation
+nlmed,i,h,7,1,,Line box size for median level calculation
+ncsig,i,h,15,10,,Column box size for sigma calculation
+nlsig,i,h,15,10,,Line box size for sigma calculation
+lsigma,r,h,6.,,,Low clipping sigma
+hsigma,r,h,6.,,,High clipping sigma
+ngood,i,h,5,1,,Minimum column length of good pixel segments
+linterp,i,h,2,1,,Mask value for line interpolation
+cinterp,i,h,3,1,,Mask value for column interpolation
+eqinterp,i,h,2,1,,Mask value for equal interpolation
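
A hedged usage sketch based only on the parameter list above; the flat
field image and mask names (flatV, maskV) are hypothetical:

    cl> ccdmask flatV maskV ncmed=7 nlmed=7 lsigma=6. hsigma=6.
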
diff --git a/noao/imred/ccdred/ccdproc.par b/noao/imred/ccdred/ccdproc.par
new file mode 100644
index 00000000..f86ad07d
--- /dev/null
+++ b/noao/imred/ccdred/ccdproc.par
@@ -0,0 +1,39 @@
+images,s,a,"",,,List of CCD images to correct
+output,s,h,"",,,List of output CCD images
+ccdtype,s,h,"object",,,CCD image type to correct
+max_cache,i,h,0,0,,Maximum image caching memory (in Mbytes)
+noproc,b,h,no,,,"List processing steps only?
+"
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+zerocor,b,h,yes,,,Apply zero level correction?
+darkcor,b,h,yes,,,Apply dark count correction?
+flatcor,b,h,yes,,,Apply flat field correction?
+illumcor,b,h,no,,,Apply illumination correction?
+fringecor,b,h,no,,,Apply fringe correction?
+readcor,b,h,no,,,Convert zero level image to readout correction?
+scancor,b,h,no,,,"Convert flat field image to scan correction?
+"
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+biassec,s,h,"",,,Overscan strip image section
+trimsec,s,h,"",,,Trim data section
+zero,s,h,"",,,Zero level calibration image
+dark,s,h,"",,,Dark count calibration image
+flat,s,h,"",,,Flat field images
+illum,s,h,"",,,Illumination correction images
+fringe,s,h,"",,,Fringe correction images
+minreplace,r,h,1.,,,Minimum flat field value
+scantype,s,h,"shortscan","shortscan|longscan",,Scan type (shortscan|longscan)
+nscan,i,h,1,1,,"Number of short scan lines
+"
+interactive,b,h,no,,,Fit overscan interactively?
+function,s,h,"legendre",,,Fitting function
+order,i,h,1,1,,Number of polynomial terms or spline pieces
+sample,s,h,"*",,,Sample points to fit
+naverage,i,h,1,,,Number of sample points to combine
+niterate,i,h,1,0,,Number of rejection iterations
+low_reject,r,h,3.,0.,,Low sigma rejection factor
+high_reject,r,h,3.,0.,,High sigma rejection factor
+grow,r,h,0.,0.,,Rejection growing radius
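
A hedged usage sketch based only on the parameter list above; the image
list and calibration names (obs*.imh, Zero, Flat*) are hypothetical:

    cl> ccdproc obs*.imh ccdtype="object" zero="Zero" flat="Flat*"
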
diff --git a/noao/imred/ccdred/ccdred.cl b/noao/imred/ccdred/ccdred.cl
new file mode 100644
index 00000000..d289b1ed
--- /dev/null
+++ b/noao/imred/ccdred/ccdred.cl
@@ -0,0 +1,29 @@
+#{ CCDRED -- CCD Reduction Package
+
+set ccddb = "ccdred$ccddb/"
+set ccdtest = "ccdred$ccdtest/"
+
+package ccdred
+
+task $ccdtest = ccdtest$ccdtest.cl
+
+task badpiximage,
+ ccdgroups,
+ ccdhedit,
+ ccdinstrument,
+ ccdlist,
+ ccdmask,
+ ccdproc,
+ combine,
+ mkfringecor,
+ mkillumcor,
+ mkillumflat,
+ mkskycor,
+ mkskyflat = ccdred$x_ccdred.e
+
+task darkcombine = ccdred$darkcombine.cl
+task flatcombine = ccdred$flatcombine.cl
+task setinstrument = ccdred$setinstrument.cl
+task zerocombine = ccdred$zerocombine.cl
+
+clbye()
diff --git a/noao/imred/ccdred/ccdred.hd b/noao/imred/ccdred/ccdred.hd
new file mode 100644
index 00000000..c98f5a87
--- /dev/null
+++ b/noao/imred/ccdred/ccdred.hd
@@ -0,0 +1,38 @@
+# Help directory for the CCDRED package.
+
+$doc = "./doc/"
+
+badpiximage hlp=doc$badpiximage.hlp
+ccdgroups hlp=doc$ccdgroups.hlp
+ccdhedit hlp=doc$ccdhedit.hlp
+ccdlist hlp=doc$ccdlist.hlp
+ccdmask hlp=doc$ccdmask.hlp
+ccdproc hlp=doc$ccdproc.hlp
+combine hlp=doc$combine.hlp
+darkcombine hlp=doc$darkcombine.hlp
+flatcombine hlp=doc$flatcombine.hlp
+mkfringecor hlp=doc$mkfringecor.hlp
+mkillumcor hlp=doc$mkillumcor.hlp
+mkillumflat hlp=doc$mkillumflat.hlp
+mkskycor hlp=doc$mkskycor.hlp
+mkskyflat hlp=doc$mkskyflat.hlp
+setinstrument hlp=doc$setinstrument.hlp
+zerocombine hlp=doc$zerocombine.hlp
+
+ccdgeometry hlp=doc$ccdgeometry.hlp
+ccdinstrument hlp=doc$ccdinst.hlp
+ccdtypes hlp=doc$ccdtypes.hlp
+flatfields hlp=doc$flatfields.hlp
+guide hlp=doc$guide.hlp
+instruments hlp=doc$instruments.hlp
+package hlp=doc$ccdred.hlp
+subsets hlp=doc$subsets.hlp
+
+revisions sys=Revisions
+
+$ccdtest = "noao$imred/ccdred/ccdtest/"
+
+ccdtest men=ccdtest$ccdtest.men,
+ hlp=..,
+ pkg=ccdtest$ccdtest.hd,
+ src=ccdtest$ccdtest.cl
diff --git a/noao/imred/ccdred/ccdred.men b/noao/imred/ccdred/ccdred.men
new file mode 100644
index 00000000..cbd02af8
--- /dev/null
+++ b/noao/imred/ccdred/ccdred.men
@@ -0,0 +1,28 @@
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdinstrument - Review and edit instrument translation files
+ ccdlist - List CCD processing information
+ ccdmask - Create bad pixel mask from CCD flat field images
+ ccdproc - Process CCD images
+ ccdtest - CCD test and demonstration package
+ combine - Combine CCD images
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+ setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+
+ ADDITIONAL HELP TOPICS
+
+ ccdgeometry - Discussion of CCD coordinate/geometry keywords
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ package - CCD image reduction package
+ subsets - Description of CCD subsets
diff --git a/noao/imred/ccdred/ccdred.par b/noao/imred/ccdred/ccdred.par
new file mode 100644
index 00000000..218e7421
--- /dev/null
+++ b/noao/imred/ccdred/ccdred.par
@@ -0,0 +1,12 @@
+# CCDRED package parameter file
+
+pixeltype,s,h,"real real",,,Output and calculation pixel datatypes
+verbose,b,h,no,,,Print log information to the standard output?
+logfile,f,h,"logfile",,,Text log file
+plotfile,f,h,"",,,Log metacode plot file
+backup,s,h,"",,,Backup directory or prefix
+instrument,s,h,"",,,CCD instrument file
+ssfile,s,h,"subsets",,,Subset translation file
+graphics,s,h,"stdgraph",,,Interactive graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+version,s,h,"2: October 1987"
diff --git a/noao/imred/ccdred/ccdtest/artobs.cl b/noao/imred/ccdred/ccdtest/artobs.cl
new file mode 100644
index 00000000..b64294a6
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/artobs.cl
@@ -0,0 +1,109 @@
+# ARTOBS -- Make a CCD observation
+
+procedure artobs (image, exptime, ccdtype)
+
+string image {prompt="Image name"}
+real exptime {prompt="Exposure time"}
+string ccdtype {prompt="CCD type"}
+
+int ncols=132 {prompt="Number of columns"}
+int nlines=100 {prompt="Number of lines"}
+string filter="" {prompt="Filter"}
+string datasec="[1:100,1:100]" {prompt="Data section"}
+string trimsec="[3:98,3:98]" {prompt="Trim section"}
+string biassec="[103:130,*]" {prompt="Bias section"}
+
+file imdata="" {prompt="Image data"}
+real skyrate=0. {prompt="Sky count rate"}
+file badpix="" {prompt="Bad pixel regions"}
+real biasval=500. {prompt="Bias value"}
+real badval=500. {prompt="Bad pixel value"}
+real zeroval=100. {prompt="Zero level value"}
+real darkrate=1. {prompt="Dark count rate"}
+real zeroslope=0.01 {prompt="Slope of zero level"}
+real darkslope=0.002 {prompt="Slope of dark count rate"}
+real flatslope=0.0003 {prompt="Flat field slope"}
+real sigma=5. {prompt="Gaussian sigma"}
+int seed=0 {prompt="Random number seed"}
+bool overwrite=no {prompt="Overwrite existing image?"}
+
+begin
+ int c1, c2, l1, l2
+ real exp, value, valslope
+ string im, type, s
+
+ im = image
+ exp = exptime
+ type = ccdtype
+
+ if (access (im//".imh") == yes)
+ im = im // ".imh"
+ if (access (im//".hhh") == yes)
+ im = im // ".hhh"
+ if (access (im) == yes) {
+ if (overwrite == yes)
+ imdelete (im, verify=no)
+ else
+ return
+ }
+
+ # Create the image.
+ s = str (ncols) // " " // str (nlines)
+ mkimage (im, "make", 0., 2, s, pixtype="short", slope=0., sigma=sigma,
+ seed=seed)
+
+ # Add a data image.
+ if (access (imdata//".imh") == yes)
+ imdata = imdata // ".imh"
+ if (access (imdata//".hhh") == yes)
+ imdata = imdata // ".hhh"
+ if (access (imdata) == yes)
+ imcopy (imdata//datasec, im//datasec, verbose=no)
+
+ # Add sky.
+ value = exp * skyrate
+ if (value != 0.)
+ mkimage (im//datasec, "add", value, slope=0., sigma=0.)
+
+ # Add flat field response.
+ if (flatslope != 0.)
+ mkimage (im//datasec, "mul", 1., slope=flatslope, sigma=0.)
+
+ # Add zero level and dark count.
+ value = zeroval + exp * darkrate
+ valslope = zeroslope + exp * darkslope
+ if ((value != 0.) && (valslope != 0.))
+ mkimage (im//datasec, "add", value, slope=valslope, sigma=0.)
+
+ # Add bias.
+ if (biasval != 0.)
+ mkimage (im, "add", biasval, slope=0., sigma=sigma, seed=0)
+
+ # Set bad pixels.
+ if (access (badpix)) {
+ list = badpix
+ while (fscan (list, c1, c2, l1, l2) != EOF) {
+ if (nscan() != 4)
+ next
+ c1 = max (1, c1)
+ c2 = min (ncols, c2)
+ l1 = max (1, l1)
+ l2 = min (nlines, l2)
+ s = "["//c1//":"//c2//","//l1//":"//l2//"]"
+ mkimage (im//s, "replace", badval, slope=0., sigma=0.)
+ }
+ }
+
+ # Set image header
+ ccdhedit (im, "exptime", exp, type="real")
+ if (type != "")
+ ccdhedit (im, "imagetyp", type, type="string")
+ if (datasec != "")
+ ccdhedit (im, "datasec", datasec, type="string")
+ if (trimsec != "")
+ ccdhedit (im, "trimsec", trimsec, type="string")
+ if (biassec != "")
+ ccdhedit (im, "biassec", biassec, type="string")
+ if (filter != "")
+ ccdhedit (im, "subset", filter, type="string")
+end
diff --git a/noao/imred/ccdred/ccdtest/artobs.hlp b/noao/imred/ccdred/ccdtest/artobs.hlp
new file mode 100644
index 00000000..02f2cf0f
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/artobs.hlp
@@ -0,0 +1,127 @@
+.help artobs Oct87 noao.imred.ccdred.ccdtest
+.ih
+NAME
+artobs -- Make a demonstration CCD observation
+.ih
+USAGE
+artobs image exptime ccdtype
+.ih
+PARAMETERS
+.ls image
+Observation to be created.
+.le
+.ls exptime
+Exposure time of observation.
+.le
+.ls ccdtype
+CCD image type of observation. This type is one of the standard types
+for the CCDRED package.
+.le
+.ls ncols = 132, nlines = 100
+The number of columns and lines in the full image created including
+bias section.
+.le
+.ls filter = ""
+Filter string for the observation.
+.le
+.ls datasec = "[1:100,1:100]"
+Data section of the observation.
+.le
+.ls trimsec = "[3:98,3:98]"
+Trim section for later processing.
+.le
+.ls biassec = "[103:130,*]"
+Prescan or overscan bias section.
+.le
+.ls imdata = ""
+Image to be used as source of observation if specified. The image must
+be at least as large as the data section.
+.le
+.ls skyrate = 0.
+Sky counting rate. The total sky value will be scaled by the exposure time.
+.le
+.ls badpix = ""
+Bad pixel region file in the standard CCDRED bad pixel file format.
+.le
+.ls biasval = 500.
+Mean bias value of the entire image.
+.le
+.ls badval = 500.
+Bad pixel value placed at the specified bad pixel regions.
+.le
+.ls zeroval = 100.
+Zero level of the data section.
+.le
+.ls darkrate = 1.
+Dark count rate. The total dark count will be scaled by the exposure time.
+.le
+.ls zeroslope = 0.01
+Slope of the zero level per pixel.
+.le
+.ls darkslope = 0.002
+Slope of the dark count rate per pixel. This is also scaled by the exposure
+time.
+.le
+.ls flatslope = 3.0000000000000E-4
+The mean flat field response is 1 with a slope given by this value.
+.le
+.ls sigma = 5.
+Gaussian noise sigma per pixel.
+.le
+.ls seed = 0
+Random number seed. If zero, new values are used for every observation.
+.le
+.ls overwrite = no
+Overwrite an existing image? If no, a new observation is not created.
+There is no warning message.
+.le
+.ih
+DESCRIPTION
+This script task generates artificial CCD observations which include
+bad pixels, bias and zero levels, dark counts, flat field response
+variations and sky brightness levels. Optionally, image data from
+a reference image may be included. This task is designed to be used
+with the \fBccdred\fR package and includes appropriate image header
+information.
+
+First the task checks whether the requested image exists. If it does
+exist and the overwrite flag is no then a new observation is not created.
+If the overwrite flag is set then the old image is deleted and a new
+observation is created.
+
+An empty image of the specified size and of pixel data type short is
+first created. If a noise sigma is specified it is added to the entire
+image. If a reference image is specified then the image section given by
+the \fIdatasec\fR parameter is copied into the data section of the
+observation. Next a sky level, specified by the \fIskyrate\fR
+parameter times the exposure time, is added to the data section.
+The flat field response with a mean of one and a slope given by the
+\fIflatslope\fR parameter is multiplied into the data section. If
+a dark count rate and/or a zero level is specified then these effects
+are added to the data section. Then the specified bias level
+is added to the entire image; i.e. including the bias section.
+Finally, the pixels specified in the bad pixel region file, if one
+is specified, are set to the bad pixel value.
+
+The CCD reduction parameters for the data section, the trim section,
+the bias section, exposure time, the CCD image type, and the filter
+are added to the image header (if they are specified) using \fBccdhedit\fR
+to apply any keyword translation.
+.ih
+EXAMPLES
+1. To create some test CCD images first set the task parameters such as
+number of columns and lines, data, bias, and trim sections, and data
+values. The images are then created as follows:
+
+ cl> artobs.filter = "V" # Set the filter
+ cl> artobs zero 0. zero # Zero level image
+ cl> artobs dark 1000. dark skyrate=0. # Dark count image
+ cl> artobs flat 1. flat skyrate=1000. # Flat field image
+ cl> artobs obj 10. object # Object image
+
+Note that the CCD image type does not change how the image is generated,
+so for a dark count image you must set the sky count rate to zero.
+.ih
+SEE ALSO
+mkimage, subsection, demo
+.endhelp
diff --git a/noao/imred/ccdred/ccdtest/badpix.dat b/noao/imred/ccdred/ccdtest/badpix.dat
new file mode 100644
index 00000000..92b13aa9
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/badpix.dat
@@ -0,0 +1,4 @@
+10 10 1 1000
+20 20 1 20
+30 30 50 100
+1 1000 50 50
diff --git a/noao/imred/ccdred/ccdtest/ccdtest.cl b/noao/imred/ccdred/ccdtest/ccdtest.cl
new file mode 100644
index 00000000..eb3f8b68
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/ccdtest.cl
@@ -0,0 +1,10 @@
+#{ CCDTEST -- CCDRED Test package
+
+package ccdtest
+
+task mkimage = ccdtest$x_ccdred.e
+task artobs = ccdtest$artobs.cl
+task subsection = ccdtest$subsection.cl
+task demo = ccdtest$demo.cl
+
+clbye()
diff --git a/noao/imred/ccdred/ccdtest/ccdtest.hd b/noao/imred/ccdred/ccdtest/ccdtest.hd
new file mode 100644
index 00000000..4218f9b0
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/ccdtest.hd
@@ -0,0 +1,6 @@
+# Help directory for the CCDTEST package.
+
+demo hlp=demo.hlp, src=demo.cl
+mkimage hlp=mkimage.hlp, src=t_mkimage.x
+artobs hlp=artobs.hlp, src=artobs.cl
+subsection hlp=subsection.hlp, src=subsection.cl
diff --git a/noao/imred/ccdred/ccdtest/ccdtest.men b/noao/imred/ccdred/ccdtest/ccdtest.men
new file mode 100644
index 00000000..f2b3909d
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/ccdtest.men
@@ -0,0 +1,4 @@
+ artobs - Create an artificial CCD observation
+ demo - Run a demonstration of the CCD reduction package
+ mkimage - Make or modify an image with simple values
+ subsection - Create an artificial subsection CCD observation
diff --git a/noao/imred/ccdred/ccdtest/demo.cl b/noao/imred/ccdred/ccdtest/demo.cl
new file mode 100644
index 00000000..213500c4
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/demo.cl
@@ -0,0 +1 @@
+stty (playback=demofile, verify=yes)
diff --git a/noao/imred/ccdred/ccdtest/demo.dat b/noao/imred/ccdred/ccdtest/demo.dat
new file mode 100644
index 00000000..733a319b
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/demo.dat
@@ -0,0 +1,182 @@
+\O=NOAO/IRAF V2.5 valdes@lyra Mon 15:42:35 12-Oct-87
+\T=vt640
+\G=vt640
+clear\n\{%V-%!200\}
+\n\{%10000
+ CCD REDUCTION DEMONSTRATION
+
+ In this demonstration we are going to make some (artificial) CCD
+ observations which we will reduce using the CCDRED package. The
+ dome is opening and we are ready to begin observing...\}
+\n\{%V-\}
+unlearn\sccdred;unlearn\sccdtest\n\{ # Initialize parameters and data...\}
+imdelete\s%B%%*.*\sv-\n\{%V-\}
+imrename\sB*.*\s%B%%*.*\sv-\n\{%V-\}
+imdelete\sZero*.*,Flat*.*\n\{%V-\}
+delete\sDemo*\sv-\n\{%V-\}
+\n\{%V-\}
+setinstrument\sdemo\sreview-\n\{ # Set instrument parameters...\}
+lpar\sartobs\n\{ # List observing parameters...\}
+artobs\sobs001\s0.\szero\n\{%15000 # Observe zero level images...\}
+artobs\sobs002\s0.\szero\n\{%V-\}
+artobs\sobs003\s0.\szero\n\{%V-\}
+artobs\sobs004\s0.\szero\n\{%V-\}
+artobs\sobs005\s0.\szero\n\{%V-\}
+\n\{%V-\}
+artobs.skyrate=0\n\{ # Observe a long dark count...\}
+artobs\sobs006\s1000.\sdark\n\{%V-\}
+\n\{%V-\}
+artobs.filter="V"\n\{ # Observe V flat fields...\}
+artobs.skyrate=2000\n\{%V-\}
+artobs\sobs007\s1.\sflat\n\{%V-\}
+artobs\sobs008\s1.\sflat\n\{%V-\}
+artobs\sobs009\s1.\sflat\n\{%V-\}
+artobs\sobs010\s1.\sflat\n\{%V-\}
+artobs\sobs011\s2.\sflat\n\{%V-\}
+artobs\sobs012\s2.\sflat\n\{%V-\}
+\n\{%V-\}
+artobs.filter="B"\n\{ # Observe B flat fields...\}
+artobs.skyrate=1000\n\{%V-\}
+artobs\sobs013\s1.\sflat\n\{%V-\}
+artobs\sobs014\s2.\sflat\n\{%V-\}
+artobs\sobs015\s3.\sflat\n\{%V-\}
+artobs\sobs016\s3.\sflat\n\{%V-\}
+artobs\sobs017\s3.\sflat\n\{%V-\}
+artobs\sobs018\s3.\sflat\n\{%V-\}
+\n\{%V-\}
+artobs.filter="V"\n\{ # Observe objects...\}
+artobs.skyrate=100\n\{%V-\}
+artobs\sobs019\s10.\sobject\simdata=dev$pix\n\{%V-\}
+artobs\sobs020\s20.\sobject\simdata=dev$pix\n\{%V-\}
+artobs.filter="B"\n\{%V-\}
+artobs\sobs021\s30.\sobject\simdata=dev$pix\n\{%V-\}
+artobs\sobs022\s40.\sobject\simdata=dev$pix\n\{%V-\}
+\n\{%V-\}
+lpar\ssubsection\n\{ # Subsection readout parameters...\}
+subsection\sobs023\sobs019\n\{%5000 # Readout a subsection of the CCD...\}
+dir\n\{ # Check directory of observations...\}
+clear\n\{%10000 # Continue...\}
+\n\{%15000
+ INSTRUMENT SETUP
+
+ Because there are a variety of instruments, observatories, and data
+ formats there are many parameters. To set all of these conveniently
+ there is a task which reads setup files prepared by the observing
+ staff. The setup task:
+ 1. Defines an instrument header translation file which
+ translates the image header parameters to something
+ the CCDRED package understands. This is an important
+ feature of the package.
+ 2. It runs a setup script which sets parameters and performs
+ other functions desired by the observing staff.
+ 3. The user is then given the opportunity to modify the
+ package and processing parameters...\}
+\n\{%V-\}
+setinstrument\smode=m\n\{ # Set demo instrument parameters...\}
+demo\r
+\{%5000\}^Z
+\{%5000\}^Z
+\{%5000\}\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+Zero\r
+\r
+Flat*.*\r
+^Z
+clear\n\{%5000 # Continue...\}
+\n\{%20000
+ IMAGE HEADERS
+
+ The CCDRED package uses image header information if present. This
+ includes the type of data (object, flat field, etc.), exposure
+ time, region of image containing the data, processing status, and
+    more.  To make this more general there is an instrument header
+ translation file to translate image header keywords to the standard
+ names used by the package. In this example the image header
+ keywords are identical to the package except that the image type is
+ CCDTYPE, the exposure time is INTEG and the subset parameter is
+    FILTER.  Let's look at the image header using the standard
+ image header lister and the special one in the CCDRED package.
+ This special lister provides additional information about image
+ types and processing status...\}
+
+\n\{%V-\}
+imheader\sobs023\sl+\n\{ # List object image header...\}
+ccdlist\sobs*.*\n\{%5000 # List short CCD status...\}
+ccdlist\sobs023\sl+\n\{%5000 # List long CCD status...\}
+clear\n\{%5000 # Continue...\}
+\n\{%20000
+ COMBINE CALIBRATION IMAGES
+
+ In order to reduce calibration noise and eliminate cosmic ray events
+ we combine many zero level and flat field calibration images. The
+ combining task provides many options. We will combine the images by
+ scaling each image to the same exposure time, rejecting the highest
+ pixel at each image point, and taking a weighted average of the
+ remainder. Flat field images must be combined separately for each
+ filter. We will simply specify all the images and the task automatically
+ selects the appropriate images to combine! ...\}
+\n\{%V-\}
+zerocombine\smode=m\n\{ # Combine zero level images...\}
+obs*.*\r
+\{%5000\}^Z
+flatcombine\smode=m\n\{ # Combine flat field images...\}
+obs*.*\r
+\{%5000\}^Z
+clear\n\{%5000 # Continue...\}
+\n\{%15000
+ PROCESS OBSERVATIONS
+
+ We are now ready to process our observations. The processing steps we
+ have selected are to replace bad pixels by interpolation, fit and
+ subtract a readout bias given by an overscan strip, subtract the zero
+ level calibration image, scale and subtract a dark count calibration,
+ divide by a flat field, trim the image of the overscan strip and border
+ columns and lines. The task which does this is "ccdproc". The task is
+ expert at reducing CCD observations easily and efficiently. It checks
+ the image types, applies the proper filter flat field, applies the
+ proper part of the calibration images to subsection readouts, does only
+ the processing steps selected if not done previously, and automatically
+ processes the calibration images as needed. As before we simply specify
+ all the images and the task selects the appropriate images to process
+ including finding the one dark count image "obs006". Watch the log
+ messages to see what the task is doing...\}
+\n\{%V-\}
+ccdproc\sobs*.*\n\{ # Process object images...\}
+\n\{%V-\}
+\{%V-\}q0,+,\r
+NO\n\{%V-\}
+\n\{%10000
+    That's it!  We're done.  Now let's check the results.  The "ccdlist"
+ listing will show the processing status and the images are now smaller
+ and of pixel datatype real. The CCDSEC parameter identifies the relation
+ of the image to the actual CCD pixels of the detector...\}
+\n\{%V-\}
+ccdlist\sobs*.*\sccdtype=object\n\{ # List short CCD status...\}
+ccdlist\sobs023\sl+\n\{%5000 # List long CCD status...\}
+imhead\sobs023\sl+\n\{%5000 # List object image header...\}
+dir\n\{%5000 # Check the data directory...\}
+\n\{%V-
+ We specified that the original images be saved by using the prefix B.
+ We are also left with a text log file, a metacode file containing the
+ fits to the overscan regions, and a file which maps the filter subset
+ strings to short identifiers used in CCDLIST and when creating the
+ combined images "FlatV" and "FlatB". You may look through these files,
+ or use GKIMOSAIC to examine the metacode file, now if you want.
+\}
diff --git a/noao/imred/ccdred/ccdtest/demo.hlp b/noao/imred/ccdred/ccdtest/demo.hlp
new file mode 100644
index 00000000..c03d5efb
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/demo.hlp
@@ -0,0 +1,27 @@
+.help demo Oct87 noao.imred.ccdred.ccdtest
+.ih
+NAME
+demo -- Run a demonstration of the CCD reduction package
+.ih
+USAGE
+demo
+.ih
+PARAMETERS
+.ls demofile = "ccdtest$demo.dat"
+Demonstration playback file.
+.le
+.ih
+DESCRIPTION
+This script task runs a demonstration playback. The playback file
+is specified by a hidden parameter. Normally this default playback file
+is used. The default demonstration will use the task \fBtv.display\fR if it
+is loaded to show you the CCD frames being processed.
+.ih
+EXAMPLES
+1. To run a demonstration of the \fBccdred\fR package:
+
+ cl> demo
+.ih
+SEE ALSO
+stty
+.endhelp
diff --git a/noao/imred/ccdred/ccdtest/demo.par b/noao/imred/ccdred/ccdtest/demo.par
new file mode 100644
index 00000000..70bee0f3
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/demo.par
@@ -0,0 +1 @@
+demofile,s,h,"ccdtest$demo.dat",,,Demonstration playback file
diff --git a/noao/imred/ccdred/ccdtest/mkimage.hlp b/noao/imred/ccdred/ccdtest/mkimage.hlp
new file mode 100644
index 00000000..2be4ab5b
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/mkimage.hlp
@@ -0,0 +1,87 @@
+.help mkimage Oct87 noao.imred.ccdred.ccdtest
+.ih
+NAME
+mkimage -- Make or modify an image with simple values
+.ih
+USAGE
+mkimage image option value [ndim dims]
+.ih
+PARAMETERS
+.ls image
+Image to create or modify.
+.le
+.ls option
+Editing option which is one of the following:
+.ls make
+Make a new image of the specified size, dimensionality, pixel type, and values.
+.le
+.ls replace
+Replace pixel values in the image.
+.le
+.ls add
+Add to the pixel values in the image.
+.le
+.ls multiply
+Multiply the pixel values in the image.
+.le
+.le
+.ls value
+Mean pixel value to be used.
+.le
+.ls ndim
+Number of dimensions when creating a new image.
+.le
+.ls dims
+Image dimensions given as a white space separated string (see the examples).
+.le
+.ls pixtype = "real"
+Pixel datatype when creating an image. The types are "real", "short",
+"integer", "long", and "double".
+.le
+.ls slope = 0.
+Slope of pixel values per pixel.
+.le
+.ls sigma = 0.
+Gaussian noise of pixel values if not zero.
+.le
+.ls seed = 0
+Seed for random numbers. If zero, a seed of 1 is used the first time the
+task is called, and subsequent calls while the task remains in the process
+cache continue the random number sequence.
+.le
+.ih
+DESCRIPTION
+An image is created or modified using simple values. This task is intended
+for test and demonstration purposes. An image may be created of a specified
+size, dimensionality, and pixel datatype. The pixel values used in creating
+or editing an image consist of a sloped plane (which repeats for dimensions
+greater than 2) with pseudo-Gaussian noise. The sloped plane is defined such
+that:
+
+	pix[i,j] = value + slope * (i + j - (ncols + nlines) / 2 - 1)
+
+where i and j are the pixel indices (starting with 1) and ncols and nlines
+are the number of columns and lines. The interpretation of "value" is that
+it is the mean of the plane. The Gaussian noise is only approximately random
+for purposes of speed!
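+
+As a small worked case, with value = 100, slope = 1, and a 3 x 3 image the
+expression gives pix[1,1] = 98, pix[2,2] = 100, and pix[3,3] = 102, so the
+mean of the plane is the specified value of 100.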
+.ih
+EXAMPLES
+1. To create a 2 dimensional real image of size 100 x 200 with all zero
+values:
+
+ cl> mkimage name make 0 2 "100 200"
+
+Note that the dimension string is quoted because of the blank separated
+values.
+
+2. To add noise with a sigma of 5:
+
+ cl> mkimage name add 0 sigma=5
+
+3. To replace a region of the image with the value 10:
+
+ cl> mkimage name[10:20,30:40] replace 10
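+
+4. To multiply a section of an image by a gently sloped plane of mean one,
+for instance to mimic a weak flat field response (an illustrative sketch):
+
+	cl> mkimage name[3:98,3:98] multiply 1 slope=3.0E-4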
+.ih
+SEE ALSO
+artobs, subsection
+.endhelp
diff --git a/noao/imred/ccdred/ccdtest/mkimage.par b/noao/imred/ccdred/ccdtest/mkimage.par
new file mode 100644
index 00000000..148bf7ea
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/mkimage.par
@@ -0,0 +1,10 @@
+image,s,a,,,,Image to make or modify
+option,s,a,,"make|replace|add|multiply",,Editing option
+value,r,a,,,,Mean pixel value
+slope,r,h,0.,,,Slope of pixel values
+sigma,r,h,0.,0.,,Noise sigma
+seed,i,h,0,0,,Seed for noise generator
+
+ndim,i,a,,1,7,Number of dimensions
+dims,s,a,,,,Image dimensions
+pixtype,s,h,"real","short|real",,Pixel datatype
diff --git a/noao/imred/ccdred/ccdtest/mkpkg b/noao/imred/ccdred/ccdtest/mkpkg
new file mode 100644
index 00000000..79fcb59c
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/mkpkg
@@ -0,0 +1,10 @@
+# Make CCDTEST Package.
+
+$checkout libpkg.a ..
+$update libpkg.a
+$checkin libpkg.a ..
+$exit
+
+libpkg.a:
+ t_mkimage.x <imhdr.h>
+ ;
diff --git a/noao/imred/ccdred/ccdtest/subsection.cl b/noao/imred/ccdred/ccdtest/subsection.cl
new file mode 100644
index 00000000..60522c8b
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/subsection.cl
@@ -0,0 +1,53 @@
+# SUBSECTION -- Make a subsection CCD observation
+
+procedure subsection (subimage, image)
+
+string subimage {prompt="Subsection image name"}
+string image {prompt="Full image name"}
+
+int ncols=82 {prompt="Number of columns"}
+int nlines=50 {prompt="Number of lines"}
+string ccdsec="[26:75,26:75]" {prompt="CCD section"}
+string datasec="[1:50,1:50]" {prompt="Data section"}
+string trimsec="" {prompt="Trim section"}
+string biassec="[51:82,1:50]" {prompt="Bias section"}
+bool overwrite=no {prompt="Overwrite existing image?"}
+
+begin
+ string im, imdata, s
+ real biasval, sigma
+
+ im = subimage
+ imdata = image
+ biasval = artobs.biasval
+ sigma = artobs.sigma
+
+ if (access (im//".imh") == yes)
+ im = im // ".imh"
+ if (access (im//".hhh") == yes)
+ im = im // ".hhh"
+ if (access (im) == yes) {
+ if (overwrite == yes)
+ imdelete (im, verify=no)
+ else
+ return
+ }
+
+ # Create the image.
+ s = "[1:" // str (ncols) // ",1:" // str(nlines) // "]"
+ imcopy (imdata//s, im, verbose=no)
+
+ # Copy subsection image.
+ imcopy (imdata//ccdsec, im//datasec, verbose=no)
+
+ # Add bias.
+ if (biasval != 0.)
+ mkimage (im//biassec, "replace", biasval, slope=0., sigma=sigma,
+ seed=0)
+
+ # Set image header
+ ccdhedit (im, "ccdsec", ccdsec, type="string")
+ ccdhedit (im, "datasec", datasec, type="string")
+ ccdhedit (im, "trimsec", trimsec, type="string")
+ ccdhedit (im, "biassec", biassec, type="string")
+end
diff --git a/noao/imred/ccdred/ccdtest/subsection.hlp b/noao/imred/ccdred/ccdtest/subsection.hlp
new file mode 100644
index 00000000..a2779500
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/subsection.hlp
@@ -0,0 +1,73 @@
+.help subsection Oct87 noao.imred.ccdred.ccdtest
+.ih
+NAME
+subsection -- Make a subsection readout CCD image
+.ih
+USAGE
+subsection subimage image
+.ih
+PARAMETERS
+.ls subimage
+Subsection image to be created.
+.le
+.ls image
+Full image from which to take the subsection readout.
+.le
+.ls ncols = 82, nlines = 50
+Number of image columns and lines in the full subsection image including
+bias regions.
+.le
+.ls ccdsec="[26:75,26:75]"
+CCD section of the subsection. This is the image section of the full
+image to be used.
+.le
+.ls datasec = "[1:50,1:50]"
+Data section of the image.
+.le
+.ls trimsec = ""
+Trim section for later processing.
+.le
+.ls biassec="[51:82,1:50]"
+Prescan or overscan bias section.
+.le
+.ls overwrite = no
+Overwrite an existing image? If no, a new observation is not created.
+There is no warning message.
+.le
+.ih
+DESCRIPTION
+This script task generates artificial CCD subsection observations
+which include bad pixels, bias and zero levels, dark counts, flat
+field response variations and sky brightness levels. It creates an
+subsection image which includes a bias section from a previously
+created image (created by the task \fBartobs\fR). This task is
+designed to be used with the \fBccdred\fR package and includes
+appropriate image header information.
+
+First the task checks whether the requested image exists. If it does
+exist and the overwrite flag is no then a new observation is not created.
+If the overwrite flag is set then the old image is deleted and a new
+observation is created.
+
+The image section given by the parameter \fIccdsec\fR of the reference
+image is copied to the new image. It is assumed the reference image
+contains any desired zero level, bias, flat field, and dark count
+effects. The bias section is then added with a bias value given by
+\fBartobs.biasval\fR with noise given by \fBartobs.sigma\fR.
+
+Also the image header parameters from the reference image are
+copied and the data, bias, trim, and ccd section parameters are
+updated.
+.ih
+EXAMPLES
+1. To create some test CCD images first create full frame observations with
+the task \fBartobs\fR. Then set the subsection parameters
+for the size of the subsection observation, the data section, trim section,
+bias section, and the CCD section of the subsection observation.
+
+ cl> artobs obj 5 object filter=V
+ cl> subsection obj1 object
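+
+The readout geometry might also be set explicitly before creating the
+image (an illustrative sketch; the values shown are the defaults):
+
+	cl> subsection.ccdsec = "[26:75,26:75]"
+	cl> subsection.datasec = "[1:50,1:50]"
+	cl> subsection.biassec = "[51:82,1:50]"
+	cl> subsection obj1 object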
+.ih
+SEE ALSO
+mkimage, artobs, demo
+.endhelp
diff --git a/noao/imred/ccdred/ccdtest/t_mkimage.x b/noao/imred/ccdred/ccdtest/t_mkimage.x
new file mode 100644
index 00000000..ff0d5f26
--- /dev/null
+++ b/noao/imred/ccdred/ccdtest/t_mkimage.x
@@ -0,0 +1,204 @@
+include <imhdr.h>
+
+define OPTIONS "|make|replace|add|multiply|"
+define MAKE 1 # Create a new image
+define REPLACE 2 # Replace pixels
+define ADD 3 # Add to pixels
+define MULTIPLY 4 # Multiply pixels
+
+# T_MKIMAGE -- Make or edit an image with simple values.
+# An image may be created of a specified size, dimensionality, and pixel
+# datatype. The image may also be edited to replace, add, or multiply
+# by specified values. The values may be a combination of a sloped plane
+# (repeated for dimensions greater than 2) and Gaussian noise.
+# The editing may be confined to sections of the image by use of image
+# sections in the input image. This task is a simple tool for
+# specialized uses in test applications.
+#
+# The sloped plane is defined such that:
+#
+#	pix[i,j] = value + slope * (i + j - (ncols + nlines) / 2 - 1)
+#
+# The interpretation of value is that it is the mean of the plane.
+#
+# The Gaussian noise is only approximately random for purposes of speed!
+
+procedure t_mkimage ()
+
+char image[SZ_FNAME] # Image to edit
+char option[7] # Edit option
+real value # Edit value
+real slope # Slope
+real sigma # Gaussian noise sigma
+long seed # Random number seed
+
+int i, op, ncols, nlines
+long vin[IM_MAXDIM], vout[IM_MAXDIM]
+pointer sp, rannums, im, buf, bufin, bufout
+
+int	clgwrd(), clgeti(), clscan(), nscan(), imgnlr(), impnlr()
+char clgetc()
+real clgetr()
+long clgetl()
+pointer immap()
+
+data seed/1/
+
+begin
+ call smark (sp)
+ call clgstr ("image", image, SZ_FNAME)
+ op = clgwrd ("option", option, 7, OPTIONS)
+ value = clgetr ("value")
+ slope = clgetr ("slope")
+ sigma = clgetr ("sigma")
+ if (clgetl ("seed") > 0)
+ seed = clgetl ("seed")
+
+ call amovkl (long (1), vin, IM_MAXDIM)
+ call amovkl (long (1), vout, IM_MAXDIM)
+ switch (op) {
+ case MAKE:
+ im = immap (image, NEW_IMAGE, 0)
+ IM_NDIM(im) = clgeti ("ndim")
+ i = clscan ("dims")
+ do i = 1, IM_NDIM(im)
+ call gargi (IM_LEN(im, i))
+ if (nscan() != IM_NDIM(im))
+ call error (0, "Bad dimension string")
+ switch (clgetc ("pixtype")) {
+ case 's':
+ IM_PIXTYPE(im) = TY_SHORT
+ case 'i':
+ IM_PIXTYPE(im) = TY_INT
+ case 'l':
+ IM_PIXTYPE(im) = TY_LONG
+ case 'r':
+ IM_PIXTYPE(im) = TY_REAL
+ case 'd':
+ IM_PIXTYPE(im) = TY_DOUBLE
+ default:
+ call error (0, "Bad pixel type")
+ }
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ call salloc (rannums, 2 * ncols, TY_REAL)
+ call mksigma (sigma, seed, Memr[rannums], 2*ncols)
+
+ while (impnlr (im, bufout, vout) != EOF)
+ call mkline (value, slope, sigma, seed, Memr[rannums],
+ Memr[bufout], vout[2] - 1, ncols, nlines)
+ case REPLACE:
+ im = immap (image, READ_WRITE, 0)
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ call salloc (rannums, 2 * ncols, TY_REAL)
+ call mksigma (sigma, seed, Memr[rannums], 2*ncols)
+
+ while (impnlr (im, bufout, vout) != EOF)
+ call mkline (value, slope, sigma, seed, Memr[rannums],
+ Memr[bufout], vout[2] - 1, ncols, nlines)
+ case ADD:
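+	    # Read each input line, build the sloped (and optionally noisy)
+	    # edit line, and add it to the existing pixels.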
+ im = immap (image, READ_WRITE, 0)
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ call salloc (buf, ncols, TY_REAL)
+ call salloc (rannums, 2 * ncols, TY_REAL)
+ call mksigma (sigma, seed, Memr[rannums], 2*ncols)
+
+ while (imgnlr (im, bufin, vin) != EOF) {
+ i = impnlr (im, bufout, vout)
+ call mkline (value, slope, sigma, seed, Memr[rannums],
+ Memr[buf], vout[2] - 1, ncols, nlines)
+ call aaddr (Memr[bufin], Memr[buf], Memr[bufout], ncols)
+ }
+ case MULTIPLY:
+ im = immap (image, READ_WRITE, 0)
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ call salloc (buf, ncols, TY_REAL)
+ call salloc (rannums, 2 * ncols, TY_REAL)
+ call mksigma (sigma, seed, Memr[rannums], 2*ncols)
+
+ while (imgnlr (im, bufin, vin) != EOF) {
+ i = impnlr (im, bufout, vout)
+ call mkline (value, slope, sigma, seed, Memr[rannums],
+ Memr[buf], vout[2] - 1, ncols, nlines)
+ call amulr (Memr[bufin], Memr[buf], Memr[bufout], ncols)
+ }
+ }
+
+ call imunmap (im)
+ call sfree (sp)
+end
+
+
+# MKLINE -- Make a line of data. A slope of zero is a special case.
+# The Gaussian random numbers are taken from the sequence of stored
+# values with starting point chosen randomly in the interval 1 to ncols.
+# This is not very random but is much more efficient.
+
+procedure mkline (value, slope, sigma, seed, rannums, data, line, ncols, nlines)
+
+real value # Mean value
+real slope # Slope in mean
+real sigma # Sigma about mean
+long seed # Random number seed
+real rannums[ARB] # Random numbers
+real data[ncols] # Data for line
+int line # Line number
+int ncols # Number of columns
+int nlines # Number of lines
+
+int i
+real a, urand()
+
+begin
+ if (slope == 0.)
+ call amovkr (value, data, ncols)
+ else {
+ a = value + slope * (line - (ncols + nlines) / 2. - 1)
+ do i = 1, ncols
+ data[i] = a + slope * i
+ }
+ if (sigma > 0.) {
+ i = (ncols - 1) * urand (seed) + 1
+ call aaddr (rannums[i], data, data, ncols)
+ }
+end
+
+
+# MKSIGMA -- A sequence of random numbers of the specified sigma and
+# starting seed is generated. The random number generator is modeled after
+# that in Numerical Recipes by Press, Flannery, Teukolsky, and Vetterling.
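+# (This is the polar form of the Box-Muller method; each accepted pair of
+# uniform deviates yields two Gaussian deviates.)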
+
+procedure mksigma (sigma, seed, rannums, nnums)
+
+real sigma # Sigma for random numbers
+long seed # Seed for random numbers
+real rannums[nnums] # Random numbers
+int nnums # Number of random numbers
+
+int i
+real v1, v2, r, fac, urand()
+
+begin
+ if (sigma > 0.) {
+ for (i=1; i<=nnums; i=i+1) {
+ repeat {
+ v1 = 2 * urand (seed) - 1.
+ v2 = 2 * urand (seed) - 1.
+ r = v1 ** 2 + v2 ** 2
+ } until ((r > 0) && (r < 1))
+ fac = sqrt (-2. * log (r) / r) * sigma
+ rannums[i] = v1 * fac
+ if (i == nnums)
+ break
+ i = i + 1
+ rannums[i] = v2 * fac
+ }
+ }
+end
diff --git a/noao/imred/ccdred/combine.par b/noao/imred/ccdred/combine.par
new file mode 100644
index 00000000..0a1ae2f8
--- /dev/null
+++ b/noao/imred/ccdred/combine.par
@@ -0,0 +1,40 @@
+# COMBINE -- Image combine parameters
+
+input,s,a,,,,List of images to combine
+output,s,a,,,,List of output images
+plfile,s,h,"",,,List of output pixel list files (optional)
+sigma,s,h,"",,,"List of sigma images (optional)
+"
+ccdtype,s,h,"",,,CCD image type to combine (optional)
+subsets,b,h,no,,,Combine images by subset parameter?
+delete,b,h,no,,,Delete input images after combining?
+clobber,b,h,no,,,"Clobber existing output image?
+"
+combine,s,h,"average","average|median",,Type of combine operation
+reject,s,h,"none","none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip",,Type of rejection
+project,b,h,no,,,Project highest dimension of input images?
+outtype,s,h,"real","short|ushort|integer|long|real|double",,Output image pixel datatype
+offsets,f,h,"none",,,Input image offsets
+masktype,s,h,"none","none|goodvalue|badvalue|goodbits|badbits",,Mask type
+maskvalue,r,h,0,,,Mask value
+blank,r,h,0.,,,"Value if there are no pixels
+"
+scale,s,h,"none",,,Image scaling
+zero,s,h,"none",,,Image zero point offset
+weight,s,h,"none",,,Image weights
+statsec,s,h,"",,,"Image section for computing statistics
+"
+lthreshold,r,h,INDEF,,,Lower threshold
+hthreshold,r,h,INDEF,,,Upper threshold
+nlow,i,h,1,0,,minmax: Number of low pixels to reject
+nhigh,i,h,1,0,,minmax: Number of high pixels to reject
+nkeep,i,h,1,,,Minimum to keep (pos) or maximum to reject (neg)
+mclip,b,h,yes,,,Use median in sigma clipping algorithms?
+lsigma,r,h,3.,0.,,Lower sigma clipping factor
+hsigma,r,h,3.,0.,,Upper sigma clipping factor
+rdnoise,s,h,"0.",,,ccdclip: CCD readout noise (electrons)
+gain,s,h,"1.",,,ccdclip: CCD gain (electrons/DN)
+snoise,s,h,"0.",,,ccdclip: Sensitivity noise (fraction)
+sigscale,r,h,0.1,0.,,Tolerance for sigma clipping scaling corrections
+pclip,r,h,-0.5,,,pclip: Percentile clipping parameter
+grow,i,h,0,,,Radius (pixels) for 1D neighbor rejection
diff --git a/noao/imred/ccdred/cosmicrays.par b/noao/imred/ccdred/cosmicrays.par
new file mode 100644
index 00000000..3d14b146
--- /dev/null
+++ b/noao/imred/ccdred/cosmicrays.par
@@ -0,0 +1,15 @@
+input,s,a,,,,List of images in which to detect cosmic rays
+output,s,a,,,,List of cosmic ray replaced output images (optional)
+badpix,s,h,"",,,"List of bad pixel files (optional)
+"
+ccdtype,s,h,"",,,CCD image type to select (optional)
+threshold,r,h,25.,,,Detection threshold above mean
+fluxratio,r,h,2.,,,Flux ratio threshold (in percent)
+npasses,i,h,5,1,,Number of detection passes
+window,s,h,"5","5|7",,"Size of detection window
+"
+interactive,b,h,yes,,,Examine parameters interactively?
+train,b,h,no,,,Use training objects?
+objects,*imcur,h,"",,,Cursor list of training objects
+savefile,f,h,"",,,File to save train objects
+answer,s,q,,"no|yes|NO|YES",,Review parameters for a particular image?
diff --git a/noao/imred/ccdred/darkcombine.cl b/noao/imred/ccdred/darkcombine.cl
new file mode 100644
index 00000000..715456eb
--- /dev/null
+++ b/noao/imred/ccdred/darkcombine.cl
@@ -0,0 +1,48 @@
+# DARKCOMBINE -- Process and combine dark count CCD images.
+
+procedure darkcombine (input)
+
+string input {prompt="List of dark images to combine"}
+file output="Dark" {prompt="Output dark image root name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="minmax" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="dark" {prompt="CCD image type to combine"}
+bool process=yes {prompt="Process images before combining?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="exposure" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=0 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=0. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+	if (process == yes)
+
+	# Combine the dark count images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=no, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/ccdred/doc/Notes b/noao/imred/ccdred/doc/Notes
new file mode 100644
index 00000000..209faf30
--- /dev/null
+++ b/noao/imred/ccdred/doc/Notes
@@ -0,0 +1,96 @@
+12/15/93:
+I have modified CCDPROC to more fully support scan table observations. In
+combination with the ability to have the number of scan rows encoded in the
+image header automatically, this allows such data to be processed in a
+fairly foolproof and documented way.
+
+First if ccdproc.scancor=no then the NSCANROW keyword and nscan parameter
+are ignored. For actual scanned data this may be useful to override
+things. Otherwise the following steps are taken. The logic is slightly
+complex so that everything is done in the right order and only as needed.
+
+The task wants to apply dark count and flat field calibration images which
+have been scanned by the same number of rows. [Zero calibration images are
+assumed not to be scanned. This made sense to me but if desired the zero
+images can also be treated like the darks and flats.] This is similar to
+the way flat fields are checked for subset (filter/grating). If the
+appropriate dark or flat has not been scanned then it is scanned in
+software; i.e. a moving average is taken over the unscanned image.
+
+The number of scan rows is determined for each object being processed from
+the NSCANROW keyword or appropriate translation in the header translation
+file. If this keyword is not found the nscan parameter value is used;
+i.e. it is assumed the object image has been scanned by the specified
+amount. This allows using the software in cases where the number of scan
+rows is not encoded in the header. In the case of dark and flat images if
+NSCANROW is not found a value of 1 (unscanned) is assumed.
+
+The set of possible calibration images (from the zero and flat parameters
+or the list of input images) is searched for one which has been scanned
+with the same number of lines as the object being processed. If one is
+found it is processed as needed before applying to the object. If one is
+not found then an unscanned one is sought. It is an error if neither can
+be found. An unscanned image is first processed as necessary (overscan,
+trim, etc.) and then scanned in software to create a new image. The new
+image has the name of the unscanned image with the number of scan lines
+appended, for example Flat1.32. It also has the NSCANROW keyword added as
+well as a comment indicating the image from which it was created. This
+approach allows the calibration image to be created only once for each
+different scan format and the number of scan lines may be changed for
+different observations and the appropriate calibration made from the
+unscanned image.
+
+The following example shows how this all works. There are four object
+images using two filters and two scan row values and unscanned
+zero, dark, and flats.
+
+cc> dir
+Dark.imh FlatV.imh obs019.imh obs021.imh pixels
+FlatB.imh Zero.imh obs020.imh obs022.imh
+cc> hselect obs* $I,filter,nscanrow yes
+obs019.imh V 24
+obs020.imh V 32
+obs021.imh B 24
+obs022.imh B 32
+cc> ccdproc obs* overscan+ trim+ zerocor+ darkcor+ flatcor+ scancor+
+obs019.imh: Dec 15 17:53 Zero level correction image is Zero
+Dark.imh: Dec 15 17:53 Zero level correction image is Zero
+Dark.24.imh: Dec 15 17:53 Converted to shortscan from Dark.imh with nscan=24
+obs019.imh: Dec 15 17:53 Dark count correction image is Dark.24.imh
+FlatV.imh: Dec 15 17:53 Zero level correction image is Zero
+FlatV.imh: Dec 15 17:53 Dark count correction image is Dark.imh
+FlatV.24.imh: Dec 15 17:53 Converted to shortscan from FlatV.imh with nscan=24
+obs019.imh: Dec 15 17:53 Flat field image is FlatV.24.imh
+obs020.imh: Dec 15 17:53 Zero level correction image is Zero
+Dark.32.imh: Dec 15 17:53 Converted to shortscan from Dark.imh with nscan=32
+obs020.imh: Dec 15 17:53 Dark count correction image is Dark.32.imh
+FlatV.32.imh: Dec 15 17:53 Converted to shortscan from FlatV.imh with nscan=32
+obs020.imh: Dec 15 17:53 Flat field image is FlatV.32.imh
+obs021.imh: Dec 15 17:53 Zero level correction image is Zero
+obs021.imh: Dec 15 17:53 Dark count correction image is Dark.24.imh
+FlatB.imh: Dec 15 17:53 Zero level correction image is Zero
+FlatB.imh: Dec 15 17:53 Dark count correction image is Dark.imh
+FlatB.24.imh: Dec 15 17:53 Converted to shortscan from FlatB.imh with nscan=24
+obs021.imh: Dec 15 17:53 Flat field image is FlatB.24.imh
+obs022.imh: Dec 15 17:53 Zero level correction image is Zero
+obs022.imh: Dec 15 17:53 Dark count correction image is Dark.32.imh
+FlatB.32.imh: Dec 15 17:53 Converted to shortscan from FlatB.imh with nscan=32
+obs022.imh: Dec 15 17:53 Flat field image is FlatB.32.imh
+cc> ccdlist *.imh
+Dark.24.imh[96,96][real][dark][][OTZ]:
+Dark.32.imh[96,96][real][dark][][OTZ]:
+Dark.imh[96,96][real][dark][][OTZ]:
+FlatB.24.imh[96,96][real][flat][B][OTZD]:
+FlatB.32.imh[96,96][real][flat][B][OTZD]:
+FlatB.imh[96,96][real][flat][B][OTZD]:
+FlatV.24.imh[96,96][real][flat][V][OTZD]:
+FlatV.32.imh[96,96][real][flat][V][OTZD]:
+FlatV.imh[96,96][real][flat][V][OTZD]:
+Zero.imh[96,96][real][zero][][OT]:
+obs019.imh[96,96][real][object][V][OTZDF]:
+obs020.imh[96,96][real][object][V][OTZDF]:
+obs021.imh[96,96][real][object][B][OTZDF]:
+obs022.imh[96,96][real][object][B][OTZDF]:
+
+Frank
diff --git a/noao/imred/ccdred/doc/badpiximage.hlp b/noao/imred/ccdred/doc/badpiximage.hlp
new file mode 100644
index 00000000..46e13160
--- /dev/null
+++ b/noao/imred/ccdred/doc/badpiximage.hlp
@@ -0,0 +1,51 @@
+.help badpiximage Jun87 noao.imred.ccdred
+.ih
+NAME
+badpiximage -- Create a bad pixel mask image from a bad pixel file
+.ih
+USAGE
+badpiximage fixfile template image
+.ih
+PARAMETERS
+.ls fixfile
+Bad pixel file.
+.le
+.ls template
+Template image used to define the size of the bad pixel mask image.
+.le
+.ls image
+Bad pixel mask image to be created.
+.le
+.ls goodvalue = 1
+Integer value assigned to the good pixels.
+.le
+.ls badvalue = 0
+Integer value assigned to the bad pixels.
+.le
+.ih
+DESCRIPTION
+A bad pixel mask image is created from the specified bad pixel file.
+The format of the bad pixel file is that used by \fBccdproc\fR to
+correct CCD defects (see \fBinstruments\fR). The bad pixel mask image is of
+pixel type short and
+has the value given by the parameter \fBgoodvalue\fR for the good
+pixels and the value given by the parameter \fBbadvalue\fR for the bad pixels.
+The image size and header parameters are taken from the specified
+template image. The bad pixel mask image may be used to view the
+location of the bad pixels and blink against a data image using an
+image display, to mask or flag bad pixels later by image arithmetic,
+and to propagate the positions of the bad pixels through the
+reductions.
+.ih
+EXAMPLES
+1. To make a bad pixel mask image from the bad pixel file "cryocambp.dat"
+using the image "ccd005" as the template:
+
+ cl> badpiximage cryocambp.dat ccd005 cryocambp
+
+2. To make the bad pixel mask image with good values of 0 and bad values of 1:
+
+	cl> badpixim cryocambp.dat ccd005 cryocambp good=0 bad=1
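+
+3. The mask made in the first example might later be applied with image
+arithmetic, for instance to zero the bad pixels (an illustrative sketch
+using the default good value of 1 and bad value of 0):
+
+	cl> imarith ccd005 "*" cryocambp ccd005fix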
+.ih
+SEE ALSO
+ccdproc, instruments
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdgeometry.hlp b/noao/imred/ccdred/doc/ccdgeometry.hlp
new file mode 100644
index 00000000..a051ae5e
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdgeometry.hlp
@@ -0,0 +1,73 @@
+.help ccdgeometry Sep87 noao.imred.ccdred
+.ih
+NAME
+ccdgeometry - Discussion of CCD geometry and header parameters
+.ih
+DESCRIPTION
+The \fBccdred\fR package maintains and updates certain geometry
+information about the images. This geometry is described by four image
+header parameters which may be present. These are defined below by the
+parameter names used in the package. Note that these names may be
+different in the image header using the image header translation
+feature of the package.
+
+.ls DATASEC
+The section of the image containing the CCD data. If absent the
+entire image is assumed to be data. Only the pixels within the
+data section are modified during processing. Therefore, there may be
+additional calibration or observation information in the image.
+If after processing, the data section is the entire image it is
+not recorded in the image header.
+.le
+.ls CCDSEC
+The section of the CCD corresponding to the data section. This
+refers to the physical format, columns and lines, of the detector. This is
+the coordinate system used during processing to relate calibration
+data to the image data; i.e. image data pixels are calibrated by
+calibration pixels at the same CCD coordinates regardless of image pixel
+coordinates. This allows recording only parts of the CCD during data
+taking and calibrating with calibration frames covering some or all of
+the CCD. The CCD section is maintained during trimming operations.
+Note that changing the format of the images by image operators outside
+of the \fBccdred\fR package will invalidate this coordinate system.
+The size of the CCD section must agree with that of the data section.
+If a CCD section is absent then it defaults to the data section such
+that the first pixel of the data section has CCD coordinate (1,1).
+.le
+.ls BIASSEC
+The section of the image containing prescan or overscan bias information.
+It consists of a strip perpendicular to the readout axis. There may be
+both a prescan and overscan but the package currently only uses one.
+This parameter may be overridden during processing by the parameter
+\fIccdproc.biassec\fR. Only the part of the bias section along the
+readout is used and the length of the bias region is determined by
+the trim section. If one wants to limit the region of the bias
+strip used in the fit then the \fIsample\fR parameter should be used.
+.le
+.ls TRIMSEC
+The section of the image extracted during processing when the trim
+operation is selected (\fIccdproc.trim\fR). If absent when the trim
+operation is selected it defaults to the data section; i.e. the processed
+image consists only of the data section. This parameter may be overridden
+during processing by the parameter \fIccdproc.trimsec\fR. After trimming
+this parameter, if present, is removed from the image header. The
+CCD section, data section, and bias section parameters are also modified
+by trimming.
+.le
+
+The geometry is as follows. When a CCD image is recorded it consists
+of a data section corresponding to part or all of the CCD detector.
+Regions outside of the data section may contain additional information
+which is not affected except by trimming. Most commonly this consists
+of prescan and overscan bias data. When recording only part of the
+full CCD detector the package maintains information about that part and
+correctly applies calibrations for that part of the detector. Also any
+trimming operation updates the CCD coordinate information. If the
+images include the data section, bias section, trim section, and ccd
+section the processing may be performed entirely automatically.
+
+The sections are specified using the notation [c1:c2,l1:l2] where c1
+and c2 are the first and last columns and l1 and l2 are the first and
+last lines. Currently c1 and l1 must be less than c2 and l2
+respectively and no subsampling is allowed. This may be added later.
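+
+As an illustrative example (hypothetical values consistent with the
+defaults used elsewhere in this package), a 132 x 100 readout of the full
+detector might carry
+
+	DATASEC = [1:100,1:100]
+	CCDSEC  = [1:100,1:100]
+	BIASSEC = [103:130,1:100]
+	TRIMSEC = [3:98,3:98]
+
+so that the columns beyond 100 hold the overscan data and only the trim
+section is retained when trimming is selected.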
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdgroups.hlp b/noao/imred/ccdred/doc/ccdgroups.hlp
new file mode 100644
index 00000000..48c29b99
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdgroups.hlp
@@ -0,0 +1,163 @@
+.help ccdgroups Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdgroups -- Group CCD images into image lists
+.ih
+USAGE
+ccdgroups images output
+.ih
+PARAMETERS
+.ls images
+List of CCD images to be grouped.
+.le
+.ls output
+Output root group filename. The image group lists will be put in files
+with this root name followed by the CCD type, subset, or a number.
+.le
+.ls group = "ccdtype"
+Group type. There are currently five grouping types:
+.ls ccdtype
+Group by CCD image type.
+.le
+.ls subset
+Group by subset parameter.
+.le
+.ls position
+Group by position in right ascension (in hours) and declination (in degrees).
+The groups are defined by a radius parameter (in arc seconds).
+.le
+.ls title
+Group by identical titles.
+.le
+.ls date
+Group by identical dates.
+.le
+.le
+.ls radius = 60.
+Grouping radius when grouping by positions. This is given in arc seconds.
+.le
+.ls ccdtype = ""
+CCD image types to select from the input image list. If null ("") then
+all image types are used.
+.le
+.ih
+DESCRIPTION
+The input images, possibly restricted to a particular CCD image type,
+are grouped into image lists. The "ccdtype" or "subset" groups
+produce output image lists with the given root name and the CCD type
+or subset as an extension (without a period). For the other group
+types the
+image lists have file names given by
+the root output name and a numeric extension (without a period).
+If the package parameter \fIccdred.verbose\fR is yes then the
+image name and output group list is printed for each image. The image lists can
+be used with the @ list feature for processing separate groups of observations.
+Note that grouping by CCD image type and subset is often not necessary since
+the \fBccdred\fR tasks automatically use this information (see
+\fBccdtypes\fR and \fBsubsets\fR).
+
+Besides CCD image type and subsets there are currently three ways to
+group images. These are by position in the sky, by title, and by
+date. Further groups may be added as suggested. The title grouping is
+useful if consistent titles are used when taking data. The date
+grouping is useful if multiple nights of observations are not organized
+by directories (it is recommended that data from separate nights be
+kept in separate directories). The position grouping finds
+observations within a given radius on the sky of the first member of
+the group (this is not a clustering algorithm). The right ascension
+and declination coordinates must be in standard units, hours and
+degrees respectively. The grouping radius is in arc seconds. This
+grouping type is useful for making sets of data in which separate
+calibration images are taken at each position.
+
+The date, title, and coordinates are accessed through the instrument
+translation file. The standard names used are "date-obs", "title", "ra",
+and "dec".
+.ih
+EXAMPLES
+1. For each object 5 exposures were taken to be combined in order to remove
+cosmic rays. If the titles are the same then (with ccdred.verbose=yes):
+
+.nf
+ cl> ccdgroups *.imh group group=title ccdtype=object
+ ccd005.imh --> group1
+ ccd006.imh --> group1
+ ccd007.imh --> group1
+ ccd008.imh --> group1
+ ccd009.imh --> group1
+ ccd012.imh --> group2
+ ccd013.imh --> group2
+ ccd014.imh --> group2
+ ccd015.imh --> group2
+ ccd016.imh --> group2
+ [... etc ...]
+ cl> combine @group1 obj1 proc+
+ cl> combine @group2 obj2 proc+
+ [... etc ...]
+.fi
+
+Note the numeric suffixes to the output root name "group".
+
+2. CCD observations were made in groups with a flat field, the object, and
+a comparison spectrum at each position. To group and process this data:
+
+.nf
+ cl> ccdgroups *.imh obs group=position >> logfile
+ cl> ccdproc @obs1
+ cl> ccdproc @obs2
+ cl> ccdproc @obs3
+.fi
+
+Since no flat field is specified for the parameter \fIccdproc.flat\fR
+the flat field is taken from the input image list.
+
+3. If for some reason you want to group by date and position it is possible
+to use two steps.
+
+.nf
+ cl> ccdgroups *.imh date group=date
+	cl> ccdgroups @date1 pos1 group=position
+	cl> ccdgroups @date2 pos2 group=position
+.fi
+
+4. To get groups by CCD image type:
+
+.nf
+ cl> ccdgroups *.imh "" group=ccdtype
+ ccd005.imh --> zero
+ ccd006.imh --> zero
+ ccd007.imh --> zero
+ ccd008.imh --> dark
+ ccd009.imh --> flat
+ ccd012.imh --> flat
+ ccd013.imh --> object
+ ccd014.imh --> object
+ ccd015.imh --> object
+ ccd016.imh --> object
+ [... etc ...]
+.fi
+
+Note the use of a null root name; the extensions are the standard
+CCDRED types (not necessarily those used in the image header).
+
+5. To get groups by subset:
+
+.nf
+ cl> ccdgroups *.imh filt group=subset
+ ccd005.imh --> filt
+ ccd006.imh --> filtB
+ ccd007.imh --> filtB
+ ccd008.imh --> filtB
+ ccd009.imh --> filtV
+ ccd012.imh --> filtV
+ ccd013.imh --> filtV
+ ccd014.imh --> filtB
+ ccd015.imh --> filtB
+ ccd016.imh --> filtB
+ [... etc ...]
+.fi
+
+.ih
+SEE ALSO
+ccdlist, ccdtypes, instruments, subsets
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdhedit.hlp b/noao/imred/ccdred/doc/ccdhedit.hlp
new file mode 100644
index 00000000..1bc27d29
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdhedit.hlp
@@ -0,0 +1,108 @@
+.help ccdhedit Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdhedit -- CCD image header editor
+.ih
+USAGE
+ccdhedit images parameter value
+.ih
+PARAMETERS
+.ls images
+List of CCD images to be edited.
+.le
+.ls parameter
+Image header parameter. The image header parameter will be translated by
+the header translation file for the images.
+.le
+.ls value
+The parameter value. If the null string ("") is specified then the
+parameter is deleted from the image header, otherwise it is added or
+modified. If the parameter is "imagetyp" then the value string giving
+the CCD image type is translated from the package CCD type to the
+instrument specific string.
+.le
+.ls type = "string"
+The parameter type. The parameter types are "string", "real", or "integer".
+.le
+.ih
+DESCRIPTION
+The image headers of the specified CCD images are edited to add, modify,
+or delete a parameter. The parameters may be those used by the \fBccdred\fR
+package. The parameter name is translated to an image header parameter by the
+instrument translation file (see \fBinstruments\fR) if a translation is
+given. Otherwise the parameter is that in the image header. If the parameter
+is "imagetyp" the parameter value for the CCD image type may be that
+used by the package; i.e. dark, object, flat, etc. The value string will be
+translated to the instrument image string in this case. The translation
+facility allows use of this task in an instrument independent way.
+
+The value string is used to determine whether to delete or modify the
+image parameter. If the null string, "", is given the specified parameter
+is deleted. If parameters are added the header type must be specified
+as a string, real, or integer parameter. The numeric types convert the
+value string to a number.
+.ih
+EXAMPLES
+The \fBccdred\fR package is usable even with little image header information.
+However, if desired the header information can be added to images which
+lack it. In all the examples the parameters used are those of the package
+and apply equally well to any image header format provided there is an
+instrument translation file.
+
+.nf
+1. cl> ccdhedit obj* imagetyp object
+2. cl> ccdhedit flat* imagetyp flat
+3. cl> ccdhedit zero* imagetyp zero
+4. cl> ccdhedit obj0![1-3]* subset "V filter"
+5. cl> ccdhedit obj0![45]* subset "R filter"
+6. cl> ccdhedit flat001 subset "R filter"
+7. cl> ccdhedit obj* exptime 500 type=integer
+.fi
+
+8. The following is an example of a CL script which sets the CCD image type,
+the subset, and the exposure time simultaneously. The user may expand
+on this example to include other parameters or other initialization
+operations.
+
+.nf
+ cl> edit ccdheader.cl
+
+ ----------------------------------------------------------------
+ # Program to set CCD header parameters.
+
+ procedure ccdheader (images)
+
+ string images {prompt="CCD images"}
+ string imagetyp {prompt="CCD image type"}
+ string subset {prompt="CCD subset"}
+ string exptime {prompt="CCD exposure time"}
+
+ begin
+ string ims
+
+ ims = images
+ ccdhedit (ims, "imagetyp", imagetyp, type="string")
+ ccdhedit (ims, "subset", subset, type="string")
+ ccdhedit (ims, "exptime", exptime, type="real")
+ end
+ ----------------------------------------------------------------
+
+ cl> task ccdheader=ccdheader.cl
+ cl> ccdheader obj* imagetyp=object subset="V" exptime=500
+.fi
+
+9. The image header may be changed to force processing a calibration image
+as an object. For example to flatten a flat field:
+
+.nf
+ cl> ccdhedit testflat imagetyp other
+ cl> ccdproc testflat
+.fi
+
+10. To delete processing flags:
+
+ cl> ccdhedit obj042 flatcor ""
+.ih
+SEE ALSO
+hedit, instruments, ccdtypes, subsets
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdinst.hlp b/noao/imred/ccdred/doc/ccdinst.hlp
new file mode 100644
index 00000000..ea90f4a7
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdinst.hlp
@@ -0,0 +1,391 @@
+.help ccdinstrument Dec93 noao.imred.ccdred
+.ih
+NAME
+ccdinstrument -- Setup and verify CCD instrument translation files
+.ih
+USAGE
+ccdinstrument images
+.ih
+PARAMETERS
+.ls images
+List of images to be verified or used to set up a CCD instrument translation
+file.
+.le
+.ls instrument = ")_.instrument"
+CCD instrument translation file. The default is to use the translation
+file defined in the \fBccdred\fR package parameters. Note that one would
+need write permission to update this file though the task has a write
+command to save any changes to a different file.
+.le
+.ls ssfile = ")_.ssfile"
+Subset translation file. The default is to use the file defined in
+the \fBccdred\fR package parameters.
+.le
+.ls edit = yes
+Edit the instrument translation file? If "yes" an interactive
+mode is entered allowing translation parameters to be modified while if
+"no" the task is simply used to verify the translations noninteractively.
+.le
+.ls parameters = "basic"
+Parameters to be displayed. The choices are "basic" to display only the
+most basic parameters (those needed for the simplest automation of
+\fBccdred\fR tasks), "common" to display the common parameters used
+by the package (most of these are keywords to be written to the image
+rather than translated), and "all" to display all the parameters
+referenced by the package including the most obscure. For most uses
+the "basic" set is all that is important and the other options are
+included for completeness.
+.le
+.ih
+DESCRIPTION
+The purpose of this task is to provide an interface to simplify setting
+up CCD instrument translation files and to verify the translations
+for a set of images. Before this task was written users who needed to
+set up translation files for new instruments and observatories had
+to directly create the files with an editor. Many people encountered
+difficulties and were prone to errors. Also there was no task that
+directly verified the translations though \fBccdlist\fR provided some
+clues.
+
+The \fBccdred\fR package was designed to make intelligent use of
+information in image headers for determining things such as image
+calibration or object type and exposure times. While the package may
+be used without this capability it is much more convenient to be
+able to use information from the image. The package was also intended
+to be used with many different instruments, detectors, and observatories.
+The key to providing image header access across different observatories
+is the ability to translate the needs of the package to the appropriate
+keywords in the image header. This is done through a file called
+an "instrument translation file". For a complete description of
+this file and other instrument setup features of the package see
+\fBccdred.instruments\fR.
+
+The instrument translation file translates the parameter names used by
+the \fBccdred\fR package into image specific parameters and also
+supplies default values for parameters. The translation proceeds as
+follows. When a package task needs a parameter for an image, for
+example "imagetyp", it looks in the instrument translation file. If
+the file is not found or none is specified then the image header
+keyword that is requested is assumed to have the same name. If an
+instrument translation file is defined then the requested parameter is
+translated to an image header keyword, provided a translation entry is
+given. If no translation is given the package name is used. For
+example the package parameter "imagetyp" might be translated to
+"data-typ" (the old NOAO CCD keyword). If the parameter is not found
+then the default value specified in the translation file, if present,
+is returned.
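+
+Schematically (an illustrative sketch; the exact file syntax is described
+in \fBccdred.instruments\fR), the entries of such a file simply pair a
+package parameter name with the corresponding image keyword, optionally
+followed by a default value:
+
+.nf
+	imagetyp	data-typ
+	subset		f1pos
+	exptime		otime
+	darktime	ttime
+.fi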
+
+For recording parameter information in the header, such
+as processing flags, translation is also used. For example, if the
+flag specifying that the image has been corrected by a flat field is to
+be set then the package parameter name "flatcor" might be translated to
+"ff-flag". If no translation is given then the new image header
+parameter is entered as "flatcor".
+
+The CCD image type requires a second level of translation also defined
+in the translation file. Once the package parameter which identifies the
+type of CCD image, for example a flat field or object, is translated
+to an image keyword, the specific
+string value of that keyword must be translated to one of the CCD image
+types used by the package. The translation works in the same way: the
+specific string found is translated to the \fBccdred\fR type and returned
+to the task. This translation is tricky in that the exact string,
+including all spaces and capitalization, must be correctly defined
+in the translation file. The \fBccdinstrument\fR task allows doing
+this automatically, thus minimizing typing errors.
+
+The basic display format of the task is a table of five columns
+giving the parameter name used by the package, the image keyword
+to which it is translated, the default value (if any), the value
+the task will receive for the current image after translation,
+and the actual keyword value in the image. A "?" is printed if
+a value cannot be determined. The idea of the task is to make sure
+that the value a \fBccdred\fR task sees is the correct one and if not
+to modify the translation appropriately. In verify mode when the
+\fBedit\fR parameter is not set the translation table is simply
+printed for each input image.
+
+In edit mode the user interactively gives commands at the ccdinstrument
+prompt to display or modify keywords. The modifications can then be
+written to the instrument file or saved in a private copy. The
+list of commands is shown below and may be printed using ? or help.
+
+.in 4
+.nf
+ CCDINSTRUMENT COMMANDS
+
+? Print command summary
+help Print command summary
+imheader Page image header
+instrument Print current instrument translation file
+next Next image
+newimage Select a new image
+quit Quit
+read Read instrument translation file
+show Show current translations
+write Write instrument translation file
+
+translate Translate image string selected by the imagetyp
+ parameter to one of the CCDRED types given as an
+ argument or queried:
+ object, zero, dark, flat, comp, illum, fringe, other
+
+.fi
+The following are CCDRED parameters which may be translated. You are
+queried for the image keyword to use or it may be typed after the command.
+An optional default value (returned if the image does not contain the
+keyword) may be typed as the second argument of the command.
+.nf
+
+ BASIC PARAMETERS
+imagetyp Image type parameter (see also translate)
+subset Subset or filter parameter
+exptime Exposure time
+darktime Dark time (may be same as the exposure time)
+.fi
+.in -4
+
+The commands may be followed by values such as file names for some of
+the general commands or the keyword and default value for the parameters
+to be translated. Note this is the only way to specify a default value.
+If no arguments are given the user is prompted with the current value
+which may then be changed.
+
+The parameters shown above are only those considered "basic".
+In order to avoid confusion the task can limit the set of parameters
+displayed. Without going into great detail, it is only the basic
+parameters which are generally required to have valid translations to
+allow the package to work well. However, for completeness, and if someone
+wants to go wild with translations, further parameters may be displayed
+and changed. The parameters displayed is controlled by the \fIparameters\fR
+keyword. The additional parameters not shown above are:
+
+.in 4
+.nf
+ USEFUL DEFAULT GEOMETRY PARAMETERS
+biassec Bias section (often has a default value)
+trimsec Trim section (often has a default value)
+
+ COMMON PROCESSING FLAGS
+fixpix Bad pixel replacement flag
+overscan Overscan correction flag
+trim Trim flag
+zerocor Zero level correction flag
+darkcor Dark count correction flag
+flatcor Flat field correction flag
+
+ RARELY TRANSLATED PARAMETERS
+ccdsec CCD section
+datasec Data section
+fixfile Bad pixel file
+
+fringcor Fringe correction flag
+illumcor	Illumination correction flag
+readcor One dimensional zero level read out correction
+scancor Scan mode correction flag
+nscanrow Number of scan rows
+
+illumflt	Illumination flat image
+mkfringe Fringe image
+mkillum	Illumination image
+skyflat Sky flat image
+
+ccdmean Mean value
+ccdmeant Mean value compute time
+fringscl Fringe scale factor
+ncombine Number of images combined
+date-obs Date of observations
+dec Declination
+ra Right Ascension
+title Image title
+.fi
+.in -4
+.ih
+EXAMPLES
+1. To verify the translations for a set of images using the default
+translation file:
+
+.nf
+ cl> setinst "" review-
+ cl> ccdinst dev$pix edit-
+ Image: dev$pix
+ Instrument file:
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ --------------------------------
+ imagetyp imagetyp none ?
+ subset subset ?
+ exptime exptime ? ?
+ darktime darktime ? ?
+
+ cl> setinst "" site=kpno dir=ccddb$ review-
+ cl> ccdinst dev$pix edit-
+ Image: dev$pix
+
+ Instrument file: ccddb$kpno/camera.dat
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ --------------------------------
+ imagetyp data-typ object OBJECT (0)
+ subset f1pos 2 2
+ exptime otime 600 600
+ darktime ttime 600 600
+.fi
+
+2. Set up an instrument translation file from scratch.
+
+.nf
+ ccdinst ech???.imh instr=myccd edit+
+ Warning: OPEN: File does not exist (myccd)
+ Image: ech001.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp imagetyp none ?
+ subset subset ?
+ exptime exptime ? ?
+ darktime darktime ? ?
+
+ ccdinstrument> imagetyp
+ Image keyword for image type (imagetyp): ccdtype
+ imagetyp ccdtype unknown BIAS
+ ccdinstrument> translate
+ CCDRED image type for 'BIAS' (unknown): zero
+ imagetyp ccdtype zero BIAS
+ ccdinstrument> subset
+ Image keyword for subset parameter (subset): filters
+ subset filters 1 1 0
+ ccdinstrument> exptime integ
+ exptime integ 0. 0.
+ ccdinstrument> darktime integ
+ darktime integ 0. 0.
+ ccdinstrument> show
+ Image: ech001.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype zero BIAS
+ subset filters 1 1 0
+ exptime integ 0. 0.
+ darktime integ 0. 0.
+
+ ccdinstrument> next
+ Image: ech002.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown PROJECTOR FLAT
+ subset filters 1 1 0
+ exptime integ 20. 20.
+ darktime integ 20. 20.
+
+ ccdinstrument> trans
+ CCDRED image type for 'PROJECTOR FLAT' (unknown): flat
+ imagetyp ccdtype flat PROJECTOR FLAT
+ ccdinstrument> next
+ Image: ech003.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown COMPARISON
+ subset filters 1 1 0
+ exptime integ 300 300
+ darktime integ 300 300
+
+ ccdinstrument> translate comp
+ imagetyp ccdtype comp COMPARISON
+ ccdinstrument> next
+ Image: ech004.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown OBJECT
+ subset filters 1 1 0
+ exptime integ 3600 3600
+ darktime integ 3600 3600
+
+ ccdinstrument> translate object
+ imagetyp ccdtype object OBJECT
+ ccdinstrument> inst
+ imagetyp ccdtype
+ BIAS zero
+ subset filters
+ exptime integ
+ darktime integ
+ 'PROJECTOR FLAT' flat
+ COMPARISON comp
+ OBJECT object
+
+ ccdinstrument> next
+ Update instrument file myccd (yes)?
+.fi
+
+3. Set default geometry parameters. Note that to set a default the
+arguments must be on the command line.
+
+.nf
+ cc> ccdinst ech001 instr=myccd param=common edit+
+ Image: ech001
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype zero BIAS
+ subset filters 1 1 0
+ exptime integ 0. 0.
+ darktime integ 0. 0.
+
+ biassec biassec ? ?
+ trimsec trimsec ? ?
+
+ fixpix fixpix no ?
+ overscan overscan no ?
+ trim trim no ?
+ zerocor zerocor no ?
+ darkcor darkcor no ?
+ flatcor flatcor no ?
+
+ ccdinstrument> biassec biassec [803:830,*]
+ biassec biassec [803:830,*] [803:830,*] ?
+ ccdinstrument> trimsec trimsec [2:798,2:798]
+ trimsec trimsec [2:798,2:798] [2:798,2:798] ?
+ ccdinstrument> instr
+ trimsec trimsec [2:798,2:798]
+ biassec biassec [803:830,*]
+ imagetyp ccdtype
+ BIAS zero
+ subset filters
+ exptime integ
+ darktime integ
+ 'PROJECTOR FLAT' flat
+ COMPARISON comp
+ OBJECT object
+
+ ccdinstrument> q
+ Update instrument file myccd (yes)?
+.fi
+.ih
+SEE ALSO
+instruments, setinstrument
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdlist.hlp b/noao/imred/ccdred/doc/ccdlist.hlp
new file mode 100644
index 00000000..9ce7dfdd
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdlist.hlp
@@ -0,0 +1,133 @@
+.help ccdlist Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdlist -- List CCD processing information
+.ih
+USAGE
+ccdlist images
+.ih
+PARAMETERS
+.ls images
+CCD images to be listed. A subset of these may be selected using the
+CCD image type parameter.
+.le
+.ls ccdtype = ""
+CCD image type to be listed. If no type is specified then all the images
+are listed. If an image type is specified then only images
+of that type are listed. See \fBccdtypes\fR for a list of the package
+image types.
+.le
+.ls names = no
+List the image names only? Used with the CCD image type parameter to make
+a list of the images of the specified type.
+.le
+.ls long = no
+Long format listing? The images are listed in a long format containing some
+image parameters and the processing history.
+.le
+.ls ccdproc (pset)
+CCD processing parameter set.
+.le
+.ih
+DESCRIPTION
+Information from the specified input images is listed on the standard
+output. A specific CCD image type may be selected from the input
+images by the parameter \fIccdtype\fR. There are three list formats;
+the default one line per image format, an image name only format, and a
+multi-line long format. The default one line format consists of the
+image name, image size, image pixel type, CCD image type, subset ID (if
+defined), processing flags, and title. This format contains the same
+information as that produced by \fBimheader\fR as well as CCD specific
+information. The processing flags identifying the processing operations
+performed on the image are given by the following single letter codes.
+
+.nf
+ B - Bad pixel replacement
+ O - Overscan bias subtraction
+ T - Trimming
+ Z - Zero level subtraction
+ D - Dark count subtraction
+ F - Flat field calibration
+	I - Illumination correction
+ Q - Fringe correction
+.fi
+
+The long format has the same first line as the default format plus additional
+instrument information such as the exposure time and the full processing
+history. In addition to listing the completed processing, the operations
+not yet done (as specified by the \fBccdproc\fR parameters) are also
+listed.
+
+The image name only format is intended to be used to generate lists of
+images of the same CCD image type. These lists may be used as "@" file
+lists in IRAF tasks.
+.ih
+EXAMPLES
+1. To list the default format for all images:
+
+.nf
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 6v+blue 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 6v+blue 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 6v+blue 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.fi
+
+These images have not been processed.
+
+2. To restrict the listing to just the object images:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.fi
+
+3. The long list for image "ccd007" is obtained by:
+
+.nf
+ cl> ccdlist ccd007 l+
+ ccd007[544,512][short][object][V]:N2968 R 600s
+ exptime = 200. darktime = 200.
+ [TO BE DONE] Overscan strip is [520:540,*]
+ [TO BE DONE] Trim image section is [3:510,3:510]
+ [TO BE DONE] Flat field correction
+.fi
+
+4. After processing the images have the short listing:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R][OTF]:N4036 R 600s
+.fi
+
+The processing indicated is overscan subtraction, trimming, and flat fielding.
+
+5. The long listing for "ccd007" after processing is:
+
+.nf
+ cl> ccdlist ccd007 l+
+ ccd007[508,508][real][object][V][OTF]:N2968 R 600s
+ exptime = 200. darktime = 200.
+ Jun 2 18:18 Overscan section is [520:540,*] with mean=481.8784
+ Jun 2 18:18 Trim data section is [3:510,3:510]
+ Jun 2 18:19 Flat field image is FlatV.imh with scale=138.2713
+.fi
+
+6. To make a list file containing all the flat field images:
+
+ cl> ccdlist *.imh ccdtype=flat name+ > flats
+
+This file can be used as an @ file for processing.
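+
+For example (an illustrative sketch), the list could then be given to
+another task in the package:
+
+.nf
+	cl> ccdproc @flats ccdtype=flat
+.fi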
+.ih
+SEE ALSO
+ccdtypes ccdgroups
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdmask.hlp b/noao/imred/ccdred/doc/ccdmask.hlp
new file mode 100644
index 00000000..190ef016
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdmask.hlp
@@ -0,0 +1,138 @@
+.help ccdmask Jun96 noao.imred.ccdred
+.ih
+NAME
+ccdmask -- create a pixel mask from a CCD image
+.ih
+USAGE
+.nf
+ccdmask image mask
+.fi
+.ih
+PARAMETERS
+.ls image
+CCD image to use in defining bad pixels. Typically this is
+a flat field image or, even better, the ratio of two flat field
+images of different exposure levels.
+.le
+.ls mask
+Pixel mask name to be created. A pixel list image, .pl extension,
+is created so no extension is necessary.
+.le
+.ls ncmed = 7, nlmed = 7
+The column and line size of a moving median rectangle used to estimate the
+uncontaminated local signal. The column median size should be at least 3
+pixels to span single bad columns.
+.le
+.ls ncsig = 15, nlsig = 15
+The column and line size of regions used to estimate the uncontaminated
+local sigma using a percentile. The box should contain of order 100
+pixels or more.
+.le
+.ls lsigma = 6, hsigma = 6
+Positive sigma factors to use for selecting pixels below and above
+the median level based on the local percentile sigma.
+.le
+.ls ngood = 5
+Gaps of undetected pixels along the column direction of length less
+than this amount are also flagged as bad pixels.
+.le
+.ls linterp = 2
+Mask code for pixels having a bounding good pixel separation which is
+smaller along lines; i.e. to use line interpolation along the narrower
+dimension.
+.le
+.ls cinterp = 3
+Mask code for pixels having a bounding good pixel separation which is
+smaller along columns; i.e. to use column interpolation along the narrower
+dimension.
+.le
+.ls eqinterp = 2
+Mask code for pixels having a bounding good pixel separation which is
+equal along lines and columns.
+.le
+.ih
+DESCRIPTION
+\fBCcdmask\fR makes a pixel mask from pixels deviating by a specified
+statistical amount from the local median level. The input images may be of
+any type but this task was designed primarily for detecting column oriented
+CCD defects such as charge traps that cause bad columns and non-linear
+sensitivities. The ideal input is a ratio of two flat fields having
+different exposure levels so that all features which would normally flat
+field properly are removed and only pixels which are not corrected by flat
+fielding are found to make the pixel mask. A single flat field may also be
+used but pixels of low or high sensitivity may be included as well as true
+bad pixels.
+
+A moving box median is first subtracted from the input image. The median is
+unaffected by bad pixels provided the median size is larger than twice
+the size of a bad region. Thus, if 3 pixel wide bad columns are present
+then the column median box size should be at least 7 pixels. The median
+box can be a single pixel wide along one dimension if needed. This may be
+appropriate for spectroscopic long slit data.
+
+The median subtracted image is then divided into blocks of size
+\fIncsig\fR by \fInlsig\fR. In each block the pixel values are sorted and
+the pixels nearest the 30.9 and 69.1 percentile points are found; this
+would be the one sigma points in a Gaussian noise distribution. The
+difference between the two count levels divided by two is then the local
+sigma estimate. This algorithm is used to avoid contamination by the bad
+pixel values. The block size must be at least 10 pixels in each dimension
+to provide sufficient pixels for a good estimate of the percentile sigma. The
+sigma uncertainty estimate of each pixel in the image is then the sigma
+from the nearest block.
+
+The deviant pixels are found by comparing the median subtracted residual to
+a specified sigma threshold factor times the local sigma above and below
+zero (the \fIlsigma\fR and \fIhsigma\fR parameters). This is done for
+individual pixels and then for column sums of pixels (excluding previously
+flagged bad pixels) from two to the number of lines in the image. The sigma
+of the sums is scaled by the square root of the number of pixels summed so
+that statistically low or high column regions may be detected even though
+individual pixels may not be statistically deviant. For the purpose of
+this task one would normally select large sigma threshold factors such as
+six or greater to detect only true bad pixels and not the extremes of the
+noise distribution.
+
+As a final step each column is examined to see if there are small
+segments of unflagged pixels between bad pixels. If the length
+of a segment is less than that given by the \fIngood\fR parameter
+all the pixels in the segment are also marked as bad.
+
+The bad pixel mask is created with good pixels identified by zero values
+and the bad pixels by non-zero values.
+The nearest good pixels along the columns and lines for
+each bad pixel are located and the separation along the columns and lines
+between those pixels is computed. The smaller separation is used to select
+the mask value. If the smaller separation is along lines the \fIlinterp\fR
+value is set, if the smaller separation is along columns the \fIcinterp\fR
+value is set, and if the two are equal the \fIeqinterp\fR value is set.
+The purpose of this is to allow interpolating across bad pixels using the
+narrowest dimension. The task \fBfixpix\fR can select the type of pixel
+replacement to use for each mask value. So one can choose, for example,
+line interpolation for the linterp values and the eqinterp values, and
+column interpolation for the cinterp values.
+
+In addition to this task, pixel mask images may be made in a variety of
+ways. Any task which produces and modifies image values may be used. Some
+useful tasks are \fBimexpr, imreplace, imcopy, text2mask\fR and
+\fBmkpattern\fR. If a new image is specified with an explicit ".pl"
+extension then the pixel mask format is produced.
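+
+For example (the image names are hypothetical), an ordinary bad pixel
+image may be converted to the compact pixel list format simply by copying
+it to a ".pl" extension:
+
+.nf
+	cl> imcopy badpix badpix.pl
+.fi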
+.ih
+EXAMPLES
+1. Two flat fields of exposures 1 second and 3 seconds are taken,
+overscan and zero corrected, and trimmed. These are then used
+to generate a CCD mask.
+
+.nf
+ cl> imarith flat1 / flat2 ratio
+ cl> ccdmask ratio mask
+.fi
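+
+2. The resulting mask might then be used for bad pixel replacement, for
+example as the \fIfixfile\fR in \fBccdproc\fR (a sketch; the object image
+names are hypothetical):
+
+.nf
+	cl> ccdproc obj*.imh fixpix+ fixfile=mask
+.fi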
+.ih
+REVISIONS
+.ls CCDMASK V2.11
+This task is new.
+.le
+.ih
+SEE ALSO
+imreplace, imexpr, imcopy, imedit, fixpix, text2mask
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdproc.hlp b/noao/imred/ccdred/doc/ccdproc.hlp
new file mode 100644
index 00000000..26ec6d1d
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdproc.hlp
@@ -0,0 +1,825 @@
+.help ccdproc Dec93 noao.imred.ccdred
+.ih
+NAME
+ccdproc -- Process CCD images
+.ih
+USAGE
+ccdproc images
+.ih
+PARAMETERS
+.ls images
+List of input CCD images to process. The list may include processed
+images and calibration images.
+.le
+.ls output = ""
+List of output images. If no list is given then the processing will replace
+the input images with the processed images. If a list is given it must
+match the input image list. \fINote that any dependent calibration images
+will still be processed in place with optional backup.\fR
+.le
+.ls ccdtype = ""
+CCD image type to select from the input image list. If no type is given
+then all input images will be selected. The recognized types are described
+in \fBccdtypes\fR.
+.le
+.ls max_cache = 0
+Maximum image caching memory (in Mbytes). If there is sufficient memory
+the calibration images, such as zero level, dark count, and flat fields,
+will be cached in memory when processing many input images. This
+reduces the disk I/O and makes the task run a little faster. If the
+value is zero image caching is not used.
+.le
+.ls noproc = no
+List processing steps only?
+.le
+
+.ce
+PROCESSING SWITCHES
+.ls fixpix = yes
+Fix bad CCD lines and columns by linear interpolation from neighboring
+lines and columns? If yes then a bad pixel mask, image, or file must be
+specified.
+.le
+.ls overscan = yes
+Apply overscan or prescan bias correction? If yes then the overscan
+image section and the readout axis must be specified.
+.le
+.ls trim = yes
+Trim the image of the overscan region and bad edge lines and columns?
+If yes then the data section must be specified.
+.le
+.ls zerocor = yes
+Apply zero level correction? If yes a zero level image must be specified.
+.le
+.ls darkcor = yes
+Apply dark count correction? If yes a dark count image must be specified.
+.le
+.ls flatcor = yes
+Apply flat field correction? If yes flat field images must be specified.
+.le
+.ls illumcor = no
+Apply illumination correction? If yes illumination images must be specified.
+.le
+.ls fringecor = no
+Apply fringe correction? If yes fringe images must be specified.
+.le
+.ls readcor = no
+Convert zero level images to readout correction images? If yes then
+zero level images are averaged across the readout axis to form one
+dimensional zero level readout correction images.
+.le
+.ls scancor = no
+Convert zero level, dark count and flat field images to scan mode flat
+field images? If yes then the form of scan mode correction is specified by
+the parameter \fIscantype\fR.
+.le
+
+.ce
+PROCESSING PARAMETERS
+.ls readaxis = "line"
+Read out axis specified as "line" or "column".
+.le
+.ls fixfile
+Bad pixel mask, image, or file. If "image" is specified then the name is
+specified in the image header or instrument translation file. If "BPM" is
+specified then the standard BPM image header keyword defines a bad pixel
+mask. A bad pixel mask is in a compact format (".pl" extension) with zero
+values indicating good pixels and non-zero values indicating bad pixels. A
+bad pixel image is a regular image in which zero values are good pixels and
+non-zero values are bad pixels. A bad pixel file specifies bad pixels or
+rectangular bad pixel regions as described later. The direction of
+interpolation is determined by the mask value with a value of two
+interpolating across columns, a value of three interpolating across lines,
+and any other non-zero value interpolating along the narrowest dimension.
+.le
+.ls biassec
+Overscan bias strip image section. If "image" is specified then the overscan
+bias section is specified in the image header or instrument translation file.
+Only the part of the bias section along the readout axis is used. The
+length of the bias region fit is defined by the trim section. If one
+wants to limit the region of the overscan used in the fit to be less
+than that of the trim section then the sample region parameter,
+\fIsample\fR, should be used. It is an error if no section or the
+whole image is specified.
+.le
+.ls trimsec
+Image section for trimming. If "image" is specified then the trim
+image section is specified in the image header or instrument translation file.
+.le
+.ls zero = ""
+Zero level calibration image. The zero level image may be one or two
+dimensional. The CCD image type and subset are not checked for these
+images and they take precedence over any zero level calibration images
+given in the input list.
+.le
+.ls dark = ""
+Dark count calibration image. The CCD image type and subset are not checked
+for these images and they take precedence over any dark count calibration
+images given in the input list.
+.le
+.ls flat = ""
+Flat field calibration images. The flat field images may be one or
+two dimensional. The CCD image type is not checked for these
+images and they take precedence over any flat field calibration images given
+in the input list. The flat field image with the same subset as the
+input image being processed is selected.
+.le
+.ls illum = ""
+Illumination correction images. The CCD image type is not checked for these
+images and they take precedence over any illumination correction images given
+in the input list. The illumination image with the same subset as the
+input image being processed is selected.
+.le
+.ls fringe = ""
+Fringe correction images. The CCD image type is not checked for these
+images and they take precedence over any fringe correction images given
+in the input list. The fringe image with the same subset as the
+input image being processed is selected.
+.le
+.ls minreplace = 1.
+When processing flat fields, pixel values below this value (after
+all other processing such as overscan, zero, and dark corrections) are
+replaced by this value. This ensures that flat fields processed by
+\fBccdproc\fR avoid divide by zero problems when applied to object
+images.
+.le
+.ls scantype = "shortscan"
+Type of scan format used in creating the CCD images. The modes are:
+.ls "shortscan"
+The CCD is scanned over a number of lines and then read out as a regular
+two dimensional image. In this mode unscanned zero level, dark count and
+flat fields are numerically scanned to form scanned flat fields comparable
+to the observations.
+.le
+.ls "longscan"
+In this mode the CCD is clocked and read out continuously to form a long
+strip. Flat fields are averaged across the readout axis to
+form a one dimensional flat field readout correction image. This assumes
+that all recorded image lines are clocked over the entire active area of the
+CCD.
+.le
+.le
+.ls nscan
+Number of object scan readout lines used in short scan mode. This parameter
+is used when the scan type is "shortscan" and the number of scan lines
+cannot be determined from the object image header (using the keyword
+nscanrows or its translation).
+.le
+
+
+.ce
+OVERSCAN FITTING PARAMETERS
+
+There are two types of overscan (or prescan) determinations. One determines
+an independent overscan value for each line and is only available for a
+\fIreadaxis\fR of 1. The other averages the overscan along the readout
+direction to make an overscan vector, fits a smoothing function to the vector,
+and then evaluates the smoothed function at each readout
+line or column. The line-by-line determination only uses the
+\fIfunction\fR parameter while the smoothing determination uses all
+the following parameters.
+
+.ls function = "legendre"
+Line-by-line determination of the overscan is specified by:
+
+.nf
+ mean - the mean of the biassec columns at each line
+ median - the median of the biassec columns at each line
+ minmax - the mean at each line with the min and max excluded
+.fi
+
+The smoothed overscan vector may be fit by one of the functions:
+
+.nf
+ legendre - legendre polynomial
+ chebyshev - chebyshev polynomial
+ spline1 - linear spline
+ spline3 - cubic spline
+.fi
+.le
+.ls order = 1
+Number of polynomial terms or spline pieces in the overscan fit.
+.le
+.ls sample = "*"
+Sample points to use in the overscan fit. The string "*" specifies all
+points; otherwise an \fBicfit\fR range string is used.
+.le
+.ls naverage = 1
+Number of points to average or median to form fitting points. Positive
+numbers specify averages and negative numbers specify medians.
+.le
+.ls niterate = 1
+Number of rejection iterations to remove deviant points from the overscan fit.
+If 0 then no points are rejected.
+.le
+.ls low_reject = 3., high_reject = 3.
+Low and high sigma rejection factors for rejecting deviant points from the
+overscan fit.
+.le
+.ls grow = 0.
+One dimensional growing radius for rejection of neighbors to deviant points.
+.le
+.ls interactive = no
+Fit the overscan vector interactively? If yes and the overscan function type
+is one of the \fBicfit\fR types then the average overscan vector is fit
+interactively using the \fBicfit\fR package. If no then the fitting parameters
+given above are used.
+.le
+.ih
+DESCRIPTION
+\fBCcdproc\fR processes CCD images to correct and calibrate for
+detector defects, readout bias, zero level bias, dark counts,
+response, illumination, and fringing. It also trims unwanted
+lines and columns and changes the pixel datatype. It is efficient
+and easy to use; all one has to do is set the parameters and then
+begin processing the images. The task takes care of most of the
+record keeping and automatically does the prerequisite processing
+of calibration images. Beneath this simplicity there is much that
+is going on. In this section a simple description of the usage is
+given. The following sections present more detailed discussions
+on the different operations performed and the order and logic
+of the processing steps. For a user's guide to the \fBccdred\fR
+package see \fBguide\fR. Much of the ease of use derives from using
+information in the image header. If this information is missing
+see section 13.
+
+One begins by setting the task parameters. There are many parameters
+but they may be easily reviewed and modified using the task \fBeparam\fR.
+The input CCD images to be processed are given as an image list.
+Previously processed images are ignored and calibration images are
+recognized, provided the CCD image types are in the image header (see
+\fBinstruments\fR and \fBccdtypes\fR). Therefore it is permissible to
+use simple image templates such as "*.imh". The \fIccdtype\fR parameter
+may be used to select only certain types of CCD images to process
+(see \fBccdtypes\fR).
+
+The processing operations are selected by boolean (yes/no) parameters.
+Because calibration images are recognized and processed appropriately,
+the processing operations for object images should be set.
+Any combination of operations may be specified and the operations are
+performed simultaneously. While it is possible to do operations in
+separate steps this is much less efficient. Two of the operation
+parameters apply only to zero level and flat field images. These
+are used for certain types of CCDs and modes of operation.
+
+The processing steps selected have related parameters which must be
+set. These are things like image sections defining the overscan and
+trim regions and calibration images. There are a number of parameters
+used for fitting the overscan or prescan bias section. These are
+parameters used by the standard IRAF curve fitting package \fBicfit\fR.
+The parameters are described in more detail in the following sections.
+
+In addition to the task parameters there are package parameters
+which affect \fBccdproc\fR. These include the instrument and subset
+files, the text and plot log files, the output pixel datatype,
+the amount of memory available for calibration image caching,
+the verbose parameter for logging to the terminal, and the backup
+prefix. These are described in \fBccdred\fR.
+
+Calibration images are specified by task parameters and/or in the
+input image list. If more than one calibration image is specified
+then the first one encountered is used and a warning is issued for the
+extra images. Calibration images specified by
+task parameters take precedence over calibration images in the input list.
+These images also need not have a CCD image type parameter since the task
+parameter identifies the type of calibration image. This method is
+best if there is only one calibration image for all images
+to be processed. This is almost always true for zero level and dark
+count images. If no calibration image is specified by task parameter
+then calibration images in the input image list are identified and
+used. This requires that the images have CCD image types recognized
+by the package. This method is useful if one may simply say "*.imh"
+as the image list to process all images or if the images are broken
+up into groups, in "@" files for example, each with their own calibration
+frames.
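+
+As an illustrative sketch of the two styles (the image names are arbitrary):
+
+.nf
+	cl> ccdproc obj*.imh zero=Zero dark=Dark flat="Flat*.imh"
+	cl> ccdproc *.imh
+.fi
+
+In the first case the calibration images are named explicitly by task
+parameters; in the second they are expected to appear in the input list
+and are identified by their CCD image types.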
+
+When an input image is processed the task first determines the processing
+parameters and calibration images. If a requested operation has been
+done it is skipped and if all requested operations have been completed then
+no processing takes place. When it determines that a calibration image
+is required it checks for the image from the task parameter and then
+for a calibration image of the proper type in the input list.
+
+Having
+selected a calibration image it checks if it has been processed for
+all the operations selected by the CCDPROC parameters.
+After the calibration images have been identified, and processed if
+necessary, the images may be cached in memory. This is done when there
+are more than two input images (it is actually less efficient to
+cache the calibration images for one or two input images) and the parameter
+\fImax_cache\fR is greater than zero. When caching, as many calibration
+images as allowed by the specified memory are read into memory and
+kept there for all the input images. Cached images are, therefore,
+only read once from disk which reduces the amount of disk I/O. This
+makes a modest decrease in the execution time. It is not dramatic
+because the actual processing is fairly CPU intensive.
+
+Once the processing parameters and calibration images have been determined
+the input image is processed for all the desired operations in one step;
+i.e. there are no intermediate results or images. This makes the task
+efficient. If a matching list of output images is given then the processed
+image is written to the specified output image name. If no output image
+list is given then the corrected image is output as a temporary image until
+the entire image has been processed. When the image has been completely
+processed then the original image is deleted (or renamed using the
+specified backup prefix) and the corrected image replaces the original
+image. Using a temporary image protects the data in the event of an abort
+or computer failure. Keeping the original image name eliminates much of
+the record keeping and the need to generate new image names.
+.sh
+1. Fixpix
+Regions of bad lines and columns may be replaced by linear
+interpolation from neighboring lines and columns when the parameter
+\fIfixpix\fR is set. This algorithm is the same as used in the
+task \fBfixpix\fR. The bad pixels may be specified by a pixel mask,
+an image, or a text file. For the mask or image, values of zero indicate
+good pixels and other values indicate bad pixels to be replaced.
+
+The text file consists of lines with four fields, the starting and
+ending columns and the starting and ending lines. Any number of
+regions may be specified. Comment lines beginning with the character
+'#' may be included. The description applies directly to the input
+image (before trimming) so different files are needed for previously
+trimmed or subsection readouts. The data in this file is internally
+turned into the same description as a bad pixel mask with values of
+two for regions which are narrower or equal across the columns and
+a value of three for regions narrower across lines.
+
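+A minimal example of such a file (the regions are hypothetical) is
+
+.nf
+	# two bad columns and a small bad region
+	25   25    1  512
+	178 178    1  512
+	301 304   98  101
+.fi
+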
+The direction of interpolation is determined from the values in the
+mask, image, or the converted text file. A value of two interpolates
+across columns, a value of three interpolates across lines, and any
+other value interpolates across the narrowest dimension of bad pixels,
+with column interpolation used if the two dimensions are equal.
+
+The bad pixel description may be specified explicitly with the parameter
+\fIfixfile\fR or indirectly if the parameter has the value "image". In the
+latter case the instrument file must contain the name of the file.
+.sh
+2. Overscan
+If an overscan or prescan correction is specified (\fIoverscan\fR
+parameter) then the image section (\fIbiassec\fR parameter) defines
+the overscan region.
+
+There are two types of overscan (or prescan) determinations. One determines
+an independent overscan value for each line and is only available for a
+\fIreadaxis\fR of 1. The other averages the overscan along the readout
+direction to make an overscan vector, fits a smoothing function to the vector,
+and then evaluates the smoothed function at each readout
+line or column.
+
+The line-by-line determination provides a mean, median, or
+mean with the minimum and maximum values excluded. When the number of
+overscan columns is even the median is taken as the lower of the two
+middle values rather than their mean.
+
+The smoothed overscan vector determination uses the \fBicfit\fR options
+including interactive fitting. The fitting function is generally either a
+constant (polynomial of 1 term) or a high order function which fits the
+large scale shape of the overscan vector. Bad pixel rejection is also
+available to eliminate cosmic ray events. The function fitting may be done
+interactively using the standard \fBicfit\fR interactive graphical curve
+fitting tool. Regardless of whether the fit is done interactively, the
+overscan vector and the fit may be recorded for later review in a metacode
+plot file named by the parameter \fIccdred.plotfile\fR. The mean value of
+the bias function is also recorded in the image header and log file.
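+
+For example (the bias section is hypothetical), a noninteractive constant
+fit to the smoothed overscan vector might be selected with
+
+.nf
+	cl> ccdproc obj*.imh overscan+ biassec="[803:830,*]" order=1 interactive-
+.fi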
+.sh
+3. Trim
+When the parameter \fItrim\fR is set the input image will be trimmed to
+the image section given by the parameter \fItrimsec\fR. This trim
+should, of course, be the same as that used for the calibration images.
+.sh
+4. Zerocor
+After the readout bias is subtracted, as defined by the overscan or prescan
+region, there may still be a zero level bias. This level may be two
+dimensional or one dimensional (the same for every readout line). A
+zero level calibration is obtained by taking zero length exposures;
+generally many are taken and combined. To apply this zero
+level calibration the parameter \fIzerocor\fR is set. In addition if
+the zero level bias is only readout dependent then the parameter \fIreadcor\fR
+is set to reduce two dimensional zero level images to one dimensional
+images. The zero level images may be specified by the parameter \fIzero\fR
+or given in the input image list (provided the CCD image type is defined).
+
+When the zero level image is needed to correct an input image it is checked
+to see if it has been processed and, if not, it is processed automatically.
+Processing of zero level images consists of bad pixel replacement,
+overscan correction, trimming, and averaging to one dimension if the
+readout correction is specified.
+.sh
+5. Darkcor
+Dark counts are subtracted by scaling a dark count calibration image to
+the same exposure time as the input image and subtracting. The
+exposure time used is the dark time which may be different than the
+actual integration or exposure time. A dark count calibration image is
+obtained by taking a very long exposure with the shutter closed; i.e.
+an exposure with no light reaching the detector. The dark count
+correction is selected with the parameter \fIdarkcor\fR and the dark
+count calibration image is specified either with the parameter
+\fIdark\fR or as one of the input images. The dark count image is
+automatically processed as needed. Processing of dark count images
+consists of bad pixel replacement, overscan and zero level correction,
+and trimming.
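+
+As a concrete (hypothetical) illustration of the scaling: if the input image
+has a dark time of 600 seconds and the dark count calibration image has a
+dark time of 3600 seconds, then the dark count image is multiplied by
+600 / 3600 = 1/6 before it is subtracted.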
+.sh
+6. Flatcor
+The relative detector pixel response is calibrated by dividing by a
+scaled flat field calibration image. A flat field image is obtained by
+exposure to a spatially uniform source of light such as a lamp or
+twilight sky. Flat field images may be corrected for the spectral
+signature in spectroscopic images (see \fBresponse\fR and
+\fBapnormalize\fR), or for illumination effects (see \fBmkillumflat\fR
+or \fBmkskyflat\fR). For more on flat fields and illumination corrections
+see \fBflatfields\fR. The flat field response is dependent on the
+wavelength of light so if different filters or spectroscopic wavelength
+coverage are used a flat field calibration for each one is required.
+The different flat fields are automatically selected by a subset
+parameter (see \fBsubsets\fR).
+
+Flat field calibration is selected with the parameter \fBflatcor\fR
+and the flat field images are specified with the parameter \fBflat\fR
+or as part of the input image list. The appropriate subset is automatically
+selected for each input image processed. The flat field image is
+automatically processed as needed. Processing consists of bad pixel
+replacement, overscan subtraction, zero level subtraction, dark count
+subtraction, and trimming. Also if a scan mode is used and the
+parameter \fIscancor\fR is specified then a scan mode correction is
+applied (see below). The processing also computes the mean of the
+flat field image which is used later to scale the flat field before
+division into the input image. For scan mode flat fields the ramp
+part is included in computing the mean which will affect the level
+of images processed with this flat field. Note that there is no check for
+division by zero in the interest of efficiency. If division by zero
+does occur a fatal error results. The flat field can be fixed by
+replacing small values using a task such as \fBimreplace\fR or
+during processing using the \fIminreplace\fR parameter. Note that the
+\fIminreplace\fR parameter only applies to flat fields processed by
+\fBccdproc\fR.
+.sh
+7. Illumcor
+CCD images processed through the flat field calibration may not be
+completely flat (in the absence of objects). In particular, a blank
+sky image may still show gradients. This residual nonflatness is called
+the illumination pattern. It may be introduced even if the detector is
+uniformly illuminated by the sky because the flat field lamp
+illumination may be nonuniform. The illumination pattern is found from a
+blank sky, or even an object image, by heavily smoothing and rejecting
+objects using sigma clipping. The illumination calibration image is
+divided into the data being processed to remove the illumination
+pattern. The illumination pattern is a function of the subset so there
+must be an illumination correction image for each subset to be
+processed. The tasks \fBmkillumcor\fR and \fBmkskycor\fR are used to
+create the illumination correction images. For more on illumination
+corrections see \fBflatfields\fR.
+
+An alternative to treating the illumination correction as a separate
+operation is to combine the flat field and illumination correction
+into a corrected flat field image before processing the object
+images. This will save some processing time but does require creating
+the flat field first rather than correcting the images at the same
+time or later. There are two methods, removing the large scale
+shape of the flat field and combining a blank sky image illumination
+with the flat field. These methods are discussed further in the
+tasks which create them; \fBmkillumcor\fR and \fBmkskycor\fR.
+.sh
+8. Fringecor
+There may be a fringe pattern in the images due to the night sky lines.
+To remove this fringe pattern a blank sky image is heavily smoothed
+to produce an illumination image which is then subtracted from the
+original sky image. The residual fringe pattern is scaled to the
+exposure time of the image to be fringe corrected and then subtracted.
+Because the intensity of the night sky lines varies with time an
+additional scaling factor may be given in the image header.
+The fringe pattern is a function of the subset so there must be
+a fringe correction image for each subset to be processed.
+The task \fBmkfringecor\fR is used to create the fringe correction images.
+.sh
+9. Readcor
+If a zero level correction is desired (\fIzerocor\fR parameter)
+and the parameter \fIreadcor\fR is yes then a single zero level
+correction vector is applied to each readout line or column. Use of a
+readout correction rather than a two dimensional zero level image
+depends on the nature of the detector or if the CCD is operated in
+longscan mode (see below). The readout correction is specified by a
+one dimensional image (\fIzero\fR parameter) and the readout axis
+(\fIreadaxis\fR parameter). If the zero level image is two dimensional
+then it is automatically processed to a one dimensional image by
+averaging across the readout axis. Note that this modifies the zero
+level calibration image.
+.sh
+10. Scancor
+CCD detectors may be operated in several modes in astronomical
+applications. The most common is as a direct imager where each pixel
+integrates one point in the sky or spectrum. However, the design of most CCDs
+allows the sky to be scanned across the CCD while shifting the
+accumulating signal at the same rate. \fBCcdproc\fR provides for two
+scanning modes called "shortscan" and "longscan". The type of scan
+mode is set with the parameter \fIscantype\fR.
+
+In "shortscan" mode the detector is scanned over a specified number of
+lines (not necessarily at sidereal rates). The lines that scroll off the
+detector during the integration are thrown away. At the end of the
+integration the detector is read out in the same way as an unscanned
+observation. The advantage of this mode is that the small scale, zero
+level, dark count and flat field responses are averaged in one dimension
+over the number of lines scanned. A zero level, dark count or flat field may be
+observed in the same way in which case there is no difference in the
+processing from unscanned imaging and the parameter \fIscancor\fR may be
+no. If it is yes, though, checking is done to ensure that the calibration
+image used has the same number of scan lines as the object being
+processed. However, one obtains an increase in the statistical accuracy of
+the calibrations if they are not scanned during the observation but are
+digitally scanned during the processing. In shortscan mode with
+\fIscancor\fR set to yes, zero level, dark count and flat field images are
+digitally scanned, if needed, by the same number of scan lines as the
+object. The number of scan lines is determined from the object image
+header using the keyword nscanrow (or its translation). If not found the
+object is assumed to have been scanned with the value given by the
+\fInscan\fR parameter. Zero, dark and flat calibration images are assumed
+to be unscanned if the header keyword is not found.
+
+If a scanned zero level, dark count or flat field image is not found
+matching the object then one may be created from the unscanned calibration
+image. The image will have the root name of the unscanned image with an
+extension of the number of scan rows; i.e. Flat1.32 is created from Flat1
+with a digital scanning of 32 lines.
+
+In "longscan" mode the detector is continuously read out to produce an
+arbitrarily long strip. Provided data which has not passed over the entire
+detector is thrown away, the zero level, dark count, and flat field
+corrections will be one dimensional. If \fIscancor\fR is specified and the
+scan mode is "longscan" then a one dimensional zero level, dark count, and
+flat field correction will be applied.
+.sh
+11. Processing Steps
+The following describes the steps taken by the task. This outline
+provides the most detailed specification of the task.
+
+.ls 5 (1)
+An image to be processed is first checked to verify that it is of the
+specified CCD image type. If it is not the desired type the task goes on
+to the next image.
+.le
+.ls (2)
+A temporary output image is created of the specified pixel data type
+(\fBccdred.pixeltype\fR). The header parameters are copied from the
+input image.
+.le
+.ls (3)
+If trimming is specified and the image has not been trimmed previously,
+the trim section is determined.
+.le
+.ls (4)
+If bad pixel replacement is specified and this has not been done
+previously, the bad pixel file is determined either from the task
+parameter or the instrument translation file. The bad pixel regions
+are read. If the image has been trimmed previously and the bad pixel
+file contains the word "untrimmed" then the bad pixel coordinates are
+translated to those of the trimmed image.
+.le
+.ls (5)
+If an overscan correction is specified and this correction has not been
+applied, the overscan section is averaged along the readout axis. If
+trimming is to be done the overscan section is trimmed to the same
+limits. A function is fit either interactively or noninteractively to
+the overscan vector. The function is used to produce the overscan
+vector to be subtracted from the image. This is done in real
+arithmetic.
+.le
+.ls (6)
+If the image is a zero level image go to processing step 12.
+If a zero level correction is desired and this correction has not been
+performed, find the zero level calibration image. If the zero level
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (7)
+If the image is a dark count image go to processing step 12.
+If a dark count correction is desired and this correction has not been
+performed, find the dark count calibration image. If the dark count
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point. The ratio of the input image dark time
+to the dark count image dark time is determined to be multiplied with
+each pixel of the dark count image before subtracting from the input
+image.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (8)
+If the image is a flat field image go to processing step 12. If a flat
+field correction is desired and this correction has not been performed,
+find the flat field calibration image of the appropriate subset. If
+the flat field calibration image has not been processed it is processed
+at this point. This is done by going to processing step 1 for this
+image. After the calibration image has been processed, processing of
+the input image continues from this point. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (9)
+If the image is an illumination image go to processing step 12. If an
+illumination correction is desired and this correction has not been performed,
+find the illumination calibration image of the appropriate subset.
+The illumination image must have the "mkillum" processing flag or the
+\fBccdproc\fR will abort with an error. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used. The processed calibration
+image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (10)
+If the image is a fringe image go to processing step 12. If a fringe
+correction is desired and this correction has not been performed,
+find the fringe calibration image of the appropriate subset.
+The fringe image must have the "mkfringe" processing flag or the
+\fBccdproc\fR will abort with an error. The ratio of the input
+image exposure time to the fringe image exposure time is determined.
+If there is a fringe scaling in the image header then this factor
+is multiplied by the exposure time ratio. This factor is used
+for scaling. The processed calibration image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (11)
+If there are no processing operations flagged, delete the temporary output
+image, which has been opened but not used, and go to 14.
+.le
+.ls (12)
+The input image is processed line by line with trimmed lines ignored.
+A line of the input image is read. Bad pixel replacement and trimming
+is applied to the image. Image lines from the calibration images
+are read from disk or the image cache. If the calibration is one
+dimensional (such as a readout zero
+level correction or a longscan flat field correction) then the image
+vector is read only once. Note that IRAF image I/O is buffered for
+efficiency and accessing a line at a time does not mean that image
+lines are read from disk a line at a time. Given the input line, the
+calibration images, the overscan vector, and the various scale factors
+a special data path for each combination of corrections is used to
+perform all the processing in the most efficient manner. If the
+image is a flat field any pixels less than the \fIminreplace\fR
+parameter are replaced by that minimum value. Also a mean is
+computed for the flat field and stored as the CCDMEAN keyword and
+the time, in an internal format, when this value was calculated is stored
+in the CCDMEANT keyword. The time is checked against the image modify
+time to determine if the value is valid or needs to be recomputed.
+.le
+.ls (13)
+The input image is deleted or renamed to a backup image. The temporary
+output image is renamed to the input image name.
+.le
+.ls (14)
+If the image is a zero level image and the readout correction is specified
+then it is averaged to a one dimensional readout correction.
+.le
+.ls (15)
+If the image is a zero level, dark count, or flat field image and the scan
+mode correction is specified then the correction is applied. For shortscan
+mode a modified two dimensional image is produced while for longscan mode a
+one dimensional average image is produced.
+.le
+.ls (16)
+The processing is completed and either the next input image is processed
+beginning at step 1 or, if it is a calibration image which is being
+processed for an input image, control returns to the step which initiated
+the calibration image processing.
+.le
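+
+The \fInoproc\fR option provides a convenient way to review this sequence
+for a particular image without actually processing it, for example
+(hypothetical image name):
+
+.nf
+	cl> ccdproc obj012 noproc+
+.fi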
+.sh
+12. Processing Arithmetic
+The \fBccdproc\fR task has two data paths, one for real image pixel datatypes
+and one for short integer pixel datatype. In addition internal arithmetic
+is based on the rules of FORTRAN. For efficiency there is
+no checking for division by zero in the flat field calibration.
+The following rules describe the processing arithmetic and data paths.
+
+.ls (1)
+If the input, output, or any calibration image is of type real the
+real data path is used. This means all image data is converted to
+real on input. If all the images are of type short all input data
+is kept as short integers. Thus, if all the images are of the same type
+there is no datatype conversion on input resulting in greater
+image I/O efficiency.
+.le
+.ls (2)
+In the real data path the processing arithmetic is always real and,
+if the output image is of short pixel datatype, the result
+is truncated.
+.le
+.ls (3)
+The overscan vector and the scale factors for dark count, flat field,
+illumination, and fringe calibrations are always of type real. Therefore,
+in the short data path any processing which includes these operations
+will be coerced to real arithmetic and the result truncated at the end
+of the computation.
+.le
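+
+For example, to force the real data path and real output images one might
+set the package parameter before processing (a sketch):
+
+.nf
+	cl> ccdred.pixeltype = "real real"
+	cl> ccdproc obj*.imh
+.fi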
+.sh
+13. In the Absence of Image Header Information
+The tasks in the \fBccdred\fR package are most convenient to use when
+the CCD image type, subset, and exposure time are contained in the
+image header. The ability to redefine which header parameters contain
+this information makes it possible to use the package at many different
+observatories (see \fBinstruments\fR). However, in the absence of any
+image header information the tasks may still be used effectively.
+There are two ways to proceed. One way is to use \fBccdhedit\fR
+to place the information in the image header.
+
+The second way is to specify the processing operations more explicitly
+than is needed when the header information is present. The parameter
+\fIccdtype\fR is set to "" or to "none". The calibration images are
+specified explicitly by task parameter since they cannot be recognized
+in the input list. Only one subset at a time may be processed.
+
+If dark count and fringe corrections are to be applied the exposure
+times must be added to all the images. Alternatively, the dark count
+and fringe images may be scaled explicitly for each input image. This
+works because the exposure times default to 1 if they are not given in
+the image header.
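+
+A sketch of this style of explicit processing (all names are hypothetical
+and only a single subset, here one filter, is processed at a time):
+
+.nf
+	cl> ccdproc vobj*.imh ccdtype="" zero=Zero dark=Dark flat=FlatV
+.fi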
+.ih
+EXAMPLES
+The user's \fBguide\fR presents a tutorial in the use of this task.
+
+1. In general all that needs to be done is to set the task parameters
+and enter
+
+ cl> ccdproc *.imh &
+
+This will run in the background and process all images which have not
+been processed previously.
+.ih
+TIME REQUIREMENTS
+.nf
+o SUN-3, 15 MHz 68020 with 68881 floating point hardware (no FPA)
+o 8 Mb RAM, 2 Fuji Eagle disks.
+o Input images = 544 x 512 short
+o Output image = 500 x 500 real
+o Operations are overscan subtraction (O), trimming to 500x500 (T),
+ zero level subtraction (Z), dark count scaling and subtraction (D),
+ and flat field scaling and subtraction (F).
+o UNIX statistics
+ (user, system, and clock time, and misc. memory and i/o statistics):
+
+[OTF] One calibration image and 9 object images:
+No caching: 110.6u 25.5s 3:18 68% 28+ 40K 3093+1645io 9pf+0w
+Caching: 111.2u 23.0s 2:59 74% 28+105K 2043+1618io 9pf+0w
+
+[OTZF] Two calibration images and 9 object images:
+No caching: 119.2u 29.0s 3:45 65% 28+ 50K 4310+1660io 9pf+0w
+Caching: 119.3u 23.0s 3:07 75% 28+124K 2179+1601io 9pf+0w
+
+[OTZDF] Three calibration images and 9 object images:
+No caching: 149.4u 31.6s 4:41 64% 28+ 59K 5501+1680io 19pf+0w
+Caching: 151.5u 29.0s 4:14 70% 27+227K 2346+1637io 148pf+0w
+
+[OTZF] 2 calibration images and 20 images processed:
+No caching: 272.7u 63.8u 8:47 63% 28+ 50K 9598+3713io 12pf+0w
+Caching: 271.2u 50.9s 7:00 76% 28+173K 4487+3613io 51pf+0w
+.fi
+.ih
+REVISIONS
+.ls CCDPROC V2.11.2
+A new "output" parameter is available to specify an output image leaving
+the input image unchanged. If this parameter is not specified then
+the previous behavior of "in-place" operation with an optional backup
+occurs.
+.le
+.ls CCDPROC V2.11
+The bad pixel fixing was modified to allow use of pixel masks,
+images, or the text file description. Bad pixel masks are the
+desired description and use of text files is only supported for
+backward compatibility. Note that support for the trimmed
+or untrimmed conversion from text files has been eliminated.
+
+Line-by-line overscan/prescan subtraction is now provided with
+three simple algorithms.
+.le
+.ls CCDPROC: V2.10.3
+The output pixel datatypes (specified by the package parameter
+\fIpixeltype\fR) have been extended to include unsigned short
+integers. Also it was previously possible to have the output
+pixel datatype be of lower precision than the input. Now the
+output pixel datatype is not allowed to lose precision; i.e.
+a real input image may not be processed to a short datatype.
+
+For short scan data the task now looks for the number of scan lines in the
+image header. Also when a calibration image is software scanned a new
+image is created. This allows processing objects with different numbers of
+scan lines and preserving the unscanned calibration image.
+
+It is now an error if no biassec is specified; previously the default
+was to use the whole image.
+
+The time, in an internal format, when the CCDMEAN value is calculated is
+stored in the CCDMEANT keyword. The time is checked against the image
+modify time to determine if the value is valid or needs to be recomputed.
+.le
+.ih
+SEE ALSO
+.nf
+instruments, ccdtypes, flatfields, icfit, ccdred, guide, mkillumcor,
+mkskycor, mkfringecor
+.fi
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdred.hlp b/noao/imred/ccdred/doc/ccdred.hlp
new file mode 100644
index 00000000..f2cca5bd
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdred.hlp
@@ -0,0 +1,104 @@
+.help package Dec93 noao.imred
+.ih
+NAME
+ccdred -- CCD image reduction package
+.ih
+USAGE
+ccdred
+.ih
+PARAMETERS
+.ls pixeltype = "real real"
+Output pixel datatype and calculation datatype. When images are processed
+or created the output pixel datatype is determined by this parameter if the
+specified datatype is of equal or higher precision; otherwise the input
+image datatype is preserved. For example, if the output datatype is
+specified as "int" then input images which are "short" or "ushort" will
+be output as integer but any real datatype input images will remain real.
+The allowed types, in order of increasing precision, are "short", "ushort",
+"int", "long", "real", and "double", for short signed integer, short
+unsigned integer, integer, long integer, and real or double floating
+point. Note that if short input images are processed into real images
+the disk space required will generally increase. The calculation datatype
+may only be "short" or "real", with a default of real if none is specified.
+.le
+.ls verbose = no
+Print log information to the standard output?
+.le
+.ls logfile = "logfile"
+Text log file. If no filename is specified then no log file is kept.
+.le
+.ls plotfile = ""
+Log metacode plot file for the overscan bias vector fits. If no filename
+is specified then no metacode plot file is kept.
+.le
+.ls backup = ""
+Backup prefix for backup images. If no prefix is specified then no backup
+images are kept when processing. If specified then the backup image
+has the specified prefix.
+.le
+.ls instrument = ""
+CCD instrument translation file. This is usually set with
+\fBsetinstrument\fR.
+.le
+.ls ssfile = "subsets"
+Subset translation file used to define the subset identifier. See
+\fBsubsets\fR for more.
+.le
+.ls graphics = "stdgraph"
+Interactive graphics output device when fitting the overscan bias vector.
+.le
+.ls cursor = ""
+Graphics cursor input. The default is the standard graphics cursor.
+.le
+.ls version = "June 1987"
+Package version.
+.le
+.ih
+DESCRIPTION
+The CCD reduction package is loaded when this command is entered. The
+package contains parameters which affect the operation of the tasks it
+defines. When images are processed or new images are created the output
+pixel datatype is that specified by the parameter \fBpixeltype\fR. Note
+that CCD processing replaces the original image by the processed image so
+the pixel type of the CCD images may change during processing. The output
+pixel type is not allowed to change to a lower precision but it is common
+for input short images to be processed to real images. Processing images
+from short to real pixel datatypes will generally increase the amount of
+disk space required (a factor of 2 on most computers).
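+
+For example, a sketch of setting the package parameter so that processed
+pixels are stored as short integers where no precision would be lost,
+while the calculations are still done in real arithmetic, is
+
+    cl> ccdred.pixeltype = "short real"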
+
+The tasks produce log output which may be printed on the standard
+output (the terminal unless redirected) and appended to a file. The
+parameter \fIverbose\fR determines whether processing information
+is printed. This may be desirable initially, but when using background
+jobs the verbose output should be turned off. The user may look at
+the end of the log file (for example with \fBtail\fR) to determine
+the status of the processing.
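+
+For example, a sketch of running a background job quietly and then
+checking its progress with the system task \fBtail\fR is
+
+    cl> ccdred.verbose = no
+    cl> ccdproc *.imh &
+    cl> tail logfile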
+
+The package was designed to work with data from many different observatories
+and instruments. In order to accomplish this an instrument translation
+file is used to define a mapping between the package parameters and
+the particular image header format. The instrument translation file
+is specified to the package by the parameter \fIinstrument\fR. This
+parameter is generally set by the task \fBsetinstrument\fR. The other
+file used is a subset file. This is generally created and maintained
+by the package and the user need not do anything. For more sophisticated
+users see \fBinstruments\fR and \fBsubsets\fR.
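+
+For example, a sketch (the instrument name is hypothetical and the
+available names depend on the observatory) is
+
+    cl> setinstrument rca4m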
+
+The package has very little graphics
+output. The exception is the overscan bias subtraction. The bias
+vector is logged in the metacode plot file if given. The plot file
+may be examined with the tasks in the \fBplot\fR package such as
+\fBgkimosaic\fR. When interactively fitting the overscan vector
+the graphics input and output devices must be specified. The defaults
+should apply in most cases.
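+
+For example, if the package \fIplotfile\fR parameter has been set to a
+file name, here the hypothetical "ccdplots", the overscan fits may later
+be examined with
+
+    cl> gkimosaic ccdplots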
+
+Because processing replaces the input image by the processed image it
+may be desired to save the original image. This may be done by
+specifying a backup prefix with the parameter \fIbackup\fR. For
+example, if the prefix is "orig" and the image is "ccd001", the backup
+image will be "origccd001". The prefix may be a directory but it must
+end with '/' or '$' (for logical directories).
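+
+For example, a sketch of requesting backups with the prefix used above is
+
+    cl> ccdred.backup = "orig"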
+.ih
+SEE ALSO
+ccdproc, instruments, setinstrument, subsets
+.endhelp
diff --git a/noao/imred/ccdred/doc/ccdred.ms b/noao/imred/ccdred/doc/ccdred.ms
new file mode 100644
index 00000000..645514ec
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdred.ms
@@ -0,0 +1,787 @@
+.RP
+.TL
+The IRAF CCD Reduction Package -- CCDRED
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+P.O. Box 26732, Tucson, Arizona 85726
+September 1987
+.AB
+The IRAF\(dg CCD reduction package, \fBccdred\fR, provides tools
+for the easy and efficient reduction of CCD images. The standard
+reduction operations are replacement of bad pixels, subtraction of an
+overscan or prescan bias, subtraction of a zero level image,
+subtraction of a dark count image, division by a flat field calibration
+image, division by an illumination correction, subtraction of a fringe
+image, and trimming unwanted lines or columns. Another common
+operation provided by the package is scaling and combining images with
+a number of algorithms for rejecting cosmic rays. Data in the image
+header is used to make the reductions largely automated and
+self-documenting though the package may still be used in the absence of
+this data. Also a translation mechanism is used to relate image header
+parameters to those used by the package to allow data from a variety of
+observatories and instruments to be processed. This paper describes
+the design goals for the package and the main tasks and algorithms
+which satisfy these goals.
+.PP
+This paper is to be published as part of the proceedings of the
+Santa Cruz Summer Workshop in Astronomy and Astrophysics,
+\fIInstrumentation for Ground-Based Optical Astronomy: Present and
+Future\fR, edited by Lloyd B. Robinson and published by
+Springer-Verlag.
+.LP
+\(dgImage Reduction and Analysis Facility (IRAF), a software system
+distributed by the National Optical Astronomy Observatories (NOAO).
+.AE
+.NH
+Introduction
+.PP
+The IRAF CCD reduction package, \fBccdred\fR, provides tools
+for performing the standard instrumental corrections and calibrations
+to CCD images. The major design goals were:
+.IP
+.nf
+\(bu To be easy to use
+\(bu To be largely automated
+\(bu To be image header driven if the data allows
+\(bu To be usable for a variety of instruments and observatories
+\(bu To be efficient and capable of processing large volumes of data
+.fi
+.LP
+This paper describes the important tasks and algorithms and shows how
+these design goals were met. It is not intended to describe every
+task, parameter, and usage in detail; the package has full
+documentation on each task plus a user's guide.
+.PP
+The standard CCD correction and calibration operations performed are
+replacement of bad columns and lines by interpolation from neighboring
+columns and lines, subtraction of a bias level determined from overscan
+or prescan columns or lines, subtraction of a zero level using a zero
+length exposure calibration image, subtraction of a dark count
+calibration image appropriately scaled to the dark time exposure of the
+image, division by a scaled flat field calibration image, division by
+an illumination image (derived from a blank sky image), subtraction of
+a scaled fringe image (also derived from a blank sky image), and
+trimming the image of unwanted lines or columns such as the overscan
+strip. The processing may change the pixel datatype on disk (IRAF allows
+seven image datatypes), usually from 16 bit integer to real format.
+Two special operations are also supported for scan mode and one
+dimensional zero level and flat field calibrations; i.e. the same
+calibration is applied to each CCD readout line. Any set of operations
+may be done simultaneously over a list of images in a highly efficient
+manner. The reduction operations are recorded in the image header and
+may also be logged on the terminal and in a log file.
+.PP
+The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+.PP
+Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+.PP
+This paper is organized as follows. There is a section giving an
+overview of how the package is used to reduce CCD data. This gives the
+user's perspective and illustrates the general ease of use. The next
+section describes many of the features of the package contributing to
+its ease of use, automation, and generality. The next two sections
+describe the major tools and algorithms in some detail. This includes
+discussions about achieving high efficiency. Finally the status of the
+package and its use at NOAO is given. References to additional
+documentation about IRAF and the CCD reduction package and an appendix
+listing the individual tasks in the package are found at the end of
+this paper.
+.NH
+A User's Overview
+.PP
+This section provides an overview of reducing data with the IRAF CCD
+reduction package. There are many variations in usage depending on the
+type of data, whether the image headers contain information about the
+data which may be used by the tasks, and the scientific goal. Only a
+brief example is given. A more complete discussion of usage and
+examples is given in \fIA User's Guide to the IRAF CCDRED Package\fR.
+The package was developed within the IRAF system and so makes use of
+all the sophisticated features provided. These features are also
+summarized here for those not familiar with IRAF since they are an
+important part of using the package.
+.PP
+Since the IRAF system is widely distributed and runs on a wide variety
+of computers, the site of the CCD reductions might be at the telescope,
+a system at the observatory provided for this purpose, or at the
+user's home computer. The CCD images to be processed are either
+available immediately as the data is taken, transferred from the data taking
+computer via a network link (the method adopted at NOAO), or transferred
+to the reduction computer via a medium such as magnetic tape in FITS
+format. The flexibility in reduction sites and hardware is one of the
+virtues of the IRAF-based CCD reduction package.
+.PP
+IRAF tasks typically have a number of parameters which give the user
+control over most aspects of the program. This is possible since the
+parameters are kept in parameter files so that the user need not enter
+a large number of parameters every time the task is run. The user may
+change any of these parameters as desired in several ways, such as by
+explicit assignment or by using an easy to learn,
+fill-in-the-value type of screen editor. The parameter values are
+\fIlearned\fR so that once a user sets the values they are maintained
+until the user changes them again; even between login sessions.
+.PP
+The first step in using the CCD reduction package is to set the default
+processing parameters for the data to be reduced. These parameters include
+a database file describing the image header keyword translations and
+default values, the processing operations desired (operations
+required vary with instrument and observer), the calibration image names,
+and certain special parameters for special types of observations such
+as scan mode. A special script task (a command procedure) is available
+to automatically set the default values, given the instrument name, to standard
+values defined by the support staff. Identifying the instrument in this
+way may be all the novice user need do though most people quickly learn
+to adjust parameters at will.
+.PP
+As an example suppose there is an instrument identified as \fLrca4m\fR
+for an RCA CCD at the NOAO 4 meter telescope. The user gives the command
+
+.ft L
+ cl> setinstrument rca4m
+.ft R
+
+which sets the default parameters to values suggested by the support staff
+for this instrument. The user may then change these suggested values if
+desired. In this example the processing switches are set to perform
+overscan bias subtraction, zero level image subtraction, flat fielding,
+and trimming.
+.PP
+The NOAO image headers contain information identifying the type of
+image, such as object, zero level, and flat field, the filter used to
+match flat fields with object images, the location of the overscan bias
+data, the trim size for the data, and whether the image has been
+processed. With this information the user need not worry about
+selecting images, pairing object images with calibration images, or
+inadvertently reprocessing an image.
+.PP
+The first step is to combine multiple zero level and flat field observations
+to reduce the effects of statistical noise. This is done by the
+commands
+
+.nf
+.ft L
+ cl> zerocombine *.imh
+ cl> flatcombine *.imh
+.ft R
+.fi
+
+The "cl> " is the IRAF command language prompt. The first command says
+look through all the images and combine the zero level images. The
+second command says look through all the images and combine the flat
+field images by filter. What could be simpler? Some \fIhidden\fR (default)
+parameters the user may modify are the combined image name, whether to
+process the images first, and the type of combining algorithm to use.
+.PP
+The next step is to process the images using the combined calibration
+images. The command is
+
+.ft L
+ cl> ccdproc *.imh
+.ft R
+
+This command says look through all the images, find the object images,
+find the overscan data based on the image header and subtract the
+bias, subtract the zero level calibration image, divide by the flat field
+calibration image, and trim the bias data and edge lines and columns.
+During this operation the task recognizes that the
+zero level and flat field calibration images have not been processed
+and automatically processes them when they are needed. The log output
+of this task, which may be to the terminal, to a file, or both, shows
+how this works.
+
+.nf
+.ft L
+ ccd003: Jun 1 15:12 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:12 Overscan section is [520:540,*], mean=485.0
+ Dark: Jun 1 15:12 Trim data section is [3:510,3:510]
+ Dark: Jun 1 15:13 Overscan section is [520:540,*], mean=484.6
+ ccd003: Jun 1 15:13 Dark count image is Dark.imh
+ FlatV: Jun 1 15:13 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:14 Overscan section is [520:540,*], mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh, scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*], mean=485.2
+ ccd004: Jun 1 15:16 Dark count image is Dark.imh
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh, scale=138.2
+ \fI<... more ...>\fL
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*], mean=482.4
+ ccd013: Jun 1 15:23 Dark count image is Dark.imh
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*], mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh, scale=132.3
+ \fI<... more ...>\fL
+.ft R
+.fi
+
+.PP
+The log gives the name of the image and a time stamp for each entry.
+The first image is ccd003. It is to be trimmed to the specified
+size given as an \fIimage section\fR, an array notation used commonly
+in IRAF to specify subsections of images. The location of the
+overscan data is also given by an image section which, in this case,
+was found in the image header. The mean bias level of the overscan
+is also logged though the overscan is actually a function of the
+readout line with the order of the function selected by the user.
+.PP
+When the task comes to subtracting the zero level image it first
+notes that the calibration image has not been processed and switches
+to processing the zero level image. Since it knows it is a zero level
+image the task does not attempt to zero level or flat field correct
+this image. After the zero level image has been processed the task
+returns to the object image only to find that the flat field image
+also has not been processed. It determines that the object image was
+obtained with a V filter and selects the flat field image having the same
+filter. The flat field image is processed through the zero level correction
+and then the task again returns to the object image, ccd003, which it
+finishes processing.
+.PP
+The next image, ccd004, is also a V filter
+observation. Since the zero level and V filter flat field have been
+processed the object image is processed directly. This continues
+for all the object images except for a detour to process the B filter flat
+field when the task first encounters a B filter object image.
+.PP
+In summary, the basic usage of the CCD reduction package is quite simple.
+First, the instrument is identified and some parameters for the data
+are set. Calibration images are then combined if needed. Finally,
+the processing is done with the simple command
+
+.ft L
+ cl> ccdproc *.imh&
+.ft R
+
+where the processing is performed as a \fIbackground job\fR in this example.
+This simplicity was a major goal of the package.
+.NH
+Features of the Package
+.PP
+This section describes some of the special features of the package
+which contribute to its ease of use, generality, and efficiency.
+The major criteria for ease of use are to minimize the user's record keeping
+involving input and output image names, the types of images, subset
+parameters such as filters which must be kept separate, and the state
+of processing of each image. The goal is to allow input images to
+be specified using simple wildcards, such as "*.imh" to specify all
+images, with the knowledge that the task will only operate on images
+for which it makes sense. To accomplish this the tasks must be able to
+determine the type of image, subset, and the state of processing from
+the image itself. This is done by making use of image header parameters.
+.PP
+For generality the package does not require any image header information
+except the exposure time. It is really not very much more difficult to
+reduce such data. Mainly, the user must be more explicit about specifying
+images and setting task parameters or add the information to the image
+headers. Some default header information may also be set in the image
+header translation file (discussed below).
+.PP
+One important image header parameter is the image type. This
+discriminates between object images and various types of calibration
+images such as flat field, zero level, dark count, comparison arcs,
+illumination, and fringe images. This information is used in two
+ways. For most of the tasks the user may select that only one type of
+image be considered. Thus, all the flat field images may be selected
+for combining or only the processing status of the object images be
+listed. The second usage is to allow the processing tasks to identify
+the standard calibration images and apply only those operations which
+make sense. For example, flat field images are not divided by a
+flat field. This allows the user to set the processing operations
+desired for the object images without fear of misprocessing the
+calibration images. The image type is also used to automatically
+select calibration images from a list of images to be processed instead
+of explicitly identifying them.
+.PP
+A related parameter specifies the subset. For certain operations the
+images must have a common value for this parameter. This parameter is
+often the filter but it may also apply to a grating or aperture, for example.
+The subset parameter is used to identify the appropriate flat field
+image to apply to an image or to select common flat fields to be combined
+into a higher quality flat field. This is automatic and the user need not
+keep track of which image was taken with which filter or grating.
+.PP
+The other important image header parameters are the processing flags.
+These identify when an image has been processed and also act as a history
+of the operation including calibration images used and other parameter
+information. The usage of these parameters is obvious; it allows the
+user to include processed images in a wildcard list knowing that the
+processing will not be repeated and to quickly determine the processing
+status of the image.
+.PP
+Use of image header parameters often ties the software to a
+particular observatory. To maintain generality and usefulness for data
+other than that at NOAO, the CCD reduction package was designed to
+provide a translation between parameters requested by the package and
+those actually found in the image header. This translation is defined
+in a simple text file which maps one keyword to another and also gives
+a default value to be used if the image header does not include a
+value. In addition the translation file maps the arbitrary strings
+which may identify image types to the standard types which the package
+recognizes. This is a relatively simple scheme and does not allow for
+forming combinations or for interpreting values which are not simple
+such as embedding an exposure time as part of a string. A more complex
+translation scheme may prove desirable as experience is gained with
+other types of image header formats, but by then a general header translation
+ability and/or new image database structure may be a standard IRAF
+feature.
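+As an illustration, the translation file for the 1986 NOAO CAMERA header
+format contains entries of the following form, mapping the image type
+keyword and two of its values:
+
+.nf
+.ft L
+    imagetyp         data-typ
+    'OBJECT (0)'     object
+    'DOME FLAT (6)'  flat
+.ft R
+.fi
+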
+.PP
+This feature has proven useful at NOAO. During the course of
+developing the package the data taking system was modernized by
+updating keywords and adding new information in the image headers,
+generally following the lines laid out by the \fBccdred\fR package.
+However, there is a period of transition and it is also desirable to
+reduce preexisting data. There are several different formats for this
+data. The header translation files make coping with these different
+formats relatively easy.
+.PP
+A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows the user to abort the task without leaving the image data in a
+partially processed state and protects data if the computer
+crashes. The second feature is that there is a parameter which may be
+set to make a backup of the input data with a particular prefix; for
+example "b", "orig", or "imdir$" (a logical directory prefix). This
+backup feature may be used when there is sufficient disk space, when
+learning to use the package, or just to be cautious.
+.PP
+In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image, there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
+.PP
+The goal of generality for many instruments at
+different observatories inherently conflicts with the goal of ease of
+use. Generality requires many parameters and options. This is
+feasible in the CCD reduction package, as well as the other IRAF packages,
+because of the IRAF parameter handling mechanism. In \fBccdred\fR
+there still remains the problem of setting the parameters appropriately
+for a particular instrument, image header format, and observatory.
+.PP
+To make this convenient there is a task, \fBsetinstrument\fR, that,
+based on an instrument name, runs a setup script for the instrument.
+An example of this task was given in the previous section.
+The script may do any type of operation but mainly it sets default
+parameters. The setup scripts are generally created by the support staff
+for the instrument. The combination of the setup script and the
+instrument translation file make the package, in a sense, programmable
+and achieves the desired instrument/observatory generality with ease of use.
+.NH
+CCD Processing
+.PP
+This section describes in some detail how the CCD processing is performed.
+The task which does the basic CCD processing is called \fBccdproc\fR.
+From the point of view of usage the task is very simple but a great deal
+is required to achieve this simplicity. The approach we take in describing
+the task is to follow the flow of control as the task runs with digressions
+as appropriate.
+.PP
+The highest level of control is a loop over the input images; all the
+operations are performed successively on each image. It is common for
+IRAF tasks which operate on individual images to allow the operation to
+be repeated automatically over a list of input images. This is important
+in the \fBccdred\fR package because data sets are often large and the
+processing is generally the same for each image. It would be tedious
+to have to give the processing command for each image to be processed.
+If an error occurs while processing an image the error is
+printed as a warning and processing continues with the next image.
+This provides protection primarily against mistyped or nonexistent images.
+.PP
+Before the first image is processed the calibration images are
+identified. There are two ways to specify calibration images;
+explicitly via task parameters or implicitly as part of the list of
+images to be processed. Explicitly identifying calibration images
+takes precedence over calibration images in the input list. Specifying
+calibration images as part of the input image list requires that the
+image types can be determined from the image header. Using the input
+list provides a mechanism for breaking processing up into sets of
+images (possibly using files containing the image names for each set)
+each having their own calibration images. One can, of course,
+selectively specify input and calibration images, but whenever possible
+one would like to avoid having to specify explicit images to process
+since this requires record keeping by the user.
+.PP
+The first step in processing an image is to check that it is of the
+appropriate image type. The user may select to process images of only
+one type. Generally this is object images since calibration images are
+automatically processed as needed. Images which are not of the desired
+type are skipped and the next image is considered.
+.PP
+A temporary output image is created next. The output pixel datatype on
+disk may be changed at this point as selected by the user.
+For example it is common for the raw CCD images to be digitized as 16
+bit integers but after calibration it is sometimes desirable to have
+real format pixels. If no output pixel datatype is specified the
+output image takes the same pixel datatype as the input image. The
+processing is done by operating on the input image and writing the
+results to a temporary output image. When the processing is complete
+the output image replaces the input image. This gives the effect of
+processing the images in place but with certain safeguards. If the
+computer crashes or the processing is interrupted the integrity of the
+input image is maintained. The reasons for choosing to process the
+images in this way are to avoid having to generate new image names (a
+tiresome record keeping process for the user), to minimize disk
+usage, and generally the unprocessed images are not used once they have
+been processed. When dealing with large volumes of data these reasons
+become fairly important. However, the user may specify a backup prefix
+for the images in which case, once the processing is completed, the
+original input image is renamed by appending it to the prefix (or with
+an added digit if a previous backup image of the same name exists)
+before the processed output image takes the original input name.
+.PP
+The next step is to determine the image geometry. Only a subsection of
+the raw image may contain the CCD data. If this region is specified by
+a header parameter then the processing will affect only this region.
+This allows calibration and other data to be part of the image.
+Normally, the only other data in an image is overscan or prescan data.
+The location of this bias data is determined from the image header or
+from a task parameter (which overrides the image header value). To
+relate calibration images of different sizes and to allow for readout
+of only a portion of the CCD detector, a header parameter may relate
+the image data coordinates to the full CCD coordinates. Application of
+calibration image data and identifying bad pixel regions via a bad
+pixel file is done in this CCD coordinate system. The final
+geometrical information is the region of the input image to be output
+after processing; an operation called trimming. This is defined by an
+image header parameter or a task parameter. Trimming of the image is
+selected by the user. Any or all of this geometry information may be
+absent from the image and appropriate defaults are used.
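+As a concrete sketch, the geometry in the example log of the previous
+section could be recorded by header entries of the following form (the
+keyword names follow the package's conventions; an actual header may use
+different keywords mapped through the translation file, and the data
+section value shown is hypothetical):
+
+.nf
+.ft L
+    DATASEC = '[1:512,1:512]'
+    BIASSEC = '[520:540,*]'
+    TRIMSEC = '[3:510,3:510]'
+.ft R
+.fi
+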
+.PP
+Each selected operation which is appropriate for the image type is then
+considered. If the operation has been performed previously it will not
+be repeated. If all selected operations have been performed then the
+temporary output image is deleted and the input image is left
+unchanged. The next image is then processed.
+.PP
+For each selected operation to be performed the pertinent data is
+determined. This consists of such things as the name of the
+calibration image, scaling factors, the overscan bias function, etc.
+Note that at this point only the parameters are determined, the
+operation is not yet performed. This is because operations are not
+performed sequentially but simultaneously as described below. Consider
+flat fielding as an example. First the input image is checked to see
+if it has been flat fielded. Then the flat field calibration image is
+determined. The flat field image is checked to see if it has been
+processed. If it has not been processed then it is processed by
+calling a procedure which is essentially a copy of the main processing
+program. After the flat field image has been processed, parameters
+affecting the processing, such as the flat field scale factor
+(essentially the mean of the flat field image), are determined. A log
+of the operation is then printed if desired.
+.PP
+Once all the processing operations and parameters have been defined the
+actual processing begins. One of the key design goals was that the
+processing be efficient. There are two primary methods used to achieve
+this goal; separate processing paths for 16 bit integer data and
+floating point data and simultaneous operations. If the image, the
+calibration images, and the output image (as selected by the user) are
+16 bit integer pixel datatypes then the image data is read and written
+as integer data. This eliminates internal datatype conversions both
+during I/O and during computations. However, many operations include
+use of real factors such as the overscan bias, dark count exposure
+scaling, and flat field scaling which causes the computation to be done
+in real arithmetic before the result is stored again as an integer
+value. In any case there is never any loss of precision except when
+converting the output pixel to short integer. If any of the images are
+not integer then a real internal data path is used in which input and
+output image data are converted to real as necessary.
+.PP
+For each data path the processing proceeds line-by-line. For each line
+in the output image data region (ignoring pixels outside the data area
+and pixels which are trimmed) the appropriate input data and
+calibration data are obtained. The calibration data is determined from
+the CCD coordinates of the output image and is not necessarily from
+the same image lines or columns. The input data is copied to the output
+array while applying bad pixel corrections and trimming. The line is
+then processed using a specially optimized procedure. This procedure
+applies all operations simultaneously for all combinations of
+operations. As an example, consider subtracting an overscan bias,
+subtracting a zero level, and dividing by a flat field. The basic
+kernel of the task, where the bulk of the CPU time is used, is
+
+.nf
+.ft L
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale / flat[i]
+.ft R
+.fi
+
+Here, \fIn\fR is the number of pixels in the line, \fIoverscan\fR is
+the overscan bias value for the line, \fIzero\fR is the zero level data
+from the zero level image, \fIflatscale\fR is the mean of the flat
+field image, and \fIflat\fR is the flat field data from the flat
+field image. Note the operations are not applied sequentially but
+in a single statement. This is the most efficient method and there is
+no need for intermediate images.
+.PP
+Though the processing is logically performed line-by-line in the program,
+the image I/O from the disk is not done this way. The IRAF virtual
+operating system image interface automatically provides multi-line
+buffering for maximal I/O efficiency.
+.PP
+In many image processing systems it has been standard to apply operations
+sequentially over an image. This requires producing intermediate images.
+Since this is clearly inefficient in terms of I/O it has been the practice
+to copy the images into main memory and operate upon them there until
+the final image is ready to be saved. This has led to the perception
+that in order to be efficient an image processing system \fImust\fR
+store images in memory. This is not true and the IRAF CCD reduction
+package illustrates this. The CCD processing does not use intermediate
+images and does not need to keep the entire image in main memory.
+Furthermore, though of lesser importance than I/O, the single statement method
+illustrated above is more efficient than multiple passes through the
+images even when the images are kept in main memory. Finally, as CCD
+detectors increase in size and small, fast, and cheap processors become
+common it is a distinct advantage to not require the large amounts of
+memory needed to keep entire images in memory.
+.PP
+There is one area in which use of main memory can improve performance
+and \fBccdproc\fR does take advantage of it if desired. The calibration
+images usually are the same for many input images. By specifying the
+maximum amount of memory available for storing images in memory
+the calibration images may be stored in memory up to that amount.
+By parameterizing the memory requirement there is no built-in dependence
+on large memory!
+.PP
+After processing the input image the last steps are to log the operations
+in the image header using processing keywords and replace the input
+image by the output image as described earlier. The CCD coordinates
+of the data are recorded in the header, even if not there previously, to
+allow further processing on the image after the image has been trimmed.
+.NH
+Combining Images
+.PP
+The second important tool in the CCD reduction package is a task to combine
+many images into a single, higher quality image. While this may also be
+done with more general image processing tools (the IRAF task \fBimsum\fR
+for example) the \fBccdred\fR tasks include special CCD dependent features such
+as recognizing the image types and using the image header translation
+file. Combining images is often done
+with calibration images, which are easy to obtain in number, where it
+is important to minimize the statistical noise so as not to affect the
+object images. Sometimes object images also are combined.
+The task is called \fBcombine\fR and there are special versions of
+this task called \fBzerocombine, darkcombine\fR, and \fBflatcombine\fR
+for the standard calibration images.
+.PP
+The task takes a list of input images to be combined. As output there
+is the combined image, an optional sigma image, and optional log output either
+to the terminal, to a log file, or both. A subset or subsets
+of the input images may be selected based on the image type and a
+subset parameter such as the filter. As with the processing task,
+this allows selecting images without having to explicitly list each
+image from a large data set. When combining based on a subset parameter
+there is an output image, and possibly a sigma image, for each separate subset.
+The output image pixel datatype may also be changed during combining;
+usually from 16 bit integer input to real output.
+The sigma image is the standard deviation of the input images about the
+output image.
+.PP
+Except for summing the images together,
+combining images may require correcting for variations between the images
+due to differing exposure times, sky background, extinctions, and
+positions. Currently, extinction corrections and registration are
+not included but scaling and shifting corrections are included.
+The scaling corrections may be done by exposure times or by computing
+the mode in each image. Additive shifting is also done by computing
+the mode in the images. The region of the image in which the mode
+is computed can be specified but by default the whole image is used.
+A scaling correction is used when the flux level or sensitivity is varying.
+The offset correction is used when the sky brightness is varying independently
+of the object brightness. If the images are not scaled then special
+data paths combine the images more efficiently.
+.PP
+Except for medianing and summing, the images are combined by averaging.
+The average may be weighted by
+
+.nf
+.ft L
+ weight = (N * scale / mode) ** 2
+.ft R
+.fi
+
+where \fIN\fR is the number of images previously combined (the task
+records the number of images combined in the image header), \fIscale\fR
+is the relative scale (applied by dividing) from the exposure time or
+mode, and \fImode\fR is the background mode estimate used when adding a
+variable offset.
+.PP
+The combining operation is the heart of the task. There are a number
+of algorithms which may be used as well as applying statistical weights.
+The algorithms are used to detect and reject deviant pixels, such as
+cosmic rays.
+The choice of algorithm depends on the data, the number of images,
+and the importance of rejecting cosmic rays. The more complex the
+algorithm the more time consuming the operation.
+The list below summarizes the algorithms.
+Further algorithms may be added in time.
+
+.IP "Sum - sum the input images"
+.br
+The input images are combined by summing. Care must be taken
+not to exceed the range of the 16 bit integer datatype when summing if the
+output datatype is of this type. Summing is the only algorithm in which
+scaling and weighting are not used. Also no sigma image is produced.
+.IP "Average - average the input images"
+.br
+The input images are combined by averaging. The images may be scaled
+and weighted. There is no pixel rejection. A sigma image is produced
+if more than one image is combined.
+.IP "Median - median the input images"
+.br
+The input images are combined by medianing each pixel. Unless the images
+are at the same exposure level they should be scaled. The sigma image
+is based on all the input images and is only an approximation to the
+uncertainty in the median estimates.
+.IP "Minreject, maxreject, minmaxreject - reject extreme pixels"
+.br
+At each pixel the minimum, maximum, or both are excluded from the
+average. The images should be scaled and the average may be
+weighted. The sigma image requires at least two pixels after rejection
+of the extreme values. These are relatively fast algorithms and are
+a good choice if there are many images (>15).
+.IP "Threshold - reject pixels above and below specified thresholds"
+.br
+The input images are combined with pixels above and below specified
+threshold values (before scaling) excluded. The images may be scaled
+and the average weighted. The sigma image also has the rejected
+pixels excluded.
+.IP "Sigclip - apply a sigma clipping algorithm to each pixel"
+.br
+The input images are combined by applying a sigma clipping algorithm
+at each pixel. The images should be scaled. This only rejects highly
+deviant points and so
+includes more of the data than the median or minimum and maximum
+algorithms. It requires many images (>10-15) to work effectively.
+Otherwise the bad pixels bias the sigma significantly. The mean
+used to determine the sigmas is based on the "minmaxrej" algorithm
+to eliminate the effects of bad pixels on the mean. Only one
+iteration is performed and at most one pixel is rejected at each
+point in the output image. After the deviant pixels are rejected the final
+mean is computed from all the data. The sigma image excludes the
+rejected pixels.
+.IP "Avsigclip - apply a sigma clipping algorithm to each pixel"
+.br
+The input images are combined with a variant of the sigma clipping
+algorithm which works well with only a few images. The images should
+be scaled. For each line the mean is first estimated using the
+"minmaxrej" algorithm. The sigmas at each point in the line are scaled
+by the square root of the mean, that is a Poisson scaling of the noise
+is assumed. These sigmas are averaged to get a line estimate of the
+sigma. Then the sigma at each point in the line is estimated by
+multiplying the line sigma by the square root of the mean at that point. As
+with the sigma clipping algorithm only one iteration is performed and
+at most one pixel is rejected at each point. After the deviant pixels
+are rejected the final mean is computed from all the data. The sigma
+image excludes the rejected pixels.
+.RE
+.PP
+The "avsigclip" algorithm is the best algorithm for rejecting cosmic
+rays, especially with a small number of images, but it is also the most
+time consuming. With many images (>10-15) it might be advisable to use
+one of the other algorithms ("maxreject", "median", "minmaxrej") because
+of their greater speed.
+.PP
+This task also has several design features to make it efficient and
+versatile. There are separate data paths for integer data and real
+data; as with processing, if all input images and the output image are
+of the same datatype then the I/O is done with no internal conversions.
+With mixed datatypes the operations are done as real. Even in the
+integer path the operations requiring real arithmetic to preserve the
+accuracy of the calculation are performed in that mode. There is
+effectively no limit to the number of images which may be combined.
+Also, the task determines the amount of memory available and buffers
+the I/O as much as possible. This is a case where operating on images
+from disk rather than in memory is essential.
+.NH
+Status and Conclusion
+.PP
+The initial implementation of the IRAF \fBccdred\fR package was
+completed in June 1987. It has been in use at the National Optical
+Astronomy Observatories since April 1987. The package was not
+distributed with Version 2.5 of IRAF (released in August 1987) but is
+available as a separate installation upon request. It will be part of
+future releases of IRAF.
+.PP
+At NOAO the CCD reduction package is available at the telescopes as the
+data is obtained. This is accomplished by transferring the images from
+the data taking computer to a Sun workstation (Sun Microsystems, Inc.)
+initially via tape and later by a direct link. There are several
+reasons for adopting this architecture. First, the data acquisition
+system is well established and is dedicated to its real-time function.
+The second computer was phased in without disrupting the essential
+operation of the telescopes and if it fails data taking may continue
+with data being stored on tape. The role of the second computer is to
+provide faster and more powerful reduction and analysis capability not
+required in a data acquisition system. In the future it can be more
+easily updated to follow the state of the art in small computers. As
+CCD detectors get larger the higher processing speeds will be essential
+to keep up with the data flow.
+.PP
+By writing the reduction software in the high level, portable, IRAF
+system the users have the capability to process their data from the
+basic CCD reductions to a full analysis at the telescope. Furthermore,
+the same software is widely available on a variety of computers if
+later processing or reprocessing is desired; staff and visitors at NOAO
+may also reduce their data at the headquarters facilities. The use of
+a high level system was also essential in achieving the design goals;
+it would be difficult to duplicate this complex package without
+the rich programming environment provided by the IRAF system.
+.NH
+References
+.PP
+The following documentation is distributed by the National Optical
+Astronomy Observatories, Central Computer Services, P.O. Box 26732,
+Tucson, Arizona, 85726. A comprehensive description of the IRAF system
+is given in \fIThe IRAF Data Reduction and Analysis System\fR by Doug
+Tody (also appearing in \fIProceedings of the SPIE - Instrumentation in
+Astronomy VI\fR, Vol. 627, 1986). A general guide to using IRAF is \fIA
+User's Introduction to the IRAF Command Language\fR by Peter Shames
+and Doug Tody. Both these documents are also part of the IRAF
+documentation distributed with the system.
+.PP
+A somewhat more tutorial description of the \fBccdred\fR package is
+\fIA User's Guide to the IRAF CCDRED Package\fR by the author.
+Detailed task descriptions and supplementary documentation are
+given in the on-line help library and are part of the user's guide.
+.NH
+Appendix
+.PP
+The current set of tasks making up the IRAF CCD Reduction Package,
+\fBccdred\fR, are summarized below.
+
+.nf
+.ft L
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdlist - List CCD processing information
+ ccdproc - Process CCD images
+ combine - Combine CCD images
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+.fi
+.ft R
diff --git a/noao/imred/ccdred/doc/ccdtypes.hlp b/noao/imred/ccdred/doc/ccdtypes.hlp
new file mode 100644
index 00000000..2cec33ea
--- /dev/null
+++ b/noao/imred/ccdred/doc/ccdtypes.hlp
@@ -0,0 +1,124 @@
+.help ccdtypes Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdtypes -- Description of the CCD image types
+.ih
+CCDTYPES
+The following CCD image types may be specified as the value of the parameter
+\fIccdtype\fR:
+
+.nf
+ "" - (the null string) all image types
+ object - object images
+ zero - zero level images such as a bias or preflash
+ dark - dark count images
+ flat - flat field images
+    illum - illumination images
+ fringe - fringe correction images
+ other - other image types defined in the translation file
+ none - images without an image type parameter
+ unknown - image types not defined in the translation file
+.fi
+.ih
+DESCRIPTION
+The \fBccdred\fR package recognizes certain standard CCD image types
+identified in the image header. The tasks may select images of a
+particular CCD image type from image lists with the parameter
+\fIccdtype\fR and also recognize and take special actions for
+calibration images.
+
+In order to make use of CCD image type information the header keyword
+identifying the image type must be specified in the instrument
+translation file. This entry has the form
+
+ imagetyp keyword
+
+where keyword is the image header keyword. This allows the package to
+access the image type string. There must also be a translation between
+the image type strings and the CCD types as recognized by the package.
+This information consists of lines in the instrument translation file
+of the form
+
+ header package
+
+where header is the exact string given in the image header and package
+is one of the types recognized by the package. The image header string
+can be virtually anything and if it contains blanks it must be
+quoted. The package image types are those given above except for
+the null string, "none", and "unknown". That is, these types may
+be specified as a CCD image type in selecting images but not as translations
+of image type strings.
+
+There may be more than one image type that maps to the same package
+type. In particular other standard CCD image types, such as comparison
+spectra, multiple exposure, standard star, etc., should be mapped to
+object or other. There may also be more than one type of flat field, e.g. dome
+flat, sky flat, and lamp flat. For more on the instrument translation
+file see the help for \fBinstruments\fR.
+.ih
+EXAMPLES
+1. The example entries in the instrument translation file are from the 1986
+NOAO CCD image header format produced by the CAMERA format tape writer.
+
+.nf
+ imagetyp data-typ
+
+ 'OBJECT (0)' object
+ 'DARK (1)' dark
+ 'PROJECTOR FLAT (2)' flat
+ 'SKY FLAT (3)' other
+ 'COMPARISON LAMP (4)' other
+ 'BIAS (5)' zero
+ 'DOME FLAT (6)' flat
+.fi
+
+The image header keyword describing the image type is "data-typ".
+The values of the image type strings in the header contain blanks so they
+are quoted. Also the case of the strings is important. Note that there
+are two types of flat field images and two types of other images.
+
+2. One way to check the image types is with the task \fBccdlist\fR.
+
+.nf
+ cl> ccdlist *.imh
+ Zero.imh[504,1][real][zero][1][OT]:FOCUS L98-193
+ Flat1.imh[504,1][real][flat][1][OTZ]:dflat 6v+blue 5s
+ ccd002.imh[504,504][real][unknown][1][OTZF]:FOCUS L98-193
+ ccd003.imh[544,512][short][object][1]:L98-193
+ ccd004.imh[544,512][short][object][1]:L98-193
+ ccd005.imh[544,512][short][object][1]:L98-193
+ oldformat.imh[544,512][short][none][1]:M31 V
+.fi
+
+The unknown type has a header image type of "MUL (8)". The old format
+image does not have any header type.
+
+3. To select only images of a particular type:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd003.imh[544,512][short][object][1]:L98-193
+ ccd004.imh[544,512][short][object][1]:L98-193
+ ccd005.imh[544,512][short][object][1]:L98-193
+ cl> ccdlist *.imh ccdtype=unknown
+ ccd002.imh[504,504][real][unknown][1][OTZF]:FOCUS L98-193
+ cl> ccdlist *.imh ccdtype=none
+ oldformat.imh[544,512][short][none][1]:M31 V
+.fi
+
+4. To process images with \fBccdproc\fR:
+
+.nf
+ cl> ccdproc *.imh
+ cl> ccdproc *.imh ccdtype=object
+.fi
+
+In the first case all the images will be processed (the default value of
+\fIccdtype\fR is ""). However, the task recognizes the calibration
+images, such as zero level and flat fields, and processes them appropriately.
+In the second case only object images are processed and all other images
+are ignored (except if needed as a calibration image).
+.ih
+SEE ALSO
+instruments
+.endhelp
diff --git a/noao/imred/ccdred/doc/combine.hlp b/noao/imred/ccdred/doc/combine.hlp
new file mode 100644
index 00000000..474937bf
--- /dev/null
+++ b/noao/imred/ccdred/doc/combine.hlp
@@ -0,0 +1,1146 @@
+.help combine Aug96 noao.imred.ccdred
+.ih
+NAME
+combine -- Combine CCD images using various algorithms
+.ih
+USAGE
+combine input output
+.ih
+PARAMETERS
+.ls input
+List of CCD images to combine. Images of a particular CCD image type may be
+selected with the parameter \fIccdtype\fR with the remaining images ignored.
+.le
+.ls output
+Output combined image or list of images. If the \fIproject\fR parameter is
+no (the typical case for CCD acquisition) then there will be one output
+image or, if the \fIsubsets\fR parameter is selected, one
+output image per subset. If the images consist of stacks then
+the \fIproject\fR option allows combining each input stack into separate
+output images as given by the image list.
+.le
+.ls plfile = "" (optional)
+Output pixel list file or list of files. If no name is given or the
+list ends prematurely then no file is produced. The pixel list file
+is a map of the number of pixels rejected at each point or, equivalently,
+the total number of input images minus the number of input pixels actually used.
+The file name is also added to the output image header under the
+keyword BPM.
+.le
+.ls sigma = "" (optional)
+Output sigma image or list of images. If no name is given or the list ends
+prematurely then no image is produced. The sigma is the standard deviation,
+corrected for a finite population, of the input pixel values (excluding
+rejected pixels) about the output combined pixel values.
+.le
+
+.ls ccdtype = ""
+CCD image type to combine. If specified only input images of the specified
+type are combined. See \fBccdtypes\fR for the possible image types.
+.le
+.ls amps = yes
+Combine images by amplifier? If yes then the input images are grouped by
+the amplifier parameter and each group combined into a separate output
+image. The amplifier identifier is appended to the output image name(s).
+See \fBsubsets\fR for more on the amplifier parameter.
+.le
+.ls subsets = no
+Combine images by subset parameter? If yes then the input images are
+grouped by subset parameter and each group combined into a separate output
+image. The subset identifier is appended to the output image
+name(s). See \fBsubsets\fR for more on the subset parameter.
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images? THIS OPTION IS NO LONGER SUPPORTED BUT
+THE PARAMETER REMAINS FOR NOW FOR BACKWARD COMPATIBILITY. IF SET TO
+yes AN ERROR ABORT WILL OCCUR.
+.le
+
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+offsetting, masking, thresholding, and rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation performed on the pixels remaining after offsetting,
+masking and thresholding. The algorithms are discussed in the
+DESCRIPTION section. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the nlow and nhigh pixels
+ ccdclip - Reject pixels using CCD noise parameters
+ crreject - Reject only positive pixels using CCD noise parameters
+ sigclip - Reject pixels using a sigma clipping algorithm
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+ pclip - Reject pixels using sigma based on percentiles
+.fi
+
+.le
+.ls project = no
+Project (combine) across the highest dimension of the input images? If
+no then all the input images are combined to a single output image. If
+yes then the highest dimension elements of each input image are combined to
+an output image and optional pixel list and sigma images. Each element of
+the highest dimension may have a separate offset but there can only be one
+mask image.
+.le
+.ls outtype = "real" (short|ushort|integer|long|real|double)
+Output image pixel datatype. The pixel datatypes are "double", "real",
+"long", "integer", unsigned short ("ushort") and "short" with highest
+precedence first. If none is specified then the highest precedence
+datatype of the input images is used. A mixture of short and unsigned
+short images has a highest precedence of integer.
+The datatypes may be abbreviated to
+a single character.
+.le
+.ls offsets = "none" (none|wcs|grid|<filename>)
+Integer offsets to add to the axes of each image. The options are:
+.ls "none"
+No offsets are applied.
+.le
+.ls "wcs"
+The world coordinate system (wcs) in the image is used to derive the
+offsets. The nearest integer offset that matches the world coordinate
+at the center of the first input image is used.
+.le
+.ls "grid"
+A uniform grid of offsets is specified by a string of the form
+
+.nf
+ grid [n1] [s1] [n2] [s2] ...
+.fi
+
+where ni is the number of images in dimension i and si is the step
+in dimension i. For example "grid 5 100 5 100" specifies a 5x5
+grid with origins offset by 100 pixels.
+.le
+.ls <filename>
+The offsets are given in the specified file. The file consists
+of one line per image with the offsets in each dimension forming the
+columns.
+.le
+.le
+.ls masktype = "none" (none|goodvalue|badvalue|goodbits|badbits)
+Type of pixel masking to use. If "none" then no pixel masking is done
+even if an image has an associated pixel mask. The other choices
+are to select the value in the pixel mask to be treated as good
+(goodvalue) or bad (badvalue) or the bits (specified as a value)
+to be treated as good (goodbits) or bad (badbits). The pixel mask
+file name comes from the image header keyword BPM.
+Note that when
+combining images by projection of the highest dimension only one
+pixel mask is applied to all the images. \fBAlso if the number of
+input images becomes too large (currently about 115 .imh or 57 .hhh
+images) then the images are temporarily stacked and combined by projection
+which also means the bad pixel mask from the first image will be used
+for all images.\fR
+.le
+.ls maskvalue = 0
+Mask value used with the \fImasktype\fR parameter. If the mask type
+selects good or bad bits the value may be specified using IRAF notation
+for decimal, octal, or hexadecimal; i.e. 12, 14b, or 0cx to select bits 3
+and 4.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+
+.ls scale = "none" (none|mode|median|mean|exposure|@<file>|!<keyword>)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, scale
+by the exposure time in the image header, scale by the values in a specified
+file, or scale by a specified image header keyword. When specified in
+a file the scales must be one per line in the order of the input
+images.
+.le
+.ls zero = "none" (none|mode|median|mean|@<file>|!<keyword>)
+Additive zero level image shifts to be applied. The choices are none or
+shift by the mode, median, or mean of the specified statistics section,
+shift by values given in a file, or shift by values given by an image
+header keyword. When specified in a file the zero values must be one
+per line in the order of the input images. File or keyword zero offset
+values do not allow a correction to the weights.
+.le
+.ls weight = "none" (none|mode|median|mean|exposure|@<file>|!<keyword>)
+Weights to be applied during the final averaging. The choices are none,
+the mode, median, or mean of the specified statistics section, the exposure
+time, values given in a file, or values given by an image header keyword.
+When specified in a file the weights must be one per line in the order of
+the input images and the only adjustment made by the task is for the number of
+images previously combined. In this case the weights should be those
+appropriate for the scaled images which would normally be the inverse
+of the variance in the scaled image.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling and
+weighting. If no section is given then the entire region of the input is
+sampled (for efficiency the images are sampled if they are big enough).
+When the images are offset relative to each other one can precede the image
+section with one of the modifiers "input", "output", "overlap". The first
+interprets the section relative to the input image (which is equivalent to
+not specifying a modifier), the second interprets the section relative to
+the output image, and the last selects the common overlap and any following
+section is ignored.
+.le
+
+.ce
+Algorithm Parameters
+.ls lthreshold = INDEF, hthreshold = INDEF
+Low and high thresholds to be applied to the input pixels. This is done
+before any scaling, rejection, and combining. If INDEF the thresholds
+are not used.
+.le
+.ls nlow = 1, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+These numbers are converted to fractions of the total number of input images.
+If no pixels have been rejected beforehand the specified numbers of pixels
+are rejected, while if pixels have already been rejected by masking,
+thresholding, or nonoverlap, the same fractions of the remaining pixels,
+truncated to an integer, are used.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject when
+using the clipping algorithms (ccdclip, crreject, sigclip, avsigclip, or
+pclip). When given as a positive value this is the minimum number to
+keep. When given as a negative value the absolute value is the maximum
+number to reject. If offsetting, thresholding, or masking leaves fewer
+pixels at some point, then a positive nkeep greater than the number of
+remaining pixels means that no pixels will be rejected there, while a
+negative nkeep still allows up to that number to be rejected.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value. The noise model for a pixel is:
+
+.nf
+ variance in DN = (rdnoise/gain)^2 + DN/gain + (snoise*DN)^2
+ variance in e- = (rdnoise)^2 + (gain*DN) + (snoise*(gain*DN))^2
+ = rdnoise^2 + Ne + (snoise * Ne)^2
+.fi
+
+where DN is the data number and Ne is the number of electrons. Sensitivity
+noise typically comes from noise introduced during flat fielding.
+.le
+.ls sigscale = 0.1 (ccdclip, crreject, sigclip, avsigclip)
+This parameter determines when Poisson corrections are made to the
+computation of a sigma for images with different scale factors. If all
+relative scales are within this value of unity and all relative zero level
+offsets are within this fraction of the mean then no correction is made.
+The idea is that if the images are all similarly though not identically
+scaled, the extra computations involved in making Poisson corrections for
+variations in the sigmas can be skipped. A value of zero will apply the
+corrections except in the case of equal images, and a large value can be
+used if the sigmas of pixels in the images are independent of scale and
+zero level.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See the DESCRIPTION section for further details.
+.le
+.ls grow = 0
+Number of pixels to either side of a rejected pixel along image lines
+to also be rejected. This applies only to pixels rejected by one of
+the rejection algorithms and not the masked or threshold rejected pixels.
+.le
+
+PACKAGE PARAMETERS
+
+The package parameters are used to specify verbose and log output and the
+instrument and header definitions.
+.ih
+DESCRIPTION
+A set of CCD images are combined by weighted averaging or medianing. Pixels
+may be rejected from the combining by using pixel masks, threshold levels,
+and rejection algorithms. The images may be scaled multiplicatively or
+additively based on image statistics, image header keywords, or text files
+before rejection. The images may be combined with integer pixel coordinate
+offsets to produce an image bigger than any of the input images.
+This task is a variant of the \fBimages.imcombine\fR task specialized
+for CCD images.
+
+The input images to be combined are specified by a list. A subset or
+subsets of the input list may be selected using the parameters
+\fIccdtype\fR and \fIsubsets\fR. The \fIccdtype\fR parameter
+selects only images of a specified standard CCD image type.
+The \fIsubsets\fR parameter breaks up the input
+list into sublists of common subset parameter (filter, grating, etc.). For
+more information see \fBccdtypes\fR and \fBsubsets\fR. This selection
+process is useful with wildcard templates to combine, for example, the flat
+field images for each filter in one step (see \fBflatcombine\fR). When
+subsets of the input list are used the output image and optional pixel file
+and sigma image are given by root names with an amplifier and subset
+identifier appended by the task.
+
+If the \fBproject\fR parameter is yes then the highest dimension elements
+of each input image are combined to make an output image of one lower
+dimension. There is no limit to the number of elements combined in this
+case. If the \fBproject\fR parameter is no then the entire input list
+is combined to form a single output image per subset. In this case the
+images must all have the same dimensionality but they may have different
+sizes. There is a software limit of approximately 100 images in this
+case.
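+
+For example, a command of the following form (the 3-dimensional image name
+"cube" is hypothetical) might be used to average the planes of a data cube
+into a single 2-dimensional image:
+
+.nf
+    cl> combine cube cubeavg project+ combine=average reject=none
+.fi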
+
+The output image header is a copy of the first image in the combined set.
+In addition, the number of images combined is recorded under the keyword
+NCOMBINE, the exposure time is updated as the weighted average of the input
+exposure times, and any pixel list file created is recorded under the
+keyword BPM. The output pixel type is set by the parameter \fIouttype\fR.
+If left blank then the input datatype of highest precedence is used.
+A mixture of short and unsigned short images has a highest precedence of
+integer.
+
+In addition to one or more output combined images there may also be a pixel
+list image containing the number of pixels rejected at each point in the
+output image, an image containing the sigmas of the pixels combined about
+the final output combined pixels, and a log file. The pixel list image is
+in the compact pixel list format which can be used as an image in other
+programs. The sigma computation is the standard deviation corrected for a
+finite population (the n/(n-1) factor) including weights if a weighted
+average is used.
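+
+As an illustrative sketch (the output names are hypothetical), both optional
+outputs might be requested with a command such as:
+
+.nf
+    cl> combine obj* Avg plfile=Avgrej.pl sigma=Avgsig
+.fi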
+
+Other input/output parameters are \fIdelete\fR and \fIclobber\fR. The
+\fIdelete\fR parameter may be set to "yes" to delete the input images
+used in producing an output image after it has been created. This is
+useful for minimizing disk space, particularly with large
+sets of calibration images needed to achieve high statistical accuracy
+in the final calibration image. The \fIclobber\fR parameter is retained
+only for backward compatibility; it is no longer supported and must be
+left at "no" (see the PARAMETERS section).
+
+An outline of the steps taken by the program is given below and the
+following sections elaborate on the steps.
+
+.nf
+o Set the input image offsets and the final output image size.
+o Set the input image scales and weights
+o Write the log file output
+.fi
+
+For each output image line:
+
+.nf
+o Get input image lines that overlap the output image line
+o Reject masked pixels
+o Reject pixels outside the threshold limits
+o Reject pixels using the specified algorithm
+o Reject neighboring pixels along each line
+o Combine remaining pixels using the weighted average or median
+o Compute sigmas of remaining pixels about the combined values
+o Write the output image line, rejected pixel list, and sigmas
+.fi
+
+
+OFFSETS
+
+The images to be combined need not be of the same size or overlap. They
+do have to have the same dimensionality which will also be the dimensionality
+of the output image. Images of any dimension supported by IRAF may be
+used. Note that if the \fIproject\fR flag is yes then the input images
+are the elements of the highest dimension; for example the planes of a
+three dimensional image.
+
+The overlap of the images is determined by a set of integer pixel offsets
+with an offset for each dimension of each input image. For example
+offsets of 0, 10, and 20 in the first dimension of three images will
+result in combining the three images with only the first image in the
+first 10 columns, the first two images in the next 10 columns, and
+all three images starting in the 21st column. At the 21st output column
+the 21st column of the first image will be combined with the 11th column
+of the second image and the 1st column of the third image.
+
+The output image size is set by the maximum extent in each dimension
+of any input image after applying the offsets. In the above example if
+all the images have 100 columns then the output image will have 120
+columns corresponding to the 20 column offset in the third image.
+
+The input image offsets are set using the \fIoffsets\fR parameter. There
+are four ways to specify the offsets. If the word "none" or the empty
+string "" are used then all offsets will be zero and all pixels with the
+same coordinates will be combined. The output image size will be equal to
+the biggest dimensions of the input images.
+
+If "wcs" offsets are specified then the world coordinate systems (wcs)
+in the image headers are used to derive the offsets. The world coordinate
+at the center of the first input image is evaluated. Then integer pixel
+offsets are determined for each image to bring the same world coordinate
+to the same point. Note the following caveats. The world coordinate
+systems must be of the same type, orientation, and scale and only the
+nearest integer shift is used.
+
+If the input images have offsets in a regular grid or one wants to make
+an output image in which the input images are "mosaiced" together in
+a grid then the special offset string beginning with the word "grid"
+is used. The format is
+
+.nf
+ grid [n1] [s1] [n2] [s2] ...
+.fi
+
+where ni is the number of images in dimension i and si is the step in
+dimension i. For example "grid 5 100 5 100" specifies a 5x5 grid with
+origins offset by 100 pixels. Note that one must ensure that the input
+images are specified in the correct order. This may best be accomplished
+using a "@" list. One useful application of the grid is to make a
+nonoverlapping mosaic of a number of images for display purposes. Suppose
+there are 16 images which are 100x100. The offset string "grid 4 101 4
+101" will produce a mosaic with a one pixel border having the value set
+by the \fIblank\fR parameter between the images.
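+
+Continuing this example, a possible command (with a hypothetical list file
+"mos.lis" containing the 16 image names in grid order) is:
+
+.nf
+    cl> combine @mos.lis mosaic offsets="grid 4 101 4 101" blank=0.
+.fi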
+
+The offsets may be defined in a file by specifying the file name
+in the \fIoffsets\fR parameter. (Note that the special file name STDIN
+may be used to type in the values terminated by the end-of-file
+character). The file consists of a line for each input image. The lines
+must be in the same order as the input images and so an "@" list may
+be useful. The lines consist of whitespace separated offsets one for
+each dimension of the images. In the first example cited above the
+offset file might contain:
+
+.nf
+ 0 0
+ 10 0
+ 20 0
+.fi
+
+where we assume the second dimension has zero offsets.
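+
+Assuming these offsets are stored in a hypothetical file named "shifts.dat",
+the images might then be combined with:
+
+.nf
+    cl> combine im001,im002,im003 out offsets=shifts.dat
+.fi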
+
+The offsets need not have zero for one of the images. The offsets may
+include negative values or refer to some arbitrary common point.
+When the offsets are read by the program it will find the minimum
+value in each dimension and subtract it from all the other offsets
+in that dimension. The above example could also be specified as:
+
+.nf
+ 225 15
+ 235 15
+ 245 15
+.fi
+
+There may be cases where one doesn't want the minimum offsets reset
+to zero. If all the offsets are positive and the comment "# Absolute"
+appears in the offset file then the images will be combined with
+blank values between the first output pixel and the first overlapping
+input pixel. Continuing with the above example, the file
+
+.nf
+ # Absolute
+ 10 10
+ 20 10
+ 30 10
+.fi
+
+will have the first pixel of the first image in the 11th pixel of the
+output image. Note that there is no way to "pad" the other side of
+the output image.
+
+
+SCALES AND WEIGHTS
+
+In order to combine images with rejection of pixels based on deviations
+from some average or median they must be scaled to a common level. There
+are two types of scaling available, a multiplicative intensity scale and an
+additive zero point shift. The intensity scaling is defined by the
+\fIscale\fR parameter and the zero point shift by the \fIzero\fR
+parameter. These parameters may take the values "none" for no scaling,
+"mode", "median", or "mean" to scale by statistics of the image pixels,
+"exposure" (for intensity scaling only) to scale by the exposure time
+keyword in the image header, any other image header keyword specified by
+the keyword name prefixed by the character '!', and the name of a file
+containing the scale factors for the input image prefixed by the
+character '@'.
+
+Examples of the possible parameter values are shown below where
+"myval" is the name of an image header keyword and "scales.dat" is
+a text file containing a list of scale factors.
+
+.nf
+ scale = none No scaling
+ zero = mean Intensity offset by the mean
+ scale = exposure Scale by the exposure time
+ zero = !myval Intensity offset by an image keyword
+ scale = @scales.dat Scales specified in a file
+.fi
+
+The image statistics factors are computed by sampling a uniform grid
+of points with the smallest grid step that yields less than 10000
+pixels; sampling is used to reduce the time needed to compute the statistics.
+If one wants to restrict the sampling to a region of the image the
+\fIstatsec\fR parameter is used. This parameter has the following
+syntax:
+
+.nf
+ [input|output|overlap] [image section]
+.fi
+
+The initial modifier defaults to "input" if absent. The modifiers are useful
+if the input images have offsets. In that case "input" specifies
+that the image section refers to each input image, "output" specifies
+that the image section refers to the output image coordinates, and
+"overlap" specifies the mutually overlapping region of the input images.
+In the latter case an image section is ignored.
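+
+For example, the following illustrative settings (the sections are
+hypothetical) are all valid:
+
+.nf
+    statsec = "[1:100,1:100]"          section of each input image
+    statsec = "output [1:200,1:200]"   section in output image coordinates
+    statsec = "overlap"                mutually overlapping region
+.fi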
+
+The statistics are as indicated by their names. In particular, the
+mode is a true mode using a bin size which is a fraction of the
+range of the pixels and is not based on a relationship between the
+mode, median, and mean. Also masked pixels are excluded from the
+computations as well as during the rejection and combining operations.
+
+The "exposure" option in the intensity scaling uses the exposure time
+from the image header. If one wants to use a nonexposure time image
+header keyword the !<keyword> syntax is available.
+
+If both an intensity scaling and zero point shift are selected the
+multiplicative scaling is done first. Use of both makes sense
+if the intensity scaling is the exposure time to correct for
+different exposure times and then the zero point shift allows for
+sky brightness changes.
+
+The image statistics and scale factors are recorded in the log file
+unless they are all equal, which is equivalent to no scaling. The
+intensity scale factors are normalized to a unit mean and the zero
+point shifts are adjusted to a zero mean. When the factors are specified
+in an @file or by a keyword they are not normalized.
+
+Scaling affects not only the mean values between images but also the
+relative pixel uncertainties. For example scaling an image by a
+factor of 0.5 will reduce the effective noise sigma of the image
+at each pixel by the square root of 0.5. Changes in the zero
+point also change the noise sigma if the image noise characteristics
+are Poissonian. In the various rejection algorithms based on
+identifying a noise sigma and clipping large deviations relative to
+the scaled median or mean, one may need to account for the scaling induced
+changes in the image noise characteristics.
+
+In those algorithms it is possible to eliminate the "sigma correction"
+while still using scaling. The reasons this might be desirable are 1) if
+the scalings are similar the corrections in computing the mean or median
+are important but the sigma corrections may not be important and 2) the
+image statistics may not be Poissonian, either inherently or because the
+images have been processed in some way that changes the statistics. In the
+first case because computing square roots and making corrections to every
+pixel during the iterative rejection operation may be a significant
+computational speed limit, the parameter \fIsigscale\fR selects how
+dissimilar the scalings must be to require the sigma corrections. This
+parameter is a fractional deviation which, since the scale factors are
+normalized to unity, is the actual minimum deviation in the scale factors.
+For the zero point shifts the shifts are normalized by the mean shift
+before adjusting the shifts to a zero mean. To always use sigma scaling
+corrections the parameter is set to zero and to eliminate the correction in
+all cases it is set to a very large number.
+
+If the final combining operation is "average" then the images may be
+weighted during the averaging. The weights are specified in the
+same way as the scale factors. In addition
+the NCOMBINE keyword, if present, will be used in the weights.
+The weights, scaled to a unit sum, are printed in the log output.
+
+The weights are only used for the final weighted average and sigma image
+output. They are not used to form averages in the various rejection
+algorithms. For weights in the case of no scaling or only multiplicative
+scaling the weights are used as given or determined so that images with
+lower signal levels will have lower weights. However, for cases in which
+zero level scaling is used and the zero levels are determined from image
+statistics (not from an input file or keyword) the weights are computed
+from the initial weights (the exposure time, image statistics, or input
+values) using the formula:
+
+.nf
+ weight_final = weight_initial / (scale * sky)
+.fi
+
+where the sky values are those from the image statistics before conversion
+to zero level shifts and adjustment to zero mean over all images. The
+reasoning is that if the zero level is high the sky brightness is high and
+so the S/N is lower and the weight should be lower. If any sky value
+determined from the image statistics comes out to be negative, a warning is
+given and none of the weights are adjusted for sky levels.
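+
+As a purely illustrative example of this formula, two images with initial
+weights (say exposure times) of 2 and 1, unit scales, and sky values of 400
+and 800 would get
+
+.nf
+    weight_1 = 2 / (1 * 400) = 0.0050
+    weight_2 = 1 / (1 * 800) = 0.00125
+.fi
+
+which, scaled to a unit sum, become 0.8 and 0.2.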
+
+The weights are not adjusted when the zero offsets are input from a file
+or keyword since these values do not imply the actual image sky value.
+In this case if one wants to account for different sky statistics
+in the weights the user must specify the weights in a file taking
+explicit account of changes in the weights due to different sky
+statistics.
+
+
+PIXEL MASKS
+
+A pixel mask is a type of IRAF file having the extension ".pl" which
+identifies an integer value with each pixel of the images to which it is
+applied. The integer values may denote regions, a weight, a good or bad
+flag, or some other type of integer or integer bit flag. In the common
+case where many values are the same this file is compacted to be small and
+efficient to use. It is also most compact and efficient if the majority of
+the pixels have a zero mask value so frequently zero is the value for good
+pixels. Note that these files, while not stored as a strict pixel array,
+may be treated as images in programs. This means they may be created by
+programs such as \fBmkpattern\fR, edited by \fBimedit\fR, examined by
+\fBimexamine\fR, operated upon by \fBimarith\fR, graphed by \fBimplot\fR,
+and displayed by \fBdisplay\fR.
+
+At the time of introducing this task, generic tools for creating
+pixel masks have yet to be written. There are two ways to create a
+mask in V2.10. First if a regular integer image can be created
+then it can be converted to pixel list format with \fBimcopy\fR:
+
+.nf
+ cl> imcopy template plfile.pl
+.fi
+
+by specifically using the .pl extension on output. Other programs that
+can create integer images (such as \fBmkpattern\fR or \fBccdred.badpiximage\fR)
+can create the pixel list file directly by simply using the ".pl"
+extension in the output image name.
+
+To use pixel masks with \fBcombine\fR one must associate a pixel
+mask file with an image by entering the pixel list file name in the
+image header under the keyword BPM (bad pixel mask). This can be
+done with \fBhedit\fR. Note that the same pixel mask may be associated
+with more than one image as might be the case if the mask represents
+defects in the detector used to obtain the images.
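+
+For example, a hypothetical mask "ccdmask.pl" could be attached to a set of
+images with a command like:
+
+.nf
+    cl> hedit obj*.imh BPM ccdmask.pl add+ ver-
+.fi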
+
+If a pixel mask is associated with an image the mask is used when the
+\fImasktype\fR parameter is set to a value other than "none". Note that
+when it is set to "none" mask information is not used even if it exists for
+the image. The values of \fImasktype\fR which apply masks are "goodvalue",
+"badvalue", "goodbits", and "badbits". They are used in conjunction with
+the \fImaskvalue\fR parameter. When the mask type is "goodvalue" the
+pixels with mask values matching the specified value are included in
+combining and all others are rejected. Similarly, for a mask type of
+"badvalue" the pixels with mask values matching the specified value are
+rejected and all others are accepted. The bit types are useful for
+selecting a combination of attributes in a mask consisting of bit flags.
+The mask value is still an integer but is interpreted by bitwise comparison
+with the values in the mask file.
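+
+As a sketch of the bit-flag usage (hypothetical image names), the following
+would reject any pixel whose mask value has bit 1 or bit 2 set (mask values
+containing 1, 2, or 3):
+
+.nf
+    cl> combine obj* out masktype=badbits maskvalue=3
+.fi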
+
+If a mask operation is specified and an image has no mask image associated
+with it then the mask values are taken as all zeros. In those cases be
+careful that zero is an accepted value otherwise the entire image will be
+rejected.
+
+In the case of combining the higher dimensions of an image into a
+lower dimensional image, the "project" option, the same pixel mask
+is applied to all of the data being combined; i.e. the same 2D
+pixel mask is applied to every plane of a 3D image. This is because
+a higher dimensional image is treated as a collection of lower
+dimensional images having the same header and hence the same
+bad pixel mask. It would be tempting to use a bad pixel mask with
+the same dimension as the image being projected but this is not
+currently how the task works.
+
+When the number of input images exceeds the maximum number of open files
+allowed by IRAF (currently about 115 .imh or 57 .hhh images) the input
+images are stacked and combined with the project option. \fBThis means
+that the bad pixel mask from the first input image will be applied to all
+the images.\fR
+
+
+THRESHOLD REJECTION
+
+In addition to rejecting masked pixels, pixels in the unscaled input
+images which are below or above the thresholds given by the parameters
+\fIlthreshold\fR and \fIhthreshold\fR are rejected. Values of INDEF
+mean that no threshold value is applied. Threshold rejection may be used
+to exclude very bad pixel values or as an alternative way of masking
+images. In the latter case one can use a task like \fBimedit\fR
+or \fBimreplace\fR to set parts of the images to be excluded to some
+very low or high magic value.
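+
+One possible approach (the image names, section, and magic value are
+hypothetical; see \fBimreplace\fR for details) is:
+
+.nf
+    cl> imreplace obj001[50:60,*] -1000.
+    cl> combine obj* out lthreshold=-100.
+.fi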
+
+
+REJECTION ALGORITHMS
+
+The \fIreject\fR parameter selects a type of rejection operation to
+be applied to pixels not masked or thresholded. If no rejection
+operation is desired the value "none" is specified.
+
+MINMAX
+.in 4
+A specified fraction of the highest and lowest pixels are rejected.
+The fraction is specified as the number of high and low pixels, the
+\fInhigh\fR and \fInlow\fR parameters, when data from all the input images
+are used. If pixels have been rejected by offsetting, masking, or
+thresholding then a matching fraction of the remaining pixels, truncated
+to an integer, are used. Thus,
+
+.nf
+ nl = n * nlow/nimages + 0.001
+ nh = n * nhigh/nimages + 0.001
+.fi
+
+where n is the number of pixels surviving offsetting, masking, and
+thresholding, nimages is the number of input images, nlow and nhigh
+are task parameters and nl and nh are the final number of low and
+high pixels rejected by the algorithm. The factor of 0.001 is to
+adjust for rounding of the ratio.
+
+As an example with 10 input images and specifying one low and two high
+pixels to be rejected the fractions to be rejected are nlow=0.1 and nhigh=0.2
+and the number rejected as a function of n is:
+
+.nf
+ n 0 1 2 3 4 5 6 7 8 9 10
+ nl 0 0 0 0 0 0 0 0 0 0 1
+ nh 0 0 0 0 0 1 1 1 1 1 2
+.fi
+
+.in -4
+CCDCLIP
+.in 4
+If the images are obtained using a CCD with known read out noise, gain, and
+sensitivity noise parameters and they have been processed to preserve the
+relation between data values and photons or electrons then the noise
+characteristics of the images are well defined. In this model the sigma in
+data values at a pixel with true value <I>, as approximated by the median
+or average with the lowest and highest value excluded, is given by:
+
+.nf
+ sigma = ((rn / g) ** 2 + <I> / g + (s * <I>) ** 2) ** 1/2
+.fi
+
+where rn is the read out noise in electrons, g is the gain in
+electrons per data value, s is a sensitivity noise given as a fraction,
+and ** is the exponentiation operator. Often the sensitivity noise,
+due to uncertainties in the pixel sensitivities (for example from the
+flat field), is not known in which case a value of zero can be used.
+See the task \fBstsdas.wfpc.noisemodel\fR for a way to determine
+these values (though that task expresses the read out noise in data
+numbers and the sensitivity noise parameter as a percentage).
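+
+As a numerical illustration with assumed values rn = 5 e-, g = 4 e-/DN,
+s = 0.01, and <I> = 1000 DN:
+
+.nf
+    sigma = ((5/4)**2 + 1000/4 + (0.01*1000)**2) ** 1/2
+          = (1.5625 + 250. + 100.) ** 1/2 = 18.75 DN
+.fi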
+
+The read out noise is specified by the \fIrdnoise\fR parameter. The value
+may be a numeric value to be applied to all the input images or an image
+header keyword containing the value for each image. Similarly, the
+parameter \fIgain\fR specifies the gain as either a value or image header
+keyword and the parameter \fIsnoise\fR specifies the sensitivity
+noise parameter as either a value or image header keyword.
+
+The algorithm operates on each output pixel independently. It starts by
+taking the median or unweighted average (excluding the minimum and maximum)
+of the unrejected pixels provided there are at least two input pixels. The
+expected sigma is computed from the CCD noise parameters and pixels more
+than \fIlsigma\fR times this sigma below or \fIhsigma\fR times this sigma
+above the median or average are rejected. The process is then iterated
+until no further pixels are rejected. If the average is used as the
+estimator of the true value then after the first round of rejections the
+highest and lowest values are no longer excluded. Note that it is possible
+to reject all pixels if the average is used and is sufficiently skewed by
+bad pixels such as cosmic rays.
+
+If there are different CCD noise parameters for the input images
+(as might occur using the image header keyword specification) then
+the sigmas are computed for each pixel from each image using the
+same estimated true value.
+
+If the images are scaled and shifted and the \fIsigscale\fR threshold
+is exceeded then a sigma is computed for each pixel based on the
+image scale parameters; i.e. the median or average is scaled to that of the
+original image before computing the sigma and residuals.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+This is the best clipping algorithm to use if the CCD noise parameters are
+adequately known. The parameters affecting this algorithm are \fIreject\fR
+to select this algorithm, \fImclip\fR to select the median or average for
+the center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, the CCD noise parameters \fIrdnoise, gain\fR and \fIsnoise\fR,
+\fIlsigma\fR and \fIhsigma\fR to select the clipping thresholds,
+and \fIsigscale\fR to set the threshold for making corrections to the sigma
+calculation for different image scale factors.
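+
+A minimal sketch of selecting this algorithm with the noise parameters taken
+from (hypothetical) image header keywords is:
+
+.nf
+    cl> combine obj* out reject=ccdclip rdnoise="rdnoise" gain="gain"
+.fi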
+
+.in -4
+CRREJECT
+.in 4
+This algorithm is identical to "ccdclip" except that only pixels above
+the average are rejected based on the \fIhsigma\fR parameter. This
+is appropriate for rejecting cosmic ray events and works even with
+two images.
+
+.in -4
+SIGCLIP
+.in 4
+The sigma clipping algorithm computes at each output pixel the median or
+average excluding the high and low values and the sigma about this
+estimate. There must be at least three input pixels, though for this method
+to work well there should be at least 10 pixels. Values deviating by more
+than the specified sigma threshold factors are rejected. These steps are
+repeated, except that after the first time the average includes all values,
+until no further pixels are rejected or there are fewer than three pixels.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+The parameters affecting this algorithm are \fIreject\fR to select
+this algorithm, \fImclip\fR to select the median or average for the
+center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, \fIlsigma\fR and \fIhsigma\fR to select the
+clipping thresholds, and \fIsigscale\fR to set the threshold for
+making corrections to the sigma calculation for different image scale
+factors.
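+
+For instance (hypothetical image names), median-centered sigma clipping that
+retains at least three pixels per output point might be requested with:
+
+.nf
+    cl> combine obj* out reject=sigclip mclip+ nkeep=3
+.fi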
+
+.in -4
+AVSIGCLIP
+.in 4
+The averaged sigma clipping algorithm assumes that the sigma about the
+median or mean (average excluding the low and high values) is proportional
+to the square root of the median or mean at each point. This is
+described by the equation:
+
+.nf
+ sigma(column,line) = sqrt (gain(line) * signal(column,line))
+.fi
+
+where the \fIestimated\fR signal is the mean or median (hopefully excluding
+any bad pixels) and the gain is the \fIestimated\fR proportionality
+constant having units of photons/data number.
+
+This noise model is valid for images whose values are proportional to the
+number of photons recorded. In effect this algorithm estimates a
+detector gain for each line with no read out noise component when
+information about the detector noise parameters is not known or
+available. The gain proportionality factor is computed
+independently for each output line by averaging the square of the residuals
+(at points having three or more input values) scaled by the median or
+mean. In theory the proportionality should be the same for all rows but
+because of the estimating process it will vary somewhat.
+
+Once the proportionality factor is determined, deviant pixels exceeding the
+specified thresholds are rejected at each point by estimating the sigma
+from the median or mean. If any values are rejected the median or mean
+(this time not excluding the extreme values) is recomputed and further
+values rejected. This is repeated until there are no further pixels
+rejected or the number of remaining input values falls below three. Note
+that the proportionality factor is not recomputed after rejections.
+
+If the images are scaled differently and the sigma scaling correction
+threshold is exceeded then a correction is made in the sigma
+calculations for these differences, again under the assumption that
+the noise in an image scales as the square root of the mean intensity.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+This algorithm works well for even a few input images. It works better if
+the median is used though this is slower than using the average. Note that
+if the images have a known read out noise and gain (the proportionality
+factor above) then the "ccdclip" algorithm is superior. The two algorithms
+are related in that the average sigma proportionality factor is an estimate
+of the gain.
+
+The parameters affecting this algorithm are \fIreject\fR to select
+this algorithm, \fImclip\fR to select the median or average for the
+center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, \fIlsigma\fR and \fIhsigma\fR to select the
+clipping thresholds, and \fIsigscale\fR to set the threshold for
+making corrections to the sigma calculation for different image scale
+factors.
+
+.in -4
+PCLIP
+.in 4
+The percentile clipping algorithm is similar to sigma clipping using the
+median as the center of the distribution except that, instead of computing
+the sigma of the pixels from the CCD noise parameters or from the data
+values, the width of the distribution is characterized by the difference
+between the median value and a specified "percentile" pixel value. This
+width is then multiplied by the scale factors \fIlsigma\fR and \fIhsigma\fR
+to define the clipping thresholds above and below the median. The clipping
+is not iterated.
+
+The pixel values at each output point are ordered in magnitude and the
+median is determined. In the case of an even number of pixels the average
+of the two middle values is used as the median value, and the lower or upper
+of the two central pixels is taken as the starting point when counting
+outward to the percentile pixel. The parameter \fIpclip\fR selects the
+percentile pixel as the number (if the absolute value is greater
+than unity) or fraction of the pixels from the median in the ordered set.
+The direction of the percentile pixel from the median is set by the sign of
+the \fIpclip\fR parameter with a negative value signifying pixels with
+values less than the median. Fractional values are internally converted to
+the appropriate number of pixels for the number of input images. A minimum
+of one pixel and a maximum corresponding to the extreme pixels from the
+median are enforced. The value used is reported in the log output. Note
+that the same percentile pixel is used even if pixels have been rejected by
+offsetting, masking, or thresholding; for example, if the 3rd pixel below
+the median is specified then the 3rd pixel will be used whether there are
+10 pixels or 5 pixels remaining after the preliminary steps.
+
+Some examples help clarify the definition of the percentile pixel. In the
+examples assume 10 pixels. The median is then the average of the
+5th and 6th pixels. A \fIpclip\fR value of 2 selects the 2nd pixel
+above the median (6th) pixel which is the 8th pixel. A \fIpclip\fR
+value of -0.5 selects the point halfway between the median and the
+lowest pixel. In this case there are 4 pixels below the median,
+half of that is 2 pixels, which makes the percentile pixel the 3rd pixel.
+
+The percentile clipping algorithm is most useful for clipping small
+excursions, such as the wings of bright objects when combining
+disregistered observations for a sky flat field, that are missed when using
+the pixel values to compute a sigma. It is not as powerful, however, as
+using the CCD noise parameters (provided they are accurately known) to clip
+about the median.
+
+The parameters affecting this algorithm are \fIreject\fR to select this
+algorithm, \fIpclip\fR to select the percentile pixel, \fInkeep\fR to limit
+the number of pixels rejected, and \fIlsigma\fR and \fIhsigma\fR to select
+the clipping thresholds.
+
+.in -4
+GROW REJECTION
+
+Neighbors of pixels rejected by the rejection algorithms along image lines
+may also be rejected. The number of neighbors to be rejected on either
+side is specified by the \fIgrow\fR parameter. The rejection only
+applies to neighbors along each image line. This is because the
+task operates independently on each image line and does not have the
+ability to go back to previous lines or maintain a list of rejected
+pixels to later lines.
+
+This rejection step is also checked against the \fInkeep\fR parameter
+and only as many pixels as would not violate this parameter are
+rejected. Unlike its application in the rejection algorithms, at
+this stage there is no checking of the magnitude of the residuals,
+and the pixels retained which would otherwise be rejected are randomly
+selected.
+
+
+COMBINING
+
+After all the steps of offsetting the input images, masking pixels,
+threshold rejection, scaling, and applying a rejection algorithm, the
+remaining pixels are combined and output. The pixels may be combined
+by computing the median or by computing a weighted average.
+
+
+SIGMA OUTPUT
+
+In addition to the combined image an optional sigma image may be
+produced. The sigma computed is the standard deviation, corrected for a
+finite population by a factor of n/(n-1), of the unrejected input pixel
+values about the output combined pixel values.
+.ih
+EXAMPLES
+1. To average and median images without any other features:
+
+.nf
+ cl> combine obj* avg combine=average reject=none
+ cl> combine obj* med combine=median reject=none
+.fi
+
+2. To reject cosmic rays:
+
+.nf
+    cl> combine obs1,obs2 Obs reject=crreject rdnoise=5.1 gain=4.3
+.fi
+
+3. To make a grid for display purposes with 21 64x64 images:
+
+.nf
+ cl> combine @list grid offset="grid 5 65 5 65"
+.fi
+
+4. To apply a mask image with good pixels marked with a zero value and
+ bad pixels marked with a value of one:
+
+.nf
+ cl> hedit ims* bpm badpix.pl add+ ver-
+ cl> combine ims* final combine=median masktype=goodval
+.fi
+
+5. To scale image by the exposure time and then adjust for varying
+ sky brightness and make a weighted average:
+
+.nf
+ cl> combine obj* avsig combine=average reject=avsig \
+ >>> scale=exp zero=mode weight=exp expname=exptime
+.fi
+.ih
+TIME REQUIREMENTS
+The following times were obtained with a Sun 4/470. The tests combine
+1000x200 images consisting of Poisson noise and cosmic rays generated
+with the \fBartdata\fR package. The times, especially the total time,
+are approximate and depend on user loads.
+
+.nf
+IMAGES: Number of images (1000x200) and datatype (R=real, S=short)
+COMBINE: Combine option
+REJECT: Rejection option with grow = 0
+ minmax: nlow = 1, nhigh = 1
+ ccdclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ sigclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ avsigclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ pclip: lsigma = 3., hsigma = 3, pclip = -0.5
+ /a: mclip = no (clip about the average)
+ /m: mclip = yes (clip about the median)
+O M T S: Features used (Y=yes, N=no)
+O: offset = "grid 5 10 2 10"
+M: masktype = goodval, maskval = 0
+ Pixel mask has 2 bad lines and 20 bad columns
+T: lthreshold = INDEF, hthreshold = 1100.
+S: scale = mode, zero = none, weight = mode
+TIME: cpu time in seconds, total time in minutes and seconds
+
+
+IMAGES COMBINE REJECT O M T S TIME
+
+ 10R average none N N N N 1.3 0:08
+ 10R average minmax N N N N 4.3 0:10
+ 10R average pclip N N N N 17.9 0:32
+ 10R average ccdclip/a N N N N 11.6 0:21
+ 10R average crreject/a N N N N 11.4 0:21
+ 10R average sigclip/a N N N N 13.6 0:29
+ 10R average avsigclip/a N N N N 15.9 0:35
+ 10R average ccdclip/m N N N N 16.9 0:32
+ 10R average crreject/m N N N N 17.0 0:28
+ 10R average sigclip/m N N N N 19.6 0:42
+ 10R average avsigclip/m N N N N 20.6 0:43
+
+ 10R median none N N N N 6.8 0:17
+ 10R median minmax N N N N 7.8 0:15
+ 10R median pclip N N N N 16.9 1:00
+ 10R median ccdclip/a N N N N 18.0 0:34
+ 10R median crreject/a N N N N 17.7 0:30
+ 10R median sigclip/a N N N N 21.1 1:13
+ 10R median avsigclip/a N N N N 23.1 0:41
+ 10R median ccdclip/m N N N N 16.1 0:27
+ 10R median crreject/m N N N N 16.0 0:27
+ 10R median sigclip/m N N N N 18.1 0:29
+ 10R median avsigclip/m N N N N 19.6 0:32
+
+ 10R average none N N N Y 6.1 0:36
+ 10R median none N N N Y 10.4 0:49
+ 10R median pclip N N N Y 20.4 1:10
+ 10R median ccdclip/m N N N Y 19.5 0:36
+ 10R median avsigclip/m N N N Y 23.0 1:06
+
+ 10R average none N Y N N 3.5 0:12
+ 10R median none N Y N N 8.9 0:21
+ 10R median pclip N Y N N 19.9 0:45
+ 10R median ccdclip/m N Y N N 18.0 0:44
+ 10R median avsigclip/m N Y N N 20.9 0:28
+
+ 10R average none Y N N N 4.3 0:13
+ 10R median none Y N N N 9.6 0:21
+ 10R median pclip Y N N N 21.8 0:54
+ 10R median ccdclip/m Y N N N 19.3 0:44
+ 10R median avsigclip/m Y N N N 22.8 0:51
+
+ 10R average none Y Y Y Y 10.8 0:22
+ 10R median none Y Y Y Y 16.1 0:28
+ 10R median pclip Y Y Y Y 27.4 0:42
+ 10R median ccdclip/m Y Y Y Y 25.5 0:39
+ 10R median avsigclip/m Y Y Y Y 28.9 0:44
+
+ 10S average none N N N N 2.2 0:06
+ 10S average minmax N N N N 4.6 0:12
+ 10S average pclip N N N N 18.1 0:33
+.fi
+.ih
+REVISIONS
+.ls COMBINE V2.11
+The limit of the number of images that may be combined has been removed.
+If the number of images exceeds the maximum number of open images permitted
+then the images are stacked in a single temporary image and then combined
+with the project option. Note that this will double the amount of
+diskspace temporarily. There is also a limitation in this case that the
+bad pixel mask from the first image in the list will be applied to all the
+images.
+
+Integer offsets may be determined from the image world coordinate system.
+.le
+.ls COMBINE V2.10.3
+The output pixel datatype parameter, \fIouttype\fR was previously ignored
+and the package \fIpixeltype\fR was used. The task output pixel type
+parameter is now used.
+
+The factors specified by an @file or keyword are not normalized.
+.le
+.ls COMBINE V2.10.2
+The weighting was changed from using the square root of the exposure time
+or image statistics to using the values directly. This corresponds
+to variance weighting. Other options for specifying the scaling and
+weighting factors were added; namely from a file or from a different
+image header keyword. The \fInkeep\fR parameter was added to allow
+controlling the maximum number of pixels to be rejected by the clipping
+algorithms. The \fIsnoise\fR parameter was added to include a sensitivity
+or scale noise component to the noise model. Errors will now delete
+the output images.
+.le
+.ls COMBINE V2.10
+This task was greatly revised to provide many new features. These features
+are:
+
+.nf
+ o Bad pixel masks
+ o Combining offset and different size images
+ o Blank value for missing data
+ o Combining across the highest dimension (the project option)
+ o Separating threshold rejection, the rejection algorithms,
+ and the final combining statistic
+ o New CCDCLIP, CRREJECT, and PCLIP algorithms
+ o Rejection now may reject more than one pixel per output pixel
+ o Choice of a central median or average for clipping
+ o Choice of final combining operation
+ o Simultaneous multiplicative and zero point scaling
+.fi
+.le
+.ih
+LIMITATIONS
+Though the previous limit on the number of images that can be combined
+was removed in V2.11 the method has the limitation that only a single
+bad pixel mask will be used for all images.
+.ih
+SEE ALSO
+images.imcombine, instruments, ccdtypes, icfit, ccdred, guide, darkcombine,
+flatcombine, zerocombine, onedspec.scombine, wfpc.noisemodel
+.endhelp
diff --git a/noao/imred/ccdred/doc/contents.ms b/noao/imred/ccdred/doc/contents.ms
new file mode 100644
index 00000000..8ba2624a
--- /dev/null
+++ b/noao/imred/ccdred/doc/contents.ms
@@ -0,0 +1,34 @@
+.sp 1i
+.ps +2
+.ft B
+.ce
+Contents
+.sp 3
+.ps -2
+.ft R
+.sp
+1.\h'|0.4i'\fBIntroduction\fP\l'|5.6i.'\0\01
+.sp
+2.\h'|0.4i'\fBGetting Started\fP\l'|5.6i.'\0\02
+.sp
+3.\h'|0.4i'\fBProcessing Your Data\fP\l'|5.6i.'\0\05
+.br
+\h'|0.4i'3.1.\h'|0.9i'Combining Calibration Images\l'|5.6i.'\0\06
+.br
+\h'|0.4i'3.2.\h'|0.9i'Calibrations and Corrections\l'|5.6i.'\0\07
+.sp
+4.\h'|0.4i'\fBSpecial Processing Operations\fP\l'|5.6i.'\0\08
+.br
+\h'|0.4i'4.1.\h'|0.9i'Spectroscopic Flat Fields\l'|5.6i.'\0\08
+.br
+\h'|0.4i'4.2.\h'|0.9i'Illumination Corrections\l'|5.6i.'\0\09
+.br
+\h'|0.4i'4.3.\h'|0.9i'Sky Flat Fields\l'|5.6i.'\010
+.br
+\h'|0.4i'4.4.\h'|0.9i'Illumination Corrected Flat Fields\l'|5.6i.'\010
+.br
+\h'|0.4i'4.5.\h'|0.9i'Fringe Corrections\l'|5.6i.'\010
+.sp
+5.\h'|0.4i'\fBSummary\fP\l'|5.6i.'\011
+.sp
+\h'|0.4i'\fBReferences\fP\l'|5.6i.'\011
diff --git a/noao/imred/ccdred/doc/darkcombine.hlp b/noao/imred/ccdred/doc/darkcombine.hlp
new file mode 100644
index 00000000..c545a13e
--- /dev/null
+++ b/noao/imred/ccdred/doc/darkcombine.hlp
@@ -0,0 +1,120 @@
+.help darkcombine Aug91 noao.imred.ccdred
+.ih
+NAME
+darkcombine -- Combine and process dark count images
+.ih
+USAGE
+darkcombine input
+.ih
+PARAMETERS
+.ls input
+List of dark count images to combine. The \fIccdtype\fR parameter
+may be used to select the dark count images from a list containing all
+types of data.
+.le
+.ls output = "Dark"
+Output dark count root image name.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "minmax" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "dark"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = yes
+Process the input images before combining?
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "exposure" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire region of the image is
+sampled (for efficiency the images are sampled if they are big enough).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 0, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The dark count images in the input image list are combined.
+The input images may be processed first if desired.
+The original images may be deleted automatically if desired.
+The output pixel datatype will be real.
+
+This task is a script which applies \fBccdproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+dark count images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+.ih
+EXAMPLES
+1. The image data contains four dark count images. To automatically select
+them and combine them as a background job using the default combining algorithm:
+
+.nf
+    cl> darkcombine ccd*.imh&
+.fi
+.ih
+SEE ALSO
+ccdproc, combine
+.endhelp
diff --git a/noao/imred/ccdred/doc/flatcombine.hlp b/noao/imred/ccdred/doc/flatcombine.hlp
new file mode 100644
index 00000000..549c912c
--- /dev/null
+++ b/noao/imred/ccdred/doc/flatcombine.hlp
@@ -0,0 +1,133 @@
+.help flatcombine Aug91 noao.imred.ccdred
+.ih
+NAME
+flatcombine -- Combine and process flat field images
+.ih
+USAGE
+flatcombine input
+.ih
+PARAMETERS
+.ls input
+List of flat field images to combine. The \fIccdtype\fR parameter
+may be used to select the flat field images from a list containing all
+types of data.
+.le
+.ls output = "Flat"
+Output flat field root image name. The subset ID is appended.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "avsigclip" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "flat"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = yes
+Process the input images before combining?
+.le
+.ls subsets = yes
+Combine images by subset parameter? If yes then the input images are
+grouped by subset parameter and each group combined into a separate output
+image. The subset identifier is appended to the output and sigma image
+names. See \fBsubsets\fR for more on the subset parameter. This is generally
+used with flat field images.
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "mode" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire image is used (for efficiency
+the statistics are computed from a sample of pixels when the images are
+large).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 1, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 1.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The flat field images in the input image list are combined. If there
+is more than one subset (such as a filter or grating) then the input
+flat field images are grouped by subset and combined separately.
+The input images may be processed first if desired.  However, if all
+zero level bias effects are linear then this is not necessary and some
+processing time may be saved. The original images may be deleted
+automatically if desired. The output pixel datatype will be real.
+
+This task is a script which applies \fBccdproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+flat field images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+.ih
+EXAMPLES
+1. The image data contains four flat field images for three filters.
+To automatically select them and combine them as a background job
+using the default combining algorithm:
+
+ cl> flatcombine ccd*.imh&
+
+The final images are "FlatV", "FlatB", and "FlatR".
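+
+2. A sketch of overriding the defaults to use median stacking with simple
+minmax rejection (the parameter values are only illustrative):
+
+	cl> flatcombine ccd*.imh combine=median reject=minmax nlow=1 nhigh=1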
+.ih
+SEE ALSO
+ccdproc, combine, subsets
+.endhelp
diff --git a/noao/imred/ccdred/doc/flatfields.hlp b/noao/imred/ccdred/doc/flatfields.hlp
new file mode 100644
index 00000000..94766960
--- /dev/null
+++ b/noao/imred/ccdred/doc/flatfields.hlp
@@ -0,0 +1,177 @@
+.help flatfields Jun87 noao.imred.ccdred
+
+.ih
+NAME
+flatfields -- Discussion of CCD flat field calibrations
+.ih
+DESCRIPTION
+This topic describes the different types of CCD flat fields and
+the tasks available in the \fBccdred\fR and spectroscopy packages for
+creating them. Flat field calibration is the most important operation
+performed on CCD data. This operation calibrates the relative response
+of the detector at each pixel. In some cases this is as simple as
+taking a special type of observation called a flat field. However, in
+many cases this calibration observation must be corrected for
+illumination, scanning, wavelength, and aperture effects.
+
+The discussion is in three sections; direct imaging, scan mode,
+and spectroscopy. Though there are many similarities between these
+modes of operation there are important differences in how corrections
+are applied to the basic flat field observations. The application of
+the flat field calibrations to the observations using \fBccdproc\fR is
+the same in all cases, however.
+.sh
+1. Direct Imaging
+The starting point for determining the flat field calibration is an
+observation of something which should have uniform response at all
+points on the detector. In addition the color of the light falling at
+each pixel should be the same as that in an observation so the same
+filter must be used when determining the flat field (the issue of
+matching the color of the objects observed at the appropriate pixels is
+ignored here). The best calibration observation is of a blank sky. If
+an accurate blank sky observation can be obtained then this is all that
+is needed for a flat field calibration. This type of flat field might
+be called a \fIsky flat\fR, though this term is more often used for a
+type of flat field described below. There are two difficulties with
+this type of calibration; finding a really blank sky and getting a
+sufficiently accurate measurement without using all the observing
+time.
+
+It is usually not possible to get a blank sky observation accurate
+enough to calibrate the individual pixels without introducing
+undesirable noise. What is generally done is to use a lamp to either
+uniformly illuminate a part of the dome or directly illuminate the
+field of view. The first type of observation is called a \fIdome
+flat\fR and the second is called a \fIprojection flat\fR. We shall call
+both of these types of observations \fBlamp flat fields\fR. If the
+illumination is truly uniform then these types of observations are
+sufficient for flat field calibration. To get a very accurate flat
+field many observations are made and then combined (see
+\fBflatcombine\fR).
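+
+For example (the image names and output root name are hypothetical), a set
+of dome flats might be combined with
+
+.nf
+	cl> flatcombine dflat*.imh output=Flat
+.fi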
+
+Unfortunately, it is sometimes the case that the lamp flat fields
+do not illuminate the telescope/detector in the same way as the actual
+observations. Calibrating with these flat fields will introduce a
+residual large scale illumination pattern, though it will correctly
+calibrate the relative pixel responses locally. There are two ways to
+correct for this effect. The first is to correct the flat field
+observation. The second is to apply the uncorrected flat field to the
+observations and then apply an \fIillumination\fR correction as a
+separate operation.  The first is more efficient since it consists of a
+single correction applied to each observation, but the second is useful
+when an approximate correction is desired immediately, when the observation
+needed to make the correction has not yet been taken, or when the residual
+illumination error is not discovered until later.
+
+For the two methods there are two types of correction. One is to
+use a blank sky observation to correct for the residual illumination
+pattern.  This is different from using the sky observation directly as
+a flat field calibration in that only the large scale pattern is
+needed.  Determining the large scale illumination does not require high
+signal-to-noise at each pixel and faint objects in the image can be
+either eliminated or ignored.  The second method is to remove the large
+scale shape from the lamp flat field.  This is not as good as using a
+blank sky observation but, if there is no such observation and the
+illumination pattern is essentially only in the lamp flat field, this
+may be sufficient.
+
+From the above two paragraphs one sees there are four options.
+There is a task in the \fBccdred\fR package for each of these options.
+To correct a lamp flat field observation by a blank sky observation,
+called a \fIsky flat\fR, the task is \fBmkskyflat\fR. To correct the
+flat field for its own large scale gradients, called an \fIillumination
+flat\fR, the task is \fBmkillumflat\fR. To create a secondary
+correction to be applied to data processed with the lamp flat field
+image the tasks are \fBmkskycor\fR and \fBmkillumcor\fR which are,
+respectively, based on a blank sky observation and the lamp flat field
+illumination pattern.
+
+With this introduction turn to the individual documentation for these
+four tasks for further details.
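+
+As a schematic summary (the image names "flat012" and "sky004" are purely
+hypothetical; see the help for each task for the exact calling sequences):
+
+.nf
+	cl> mkskyflat sky004 SkyflatV		# sky flat from a blank sky
+	cl> mkillumflat flat012 Flat012c	# illumination corrected flat
+	cl> mkskycor sky004 IllumV		# illumination image from a sky
+	cl> mkillumcor flat012 Illum012		# illumination image from the flat
+.fi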
+.sh
+2. Scan Mode
+There are two types of scan modes supported by the \fBccdred\fR
+package; \fIshortscan\fR and \fIlongscan\fR (see \fBccdproc\fR for
+further details). They both affect the manner in which flat field
+calibrations are handled. The shortscan mode produces images which are
+the same as direct images except that the light recorded at each pixel
+was collected by a number of different pixels. This improves the flat
+field calibration. If the flat field images, of the same types
+described in the direct imaging section, are observed in the same way
+as all other observations, i.e. in scan mode, then there is no
+difference from direct imaging (except in the quality of the flat
+fields). There is a statistical advantage to observing the lamp or sky
+flat field without scanning and then numerically averaging to simulate
+the result of the scanning. This improves the accuracy of
+the flat fields and might possibly allow direct blank sky observations
+to be used for flat fields. The numerical scanning is done in
+\fBccdproc\fR by setting the appropriate scanning parameters.
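+
+As a sketch only (the scan related parameter names follow the \fBccdproc\fR
+description of scan modes and should be checked against the actual parameter
+file; the number of scan lines is illustrative):
+
+.nf
+	cl> ccdproc flat*.imh ccdtype=flat scancor+ scantype=shortscan nscan=16
+.fi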
+
+In longscan mode the CCD detector is read out in such a way that
+each output image pixel is the sum of the light falling on all pixels
+along the direction of the scan. This reduces the flat field calibration
+to one dimension, one response value for each point across the scan.
+The one dimensional calibration is obtained from a longscan observation
+by averaging all the readout lines.
+This is done automatically in \fBccdproc\fR by setting the appropriate
+parameters. In this case very good flat fields can be obtained from
+one or more blank sky observations or an unscanned lamp observation. Other
+corrections are not generally used.
+.sh
+3. Spectroscopy
+Spectroscopic flat fields differ from direct imaging in that the
+spectrum of the sky or lamp and transmission variations with wavelength
+are part of the observation. Application of such images will introduce
+the inverse of the spectrum and transmission into the observation. It
+also distorts the observed counts making signal-to-noise estimates
+invalid. This, and the low signal in the dispersed light, makes it
+difficult to use blank sky observations directly as flat fields. As
+with direct imaging, sky observations may be used to correct for
+illumination errors if necessary.  At sufficiently high dispersion the
+continuous lamp spectrum may be flat enough that the spectral signature
+of the lamp is not a problem. Alternatively, flux calibrating the
+spectra will also remove the flat field spectral signature. The
+spectroscopic flat fields also have to be corrected for regions outside
+of the slit or apertures to avoid bad response effects when applying
+the flat field calibration to the observations.
+
+The basic scheme for removing the spectral signature is to average
+all the lines or columns across the dispersion and within the aperture
+to form an estimate of the spectrum. In addition to the averaging, a
+smooth curve is fit to the lamp spectrum to remove noise. This smooth
+shape is then divided back into each line or column to eliminate the
+shape of the lamp spectrum without changing the shape in the spatial
+direction or the small scale response variations.
+Regions outside of the apertures are replaced by unity.
+This method requires that the dispersion be aligned fairly close to
+either the CCD lines or columns.
+
+This scheme is used in both longslit and multiaperture spectra.
+The latter includes echelle, slitlets, aperture masks, and fiber feeds.
+For narrow apertures which do not have wider slits for the lamp
+exposures there may be problems with flexure and defining a good
+composite spectrum. The algorithm for longslit spectra is simpler and
+is available in the task \fBresponse\fR in the \fBlongslit\fR package.
+For multiaperture data there are problems of defining where the spectra
+lie and avoiding regions off of the aperture where there is no signal.
+The task which does this is \fBapnormalize\fR in the \fBapextract\fR
+package. Note that the lamp observations must first be processed
+explicitly for bias and dark count corrections.
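+
+A sketch of the order of operations (the image names are hypothetical and the
+calling sequences are abbreviated; see the help for each task):
+
+.nf
+	cl> ccdproc lamp*.imh ccdtype=flat	# bias and dark processing
+	cl> response lamp010 lamp010 nlamp010	# longslit lamp spectra
+	cl> apnormalize lamp011 nlamp011	# multiaperture lamp spectra
+.fi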
+
+Longslit spectra may also suffer the same types of illumination
+problems found in direct imaging.  However, in this case the illumination
+pattern is determined from sky observations (or the flat field itself)
+by finding the large scale pattern across the dispersion and at a number
+of wavelengths while avoiding the effects of the night sky spectrum.  The
+task which makes this type of correction in the \fBlongslit\fR package
+is \fBillumination\fR.  This produces an illumination correction.
+To make sky flats or the other types of corrections image arithmetic
+is used.  Note also that the sky observations must be explicitly
+processed through the flat field stage before computing the illumination.
+.ih
+SEE ALSO
+.nf
+ccdproc, guide, mkillumcor, mkillumflat, mkskycor, mkskyflat
+apextract.apnormalize, longslit.response, longslit.illumination
+.fi
+.endhelp
diff --git a/noao/imred/ccdred/doc/guide.hlp b/noao/imred/ccdred/doc/guide.hlp
new file mode 100644
index 00000000..5006a6ec
--- /dev/null
+++ b/noao/imred/ccdred/doc/guide.hlp
@@ -0,0 +1,717 @@
+.help guide Feb88 noao.imred.ccdred
+.ce
+User's Guide to the CCDRED Package
+.sh
+1. Introduction
+
+ This guide provides a brief description of the IRAF CCD reduction
+package \fBccdred\fR and examples of reducing simple CCD data. It is a
+generic guide in that it is not tied to any particular type of data.
+There may be more specific guides (or "cookbooks") for your data.
+Detailed descriptions of the tasks and features of the package are
+provided in the help documentation for the package.
+
+ The purpose of the CCDRED package is to provide tools for the easy
+and efficient reduction of CCD images. The standard reduction
+operations are replacement of bad columns and lines by interpolation
+from neighboring columns and lines, subtraction of a bias level
+determined from overscan or prescan columns or lines, subtraction of a
+zero level using a zero length exposure calibration image, subtraction
+of a dark count calibration image appropriately scaled to the dark time
+exposure, division by a scaled flat field calibration image, division
+by an illumination image (derived from a blank sky image), subtraction
+of a scaled fringe image (also derived from a blank sky image), and
+trimming the image of unwanted lines or columns such as the overscan
+strip. Any set of operations may be done simultaneously over a list of
+images in a highly efficient manner. The reduction operations are
+recorded in the image header and may also be logged on the terminal and
+in a log file.
+
+ The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+
+ Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+
+ There are several important features provided by the package to
+make the reduction of CCD images convenient; particularly to minimize
+record keeping. One of these is the ability to recognize the different
+types of CCD images. This ability allows the user to select a certain
+class of images to be processed or listed and allows the processing
+tasks to identify calibration images and process them differently from
+object images. The standard CCD image types are \fIobject\fR,
+\fIzero\fR level, \fIdark\fR count, and \fIflat\fR field. For more on
+the image types see \fBccdtypes\fR.
+
+ The tasks can also identify the different filters (or other subset
+parameter) which require different flat field images. This means you don't
+have to separate the images by filter and process each set separately.
+This feature is discussed further in \fBsubsets\fR.
+
+ The tasks keep track of the reduction steps completed on each
+image and ignore images which have been processed. This feature,
+along with recognizing the image types and subsets, makes it possible to
+specify all the images to a task with a wildcard template, such as
+"*.imh", rather than indicating each image by name. You will find this
+extremely important with large sets of observations.
+
+ A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows you to abort the task without messing up the image data and
+protects data if the computer crashes. The second feature is that
+there is a package parameter which may be set to make a backup of the
+input data with a particular prefix such as "orig" or "imdir$". This
+backup feature may be used when there is sufficient disk space, when learning
+to use the package, or just to be cautious.
+
+ In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
+
+ The following sections guide you through the basic use of the
+\fBccdred\fR package. Only the important parameters which you might
+want to change are described. It is assumed that the support personnel
+have created the necessary instrument files (see \fBinstruments\fR)
+which will set the default parameters for the data you will be
+reducing. If this is not the case you may need to delve more deeply
+into the details of the tasks. Information about all the parameters
+and how the various tasks operate are given in the help documentation
+for the tasks and in additional special help topics. Some useful help
+documentation is indicated in the discussion and also in the
+\fBReferences\fR section.
+.sh
+2. Getting Started
+
+ The first step is to load \fBccdred\fR. This is done by loading
+the \fBnoao\fR package, followed by the image reduction package
+\fBimred\fR, and finally the \fBccdred\fR package. Loading a
+package consists of typing its name. Note that some of these packages may be
+loaded automatically when you logon to IRAF.
+
+ When you load the \fBccdred\fR package the menu of tasks or commands
+is listed. This appears as follows:
+
+.nf
+ cl> ccdred
+ badpiximage ccdtest mkfringecor setinstrument
+ ccdgroups combine mkillumcor zerocombine
+ ccdhedit cosmicrays mkillumflat
+ ccdlist darkcombine mkskycor
+ ccdproc flatcombine mkskyflat
+.fi
+
+A summary of the tasks and additional help topics is obtained by typing:
+
+ cl> help
+
+This list and how to get additional help on specific topics is described
+in the \fBReferences\fR section at the end of this guide.
+
+ The first command to use is \fBsetinstrument\fR, which sets the package
+appropriately for the CCD images to be reduced. The support personnel
+should tell you the instrument identification, but if not a list
+of known instruments may be listed by using '?' for the instrument name.
+
+.nf
+ cl> setinstrument
+ Instrument ID (type ? for a list) \fI<enter instrument id or ?>\fR
+ <Set ccdred package parameters using eparam>
+ <Set ccdproc task parameters using eparam>
+.fi
+
+This task sets the default parameters and then allows you to modify the
+package parameters and the processing parameters using the parameter
+editor \fBeparam\fR. If you are not familiar with \fBeparam\fR see the
+help or CL introduction documentation. For most terminals you move up
+and down through the parameters with the terminal arrow keys, you
+change the parameters by simply typing the desired value, and you exit
+with control Z or control D. Note that you can change parameters for
+any task at any time with \fBeparam\fR and you do not have to run
+\fBsetinstrument\fR again, even if you logout, until you need to reduce
+data from a different instrument.
+
+ The \fBccdred\fR package parameters control general I/O functions of
+the tasks in the package. The parameters you might wish to change are
+the output pixel type and the verbose option. Except when the input
+images are short integers, the noise is significantly greater than one
+digital unit, and disk space is critical, it is probably better to
+allow the processing to convert the images to real pixel datatype. The
+verbose parameter simply prints the information written to the log file
+on the terminal. This can be useful when little else is being done and
+you are just beginning. However, when doing background processing and
+other IRAF reduction tasks it is enough to simply look at the end of
+the logfile with the task \fBtail\fR to see the current state of the
+processing.
+
+ The \fBccdproc\fR parameters control the CCD processing. There are
+many parameters but they all may be conveniently set at this point.
+Many of the parameters have default values set appropriately for the
+instrument you specified. The images to be processed can be specified
+later. What needs to be set are the processing operations that you
+want done and the parameters required for each operation. The
+processing operations are selected by entering yes or no for each one.
+The following items briefly describe each of the possible processing
+operations and the additional parameters required.
+
+.ls \fIfixpix\fR - Fix bad CCD lines and columns?
+The bad pixels (cosmetic defects) in the detector are given in a file
+specified by the parameter \fIfixfile\fR. This information is used
+to replace the pixels by interpolating from the neighboring pixels.
+A standard file for your instrument may be set by \fBsetinstrument\fR
+or if the word "image" is given then the file is defined in the instrument
+data file. For more on the bad pixel file see \fBinstruments\fR.
+.le
+.ls \fIoverscan\fR - Apply overscan strip correction?
+The overscan or prescan region is specified by the parameter
+\fIbiassec\fR. This is given as an IRAF image section. Only the
+part of the section corresponding to the readout axis is used and
+the other part is ignored. The length of the overscan region is
+set by the \fItrimsec\fR parameter. The overscan
+region is averaged along the readout axis, specified by the parameter
+\fIreadaxis\fR, to create a one dimensional bias vector. This bias is
+fit by a function to remove cosmic rays and noise. There are a number
+of parameters at the end of the parameter list which control the
+fitting. The default overscan bias section and fitting parameters for
+your instrument should be set by \fBsetinstrument\fR. If the word
+"image" is given the overscan bias section is defined in the image
+header or the instrument translation file. If an overscan section is
+not set you can use \fBimplot\fR to determine the columns or rows for
+the bias region and define an overscan image section. If you are
+unsure about image sections consult with someone or read the
+introductory IRAF documentation.
+.le
+.ls \fItrim\fR - Trim the image?
+The image is trimmed to the image section given by the parameter
+\fItrimsec\fR. A default trim section for your instrument should be
+set by \fBsetinstrument\fR, however, you may override this default if
+desired. If the word "image" is given the data
+image section is given in the image header or the instrument
+translation file. As with the overscan image section it is
+straightforward to specify, but if you are unsure consult someone.
+.le
+.ls \fIzerocor\fR - Apply zero level correction?
+The zero level image to be subtracted is specified by the parameter
+\fIzero\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIdarkcor\fR - Apply dark count correction?
+The dark count image to be subtracted is specified by the parameter
+\fIdark\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIflatcor\fR - Apply flat field correction?
+The flat field images to be used are specified by the parameter
+\fIflat\fR. There must be one flat field image for each filter
+or subset (see \fBsubsets\fR) to be processed. If a flat field
+image is not given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIreadcor\fR - Convert zero level image to readout correction?
+If a one dimensional zero level readout correction vector is to be subtracted
+instead of a two dimensional zero level image then, when this parameter is set,
+the zero level images will be averaged to one dimension. The readout axis
+must be specified by the parameter \fIreadaxis\fR. The default for your
+instrument is set by \fBsetinstrument\fR.
+.le
+.ls \fIscancor\fR - Convert flat field image to scan correction?
+If the instrument is operated in a scan mode then a correction to the
+flat field may be required. There are two types of scan modes, "shortscan"
+and "longscan". In longscan mode flat field images will be averaged
+to one dimension and the readout axis must be specified. Shortscan mode
+is a little more complicated. The scan correction is used if the flat
+field images are not observed in scan mode. The number of scan lines
+must be specified by the parameter \fInscan\fR. If they are observed in
+scan mode, like the object observations, then the scan correction
+operations should \fInot\fR be specified. For details of scan mode operations
+see \fBccdproc\fR. The scan parameters
+should be set by \fBsetinstrument\fR. If in doubt consult someone
+familiar with the instrument and mode of operation.
+.le
+
+ This description of the parameters is longer than the actual operation of
+setting the parameters. The only parameters likely to change during processing
+are the calibration image parameters.
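+
+As a sketch (the switch names are those in the list above), the same
+selections can also be made on the \fBccdproc\fR command line rather than
+with \fBeparam\fR:
+
+.nf
+	cl> ccdproc *.imh fixpix- overscan+ trim+ zerocor+ darkcor- flatcor+
+.fi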
+
+ When processing many images using the same calibration files a modest
+performance improvement can be achieved by keeping (caching) the
+calibration images in memory to avoid disk accesses. This option
+is available by specifying the amount of memory available for image
+caching with the parameter \fImax_cache\fR. If the value is zero then
+the images are accessed from disk as needed while if there is
+sufficient memory the calibration images may be kept in memory during
+the task execution.
+.sh
+3. Processing Your Data
+
+ The processing path depends on the type of data, the type of
+instrument, types of calibration images, and the observing
+sequence. In this section we describe two types of operations common
+in reducing most data; combining calibration images and performing the
+standard calibration and correction operations. Some additional special
+operations are described in the following section.
+
+ However, the first thing you might want to try before any
+processing is to get a listing of the CCD images showing the CCD image
+types, subsets, and processing flags. The task for this is
+\fBccdlist\fR. It has three types of output; a short one line per
+image format, a longer format which shows the state of the processing,
+and a format which prints the image names only (used to create files
+containing lists of images of a particular CCD image type). To get a
+quick listing type:
+
+.nf
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.fi
+
+ The example shows only a sample of the images. The short format
+listing tells you the name of the image, its size and pixel type, the
+CCD image type as seen by the package, the subset identifier (in this
+case the filter), and the title. If the data had been processed then
+there would also be processing flags. If the CCD image types do not
+seem right then there may be a problem with the instrument
+specification.
+
+ Many of the tasks in the \fBccdred\fR package have the parameter
+\fIccdtype\fR which selects a particular type of image. To list
+only the object images from the previous example:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.fi
+
+If no CCD image type is specified (by using the null string "")
+then all image types are selected. This may be
+necessary if your instrument data does not contain image type identifications.
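+
+For example (a sketch assuming the name-only output is selected with a
+\fInames\fR switch), a file listing just the flat field images could be
+made with
+
+.nf
+	cl> ccdlist *.imh ccdtype=flat names+ > flatlist
+.fi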
+.sh
+3.1 Combining Calibration Images
+
+ If you do not need to combine calibration images because you only
+have one image of each type, you can skip this section. Calibration
+images, particularly zero level and flat field images, are combined in
+order to minimize the effects of noise and reject bad pixels in the
+calibrations. The basic tool for combining images is the task
+\fBcombine\fR. There are simple variants of this task whose default
+parameters are set appropriately for each type of calibration image.
+These are the ones you will use for calibration images leaving
+\fBcombine\fR for combining object images. Zero level images are
+combined with \fBzerocombine\fR, dark count images with
+\fBdarkcombine\fR, and flat field images with \fBflatcombine\fR.
+
+ For example, to combine flat field images the command is:
+
+.nf
+ cl> flatcombine *.imh
+ Jun 1 14:26 combine: maxreject
+ Images N Exp Mode Scale Offset Weight
+ ccd045.imh 1 5.0 INDEF 1.000 0. 0.048
+ ccd046.imh 1 5.0 INDEF 1.000 0. 0.048
+ <... list of files ...>
+ ccd065.imh 1 5.0 INDEF 1.000 0. 0.048
+ ----------- ------ ------
+ FlatV.imh 21 5.0
+.fi
+
+This output is printed when verbose mode is set. The same information
+is recorded in the log file. In this case the flat fields are combined
+by rejecting the maximum value at each point in the image (the
+"maxreject" algorithm). The images are scaled by the exposure times,
+which are all the same in this example. The mode is not evaluated for
+exposure scaling and the relative weights are the same because the
+exposure times are the same. The example only shows part of the
+output; \fBflatcombine\fR automatically groups the flat field images by
+filter to produce the calibration images "FlatV", "FlatB", and
+"FlatR".
+.sh
+3.2 Calibrations and Corrections
+
+ Processing the CCD data is easy and largely automated.
+First, set the task parameters with the following command:
+
+ cl> eparam ccdproc
+
+You may have already set the parameters when you ran
+\fBsetinstrument\fR, though the calibration image parameters
+\fIzero\fR, \fIdark\fR, and \fIflat\fR may still need to be set or
+changed. Once this is done simply give the command
+
+.nf
+ cl> ccdproc *.imh
+ ccd003: Jun 1 15:13 Overscan section is [520:540,*] with mean=485.0
+ ccd003: Jun 1 15:14 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:14 Overscan section is [520:540,*] with mean=485.0
+ FlatV: Jun 1 15:14 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:15 Overscan section is [520:540,*] with mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh with scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*] with mean=485.2
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh with scale=138.2
+ <... more ...>
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*] with mean=482.4
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*] with mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh with scale=132.3
+ <... more ...>
+.fi
+
+ The output shown is with verbose mode set. It is the same as
+recorded in the log file. It illustrates the principle of automatic
+calibration image processing. The first object image, "ccd003", was
+being processed when the flat field image was required. Since the
+image was taken with the V filter the appropriate flat field was
+determined to be "FlatV". Since it had not been processed, the
+processing of "ccd003" was interrupted to process "FlatV". The
+processed calibration image may have been cached if there was enough
+memory. Once "FlatV" was processed (note that the flat field was not
+flattened because the task knows this image is a flat field) the
+processing of "ccd003" was completed. The next image, "ccd004", is
+also a V filter image so the already processed, and possibly cached,
+flat field "FlatV" is used again. The first B band image is "ccd013"
+and, as before, the B filter flat field calibration image is processed
+automatically. The same automatic calibration processing and image
+caching occurs when using zero level and dark count calibration
+images.
+
+ Commonly the processing is done with the verbose mode turned off
+and the task run as a background job. This is done with the commands
+
+.nf
+ cl> ccdred.verbose=no
+ cl> ccdproc *.imh &
+.fi
+
+The already processed images in the input list are recognized as having been
+processed and are not affected. To check the status of the processing we
+can look at the end of the log file with:
+
+ cl> tail logfile
+
+After processing we can repeat the \fBccdlist\fR command to find:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+	ccd024.imh[508,508][real][object][R][OTF]:N4036 R 600s
+.fi
+
+The processing flags indicate the images have been overscan corrected,
+trimmed, and flat fielded.
+
+ As you can see, processing images is very easy. There is one source
+of minor confusion for beginning users and that is dealing with calibration
+images. First, there is no reason that calibration images
+may not be processed explicitly with \fBccdproc\fR, just remember to set
+the \fIccdtype\fR to the calibration image type or to "". When processing
+object images the calibration images to be used may be specified either
+with the task parameter for the particular calibration image or by
+including the calibration image in the list of input images. Calibration
+images specified by parameter value take precedence and the task
+does not check its CCD image type. Calibration images given in the
+input list must have a valid CCD image type. In case too many
+calibration images are specified, say because the calibration images
+combined to make the master calibration images were not deleted and
+so are part of the image list "*.imh", only the first one will be used.
+Another point to know is that flat field, illumination, and fringe images
+are subset (filter) dependent and so a calibration image for each filter
+must be specified.
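+
+For example (the calibration image names are hypothetical), the calibration
+images may be given explicitly by parameter:
+
+.nf
+	cl> ccdproc obj*.imh ccdtype=object zero=Zero dark=Dark flat=Flat*.imh
+.fi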
+.sh
+4. Special Processing Operations
+
+ The special processing operations are mostly concerned with the
+flat field response correction. There are also special processing
+operations available in \fBccdproc\fR for one dimensional readout
+corrections in the zero level and flat field calibrations. These
+were described briefly above and in more detail in \fBccdproc\fR
+and are not discussed further in this guide. The processing
+operations described in this section are for preparing flat fields
+for two dimensional spectroscopic data, for correcting flat fields
+for illumination effects, for making a separate illumination correction,
+and for applying corrections for fringe effects.  For additional
+discussion about flat fields and illumination corrections see the
+help topic \fBflatfields\fR.
+.sh
+4.1 Spectroscopic Flat Fields
+
+ For spectroscopic data the flat fields may have to be processed to
+remove the general shape of the lamp spectrum and to replace regions outside
+of the aperture where there is no flat field information with values that
+will not cause bad response effects when the flat field is applied to the
+data. If the shape of the lamp spectrum is not important and if the
+longslit spectra have the regions outside of the slit either off the
+detector or trimmed then you may use the flat field without special
+processing.
+
+ First you must process the flat field images explicitly with
+
+ cl> ccdproc *.imh ccdtype=flat
+
+where "*.imh" may be replaced with any list containing the flat fields.
+If zero level and dark count corrections are required these calibration
+images must be available at this time.
+
+ Load the \fBtwodspec\fR package and then either the \fBlongslit\fR
+package, for longslit data, or the \fBapextract\fR package, for
+multiaperture data such as echelles, multifiber, or aperture mask
+spectra. The task for removing the longslit quartz spectrum is
+\fBresponse\fR.  There is also a task for removing illumination
+effects, including the slit profile, from longslit spectra called
+\fBillumination\fR.  For more about processing longslit spectra see the
+help for these tasks and the paper \fIReduction of Longslit Spectra
+with IRAF\fR. The cookbook \fIReduction of Longslit Spectroscopic
+Data Using IRAF (KPNO ICCD and Cryogenic Camera Data)\fR also provides
+a very good discussion even if your data is from a different instrument.
+
+ For multiaperture data the task for removing the relative shapes of
+the spectra is called \fBapnormalize\fR. Again, consult the help documentation
+for this task for further details. Since you will probably also be
+using the package for extracting the spectra you may be interested
+in the document \fIThe IRAF APEXTRACT Package\fR.
+.sh
+4.2 Illumination Corrections
+
+    The flat field calibration images may not have the same illumination
+pattern as the observations of the sky due to the way the lamp illuminates the
+optical system. In this case when the flat field correction is applied
+to the data there will be gradients in the sky background. To remove
+these gradients a blank sky calibration image is heavily smoothed
+to produce an illumination image.  The illumination image
+is then divided into the images during processing to correct for the
+illumination difference between the flat field and the objects.
+Like the flat fields, the illumination correction images may be subset
+dependent so there should be an illumination image for each subset.
+
+The task which makes illumination correction images is \fBmkskycor\fR.
+Some examples are
+
+.nf
+ cl> mkskycor sky004 Illum004
+ cl> mkskycor sky*.imh ""
+.fi
+
+In the first example the sky image "sky004" is used to make the illumination
+correction image "Illum004".  In the second example the sky images are
+converted to illumination correction images by specifying no output image
+names.  Like \fBccdproc\fR, if the input images have not been processed they
+are first processed automatically.
+
+To apply the illumination correction
+
+.nf
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=Illum004
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=sky*.imh
+.fi
+
+The illumination images could also be set using \fBeparam\fR or given
+on the command line.
+.sh
+4.3 Sky Flat Fields
+
+    You will notice that when you process images with an illumination
+correction you are dividing each image by a flat field calibration and
+an illumination correction.  If the illumination corrections are not
+done as a later step but at the same time as the rest of the processing
+one will get the same calibration by multiplying the flat field by
+the illumination correction and using this product alone as the
+flat field. Such an image is called a \fIsky flat\fR since it is
+a flat field which has been corrected to yield a flat sky when applied
+to the observations.  This approach has the advantage of one fewer
+calibration image and two fewer computations (scaling and dividing the
+illumination correction).  As an added shortcut, rather than compute
+the illumination image with \fBmkskycor\fR and then multiplying, the
+task \fBmkskyflat\fR does all this in one step. Thus, \fBmkskyflat\fR
+takes an input blank sky image, processes it if needed, determines the
+appropriate flat field (sky flats are also subset dependent) from the
+\fBccdproc\fR parameters or the input image list, and produces an
+output sky flat.  Further, if no output image is specified the task
+converts the input blank sky calibration image into a sky flat.
+
+ Two examples in which a new image is created and in which the
+input images are converted to sky flats are
+
+.nf
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky*.imh ""
+.fi
+.sh
+4.4 Illumination Corrected Flat Fields
+
+    A third method to account for illumination problems in the flat fields
+is to remove the large scale pattern from the flat field itself. This is
+useful if there are no reasonable blank sky calibration images and the
+astronomical exposures are evenly illuminated but the flat fields are not.
+This is done by smoothing the flat field images instead of blank sky
+images. As with using the sky images there are two methods, creating
+an illumination correction to be applied as a separate step or fixing
+the original flat field. The smoothing algorithm is
+the same as that used in the other tasks. The tasks to make these types
+of corrections are \fBmkillumcor\fR and \fBmkillumflat\fR. The usage
+is pretty much the same as the other illumination correction tasks
+except that it is more reasonable to replace the original flat fields
+by the corrected flat fields when fixing the flat field. Examples
+of an illumination correction and removing the illumination pattern
+from the flat field are
+
+.nf
+ cl> mkillumcor flat025 Illum025
+ cl> mkillumflat flat*.imh ""
+.fi
+
+As with the other tasks, the input images are processed if necessary.
+.sh
+4.5 Fringe Corrections
+
+ Some CCD detectors suffer from fringing effects due to the night
+sky emission lines which are not removed by the other calibration
+and correction operations. To correct for the fringing you need a
+really blank sky image. There is not yet a task to remove objects from
+sky images because this is often done with an interactive image display
+tool (which will soon be added). The blank sky image is heavily smoothed
+to determine the mean sky background and then this is subtracted from the
+original image. The image should then be essentially zero except for the
+fringe pattern. This fringe correction image is scaled to the same
+exposure time as the image to be corrected and then subtracted to remove
+the fringing. Note that since the night sky lines are variable there
+may need to be an additional scaling applied. Determining this scaling
+requires either an interactive display tool or a very clever task.
+Such tasks will also be added in the future.
+
+ The task to make a fringe correction image is \fBmkfringecor\fR.
+The sky background is determined in exactly the same way as the illumination
+pattern; in fact, the same sky image may be used for both the sky
+illumination and for the fringe correction.  The task works consistently
+with the "mk" tasks in that the input images are processed first if needed
+and then the output correction image is produced with the specified name
+or replaces the input image if no output image is specified.
+As examples,
+
+.nf
+ cl> mkfringecor sky004 Fringe
+ cl> mkfringecor sky*.imh ""
+.fi
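+
+The fringe image is then applied with \fBccdproc\fR (a sketch; the parameter
+names \fIfringecor\fR and \fIfringe\fR are assumptions to be checked against
+the \fBccdproc\fR parameter list):
+
+.nf
+	cl> ccdproc obj*.imh ccdtype=object fringecor+ fringe=Fringe
+.fi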
+.sh
+5. Demonstration
+
+ A simple demonstration task is available. To run this demonstration
+load the \fBccdtest\fR package; this is a subpackage of the main
+\fBccdred\fR package. Then simply type
+
+ cl> demo
+
+The demonstration will then create some artificial CCD data and reduce
+them giving descriptive comments as it goes along. This demonstration uses
+the "playback" facility of the command language and is actually substituting
+its own commands for terminal input.  Initially you must type carriage return
+or space after each comment ending with "...".  If you wish to have the
+demonstration run completely automatically at its own speed then type 'g'
+at the "..." prompt.  Thereafter, it will simply pause long enough to give
+you a chance to read the comments. When the demo is finished you will
+need to remove the files created. However, feel free to examine the reduced
+images, the log file, etc. \fINote that the demonstration changes the
+setup parameters so be sure to run \fBsetinstrument\fI again and check
+the setup parameters.\fR
+.sh
+6. Summary
+
+ The \fBccdred\fR package is very easy to use. First load the package;
+it is in the \fBimred\fR package which is in the \fBnoao\fR package.
+If this is your first time reducing data from a particular instrument
+or if you have changed instruments then run \fBsetinstrument\fR.
+Set the processing parameters for the operations you want performed.
+If you need to combine calibration images to form a master calibration
+image use one of the combine tasks. Spectroscopic flat fields may
+need to be processed first in order to remove the lamp spectrum.
+Finally, just type
+
+ cl> ccdproc *.imh&
+.sh
+7. References
+
+ A general guide to using IRAF is \fIA User's Introduction to the IRAF
+Command Language\fR. This document may be found in the IRAF documentation
+sets and is available from the National Optical Astronomy Observatories,
+Central Computer Services (NOAO-CCS).
+
+    For a more detailed description of the \fBccdred\fR package, including
+a discussion of the design and some of the algorithms, see \fIThe IRAF
+CCD Reduction Package -- CCDRED\fR by F. Valdes. This paper is available
+from NOAO-CCS and appears in the proceedings of the Santa Cruz Summer
+Workshop in Astronomy and Astrophysics, \fIInstrumentation for Ground-Based
+Optical Astronomy: Present and Future\fR, edited by Lloyd B. Robinson and
+published by Springer-Verlag.
+
+ The task descriptions and supplementary documentation are available
+in printed form in the IRAF documentation sets, a special set
+containing documentation for just the \fBccdred\fR package, and on-line
+through the help task by typing
+
+ cl> help \fItopic\fR
+
+where \fItopic\fR is one of the following.
+
+.nf
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdlist - List CCD processing information
+ ccdproc - Process CCD images
+ ccdtest - CCD test and demonstration package
+ combine - Combine CCD images
+ cosmicrays - Detect and replace cosmic rays
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+   mkillumcor - Make flat field illumination correction images
+  mkillumflat - Make illumination corrected flat fields
+     mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+
+ ADDITIONAL HELP TOPICS
+
+ ccdred - CCD image reduction package
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ subsets - Description of CCD subsets
+.fi
+
+Printed copies of the on-line help documentation may be made with the
+command
+
+ cl> help topic | lprint
+
+ In addition to the package documentation for \fBccdred\fR,
+\fBlongslit\fR, and \fBapextract\fR there may be specific guides for
+certain instruments. These specific guides, called "cookbooks", give
+specific examples and parameter values for the CCD data.
+.endhelp
diff --git a/noao/imred/ccdred/doc/guide.ms b/noao/imred/ccdred/doc/guide.ms
new file mode 100644
index 00000000..62d87bb9
--- /dev/null
+++ b/noao/imred/ccdred/doc/guide.ms
@@ -0,0 +1,794 @@
+.RP
+.TL
+User's Guide to the CCDRED Package
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+P.O. Box 26732, Tucson, Arizona 85726
+June 1987
+Revised February 1988
+.AB
+The IRAF CCD reduction package, \fBccdred\fR, provides tools
+for the easy and efficient reduction of CCD images. The standard
+reduction operations are replacement of bad pixels, subtraction of an
+overscan or prescan bias, subtraction of a zero level image,
+subtraction of a dark count image, division by a flat field calibration
+image, division by an illumination correction, subtraction of a fringe
+image, and trimming unwanted lines or columns. Another common
+operation provided by the package is scaling and combining images with
+a number of algorithms for rejecting cosmic rays. Data in the image
+header is used to make the reductions largely automated and
+self-documenting though the package may still be used in the absence of
+this data. Also a translation mechanism is used to relate image header
+parameters to those used by the package to allow data from a variety of
+observatories and instruments to be processed. This guide provides a brief
+description of the IRAF CCD reduction package and examples of reducing
+simple CCD data.
+.AE
+.NH
+Introduction
+.LP
+ This guide provides a brief description of the IRAF CCD reduction
+package \fBccdred\fR and examples of reducing simple CCD data. It is a
+generic guide in that it is not tied to any particular type of data.
+There may be more specific guides (or "cookbooks") for your data.
+Detailed descriptions of the tasks and features of the package are
+provided in the help documentation for the package.
+
+ The purpose of the CCDRED package is to provide tools for the easy
+and efficient reduction of CCD images. The standard reduction
+operations are replacement of bad columns and lines by interpolation
+from neighboring columns and lines, subtraction of a bias level
+determined from overscan or prescan columns or lines, subtraction of a
+zero level using a zero length exposure calibration image, subtraction
+of a dark count calibration image appropriately scaled to the dark time
+exposure, division by a scaled flat field calibration image, division
+by an illumination image (derived from a blank sky image), subtraction
+of a scaled fringe image (also derived from a blank sky image), and
+trimming the image of unwanted lines or columns such as the overscan
+strip. Any set of operations may be done simultaneously over a list of
+images in a highly efficient manner. The reduction operations are
+recorded in the image header and may also be logged on the terminal and
+in a log file.
+
+ The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+
+ Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+
+ There are several important features provided by the package to
+make the reduction of CCD images convenient; particularly to minimize
+record keeping. One of these is the ability to recognize the different
+types of CCD images. This ability allows the user to select a certain
+class of images to be processed or listed and allows the processing
+tasks to identify calibration images and process them differently from
+object images. The standard CCD image types are \fIobject\fR,
+\fIzero\fR level, \fIdark\fR count, and \fIflat\fR field. For more on
+the image types see \fBccdtypes\fR.
+
+ The tasks can also identify the different filters (or other subset
+parameter) which require different flat field images. This means you don't
+have to separate the images by filter and process each set separately.
+This feature is discussed further in \fBsubsets\fR.
+
+ The tasks keep track of the reduction steps completed on each
+image and ignore images which have been processed. This feature,
+along with recognizing the image types and subsets, makes it possible to
+specify all the images to a task with a wildcard template, such as
+"*.imh", rather than indicating each image by name. You will find this
+extremely important with large sets of observations.
+
+ A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows you to abort the task without messing up the image data and
+protects data if the computer crashes. The second feature is that
+there is a package parameter which may be set to make a backup of the
+input data with a particular prefix such as "orig" or "imdir$". This
+backup feature may be used when there is sufficient disk space, when learning
+to use the package, or just to be cautious.
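+
+For example, backups with the prefix "orig" might be requested by setting
+this parameter (assumed here to be named \fIbackup\fR) before processing:
+
+.ft L
+    cl> ccdred.backup = "orig"
+.ft R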
+
+ In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
+
+ The following sections guide you through the basic use of the
+\fBccdred\fR package. Only the important parameters which you might
+want to change are described. It is assumed that the support personnel
+have created the necessary instrument files (see \fBinstruments\fR)
+which will set the default parameters for the data you will be
+reducing. If this is not the case you may need to delve more deeply
+into the details of the tasks. Information about all the parameters
+and how the various tasks operate are given in the help documentation
+for the tasks and in additional special help topics. Some useful help
+documentation is indicated in the discussion and also in the
+\fBReferences\fR section.
+.NH
+Getting Started
+.LP
+ The first step is to load \fBccdred\fR. This is done by loading
+the \fBnoao\fR package, followed by the image reduction package
+\fBimred\fR, and finally the \fBccdred\fR package. Loading a
+package consists of typing its name. Note that some of these packages may be
+loaded automatically when you log in to IRAF.
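+
+For example, if none of these packages has been loaded already the
+commands are simply:
+
+.nf
+.ft L
+    cl> noao
+    cl> imred
+    cl> ccdred
+.ft R
+.fi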
+
+ When you load the \fBccdred\fR package the menu of tasks or commands
+is listed. This appears as follows:
+
+.nf
+.KS
+.ft L
+ cl> ccdred
+ badpiximage ccdtest mkfringecor setinstrument
+ ccdgroups combine mkillumcor zerocombine
+ ccdhedit cosmicrays mkillumflat
+ ccdlist darkcombine mkskycor
+ ccdproc flatcombine mkskyflat
+.ft R
+.KE
+.fi
+
+A summary of the tasks and additional help topics is obtained by typing:
+
+.ft L
+ cl> help
+.ft R
+
+This list and how to get additional help on specific topics is described
+in the \fBReferences\fR section at the end of this guide.
+
+ The first command to use is \fBsetinstrument\fR, which sets the package
+appropriately for the CCD images to be reduced. The support personnel
+should tell you the instrument identification; if not, a list
+of known instruments may be obtained by entering '?' for the instrument name.
+
+.nf
+.ft L
+ cl> setinstrument
+ Instrument ID (type ? for a list) \fI<enter instrument id or ?>
+ <Set ccdred package parameters using eparam>
+ <Set ccdproc task parameters using eparam>
+.ft R
+.fi
+
+This task sets the default parameters and then allows you to modify the
+package parameters and the processing parameters using the parameter
+editor \fBeparam\fR. If you are not familiar with \fBeparam\fR see the
+help or CL introduction documentation. For most terminals you move up
+and down through the parameters with the terminal arrow keys, you
+change the parameters by simply typing the desired value, and you exit
+with control Z or control D. Note that you can change parameters for
+any task at any time with \fBeparam\fR and you do not have to run
+\fBsetinstrument\fR again, even if you logout, until you need to reduce
+data from a different instrument.
+
+ The \fBccdred\fR package parameters control general I/O functions of
+the tasks in the package. The parameters you might wish to change are
+the output pixel type and the verbose option. It is probably better to
+allow the processing to convert the images to the real pixel datatype,
+except when the input images are short integers, the noise is
+significantly greater than one digital unit, and disk space is critical. The
+verbose parameter simply prints the information written to the log file
+on the terminal. This can be useful when little else is being done and
+you are just beginning. However, when doing background processing and
+other IRAF reduction tasks it is enough to simply look at the end of
+the logfile with the task \fBtail\fR to see the current state of the
+processing.
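+
+The output pixel type and the verbose option may also be set directly
+with CL assignment statements rather than with \fBeparam\fR; for example
+(the values shown are only illustrative):
+
+.nf
+.ft L
+    cl> ccdred.pixeltype = "real"
+    cl> ccdred.verbose = yes
+.ft R
+.fi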
+
+ The \fBccdproc\fR parameters control the CCD processing. There are
+many parameters but they all may be conveniently set at this point.
+Many of the parameters have default values set appropriately for the
+instrument you specified. The images to be processed can be specified
+later. What needs to be set are the processing operations that you
+want done and the parameters required for each operation. The
+processing operations are selected by entering yes or no for each one.
+The following items briefly describe each of the possible processing
+operations and the additional parameters required.
+
+.LP
+\fIfixpix\fR - Fix bad CCD lines and columns?
+.IP
+The bad pixels (cosmetic defects) in the detector are given in a file
+specified by the parameter \fIfixfile\fR. This information is used
+to replace the pixels by interpolating from the neighboring pixels.
+A standard file for your instrument may be set by \fBsetinstrument\fR
+or if the word "image" is given then the file is defined in the instrument
+data file. For more on the bad pixel file see \fBinstruments\fR.
+.LP
+\fIoverscan\fR - Apply overscan strip correction?
+.IP
+The overscan or prescan region is specified by the parameter
+\fIbiassec\fR. This is given as an IRAF image section. The overscan
+region is averaged along the readout axis, specified by the parameter
+\fIreadaxis\fR, to create a one dimensional bias vector. This bias is
+fit by a function to remove cosmic rays and noise. There are a number
+of parameters at the end of the parameter list which control the
+fitting. The default overscan bias section and fitting parameters for
+your instrument should be set by \fBsetinstrument\fR. If the word
+"image" is given the overscan bias section is defined in the image
+header or the instrument translation file. If an overscan section is
+not set you can use \fBimplot\fR to determine the columns or rows for
+the bias region and define an overscan image section. If you are
+unsure about image sections consult with someone or read the
+introductory IRAF documentation.
+.LP
+\fItrim\fR - Trim the image?
+.IP
+The image is trimmed to the image section given by the parameter
+\fItrimsec\fR. A default trim section for your instrument should be
+set by \fBsetinstrument\fR, however, you may override this default if
+desired. If the word "image" is given the data
+image section is given in the image header or the instrument
+translation file. As with the overscan image section it is
+straightforward to specify, but if you are unsure consult someone.
+.LP
+\fIzerocor\fR - Apply zero level correction?
+.IP
+The zero level image to be subtracted is specified by the parameter
+\fIzero\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIdarkcor\fR - Apply dark count correction?
+.IP
+The dark count image to be subtracted is specified by the parameter
+\fIdark\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIflatcor\fR - Apply flat field correction?
+.IP
+The flat field images to be used are specified by the parameter
+\fIflat\fR. There must be one flat field image for each filter
+or subset (see \fBsubsets\fR) to be processed. If a flat field
+image is not given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIreadcor\fR - Convert zero level image to readout correction?
+.IP
+If a one dimensional zero level readout correction vector is to be subtracted
+instead of a two dimensional zero level image then, when this parameter is set,
+the zero level images will be averaged to one dimension. The readout axis
+must be specified by the parameter \fIreadaxis\fR. The default for your
+instrument is set by \fBsetinstrument\fR.
+.LP
+\fIscancor\fR - Convert flat field image to scan correction?
+.IP
+If the instrument is operated in a scan mode then a correction to the
+flat field may be required. There are two types of scan modes, "shortscan"
+and "longscan". In longscan mode flat field images will be averaged
+to one dimension and the readout axis must be specified. Shortscan mode
+is a little more complicated. The scan correction is used if the flat
+field images are not observed in scan mode. The number of scan lines
+must be specified by the parameter \fInscan\fR. If they are observed in
+scan mode, like the object observations, then the scan correction
+operations should \fInot\fR be specified. For details of scan mode operations
+see \fBccdproc\fR. The scan parameters
+should be set by \fBsetinstrument\fR. If in doubt consult someone
+familiar with the instrument and mode of operation.
+.LP
+
+ This description of the parameters is longer than the actual operation of
+setting the parameters. The only parameters likely to change during processing
+are the calibration image parameters.
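+
+The processing switches and the calibration images may also be given on
+the \fBccdproc\fR command line rather than with \fBeparam\fR; for example
+(the flat field name here is only illustrative):
+
+.ft L
+    cl> ccdproc *.imh overscan+ trim+ flatcor+ flat=FlatV
+.ft R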
+
+ When processing many images using the same calibration files a modest
+performance improvement can be achieved by keeping (caching) the
+calibration images in memory to avoid disk accesses. This option
+is available by specifying the amount of memory available for image
+caching with the parameter \fImax_cache\fR. If the value is zero then
+the images are accessed from disk as needed while if there is
+sufficient memory the calibration images may be kept in memory during
+the task execution.
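+
+For example, caching might be enabled with a command such as the
+following (it is assumed here that \fImax_cache\fR is one of the
+\fBccdproc\fR task parameters and is given in megabytes):
+
+.ft L
+    cl> ccdproc.max_cache = 100
+.ft R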
+.NH
+Processing Your Data
+.LP
+ The processing path depends on the type of data, the type of
+instrument, types of calibration images, and the observing
+sequence. In this section we describe two types of operations common
+in reducing most data; combining calibration images and performing the
+standard calibration and correction operations. Some additional special
+operations are described in the following section.
+
+ However, the first thing you might want to try before any
+processing is to get a listing of the CCD images showing the CCD image
+types, subsets, and processing flags. The task for this is
+\fBccdlist\fR. It has three types of output: a short one line per
+image format, a longer format which shows the state of the processing,
+and a format which prints the image names only (used to create files
+containing lists of images of a particular CCD image type). To get a
+quick listing type:
+
+.nf
+.ft L
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.ft R
+.fi
+
+ The example shows only a sample of the images. The short format
+listing tells you the name of the image, its size and pixel type, the
+CCD image type as seen by the package, the subset identifier (in this
+case the filter), and the title. If the data had been processed then
+there would also be processing flags. If the CCD image types do not
+seem right then there may be a problem with the instrument
+specification.
+
+ Many of the tasks in the \fBccdred\fR package have the parameter
+\fIccdtype\fR which selects a particular type of image. To list
+only the object images from the previous example:
+
+.nf
+.ft L
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.ft R
+.fi
+
+If no CCD image type is specified (by using the null string "")
+then all image types are selected. This may be
+necessary if your instrument data does not contain image type identifications.
+.NH 2
+Combining Calibration Images
+.LP
+ If you do not need to combine calibration images because you only
+have one image of each type, you can skip this section. Calibration
+images, particularly zero level and flat field images, are combined in
+order to minimize the effects of noise and reject bad pixels in the
+calibrations. The basic tool for combining images is the task
+\fBcombine\fR. There are simple variants of this task whose default
+parameters are set appropriately for each type of calibration image.
+These are the ones you will use for calibration images leaving
+\fBcombine\fR for combining object images. Zero level images are
+combined with \fBzerocombine\fR, dark count images with
+\fBdarkcombine\fR, and flat field images with \fBflatcombine\fR.
+
+ For example, to combine flat field images the command is:
+
+.nf
+.ft L
+ cl> flatcombine *.imh
+ Jun 1 14:26 combine: maxreject
+ Images N Exp Mode Scale Offset Weight
+ ccd045.imh 1 5.0 INDEF 1.000 0. 0.048
+ ccd046.imh 1 5.0 INDEF 1.000 0. 0.048
+ \fI<... list of files ...>\fL
+ ccd065.imh 1 5.0 INDEF 1.000 0. 0.048
+ ----------- ------ ------
+ FlatV.imh 21 5.0
+.ft R
+.fi
+
+This output is printed when verbose mode is set. The same information
+is recorded in the log file. In this case the flat fields are combined
+by rejecting the maximum value at each point in the image (the
+"maxreject" algorithm). The images are scaled by the exposure times,
+which are all the same in this example. The mode is not evaluated for
+exposure scaling and the relative weights are the same because the
+exposure times are the same. The example only shows part of the
+output; \fBflatcombine\fR automatically groups the flat field images by
+filter to produce the calibration images "FlatV", "FlatB", and
+"FlatR".
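+
+The zero level and dark count images are combined in the same way; for
+example (like \fBflatcombine\fR, these tasks select the appropriate CCD
+image type by default):
+
+.nf
+.ft L
+    cl> zerocombine *.imh
+    cl> darkcombine *.imh
+.ft R
+.fi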
+.NH 2
+Calibrations and Corrections
+.LP
+ Processing the CCD data is easy and largely automated.
+First, set the task parameters with the following command:
+
+.ft L
+ cl> eparam ccdproc
+.ft R
+
+You may have already set the parameters when you ran
+\fBsetinstrument\fR, though the calibration image parameters
+\fIzero\fR, \fIdark\fR, and \fIflat\fR may still need to be set or
+changed. Once this is done simply give the command
+
+.nf
+.ft L
+ cl> ccdproc *.imh
+ ccd003: Jun 1 15:13 Overscan section is [520:540,*] with mean=485.0
+ ccd003: Jun 1 15:14 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:14 Overscan section is [520:540,*] with mean=485.0
+ FlatV: Jun 1 15:14 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:15 Overscan section is [520:540,*] with mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh with scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*] with mean=485.2
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh with scale=138.2
+ \fI<... more ...>\fL
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*] with mean=482.4
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*] with mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh with scale=132.3
+ \fI<... more ...>\fL
+.ft R
+.fi
+
+ The output shown is with verbose mode set. It is the same as
+recorded in the log file. It illustrates the principle of automatic
+calibration image processing. The first object image, "ccd003", was
+being processed when the flat field image was required. Since the
+image was taken with the V filter the appropriate flat field was
+determined to be "FlatV". Since it had not been processed, the
+processing of "ccd003" was interrupted to process "FlatV". The
+processed calibration image may have been cached if there was enough
+memory. Once "FlatV" was processed (note that the flat field was not
+flattened because the task knows this image is a flat field) the
+processing of "ccd003" was completed. The next image, "ccd004", is
+also a V filter image so the already processed, and possibly cached,
+flat field "FlatV" is used again. The first B band image is "ccd013"
+and, as before, the B filter flat field calibration image is processed
+automatically. The same automatic calibration processing and image
+caching occurs when using zero level and dark count calibration
+images.
+
+ Commonly the processing is done with the verbose mode turned off
+and the task run as a background job. This is done with the commands
+
+.nf
+.ft L
+ cl> ccdred.verbose=no
+ cl> ccdproc *.imh &
+.ft R
+.fi
+
+The already processed images in the input list are recognized as having been
+processed and are not affected. To check the status of the processing we
+can look at the end of the log file with:
+
+.ft L
+ cl> tail logfile
+.ft R
+
+After processing we can repeat the \fBccdlist\fR command to find:
+
+.nf
+.ft L
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R][OTF]:N4036 R 600s
+.ft R
+.fi
+
+The processing flags indicate the images have been overscan corrected,
+trimmed, and flat fielded.
+
+ As you can see, processing images is very easy. There is one source
+of minor confusion for beginning users and that is dealing with calibration
+images. First, there is no reason that calibration images
+may not be processed explicitly with \fBccdproc\fR, just remember to set
+the \fIccdtype\fR to the calibration image type or to "". When processing
+object images the calibration images to be used may be specified either
+with the task parameter for the particular calibration image or by
+including the calibration image in the list of input images. Calibration
+images specified by parameter value take precedence and the task
+does not check their CCD image types. Calibration images given in the
+input list must have a valid CCD image type. In case too many
+calibration images are specified, say because the calibration images
+combined to make the master calibration images were not deleted and
+so are part of the image list "*.imh", only the first one will be used.
+Another point to know is that flat field, illumination, and fringe images
+are subset (filter) dependent and so a calibration image for each filter
+must be specified.
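+
+For example, the calibration images might be given on the command line
+with one flat field per filter (the image names here are only
+illustrative):
+
+.ft L
+    cl> ccdproc *.imh ccdtype=object zero=Zero flat="FlatV,FlatB,FlatR"
+.ft R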
+.NH
+Special Processing Operations
+.LP
+ The special processing operations are mostly concerned with the
+flat field response correction. There are also special processing
+operations available in \fBccdproc\fR for one dimensional readout
+corrections in the zero level and flat field calibrations. These
+were described briefly above and in more detail in \fBccdproc\fR
+and are not discussed further in this guide. The processing
+operations described in this section are for preparing flat fields
+for two dimensional spectroscopic data, for correcting flat fields
+for illumination effects, for making a separate illumination correction,
+and for applying corrections for fringe effects. For additional
+discussion about flat fields and illumination corrections see the
+help topic \fBflatfields\fR.
+.NH 2
+Spectroscopic Flat Fields
+.LP
+ For spectroscopic data the flat fields may have to be processed to
+remove the general shape of the lamp spectrum and to replace regions outside
+of the aperture where there is no flat field information with values that
+will not cause bad response effects when the flat field is applied to the
+data. If the shape of the lamp spectrum is not important and if the
+longslit spectra have the regions outside of the slit either off the
+detector or trimmed then you may use the flat field without special
+processing.
+
+ First you must process the flat field images explicitly with
+
+.ft L
+ cl> ccdproc *.imh ccdtype=flat
+.ft R
+
+where "*.imh" may be replaced with any list containing the flat fields.
+If zero level and dark count corrections are required these calibration
+images must be available at this time.
+
+ Load the \fBtwodspec\fR package and then either the \fBlongslit\fR
+package, for longslit data, or the \fBapextract\fR package, for
+multiaperture data such as echelles, multifiber, or aperture mask
+spectra. The task for removing the longslit quartz spectrum is
+\fBresponse\fR. There is also a task for removing illumination
+effects, including the slit profile, from longslit spectra called
+\fBillumination\fR. For more about processing longslit spectra see the
+help for these tasks and the paper \fIReduction of Longslit Spectra
+with IRAF\fR. The cookbook \fIReduction of Longslit Spectroscopic
+Data Using IRAF (KPNO ICCD and Cryogenic Camera Data)\fR also provides
+a very good discussion even if your data is from a different instrument.
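+
+For example, for longslit data the packages are loaded, like \fBccdred\fR,
+simply by typing their names:
+
+.nf
+.ft L
+    cl> twodspec
+    cl> longslit
+.ft R
+.fi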
+
+ For multiaperture data the task for removing the relative shapes of
+the spectra is called \fBapnormalize\fR. Again, consult the help documentation
+for this task for further details. Since you will probably also be
+using the package for extracting the spectra you may be interested
+in the document \fIThe IRAF APEXTRACT Package\fR.
+.NH 2
+Illumination Corrections
+.LP
+ The flat field calibration images may not have the same illumination
+pattern as the observations of the sky due to the way the lamp illuminates the
+optical system. In this case when the flat field correction is applied
+to the data there will be gradients in the sky background. To remove
+these gradients a blank sky calibration image is heavily smoothed
+to produce an illumination image. The illumination image
+is then divided into the images during processing to correct for the
+illumination difference between the flat field and the objects.
+Like the flat fields, the illumination correction images may be subset
+dependent so there should be an illumination image for each subset.
+
+The task which makes illumination correction images is \fBmkskycor\fR.
+Some examples are
+
+.nf
+.ft L
+ cl> mkskycor sky004 Illum004
+ cl> mkskycor sky*.imh ""
+.ft R
+.fi
+
+In the first example the sky image "sky004" is used to make the illumination
+correction image "Illum004". In the second example the sky images are
+converted to illumination correction images by specifying no output image
+names. As in \fBccdproc\fR, if the input images have not been processed they
+are first processed automatically.
+
+To apply the illumination correction
+
+.nf
+.ft L
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=Illum004
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=sky*.imh
+.ft R
+.fi
+
+The illumination images could also be set using \fBeparam\fR or given
+on the command line.
+.NH 2
+Sky Flat Fields
+.LP
+ You will notice that when you process images with an illumination
+correction you are dividing each image by a flat field calibration and
+an illumination correction. If the illumination corrections are not
+done as a later step but at the same time as the rest of the processing
+one will get the same calibration by multiplying the flat field by
+the illumination correction and using this product alone as the
+flat field. Such an image is called a \fIsky flat\fR since it is
+a flat field which has been corrected to yield a flat sky when applied
+to the observations. This approach has the advantage of one less
+calibration image and two fewer computations (scaling and dividing the
+illumination correction). As an added short cut, rather than compute
+the illumination image with \fBmkskycor\fR and then multiplying, the
+task \fBmkskyflat\fR does all this in one step. Thus, \fBmkskyflat\fR
+takes an input blank sky image, processes it if needed, determines the
+appropriate flat field (sky flats are also subset dependent) from the
+\fBccdproc\fR parameters or the input image list, and produces an
+output sky flat. Further, if no output image is specified the task
+converts the input blank sky calibration image into a sky flat.
+
+ Two examples in which a new image is created and in which the
+input images are converted to sky flats are
+
+.nf
+.ft L
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky*.imh ""
+.ft R
+.fi
+.NH 2
+Illumination Corrected Flat Fields
+.LP
+ A third method to account for illumination problems in the flat fields
+is to remove the large scale pattern from the flat field itself. This is
+useful if there are no reasonable blank sky calibration images and the
+astronomical exposures are evenly illuminated but the flat fields are not.
+This is done by smoothing the flat field images instead of blank sky
+images. As with using the sky images there are two methods, creating
+an illumination correction to be applied as a separate step or fixing
+the original flat field. The smoothing algorithm is
+the same as that used in the other tasks. The tasks to make these types
+of corrections are \fBmkillumcor\fR and \fBmkillumflat\fR. The usage
+is pretty much the same as the other illumination correction tasks
+except that it is more reasonable to replace the original flat fields
+by the corrected flat fields when fixing the flat field. Examples
+of an illumination correction and removing the illumination pattern
+from the flat field are
+
+.nf
+.ft L
+ cl> mkillumcor flat025 Illum025
+ cl> mkillumflat flat*.imh ""
+.ft R
+.fi
+
+As with the other tasks, the input images are processed if necessary.
+.NH 2
+Fringe Corrections
+.LP
+ Some CCD detectors suffer from fringing effects due to the night
+sky emission lines which are not removed by the other calibration
+and correction operations. To correct for the fringing you need a
+really blank sky image. There is not yet a task to remove objects from
+sky images because this is often done with an interactive image display
+tool (which will soon be added). The blank sky image is heavily smoothed
+to determine the mean sky background and then this is subtracted from the
+original image. The image should then be essentially zero except for the
+fringe pattern. This fringe correction image is scaled to the same
+exposure time as the image to be corrected and then subtracted to remove
+the fringing. Note that since the night sky lines are variable there
+may need to be an additional scaling applied. Determining this scaling
+requires either an interactive display tool or a very clever task.
+Such tasks will also be added in the future.
+
+ The task to make a fringe correction image is \fBmkfringecor\fR.
+The sky background is determined in exactly the same way as the illumination
+pattern; in fact, the same sky image may be used for both the sky
+illumination and for the fringe correction. The task works consistently
+with the "mk" tasks in that the input images are processed first if needed
+and then the output correction image is produced with the specified name
+or replaces the input image if no output image is specified.
+As examples,
+
+.nf
+.ft L
+ cl> mkfringecor sky004 Fringe
+ cl> mkfringecor sky*.imh ""
+.ft R
+.fi
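+
+The fringe correction is then applied by \fBccdproc\fR in the same way as
+the illumination correction; in the sketch below the parameter names
+\fIfringecor\fR and \fIfringe\fR and the image name "Fringe" are assumed
+by analogy with the illumination example above.
+
+.ft L
+    cl> ccdproc *.imh ccdtype=object fringecor+ fringe=Fringe
+.ft R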
+.NH
+Demonstration
+.LP
+ A simple demonstration task is available. To run this demonstration
+load the \fBccdtest\fR package; this is a subpackage of the main
+\fBccdred\fR package. Then simply type
+
+.ft L
+ cl> demo
+.ft R
+
+The demonstration will then create some artificial CCD data and reduce
+them giving descriptive comments as it goes along. This demonstration uses
+the "playback" facility of the command language and is actually substituting
+its own commands for terminal input. Initially you must type carriage return
+or space after each comment ending with "...". If you wish to have the
+demonstration run completely automatically at its own speed then type 'g'
+at the "..." prompt. Thereafter, it will simply pause long enough to give
+you a chance to read the comments. When the demo is finished you will
+need to remove the files created. However, feel free to examine the reduced
+images, the log file, etc. \fINote that the demonstration changes the
+setup parameters so be sure to run \fBsetinstrument\fI again and check
+the setup parameters.\fR
+.NH
+Summary
+.LP
+ The \fBccdred\fR package is very easy to use. First load the package;
+it is in the \fBimred\fR package which is in the \fBnoao\fR package.
+If this is your first time reducing data from a particular instrument
+or if you have changed instruments then run \fBsetinstrument\fR.
+Set the processing parameters for the operations you want performed.
+If you need to combine calibration images to form a master calibration
+image use one of the combine tasks. Spectroscopic flat fields may
+need to be processed first in order to remove the lamp spectrum.
+Finally, just type
+
+.ft L
+ cl> ccdproc *.imh&
+.ft R
+.SH
+References
+.LP
+ A general guide to using IRAF is \fIA User's Introduction to the IRAF
+Command Language\fR. This document may be found in the IRAF documentation
+sets and is available from the National Optical Astronomy Observatories,
+Central Computer Services (NOAO-CCS).
+
+ A more detailed description of the \fBccdred\fR package including
+a discussion of the design and some of the algorithms is given in \fIThe IRAF
+CCD Reduction Package -- CCDRED\fR by F. Valdes. This paper is available
+from NOAO-CCS and appears in the proceedings of the Santa Cruz Summer
+Workshop in Astronomy and Astrophysics, \fIInstrumentation for Ground-Based
+Optical Astronomy: Present and Future\fR, edited by Lloyd B. Robinson and
+published by Springer-Verlag.
+
+ The task descriptions and supplementary documentation are available
+in printed form in the IRAF documentation sets, a special set
+containing documentation for just the \fBccdred\fR package, and on-line
+through the help task by typing
+
+.ft L
+ cl> help \fItopic\fR
+.ft R
+
+where \fItopic\fR is one of the following.
+
+.nf
+.ft L
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdlist - List CCD processing information
+ ccdproc - Process CCD images
+ ccdtest - CCD test and demonstration package
+ combine - Combine CCD images
+ cosmicrays - Detect and replace cosmic rays
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+  setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+
+ ADDITIONAL HELP TOPICS
+
+ ccdred - CCD image reduction package
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ subsets - Description of CCD subsets
+.ft R
+.fi
+
+Printed copies of the on-line help documentation may be made with the
+command
+
+.ft L
+ cl> help \fItopic\fL | lprint
+.ft R
+
+ In addition to the package documentation for \fBccdred\fR,
+\fBlongslit\fR, and \fBapextract\fR there may be specific guides for
+certain instruments. These specific guides, called "cookbooks", give
+specific examples and parameter values for the CCD data.
diff --git a/noao/imred/ccdred/doc/instruments.hlp b/noao/imred/ccdred/doc/instruments.hlp
new file mode 100644
index 00000000..95baff37
--- /dev/null
+++ b/noao/imred/ccdred/doc/instruments.hlp
@@ -0,0 +1,256 @@
+.help instruments Dec93 noao.imred.ccdred
+
+.ih
+NAME
+instruments -- Instrument specific data files
+.ih
+DESCRIPTION
+The \fBccdred\fR package has been designed to accommodate many different
+instruments, detectors, and observatories. This is done by having
+instrument specific data files. Note that by instrument we mean a
+combination of detector, instrument, application, and observatory, so
+there might be several "instruments" associated with a particular CCD
+detector. Creating and maintaining the instrument files is generally
+the responsibility of the support staff, though the user may create or
+copy and modify his/her own instrument/application specific files. The
+task \fBsetinstrument\fR makes this information available to the user
+and package easily.
+
+There are three instrument data files, all of which are optional. The
+package may be used without the instrument files but much of the
+convenience of the package, particularly with respect to using the CCD
+image types, will be lost. The three files are an instrument image
+header translation file, an initialization task which mainly sets
+default task parameters, and a bad pixel file identifying the cosmetic
+bad pixels in the detector. These files are generally stored in a
+system data directory which is a subdirectory of the logical
+directory "ccddb$". Each file has a root name which identifies the
+instrument.
+.sh
+1. Instrument Translation File
+The instrument translation file translates the parameter names used by
+the \fBccdred\fR package into instrument specific parameters and also
+supplies instrument specific default values. The package parameter
+\fIccdred.instrument\fR specifies this file to the package. The task
+\fBsetinstrument\fR sets this parameter, though it can be set
+explicitly like any other parameter. For the standard instrument
+translation file the root name is the instrument identification and the
+extension is "dat" ("*.dat" files are protected from being removed in a
+"stripped" system, i.e. when all nonessential files are removed).
+Private instrument files may be given any name desired.
+
+The instrument translation proceeds as follows. When a package task needs
+a parameter for an image, for example "imagetyp", it looks in the instrument
+translation file. If no translation file is specified or the file is not
+found then the requested image header keyword is assumed to have the same
+name as the package parameter. If an instrument translation file is defined
+then the requested parameter is translated to an image header keyword,
+provided a translation entry is given. If no translation entry is given the
+package parameter name is used. For
+example the package parameter "imagetyp" might be translated to "data-typ"
+(the old NOAO CCD keyword). If the parameter is not found then the default
+value specified in the translation file, if present, is returned. For recording
+parameter information in the header, such as processing flags, the
+translation is also used. The default value has no meaning in this case.
+For example, if the flag specifying that the image has been corrected
+by a flat field is to be set then the package parameter name "flatcor"
+might be translated to "ff-flag". If no translation is given then the
+new image header parameter is entered as "flatcor".
+
+The translation file consists of lines containing the package
+parameter name, followed by the image header keyword, followed by an
+optional default value. The first two fields are parameter names. The fields
+are separated by whitespace (blanks and tabs). String default values
+containing blanks must be quoted. An example is given below.
+
+.nf
+ # Sample translation file.
+ exptime itime
+ darktime itime
+ imagetyp data-typ
+ subset f1pos
+ biassec biassec [411:431,2:573]
+ datasec datasec [14:385,2:573]
+
+ fixpix bp-flag 0
+ overscan bt-flag 0
+ zerocor bi-flag 0
+ darkcor dk-flag 0
+ flatcor ff-flag 0
+ fringcor fr-flag 0
+.fi
+
+The first comment line is ignored as are blank lines.
+The first two lines translate the exposure time and dark time parameters
+to the image header keyword "itime". The next two lines translate the CCD
+image type and the subset parameter without default values (see
+\fBccdtypes\fR and \fBsubsets\fR for more information). The following two
+lines give the overscan bias strip section and the data section with
+default values for the instrument.
+Note that these parameters may be overridden in the task \fBccdproc\fR.
+
+The next set of translations requires further discussion. For processing
+flags the package assumes that the absence of a keyword means that the
+processing has not been done. If processing is always to be done with
+the \fBCCDRED\fR package and no processing keywords are recorded in the raw data
+then these parameters should be absent (unless you don't like the names
+used by the package). However, for compatibility with the original NOAO
+CCD images, which may be processed outside of IRAF and which use 0 as the
+no processing value, the processing flags are translated and the false values
+are indicated by the default values.
+
+If there is more than one translation for the same CCDRED parameter,
+for example more than one exptime, then the last one is used.
+
+In addition to the parameter name translations the translation file
+contains translations between the value of the image type parameter
+and the image types used by the package. These lines
+consist of the image header type string as the first field (with quotes
+if there are blanks) and the image type as recognized by the package. The
+following example will make this clearer.
+
+.nf
+ 'OBJECT (0)' object
+ 'DARK (1)' dark
+ 'PROJECTOR FLAT (2)' flat
+ 'SKY FLAT (3)' other
+ 'COMPARISON LAMP (4)' other
+ 'BIAS (5)' zero
+ 'DOME FLAT (6)' flat
+.fi
+
+The values of the image type strings in the header contain blanks so they
+are quoted. Also the case of the strings is important. Note that there
+are two types of flat field images and three types of object images.
+
+The CCD image types recognized by the package are:
+
+.nf
+ zero - zero level image such as a bias or preflash
+ dark - dark count image
+ flat - flat field image
+    illum      - illumination image such as a sky image
+ fringe - fringe correction image
+ object - object image
+.fi
+
+There may be more than one image type that maps to the same package
+type. In particular other standard CCD image types, such as comparison
+spectra, multiple exposure, standard star, etc., should be mapped to
+object or other. There may also be more than one type of flat field,
+i.e. dome flat, sky flat, and lamp flat. For more on the CCD image
+types see \fBccdtypes\fR.
+
+The complete set of package parameters are given below.
+The package parameter names are generally the same as the
+standard image header keywords being adopted by NOAO.
+
+.nf
+ General Image Header and Default Parameters
+ ccdmean darktime exptime fixfile
+ imagetyp ncombine biassec subset
+ title datasec nscanrow
+
+ CCDRED Processing Flags
+ ccdproc darkcor fixpix flatcor
+ fringcor illumcor overscan trim
+ zerocor
+
+ CCDRED CCD Image Types
+ dark flat fringe illum
+ none object unknown zero
+.fi
+
+The translation mechanism described here may become more
+sophisticated in the future and a general IRAF system facility may be
+implemented eventually. For the present the translation mechanism is
+quite simple.
+.sh
+2. Instrument Setup Script
+The task \fBsetinstrument\fR translates an instrument ID into a
+CL script in the instrument directory. This script is then executed.
+Generally this script simply sets the task parameters for an
+instrument/application. However, it could do anything else the support
+staff desires. Below are the first few lines of a typical instrument setup
+script.
+
+.nf
+ ccdred.instrument = "ccddb$kpno/example.dat"
+ ccdred.pixeltype = "real"
+ ccdproc.fixpix = yes
+ ccdproc.overscan = yes
+ ccdproc.trim = yes
+ ccdproc.zerocor = no
+ ccdproc.darkcor = no
+ ccdproc.flatcor = yes
+ ccdproc.biassec = "[411:431,2:573]"
+ ccdproc.datasec = "[14:385,2:573]"
+.fi
+
+The instrument parameter should always be set unless there is no
+translation file for the instrument. The \fBccdproc\fR parameters
+illustrate setting the appropriate processing flags for the
+instrument. The overscan bias and trim data sections show an alternate
+method of setting these instrument specific parameters. They may be
+set in the setup script in which case they are given explicitly in the
+user parameter list for \fBccdproc\fR. If the value is "image" then
+the parameters may be determined either through the default value in
+the instrument translation file, as illustrated in the previous
+section, or from the image header itself.
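+
+For example, a setup script that defers these sections to the translation
+file defaults or to the image header would contain lines such as:
+
+.nf
+    ccdproc.biassec = "image"
+    ccdproc.datasec = "image"
+.fi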
+
+The instrument setup script for setting default task parameters may be
+easily created by the support person as follows. Set the package
+parameters using \fBeparam\fR or with CL statements. Setting the
+parameters might involve testing. When satisfied with the way the
+package is set then the parameters may be dumped to a setup script
+using the task \fBdparam\fR. The final step is editing this script to
+delete unimportant and query parameters. For example,
+
+.nf
+ cl> dparam ccdred >> file.cl
+ cl> dparam ccdproc >> file.cl
+ cl> dparam combine >> file.cl
+ ...
+ cl> ed file.cl
+.fi
+.sh
+3. Instrument Bad Pixel File
+The bad pixel file describes the bad pixels, columns, and lines in the
+detector which are to be replaced by interpolation when processing the
+images. This file is clearly detector specific. The file consists of
+lines describing rectangular regions of the image.
+The regions are specified by four numbers giving the starting and ending
+columns followed by the starting and ending lines. The starting and
+ending points may be the same to specify a single column or line. The
+example below illustrates a bad pixel file.
+
+.nf
+ # RCA1 CCD untrimmed
+ 25 25 1 512
+ 108 108 1 512
+ 302 302 403 512
+ 1 512 70 70
+ 245 246 312 315
+.fi
+
+If there is a comment line in the file containing the word "untrimmed"
+then the coordinates of the bad pixel regions apply to the original CCD
+detector coordinates.
+If the image has been trimmed and the bad pixels are replaced at a later
+stage then this word indicates that the trim region be determined from the
+image header and the necessary coordinate conversion made to the original
+CCD pixel coordinates. Note that if a subraster readout is used the
+coordinates must still refer to the original CCD coordinates and
+not the raw, untrimmed readout image. If the word
+"untrimmed" does not appear then the coordinates are assumed to apply to
+the image directly; i.e. the trimmed coordinates if the image has been
+trimmed or the original coordinates if the image has not been trimmed.
+The standard bad pixel files should always refer to the original, untrimmed
+coordinates.
+
+The first two bad pixel regions are complete bad columns (the image
+is 512 x 512), the next line is a partial bad column, the next line is
+a bad line, and the last line is a small bad region. These files are
+easy to create, provided you have a good image to work from and a way
+to measure the positions with an image or graphics display.
+.ih
+SEE ALSO
+ccdtypes, subsets, setinstrument
+.endhelp
diff --git a/noao/imred/ccdred/doc/mkfringecor.hlp b/noao/imred/ccdred/doc/mkfringecor.hlp
new file mode 100644
index 00000000..797f4d11
--- /dev/null
+++ b/noao/imred/ccdred/doc/mkfringecor.hlp
@@ -0,0 +1,90 @@
+.help mkfringecor Feb88 noao.imred.ccdred
+.ih
+NAME
+mkfringecor -- Make fringe correction images from sky images
+.ih
+USAGE
+mkfringecor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making fringe correction images.
+.le
+.ls output
+List of output fringe correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed background.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+The input blank sky images are automatically processed up through the
+illumination correction before computing the fringe correction images.
+The fringe corrections are subset dependent.
+The slowly varying background is determined and subtracted leaving only
+the fringe pattern caused by the sky emission lines. These fringe images
+are then scaled and subtracted from the observations by \fBccdproc\fR.
+The background is determined by heavily smoothing the image using a
+moving "boxcar" average. The effects of the objects and fringes in the
+image are minimized by using a sigma clipping algorithm to detect and
+exclude them from the average. Note, however, that objects left in the
+fringe image will affect the fringe corrected observations. Any objects
+in the sky image should be removed using \fBskyreplace\fR (not yet
+available).
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+To minimize the effects of the fringes and any objects in the blank sky
+calibration images a sigma clipping algorithm is used to detect and
+exclude features from the background. This is done by computing the
+rms of the image lines relative to the smoothed background and
+excluding points exceeding the specified threshold factors times the
+rms. This is done before each image line is added to the moving
+average, except for the first few lines where an iterative process is
+used.
+.ih
+EXAMPLES
+1. The two examples below make a fringe correction image from a blank
+sky image, "sky017". In the first example a separate fringe
+image is created and in the second the fringe image replaces the
+sky image.
+
+.nf
+    cl> mkfringecor sky017 Fringe
+    cl> mkfringecor sky017 sky017
+.fi
+.ih
+SEE ALSO
+ccdproc
+.endhelp
diff --git a/noao/imred/ccdred/doc/mkillumcor.hlp b/noao/imred/ccdred/doc/mkillumcor.hlp
new file mode 100644
index 00000000..0effd7a2
--- /dev/null
+++ b/noao/imred/ccdred/doc/mkillumcor.hlp
@@ -0,0 +1,92 @@
+.help mkillumcor Oct88 noao.imred.ccdred
+.ih
+NAME
+mkillumcor -- Make flat field illumination correction images
+.ih
+USAGE
+mkillumcor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making flat field illumination correction images.
+.le
+.ls output
+List of output flat field illumination correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = "flat"
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude deviant points from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls divbyzero = 1.
+The illumination correction is the inverse of the smoothed flat field.
+This may produce division by zero. A warning is given if division
+by zero takes place and the result (the illumination correction value)
+is replaced by the value of this parameter.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+First, the input flat field images are automatically processed if
+needed. Then, the large scale illumination pattern of the images is
+determined by heavily smoothing them using a moving "boxcar" average.
+The illumination correction, the inverse of the illumination pattern,
+is applied by \fBccdproc\fR to CCD images to remove the illumination
+pattern introduced by the flat field. The combination of the flat
+field calibration and the illumination correction based on the flat
+field is equivalent to removing the illumination from the flat field
+(see \fBmkillumflat\fR). This two step calibration is generally used
+when the observations have been previously flat field calibrated. This
+task is closely related to \fBmkskycor\fR which determines the
+illumination correction from a blank sky image; this is preferable to
+using the illumination from the flat field as it corrects for the
+residual illumination error. For a general discussion of the options
+for flat fields and illumination corrections see \fBflatfields\fR.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+To minimize the effects of bad pixels a sigma clipping algorithm is
+used to detect and reject these pixels from the illumination. This is
+done by computing the rms of the image lines relative to the smoothed
+illumination and excluding points exceeding the specified threshold
+factors times the rms. This is done before each image line is added to
+the moving average, except for the first few lines where an iterative
+process is used.
+.ih
+EXAMPLES
+1. The example below makes an illumination correction image from the
+flat field image, "flat017".
+
+ cl> mkillumcor flat017 Illum
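+
+2. To convert a list of flat field images to illumination correction
+images in place, replacing the input images, a null output list may be
+given:
+
+    cl> mkillumcor flat*.imh ""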
+.ih
+SEE ALSO
+ccdproc, flatfields, mkillumflat, mkskycor, mkskyflat
+.endhelp
diff --git a/noao/imred/ccdred/doc/mkillumflat.hlp b/noao/imred/ccdred/doc/mkillumflat.hlp
new file mode 100644
index 00000000..8288fb85
--- /dev/null
+++ b/noao/imred/ccdred/doc/mkillumflat.hlp
@@ -0,0 +1,101 @@
+.help mkillumflat Oct88 noao.imred.ccdred
+.ih
+NAME
+mkillumflat -- Make illumination corrected flat fields
+.ih
+USAGE
+mkillumflat input output
+.ih
+PARAMETERS
+.ls input
+List of input flat field images to be illumination corrected.
+.le
+.ls output
+List of output illumination corrected flat field images.
+If none is specified or if the name is the same as the
+input image then the output image replaces the input image.
+.le
+.ls ccdtype = "flat"
+CCD image type to select from the input images.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls divbyzero = 1.
+The illumination flat field is the ratio of the flat field to a
+smoothed flat field. This may produce division by zero. A warning is
+given if division by zero takes place and the result (the illumination
+corrected flat field value) is replaced by the value of this
+parameter.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+First, the input flat field images are processed as needed. Then the
+large scale illumination pattern of the images is removed. The
+illumination pattern is determined by heavily smoothing the image using
+a moving "boxcar" average. The output image is the ratio of the input
+image to the illumination pattern. The illumination pattern is
+normalized by its mean to preserve the mean level of the input image.
+
+When this task is applied to flat field images only the small scale
+response effects are retained. This is appropriate if the flat field
+images have illumination effects which differ from the astronomical
+images and blank sky images are not available for creating sky
+corrected flat fields. When a high quality blank sky image is
+available the related task \fBmkskyflat\fR should be used. Note that
+the illumination correction, whether from the flat field or a sky
+image, may be applied as a separate step by using the task
+\fBmkillumcor\fR or \fBmkskycor\fR and applying the illumination
+correction as a separate operation in \fBccdproc\fR. However, creating
+an illumination corrected flat field image before processing is more
+efficient since one less operation per image processed is needed. For
+more discussion about flat fields and illumination corrections see
+\fBflatfields\fR.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+To minimize the effects of bad pixels a sigma clipping algorithm is
+used to detect and reject these pixels from the illumination. This is
+done by computing the rms of the image lines relative to the smoothed
+illumination and excluding points exceeding the specified threshold
+factors times the rms. This is done before each image line is added to
+the moving average, except for the first few lines where an iterative
+process is used.
+.ih
+EXAMPLES
+1. Two examples in which a new image is created and in which the
+input flat fields are corrected in place are:
+
+.nf
+    cl> mkillumflat flat004 FlatV
+    cl> mkillumflat flat* ""
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkfringecor, mkillumcor, mkskycor, mkskyflat
+.endhelp
diff --git a/noao/imred/ccdred/doc/mkskycor.hlp b/noao/imred/ccdred/doc/mkskycor.hlp
new file mode 100644
index 00000000..15cfacf6
--- /dev/null
+++ b/noao/imred/ccdred/doc/mkskycor.hlp
@@ -0,0 +1,103 @@
+.help mkskycor Feb88 noao.imred.ccdred
+.ih
+NAME
+mkskycor -- Make sky illumination correction images
+.ih
+USAGE
+mkskycor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making sky illumination correction images.
+.le
+.ls output
+List of output sky illumination correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+The large scale illumination pattern of the input images, generally
+blank sky calibration images, is determined by heavily smoothing
+the image using a moving "boxcar" average. The effects of objects in
+the image may be minimized by using a sigma clipping algorithm to
+detect and exclude the objects from the average. This
+illumination image is applied by \fBccdproc\fR to CCD images to remove
+the illumination pattern.
+
+The input images are automatically processed up through flat field
+calibration before computing the illumination. The illumination
+correction is that needed to make the processed images flat
+over large scales. The input images are generally blank sky calibration
+images which have the same illumination and instrumental effects
+as the object observations. Object images may be used but removal
+of the objects may not be very good, particularly for large, bright objects.
+For further discussion of flat fields and illumination corrections
+see \fBflatfields\fR.
+
+Note that when you process images with an illumination correction you
+are dividing each image by both a flat field calibration and an
+illumination correction. If the illumination correction is applied at
+the same time as the rest of the processing, rather than as a later
+step, the same calibration is obtained by multiplying the flat field by
+the illumination correction and using this product alone as the flat
+field. This approach has the advantage of one less calibration image
+and two fewer computations (scaling and dividing by the illumination
+correction). Such an image, called a \fIsky flat\fR, may be created by
+\fBmkskyflat\fR as an alternative to this task.
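+
+Schematically (a hedged Python illustration with made-up arrays, not
+IRAF code; the scaling of the illumination by its mean is omitted):
+
+.nf
+    import numpy as np
+
+    image = np.random.uniform(900., 1100., (64, 64))  # made-up object frame
+    flat = np.random.uniform(0.9, 1.1, (64, 64))      # made-up flat field
+    illum = np.random.uniform(0.95, 1.05, (64, 64))   # made-up illumination
+
+    two_step = image / flat / illum    # flat field, then illumination
+    skyflat = flat * illum             # combined "sky flat"
+    one_step = image / skyflat
+
+    assert np.allclose(two_step, one_step)
+.fi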
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+Blank sky images may not be completely blank so a sigma clipping
+algorithm may be used to detect and exclude objects from the
+illumination pattern. This is done by computing the rms of the image
+lines relative to the smoothed background and excluding points
+exceeding the specified threshold factors times the rms. This is done
+before each image line is added to the moving average, except for the
+first few lines where an iterative process is used.
+.ih
+EXAMPLES
+1. The two examples below make an illumination image from a blank sky image,
+"sky017". In the first example a separate illumination image is created
+and in the second the illumination image replaces the sky image.
+
+.nf
+ cl> mkskycor sky017 Illum
+ cl> mkskycor sky017 sky017
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkillumcor, mkillumflat, mkskyflat
+.endhelp
diff --git a/noao/imred/ccdred/doc/mkskyflat.hlp b/noao/imred/ccdred/doc/mkskyflat.hlp
new file mode 100644
index 00000000..d28e2301
--- /dev/null
+++ b/noao/imred/ccdred/doc/mkskyflat.hlp
@@ -0,0 +1,110 @@
+.help mkskyflat Feb88 noao.imred.ccdred
+.ih
+NAME
+mkskyflat -- Make sky corrected flat field images
+.ih
+USAGE
+mkskyflat input output
+.ih
+PARAMETERS
+.ls input
+List of blank sky images to be used to create sky corrected flat field
+calibration images.
+.le
+.ls output
+List of output sky corrected flat field calibration images (called
+sky flats). If none is specified or if the name is the same as the
+input image then the output image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls ccdproc (pset)
+CCD processing parameter set.
+.le
+.ih
+DESCRIPTION
+A sky corrected flat field calibration image, called a sky flat, is a
+flat field that, when applied to observations of the sky, leaves no large
+scale gradients. Flat field images are generally obtained by exposures
+to lamps either illuminating the telescope field or a surface in the dome
+at which the telescope is pointed. Because the detector is not illuminated
+in the same way as an observation of the sky there may be large
+scale illumination patterns introduced into the observations with such
+a flat field. To correct this type of flat field a blank sky observation
+(which has been divided by the original flat field) is heavily smoothed
+to remove the noise, leaving only the residual large scale illumination
+pattern. This illumination pattern is divided into the original flat
+field to remove this residual.
+
+The advantage of creating a sky flat field is that when processing
+the observations no additional operations are required. However,
+if the observations have already been processed with the original
+flat field then the residual illumination pattern of blank sky
+calibration images may be created as an illumination correction
+to be applied by \fBccdproc\fR. Such a correction is created by the
+task \fBmkskycor\fR. If a good blank sky image is not
+available then it may be desirable to remove the illumination pattern
+of the flat field image using \fBmkillumflat\fR or \fBmkillumcor\fR,
+provided the sky observations are truly uniformly illuminated.
+For more on flat fields and illumination corrections see \fBflatfields\fR.
+
+The input, blank sky images are first processed, based on the
+\fBccdproc\fR parameters, if needed. These parameters also determine
+the flat field image to be used in making the sky flat. The residual
+illumination pattern is determined by heavily smoothing the image using
+a moving "boxcar" average. The effects of objects in the input image
+may be minimized by using a sigma clipping algorithm to detect and
+exclude the objects from the average. The output image is the ratio of the
+flat field image, for the same subset as the input image, to the
+residual illumination pattern determined from the processed blank sky
+input image. The illumination pattern is normalized by its mean to
+preserve the mean level of the flat field image.
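+
+As a rough Python sketch of this construction (not the actual task; the
+fixed-size \fIuniform_filter\fR from SciPy stands in for the variable-box
+smoothing and the arrays are made up):
+
+.nf
+    import numpy as np
+    from scipy.ndimage import uniform_filter  # stand-in for boxcar smoothing
+
+    flat = np.random.uniform(0.9, 1.1, (128, 128))    # made-up flat field
+    sky = np.random.uniform(900., 1100., (128, 128))  # made-up processed sky
+
+    illum = uniform_filter(sky, size=31)  # residual illumination pattern
+    illum /= illum.mean()                 # normalize by the mean
+    skyflat = flat / illum                # sky corrected flat field
+.fi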
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+Blank sky images may not be completely blank so a sigma clipping
+algorithm may be used to detect and exclude objects from the
+illumination pattern. This is done by computing the rms of the image
+lines relative to the smoothed background and excluding points
+exceeding the specified threshold factors times the rms. This is done
+before each image line is added to the moving average, except for the
+first few lines where an iterative process is used.
+.ih
+EXAMPLES
+1. Two examples in which a new image is created and in which the
+input sky images are converted to sky flats are:
+
+.nf
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky* ""
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkfringecor, mkillumcor, mkillumflat, mkskycor
+.endhelp
diff --git a/noao/imred/ccdred/doc/setinstrument.hlp b/noao/imred/ccdred/doc/setinstrument.hlp
new file mode 100644
index 00000000..410dd20f
--- /dev/null
+++ b/noao/imred/ccdred/doc/setinstrument.hlp
@@ -0,0 +1,97 @@
+.help setinstrument Oct87 noao.imred.ccdred
+.ih
+NAME
+setinstrument -- Set instrument parameters
+.ih
+USAGE
+setinstrument instrument
+.ih
+PARAMETERS
+.ls instrument
+Instrument identification for instrument parameters to be set. If '?'
+then a list of the instrument identifiers is printed.
+.le
+.ls site = "kpno"
+Site ID.
+.le
+.ls directory = "ccddb$"
+Instrument directory containing instrument files. The instrument files
+are found in the subdirectory given by the site ID.
+.le
+.ls review = yes
+Review the instrument parameters? If yes then \fBeparam\fR is run for
+the parameters of \fBccdred\fR and \fBccdproc\fR.
+.le
+.ls query
+Parameter query if initial instrument is not found.
+.le
+.ih
+DESCRIPTION
+The purpose of the task is to allow the user to easily set default
+parameters for a new instrument. The default parameters are generally
+defined by support personnel in an instrument directory for a particular
+site. The instrument directory is the concatenation of the specified
+directory and the site. For example if the directory is "ccddb$" and
+the site is "kpno" then the instrument directory is "ccddb$kpno/".
+The user may have his own set of instrument files in a local directory.
+The current directory is used by setting the directory and site to the
+null string ("").
+
+The user specifies an instrument identifier. This instrument may
+be specific to a particular observatory, telescope, instrument, and
+detector. If the character '?' is specified or the instrument file is
+not found then a list of instruments
+in the instrument directory is produced by paging the file "instruments.men".
+The task then performs the following operations:
+.ls (1)
+If an instrument translation file with the name given by the instrument
+ID and the extension ".dat" is found then the instrument translation
+file parameter, \fIccdred.instrument\fR, is set to this file.
+If it does not exist then the user is queried again. Note that a
+null instrument, "", is allowed to set no translation file.
+.le
+.ls (2)
+If an instrument setup script with the name given by the instrument ID
+and the extension ".cl" is found then the commands in the file are
+executed (using the command \fIcl < script\fR). This script generally
+sets default parameters.
+.le
+.ls (3)
+If the review flag is set the task \fBeparam\fR is run to allow the user
+to examine and modify the parameters for the package \fBccdred\fR and task
+\fBccdproc\fR.
+.le
+.ih
+EXAMPLES
+1. To get a list of the instruments:
+
+.nf
+ cl> setinstrument ?
+ [List of instruments]
+
+2. To set the instrument and edit the processing parameters:
+
+ cl> setinstrument ccdlink
+ [Edit CCDRED parameters]
+ [Edit CCDPROC parameters]
+
+3. To use your own instrument translation file and/or setup script in
+your working directory:
+
+ cl> setinst.site=""
+ cl> setinst.dir=""
+ cl> setinst myinstrument
+
+To make these files see help under \fBinstruments\fR. Copying and modifying
+system files is also straightforward.
+
+ cl> copy ccddb$kpno/fits.dat .
+ cl> edit fits.dat
+ cl> setinst.site=""
+ cl> setinst.dir=""
+ cl> setinst fits
+.fi
+.ih
+SEE ALSO
+instruments, ccdred, ccdproc
+.endhelp
diff --git a/noao/imred/ccdred/doc/subsets.hlp b/noao/imred/ccdred/doc/subsets.hlp
new file mode 100644
index 00000000..78aafb01
--- /dev/null
+++ b/noao/imred/ccdred/doc/subsets.hlp
@@ -0,0 +1,99 @@
+.help subsets Jun87 noao.imred.ccdred
+.ih
+NAME
+subsets -- Description of CCD subsets
+.ih
+DESCRIPTION
+The \fBccdred\fR package groups observations into subsets.
+The image header parameter used to identify the subsets is defined
+in the instrument translation file (see help for \fBinstruments\fR).
+For example to select subsets by the header parameter "filters" the
+instrument translation file would contain the line:
+
+ subset filters
+
+Observations are generally grouped into subsets based on a common
+instrument configuration such as a filter, aperture mask,
+grating setting, etc. This allows combining images from several
+different subsets automatically and applying the appropriate
+flat field image when processing the observations. For example
+if the subsets are by filter then \fBflatcombine\fR will search
+through all the images, find the flat field images (based on the
+CCD type parameter), and combine the flat field images from
+each filter separately. Then when processing the images the
+flat field with the same filter as the observation is used.
+
+Each subset is assigned a short identifier. This is listed when
+using \fBccdlist\fR and is appended to a root name when combining
+images. Because the subset parameter in the image header may be
+any string there must be a mapping applied to generate unique
+identifiers. This mapping is defined in the file given by
+the package parameter \fIccdred.ssfile\fR. The file consists of
+lines with two fields (except that comment lines may be included
+as lines by themselves or following the second field):
+
+ 'subset string' subset_id
+
+where the subset string is the image header string and the subset_id is
+the identifier. A field must be quoted if it contains blanks. The
+user may create this file but generally it is created by the tasks. The
+tasks use the first word of the subset string as the default identifier
+and a number is appended if the first word is not unique. The
+following steps define the subset identifier:
+
+.ls (1)
+Search the subset file, if present, for a matching subset string and
+use the defined subset identifier.
+.le
+.ls (2)
+If there is no matching subset string use the first word of the
+image header subset string and, if it is not unique,
+add successive integers until it is unique.
+.le
+.ls (3)
+If the identifier is not in the subset file create the file and add an
+entry if necessary.
+.le
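+
+The bookkeeping above can be sketched in Python as follows; the file
+format is the two-field form described earlier, but the function name
+and parsing details are illustrative assumptions, not the \fBccdred\fR
+code:
+
+.nf
+    import shlex
+
+    def subset_id(subset_string, ssfile="subsets"):
+        # Read existing assignments: subset string -> identifier.
+        assigned = {}
+        try:
+            with open(ssfile) as f:
+                for line in f:
+                    fields = shlex.split(line, comments=True)
+                    if len(fields) >= 2:
+                        assigned[fields[0]] = fields[1]
+        except FileNotFoundError:
+            pass
+
+        if subset_string in assigned:       # step (1): already known
+            return assigned[subset_string]
+
+        words = subset_string.split()
+        ident = words[0] if words else "none"
+        n = 0
+        while ident in assigned.values():   # step (2): force uniqueness
+            n += 1
+            ident = (words[0] if words else "none") + str(n)
+
+        with open(ssfile, "a") as f:        # step (3): record the new entry
+            f.write("'%s'\t%s\n" % (subset_string, ident))
+        return ident
+.fi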
+.ih
+EXAMPLES
+1. The subset file is "subsets" (the default). The subset parameter is
+translated to "f1pos" in the image header (the old NOAO CCD parameter)
+which is an integer filter position. After running a task, say
+"ccdlist *.imh" to cause all filters to be checked, the subset file contains:
+
+.nf
+ '2' 2
+ '5' 5
+ '3' 3
+.fi
+
+The order reflects the order in which the filters were encountered.
+Suppose the user wants more descriptive names; then the subset
+file can be created or edited to the form:
+
+.nf
+ # Sample translation file.
+ '2' U
+ '3' B
+ '4' V
+.fi
+
+(This is only an example and does not mean these are standard filters.)
+
+2. As another example suppose the image header parameter is "filter" and
+contains more descriptive strings. The subset file might become:
+
+.nf
+ 'GG 385 Filter' GG
+ 'GG 495 Filter' GG1
+ 'RG 610 Filter' RG
+ 'H-ALPHA' H_ALPHA
+.fi
+
+In this case use of the first word was not very good but it is unique.
+It is better if the filters are encoded with the thought that the first
+word will be used by \fBccdred\fR; it should be short and unique.
+.ih
+SEE ALSO
+instruments
+.endhelp
diff --git a/noao/imred/ccdred/doc/zerocombine.hlp b/noao/imred/ccdred/doc/zerocombine.hlp
new file mode 100644
index 00000000..1646ea9c
--- /dev/null
+++ b/noao/imred/ccdred/doc/zerocombine.hlp
@@ -0,0 +1,121 @@
+.help zerocombine Aug91 noao.imred.ccdred
+.ih
+NAME
+zerocombine -- Combine and process zero level images
+.ih
+USAGE
+zerocombine input
+.ih
+PARAMETERS
+.ls input
+List of zero level images to combine. The \fIccdtype\fR parameter
+may be used to select the zero level images from a list containing all
+types of data.
+.le
+.ls output = "Zero"
+Output zero level root image name.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "minmax" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "zero"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = no
+Process the input images before combining?
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "none" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire region of the image is
+sampled (for efficiency the images are sampled if they are big enough).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 0, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+than the average in the presence of data which one wants to reject.
+However, computing the median is slower than computing the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The zero level images in the input image list are combined.
+The input images may be processed first if desired.
+The original images may be deleted automatically if desired.
+The output pixel datatype will be real.
+
+This task is a script which applies \fBccdproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+zero level images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+.ih
+EXAMPLES
+1. The image data contains four zero level images.
+To automatically select them and combine them as a background job
+using the default combining algorithm:
+
+.nf
+    cl> zerocombine ccd*.imh&
+.fi
+.ih
+SEE ALSO
+ccdproc, combine
+.endhelp
diff --git a/noao/imred/ccdred/flatcombine.cl b/noao/imred/ccdred/flatcombine.cl
new file mode 100644
index 00000000..78bd1e80
--- /dev/null
+++ b/noao/imred/ccdred/flatcombine.cl
@@ -0,0 +1,49 @@
+# FLATCOMBINE -- Process and combine flat field CCD images.
+
+procedure flatcombine (input)
+
+string input {prompt="List of flat field images to combine"}
+file output="Flat" {prompt="Output flat field root name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="avsigclip" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="flat" {prompt="CCD image type to combine"}
+bool process=yes {prompt="Process images before combining?"}
+bool subsets=yes {prompt="Combine images by subset parameter?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="mode" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=1 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=1. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+ ccdproc (ims, output="", ccdtype=ccdtype, noproc=no)
+
+ # Combine the flat field images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=subsets, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/ccdred/mkfringecor.par b/noao/imred/ccdred/mkfringecor.par
new file mode 100644
index 00000000..c088fe8b
--- /dev/null
+++ b/noao/imred/ccdred/mkfringecor.par
@@ -0,0 +1,11 @@
+input,s,a,,,,Input CCD images
+output,s,h,"",,,Output fringe images (same as input if none given)
+ccdtype,s,h,"",,,CCD image type to select
+xboxmin,r,h,5,0.,,Minimum smoothing box size in x at edges
+xboxmax,r,h,0.25,0.,,Maximum smoothing box size in x
+yboxmin,r,h,5,0.,,Minimum smoothing box size in y at edges
+yboxmax,r,h,0.25,0.,,Maximum smoothing box size in y
+clip,b,h,yes,,,Clip input pixels?
+lowsigma,r,h,2.5,0.,,Low clipping sigma
+highsigma,r,h,2.5,0.,,High clipping sigma
+ccdproc,pset,h,,,,CCD processing parameters
diff --git a/noao/imred/ccdred/mkillumcor.par b/noao/imred/ccdred/mkillumcor.par
new file mode 100644
index 00000000..cda8eb54
--- /dev/null
+++ b/noao/imred/ccdred/mkillumcor.par
@@ -0,0 +1,12 @@
+input,s,a,,,,Input CCD images
+output,s,a,,,,Output images (same as input if none given)
+ccdtype,s,h,"flat",,,CCD image type to select
+xboxmin,r,h,5,0.,,Minimum smoothing box size in x at edges
+xboxmax,r,h,0.25,0.,,Maximum smoothing box size in x
+yboxmin,r,h,5,0.,,Minimum smoothing box size in y at edges
+yboxmax,r,h,0.25,0.,,Maximum smoothing box size in y
+clip,b,h,yes,,,Clip input pixels?
+lowsigma,r,h,2.5,0.,,Low clipping sigma
+highsigma,r,h,2.5,0.,,High clipping sigma
+divbyzero,r,h,1.,,,Result for division by zero
+ccdproc,pset,h,,,,CCD processing parameters
diff --git a/noao/imred/ccdred/mkillumflat.par b/noao/imred/ccdred/mkillumflat.par
new file mode 100644
index 00000000..67897f46
--- /dev/null
+++ b/noao/imred/ccdred/mkillumflat.par
@@ -0,0 +1,12 @@
+input,s,a,,,,Input CCD flat field images
+output,s,a,,,,Output images (same as input if none given)
+ccdtype,s,h,"flat",,,CCD image type to select
+xboxmin,r,h,5,0.,,Minimum smoothing box size in x at edges
+xboxmax,r,h,0.25,0.,,Maximum smoothing box size in x
+yboxmin,r,h,5,0.,,Minimum smoothing box size in y at edges
+yboxmax,r,h,0.25,0.,,Maximum smoothing box size in y
+clip,b,h,yes,,,Clip input pixels?
+lowsigma,r,h,2.5,0.,,Low clipping sigma
+highsigma,r,h,2.5,0.,,High clipping sigma
+divbyzero,r,h,1.,,,Result for division by zero
+ccdproc,pset,h,,,,CCD processing parameters
diff --git a/noao/imred/ccdred/mkpkg b/noao/imred/ccdred/mkpkg
new file mode 100644
index 00000000..dab87bc3
--- /dev/null
+++ b/noao/imred/ccdred/mkpkg
@@ -0,0 +1,29 @@
+# Make CCDRED Package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $call ccdred
+ ;
+
+install:
+ $move xx_ccdred.e noaobin$x_ccdred.e
+ ;
+
+ccdred:
+ $omake x_ccdred.x
+ $link x_ccdred.o libpkg.a -lxtools -lcurfit -lgsurfit -lncar -lgks\
+ -o xx_ccdred.e
+ ;
+
+libpkg.a:
+ @src
+ @ccdtest
+ ;
diff --git a/noao/imred/ccdred/mkskycor.par b/noao/imred/ccdred/mkskycor.par
new file mode 100644
index 00000000..e719dfa0
--- /dev/null
+++ b/noao/imred/ccdred/mkskycor.par
@@ -0,0 +1,11 @@
+input,s,a,,,,Input CCD images
+output,s,a,,,,Output images (same as input if none given)
+ccdtype,s,h,"",,,CCD image type to select
+xboxmin,r,h,5,0.,,Minimum smoothing box size in x at edges
+xboxmax,r,h,0.25,0.,,Maximum smoothing box size in x
+yboxmin,r,h,5,0.,,Minimum smoothing box size in y at edges
+yboxmax,r,h,0.25,0.,,Maximum smoothing box size in y
+clip,b,h,yes,,,Clip input pixels?
+lowsigma,r,h,2.5,0.,,Low clipping sigma
+highsigma,r,h,2.5,0.,,High clipping sigma
+ccdproc,pset,h,,,,CCD processing parameters
diff --git a/noao/imred/ccdred/mkskyflat.par b/noao/imred/ccdred/mkskyflat.par
new file mode 100644
index 00000000..e719dfa0
--- /dev/null
+++ b/noao/imred/ccdred/mkskyflat.par
@@ -0,0 +1,11 @@
+input,s,a,,,,Input CCD images
+output,s,a,,,,Output images (same as input if none given)
+ccdtype,s,h,"",,,CCD image type to select
+xboxmin,r,h,5,0.,,Minimum smoothing box size in x at edges
+xboxmax,r,h,0.25,0.,,Maximum smoothing box size in x
+yboxmin,r,h,5,0.,,Minimum smoothing box size in y at edges
+yboxmax,r,h,0.25,0.,,Maximum smoothing box size in y
+clip,b,h,yes,,,Clip input pixels?
+lowsigma,r,h,2.5,0.,,Low clipping sigma
+highsigma,r,h,2.5,0.,,High clipping sigma
+ccdproc,pset,h,,,,CCD processing parameters
diff --git a/noao/imred/ccdred/setinstrument.cl b/noao/imred/ccdred/setinstrument.cl
new file mode 100644
index 00000000..c10a7427
--- /dev/null
+++ b/noao/imred/ccdred/setinstrument.cl
@@ -0,0 +1,57 @@
+# SETINSTRUMENT -- Set up instrument parameters for the CCD reduction tasks.
+#
+# This task sets default parameters based on an instrument ID.
+
+procedure setinstrument (instrument)
+
+char instrument {prompt="Instrument ID (type ? for a list)"}
+char site="kpno" {prompt="Site ID"}
+char directory="ccddb$" {prompt="Instrument directory"}
+bool review=yes {prompt="Review instrument parameters?"}
+char query {prompt="Instrument ID (type q to quit)",
+ mode="q"}
+
+begin
+ string inst, instdir, instmen, instfile
+
+ # Define instrument directory, menu, and file
+ instdir = directory
+ if (site != "")
+ instdir = instdir // site // "/"
+ instmen = instdir // "instruments.men"
+ inst = instrument
+ instfile = instdir // inst // ".dat"
+
+ # Loop until a valid instrument file is given.
+ while (inst != "" && !access (instfile)) {
+ if (access (instmen))
+ page (instmen)
+ else if (inst == "?")
+ print ("Instrument list ", instmen, " not found")
+ else
+ print ("Instrument file ", instfile, " not found")
+ print ("")
+ inst = query
+ if (inst == "q")
+ return
+ instrument = inst
+ instfile = instdir // inst // ".dat"
+ }
+
+ # Set instrument parameter.
+ if (access (instfile))
+ ccdred.instrument = instfile
+ else
+ ccdred.instrument = ""
+
+ # Run instrument setup script.
+ instfile = instdir // inst // ".cl"
+ if (access (instfile))
+ cl (< instfile)
+
+ # Review parameters if desired.
+ if (review) {
+ eparam ("ccdred")
+ eparam ("ccdproc")
+ }
+end
diff --git a/noao/imred/ccdred/skyreplace.par b/noao/imred/ccdred/skyreplace.par
new file mode 100644
index 00000000..b611c30d
--- /dev/null
+++ b/noao/imred/ccdred/skyreplace.par
@@ -0,0 +1,3 @@
+image,f,a,,,,Image to be modified
+frame,i,h,1,,,Image display frame
+cursor,*gcur,h,,,,Cursor
diff --git a/noao/imred/ccdred/src/calimage.x b/noao/imred/ccdred/src/calimage.x
new file mode 100644
index 00000000..82efdf54
--- /dev/null
+++ b/noao/imred/ccdred/src/calimage.x
@@ -0,0 +1,367 @@
+include <error.h>
+include <imset.h>
+include "ccdtypes.h"
+
+define SZ_SUBSET 16 # Maximum size of subset string
+define IMAGE Memc[$1+($2-1)*SZ_FNAME] # Image string
+define SUBSET Memc[$1+($2-1)*(SZ_SUBSET+1)] # Subset string
+
+# CAL_IMAGE -- Return a calibration image for a specified input image.
+# CAL_OPEN -- Open the calibration image list.
+# CAL_CLOSE -- Close the calibration image list.
+# CAL_LIST -- Add images to the calibration image list.
+#
+# The open procedure is called first to get the calibration image
+# lists and add them to an internal list. Calibration images from the
+# input list are also added so that calibration images may be specified
+# either from the calibration image list parameters or in the input image list.
+# Existence errors and duplicate calibration images are ignored.
+# Validity checks are made when the calibration images are requested.
+#
+# During processing the calibration image names are requested for each input
+# image. The calibration image list is searched for a calibration image of
+# the right type and subset. If more than one is found the first one is
+# returned and a warning given for the others. The warning is only issued
+# once. If no calibration image is found then an error is returned.
+#
+# The calibration image list must be closed at the end of processing the
+# input images.
+
+
+# CAL_IMAGE -- Return a calibration image of a particular type.
+# Search the calibration list for the first calibration image of the desired
+# type and subset. Print a warning if there is more than one possible
+# calibration image and return an error if there is no calibration image.
+
+procedure cal_image (im, ccdtype, nscan, image, maxchars)
+
+pointer im # Image to be processed
+int	ccdtype			# Calibration CCD image type desired
+int nscan # Number of scan rows desired
+char image[maxchars] # Calibration image (returned)
+int maxchars # Maximum number chars in image name
+
+int i, m, n
+pointer sp, subset, str
+bool strne(), ccd_cmp()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (subset, SZ_SUBSET, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ m = 0
+ n = 0
+ switch (ccdtype) {
+ case ZERO, DARK:
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ n = n + 1
+ if (n == 1) {
+ m = i
+ } else {
+ if (Memi[nscans+i-1] == Memi[nscans+m-1]) {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ } else if (Memi[nscans+m-1] != nscan &&
+ (Memi[nscans+i-1] == nscan ||
+ Memi[nscans+i-1] == 1)) {
+ m = i
+ }
+ }
+ }
+ case FLAT, ILLUM, FRINGE:
+ call ccdsubset (im, Memc[subset], SZ_SUBSET)
+
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ if (strne (SUBSET(subsets,i), Memc[subset]))
+ next
+ n = n + 1
+ if (n == 1) {
+ m = i
+ } else {
+ if (Memi[nscans+i-1] == Memi[nscans+m-1]) {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ } else if (Memi[nscans+m-1] != nscan &&
+ (Memi[nscans+i-1] == nscan ||
+ Memi[nscans+i-1] == 1)) {
+ m = i
+ }
+ }
+ }
+ }
+
+ # If no calibration image is found then it is an error.
+ if (m == 0) {
+ switch (ccdtype) {
+ case ZERO:
+ call error (0, "No zero level calibration image found")
+ case DARK:
+ call error (0, "No dark count calibration image found")
+ case FLAT:
+ call sprintf (Memc[str], SZ_LINE,
+ "No flat field calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case ILLUM:
+ call sprintf (Memc[str], SZ_LINE,
+ "No illumination calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case FRINGE:
+ call sprintf (Memc[str], SZ_LINE,
+ "No fringe calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ }
+ }
+
+ call strcpy (IMAGE(images,m), image, maxchars)
+ if (nscan != Memi[nscans+m-1]) {
+ if (nscan != 1 && Memi[nscans+m-1] == 1)
+ call cal_scan (nscan, image, maxchars)
+ else {
+ call sprintf (Memc[str], SZ_LINE,
+ "Cannot find or create calibration with nscan of %d")
+ call pargi (nscan)
+ call error (0, Memc[str])
+ }
+ }
+
+ # Check that the input image is not the same as the calibration image.
+ call imstats (im, IM_IMAGENAME, Memc[str], SZ_LINE)
+ if (ccd_cmp (Memc[str], IMAGE(images,m))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Calibration image %s is the same as the input image")
+ call pargstr (image)
+ call error (0, Memc[str])
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_OPEN -- Create a list of calibration images from the input image list
+# and the calibration image lists.
+
+procedure cal_open (list)
+
+int list # List of input images
+int list1 # List of calibration images
+
+pointer sp, str
+int ccdtype, strdic(), imtopenp()
+bool clgetb()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset numbers
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+errchk cal_list
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ call clgstr ("ccdtype", Memc[str], SZ_LINE)
+ call xt_stripwhite (Memc[str])
+ if (Memc[str] == EOS)
+ ccdtype = NONE
+ else
+ ccdtype = strdic (Memc[str], Memc[str], SZ_LINE, CCDTYPES)
+
+ # Add calibration images to list.
+ nimages = 0
+ if (ccdtype != ZERO && clgetb ("zerocor")) {
+ list1 = imtopenp ("zero")
+ call cal_list (list1, ZERO)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && clgetb ("darkcor")) {
+ list1 = imtopenp ("dark")
+ call cal_list (list1, DARK)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ clgetb ("flatcor")) {
+ list1 = imtopenp ("flat")
+ call cal_list (list1, FLAT)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != ILLUM && clgetb ("illumcor")) {
+ list1 = imtopenp ("illum")
+ call cal_list (list1, ILLUM)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != FRINGE && clgetb ("fringecor")) {
+ list1 = imtopenp ("fringe")
+ call cal_list (list1, FRINGE)
+ call imtclose (list1)
+ }
+ if (list != NULL) {
+ call cal_list (list, UNKNOWN)
+ call imtrew (list)
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_CLOSE -- Free memory from the internal calibration image list.
+
+procedure cal_close ()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ if (nimages > 0) {
+ call mfree (ccdtypes, TY_INT)
+ call mfree (subsets, TY_CHAR)
+ call mfree (nscans, TY_INT)
+ call mfree (images, TY_CHAR)
+ }
+end
+
+
+# CAL_LIST -- Add calibration images to an internal list.
+# Map each image and get the CCD image type and subset.
+# If the ccdtype is given as a procedure argument this overrides the
+# image header type. For the calibration images add the type, subset,
+# and image name to dynamic arrays. Ignore duplicate names.
+
+procedure cal_list (list, listtype)
+
+pointer list # Image list
+int listtype # CCD type of image in list.
+ # Overrides header type if not UNKNOWN.
+
+int i, ccdtype, ccdtypei(), ccdnscan(), imtgetim()
+pointer sp, image, im, immap()
+bool streq()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+
+ while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+ # Open the image. If an explicit type is given it is an
+ # error if the image can't be opened.
+ iferr (im = immap (Memc[image], READ_ONLY, 0)) {
+ if (listtype == UNKNOWN)
+ next
+ else
+ call erract (EA_ERROR)
+ }
+
+ # Override image header CCD type if a list type is given.
+ if (listtype == UNKNOWN)
+ ccdtype = ccdtypei (im)
+ else
+ ccdtype = listtype
+
+ switch (ccdtype) {
+ case ZERO, DARK, FLAT, ILLUM, FRINGE:
+ # Check for duplication.
+ for (i=1; i<=nimages; i=i+1)
+ if (streq (Memc[image], IMAGE(images,i)))
+ break
+ if (i <= nimages)
+ break
+
+ # Allocate memory for a new image.
+ if (i == 1) {
+ call malloc (ccdtypes, i, TY_INT)
+ call malloc (subsets, i * (SZ_SUBSET+1), TY_CHAR)
+ call malloc (nscans, i, TY_INT)
+ call malloc (images, i * SZ_FNAME, TY_CHAR)
+ } else {
+ call realloc (ccdtypes, i, TY_INT)
+ call realloc (subsets, i * SZ_FNAME, TY_CHAR)
+ call realloc (nscans, i, TY_INT)
+ call realloc (images, i * SZ_FNAME, TY_CHAR)
+ }
+
+ # Enter the ccdtype, subset, and image name.
+ Memi[ccdtypes+i-1] = ccdtype
+ Memi[nscans+i-1] = ccdnscan (im, ccdtype)
+ call ccdsubset (im, SUBSET(subsets,i), SZ_SUBSET)
+ call strcpy (Memc[image], IMAGE(images,i), SZ_FNAME-1)
+ nimages = i
+ }
+ call imunmap (im)
+ }
+ call sfree (sp)
+end
+
+
+# CAL_SCAN -- Generate name for scan corrected calibration image.
+
+procedure cal_scan (nscan, image, maxchar)
+
+int nscan #I Number of scan lines
+char image[maxchar] #U Input root name, output scan name
+int maxchar #I Maximum number of chars in image name
+
+bool clgetb()
+pointer sp, root, ext
+
+begin
+ # Check if this operation is desired.
+ if (!clgetb ("scancor") || nscan == 1)
+ return
+
+ call smark (sp)
+ call salloc (root, SZ_FNAME, TY_CHAR)
+ call salloc (ext, SZ_FNAME, TY_CHAR)
+
+ call xt_imroot (image, Memc[root], SZ_FNAME)
+ call xt_imext (image, Memc[ext], SZ_FNAME)
+ if (IS_INDEFI (nscan)) {
+ call sprintf (image, maxchar, "%s.1d%s")
+ call pargstr (Memc[root])
+ call pargstr (Memc[ext])
+ } else {
+ call sprintf (image, maxchar, "%s.%d%s")
+ call pargstr (Memc[root])
+ call pargi (nscan)
+ call pargstr (Memc[ext])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/ccdcache.com b/noao/imred/ccdred/src/ccdcache.com
new file mode 100644
index 00000000..91ffae12
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcache.com
@@ -0,0 +1,10 @@
+# Common data defining the cached images and data.
+
+int ccd_ncache # Number of images cached
+int ccd_maxcache # Maximum size of cache
+int ccd_szcache # Current size of cache
+int ccd_oldsize # Original memory size
+int ccd_pcache # Pointer to image cache structures
+
+common /ccdcache_com/ ccd_ncache, ccd_maxcache, ccd_szcache, ccd_oldsize,
+ ccd_pcache
diff --git a/noao/imred/ccdred/src/ccdcache.h b/noao/imred/ccdred/src/ccdcache.h
new file mode 100644
index 00000000..f7de3a2c
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcache.h
@@ -0,0 +1,10 @@
+# Definition for image cache structure.
+
+define CCD_LENCACHE 6
+
+define CCD_IM Memi[$1] # IMIO pointer
+define CCD_NACCESS Memi[$1+1] # Number of accesses requested
+define CCD_SZDATA Memi[$1+2] # Size of data in cache in chars
+define CCD_DATA Memi[$1+3] # Pointer to data cache
+define CCD_BUFR Memi[$1+4] # Pointer to real image line
+define CCD_BUFS Memi[$1+5] # Pointer to short image line
diff --git a/noao/imred/ccdred/src/ccdcache.x b/noao/imred/ccdred/src/ccdcache.x
new file mode 100644
index 00000000..78f84ace
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcache.x
@@ -0,0 +1,381 @@
+include <imhdr.h>
+include <imset.h>
+include <mach.h>
+include "ccdcache.h"
+
+.help ccdcache Jun87
+.nf ---------------------------------------------------------------------
+The purpose of the CCD image caching package is to minimize image mapping
+time, to prevent multiple mapping of the same image, and to keep entire
+calibration images in memory for extended periods to minimize disk
+I/O. It is selected by specifying a maximum caching size based on the
+available memory. When there is not enough memory for caching (or by
+setting the size to 0) then standard IMIO is used. When there is
+enough memory then as many images as will fit into the specified cache
+size are kept in memory. Images are also kept mapped until explicitly
+flushed or the entire package is closed.
+
+This is a special purpose interface intended only for the CCDRED package.
+It has the following restrictions.
+
+ 1. Images must be processed to be cached.
+    2. Images must be two dimensional to be cached.
+ 3. Images must be real or short to be cached.
+ 4. Images must be read_only to be cached.
+ 5. Cached images remain in memory until they are displaced,
+ flushed, or the package is closed.
+
+The package consists of the following procedures.
+
+ ccd_open ()
+ im = ccd_cache (image)
+ ptr = ccd_glr (im, col1, col2, line)
+ ptr = ccd_gls (im, col1, col2, line)
+ ccd_unmap (im)
+ ccd_flush (im)
+ ccd_close ()
+
+
+CCD_OPEN: Initialize the image cache. Called at the beginning.
+CCD_CLOSE: Flush the image cache and restore memory. Called at the end.
+
+CCD_CACHE: Open an image and save the IMIO pointer. If the image has been
+opened previously it need not be opened again. If image data caching
+is specified the image data may be read into memory. In order for
+image data caching to occur the image has to have been processed,
+be two dimensional, be real or short, and the total cache memory not
+be exceeded. If an error occurs in reading the image into memory
+the data is not cached.
+
+CCD_UNMAP: The image access number is decremented but the image
+is not closed, in case it will be used again.
+
+CCD_FLUSH: The image is closed and flushed from the cache.
+
+CCD_GLR, CCD_GLS: Get a real or short image line. If the image data is cached
+then a pointer to the line is quickly returned. If the data is not cached then
+IMIO is used to get the pointer.
+.endhelp ---------------------------------------------------------------------
+
+
+
+# CCD_CACHE -- Open an image and possibly cache it in memory.
+
+pointer procedure ccd_cache (image, ccdtype)
+
+char image[ARB] # Image to be opened
+int ccdtype # Image type
+
+int i, nc, nl, nbytes
+pointer sp, str, pcache, pcache1, im
+
+int sizeof()
+pointer immap(), imgs2r(), imgs2s()
+bool streq(), ccdcheck()
+errchk immap, imgs2r, imgs2s
+
+include "ccdcache.com"
+
+define done_ 99
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Check if the image is cached.
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ im = CCD_IM(pcache)
+ call imstats (im, IM_IMAGENAME, Memc[str], SZ_LINE)
+ if (streq (image, Memc[str]))
+ break
+ }
+
+ # If the image is not cached open it and allocate memory.
+ if (i > ccd_ncache) {
+ im = immap (image, READ_ONLY, 0)
+ ccd_ncache = i
+ call realloc (ccd_pcache, ccd_ncache, TY_INT)
+ call malloc (pcache, CCD_LENCACHE, TY_STRUCT)
+ Memi[ccd_pcache+i-1] = pcache
+ CCD_IM(pcache) = im
+ CCD_NACCESS(pcache) = 0
+ CCD_SZDATA(pcache) = 0
+ CCD_DATA(pcache) = NULL
+ CCD_BUFR(pcache) = NULL
+ CCD_BUFS(pcache) = NULL
+ }
+
+ # If not caching the image data or if the image data has already
+ # been cached we are done.
+ if ((ccd_maxcache == 0) || (CCD_SZDATA(pcache) > 0))
+ goto done_
+
+ # Don't cache unprocessed calibration image data.
+ # This is the only really CCDRED specific code.
+ if (ccdcheck (im, ccdtype))
+ goto done_
+
+ # Check image is 2D and a supported pixel type.
+ if (IM_NDIM(im) != 2)
+ goto done_
+ if ((IM_PIXTYPE(im) != TY_REAL) && (IM_PIXTYPE(im) !=TY_SHORT))
+ goto done_
+
+ # Compute the size of the image data.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ nbytes = nc * nl * sizeof (IM_PIXTYPE(im)) * SZB_CHAR
+
+ # Free memory not in use.
+ if (ccd_szcache + nbytes > ccd_maxcache) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache1 = Memi[ccd_pcache+i-1]
+ if (CCD_NACCESS(pcache1) == 0) {
+ if (CCD_SZDATA(pcache1) > 0) {
+ ccd_szcache = ccd_szcache - CCD_SZDATA(pcache1)
+ CCD_SZDATA(pcache1) = 0
+ CCD_DATA(pcache1) = NULL
+ call mfree (CCD_BUFR(pcache1), TY_REAL)
+ call mfree (CCD_BUFS(pcache1), TY_SHORT)
+ call imseti (CCD_IM(pcache1), IM_CANCEL, YES)
+ if (ccd_szcache + nbytes > ccd_maxcache)
+ break
+ }
+ }
+ }
+ }
+ if (ccd_szcache + nbytes > ccd_maxcache)
+ goto done_
+
+ # Cache the image data
+ iferr {
+ switch (IM_PIXTYPE (im)) {
+ case TY_SHORT:
+ CCD_DATA(pcache) = imgs2s (im, 1, nc, 1, nl)
+ case TY_REAL:
+ CCD_DATA(pcache) = imgs2r (im, 1, nc, 1, nl)
+ }
+ ccd_szcache = ccd_szcache + nbytes
+ CCD_SZDATA(pcache) = nbytes
+ } then {
+ call imunmap (im)
+ im = immap (image, READ_ONLY, 0)
+ CCD_IM(pcache) = im
+ CCD_SZDATA(pcache) = 0
+ }
+
+done_
+ CCD_NACCESS(pcache) = CCD_NACCESS(pcache) + 1
+ call sfree (sp)
+ return (im)
+end
+
+
+# CCD_OPEN -- Initialize the CCD image cache.
+
+procedure ccd_open (max_cache)
+
+int max_cache # Maximum cache size in bytes
+
+int max_size, begmem()
+include "ccdcache.com"
+
+begin
+ ccd_ncache = 0
+ ccd_maxcache = max_cache
+ ccd_szcache = 0
+ call malloc (ccd_pcache, 1, TY_INT)
+
+ # Ask for the maximum physical memory.
+ if (ccd_maxcache > 0) {
+ ccd_oldsize = begmem (0, ccd_oldsize, max_size)
+ call fixmem (max_size)
+ }
+end
+
+
+# CCD_UNMAP -- Unmap an image.
+# Don't actually unmap the image since it may be opened again.
+
+procedure ccd_unmap (im)
+
+pointer im # IMIO pointer
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ CCD_NACCESS(pcache) = CCD_NACCESS(pcache) - 1
+ return
+ }
+ }
+
+ call imunmap (im)
+end
+
+
+# CCD_FLUSH -- Close image and flush from cache.
+
+procedure ccd_flush (im)
+
+pointer im # IMIO pointer
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ ccd_ncache = ccd_ncache - 1
+ ccd_szcache = ccd_szcache - CCD_SZDATA(pcache)
+ call mfree (CCD_BUFR(pcache), TY_REAL)
+ call mfree (CCD_BUFS(pcache), TY_SHORT)
+ call mfree (pcache, TY_STRUCT)
+ for (; i<=ccd_ncache; i=i+1)
+ Memi[ccd_pcache+i-1] = Memi[ccd_pcache+i]
+ break
+ }
+ }
+
+ call imunmap (im)
+end
+
+
+# CCD_CLOSE -- Close the image cache.
+
+procedure ccd_close ()
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ call imunmap (CCD_IM(pcache))
+ call mfree (CCD_BUFR(pcache), TY_REAL)
+ call mfree (CCD_BUFS(pcache), TY_SHORT)
+ call mfree (pcache, TY_STRUCT)
+ }
+ call mfree (ccd_pcache, TY_INT)
+
+ # Restore memory.
+ call fixmem (ccd_oldsize)
+end
+
+
+# CCD_GLR -- Get a line of real data from the image.
+# If the image data is cached this is fast (particularly if the datatype
+# matches). If the image data is not cached then use IMIO.
+
+pointer procedure ccd_glr (im, col1, col2, line)
+
+pointer im # IMIO pointer
+int col1, col2 # Columns
+int line # Line
+
+int i
+pointer pcache, data, bufr, imgs2r()
+errchk malloc
+include "ccdcache.com"
+
+begin
+ # Quick test for cached data.
+ if (ccd_maxcache == 0)
+ return (imgs2r (im, col1, col2, line, line))
+
+ # Return cached data.
+ if (IM_PIXTYPE(im) == TY_REAL) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0)
+ return (CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1)
+ else
+ break
+ }
+ }
+ } else {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0) {
+ data = CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1
+ bufr = CCD_BUFR(pcache)
+ if (bufr == NULL) {
+ call malloc (bufr, IM_LEN(im,1), TY_REAL)
+ CCD_BUFR(pcache) = bufr
+ }
+ call achtsr (Mems[data], Memr[bufr], IM_LEN(im,1))
+ return (bufr)
+ } else
+ break
+ }
+ }
+ }
+
+ # Return uncached data.
+ return (imgs2r (im, col1, col2, line, line))
+end
+
+
+# CCD_GLS -- Get a line of short data from the image.
+# If the image data is cached this is fast (particularly if the datatype
+# matches). If the image data is not cached then use IMIO.
+
+pointer procedure ccd_gls (im, col1, col2, line)
+
+pointer im # IMIO pointer
+int col1, col2 # Columns
+int line # Line
+
+int i
+pointer pcache, data, bufs, imgs2s()
+errchk malloc
+include "ccdcache.com"
+
+begin
+ # Quick test for cached data.
+ if (ccd_maxcache == 0)
+ return (imgs2s (im, col1, col2, line, line))
+
+ # Return cached data.
+ if (IM_PIXTYPE(im) == TY_SHORT) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0)
+ return (CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1)
+ else
+ break
+ }
+ }
+ } else {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0) {
+ data = CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1
+ bufs = CCD_BUFS(pcache)
+ if (bufs == NULL) {
+ call malloc (bufs, IM_LEN(im,1), TY_SHORT)
+ CCD_BUFS(pcache) = bufs
+ }
+ call achtrs (Memr[data], Mems[bufs], IM_LEN(im,1))
+ return (bufs)
+ } else
+ break
+ }
+ }
+ }
+
+ # Return uncached data.
+ return (imgs2s (im, col1, col2, line, line))
+end
diff --git a/noao/imred/ccdred/src/ccdcheck.x b/noao/imred/ccdred/src/ccdcheck.x
new file mode 100644
index 00000000..0dde14f9
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcheck.x
@@ -0,0 +1,67 @@
+include <imhdr.h>
+include "ccdtypes.h"
+
+# CCDCHECK -- Check processing status.
+
+bool procedure ccdcheck (im, ccdtype)
+
+pointer im # IMIO pointer
+int ccdtype # CCD type
+
+real ccdmean, hdmgetr()
+bool clgetb(), ccdflag()
+long time
+int hdmgeti()
+
+begin
+ if (clgetb ("trim") && !ccdflag (im, "trim"))
+ return (true)
+ if (clgetb ("fixpix") && !ccdflag (im, "fixpix"))
+ return (true)
+ if (clgetb ("overscan") && !ccdflag (im, "overscan"))
+ return (true)
+
+ switch (ccdtype) {
+ case ZERO:
+ if (clgetb ("readcor") && !ccdflag (im, "readcor"))
+ return (true)
+ case DARK:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ case FLAT:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("scancor") && !ccdflag (im, "scancor"))
+ return (true)
+ iferr (ccdmean = hdmgetr (im, "ccdmean"))
+ return (true)
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (time < IM_MTIME(im))
+ return (true)
+ case ILLUM:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("flatcor") && !ccdflag (im, "flatcor"))
+ return (true)
+ iferr (ccdmean = hdmgetr (im, "ccdmean"))
+ return (true)
+ default:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("flatcor") && !ccdflag (im, "flatcor"))
+ return (true)
+ if (clgetb ("illumcor") && !ccdflag (im, "illumcor"))
+ return (true)
+ if (clgetb ("fringecor") && !ccdflag (im, "fringcor"))
+ return (true)
+ }
+
+ return (false)
+end
diff --git a/noao/imred/ccdred/src/ccdcmp.x b/noao/imred/ccdred/src/ccdcmp.x
new file mode 100644
index 00000000..a2687934
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcmp.x
@@ -0,0 +1,23 @@
+# CCD_CMP -- Compare two image names with extensions ignored.
+
+bool procedure ccd_cmp (image1, image2)
+
+char image1[ARB] # First image
+char image2[ARB] # Second image
+
+int i, j, strmatch(), strlen(), strncmp()
+bool streq()
+
+begin
+ if (streq (image1, image2))
+ return (true)
+
+ i = max (strmatch (image1, ".imh"), strmatch (image1, ".hhh"))
+ if (i == 0)
+ i = strlen (image1)
+ j = max (strmatch (image2, ".imh"), strmatch (image2, ".hhh"))
+ if (j == 0)
+ j = strlen (image2)
+
+ return (strncmp (image1, image2, max (i, j)) == 0)
+end
diff --git a/noao/imred/ccdred/src/ccdcopy.x b/noao/imred/ccdred/src/ccdcopy.x
new file mode 100644
index 00000000..a12b2123
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdcopy.x
@@ -0,0 +1,31 @@
+include <imhdr.h>
+
+# CCDCOPY -- Copy an image. This should be done with an IMIO procedure
+# but there isn't one yet.
+
+procedure ccdcopy (old, new)
+
+char old[ARB] # Image to be copied
+char new[ARB] # New copy
+
+int i, nc, nl
+pointer in, out, immap(), imgl2s(), impl2s(), imgl2r(), impl2r()
+
+begin
+ in = immap (old, READ_ONLY, 0)
+ out = immap (new, NEW_COPY, in)
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ switch (IM_PIXTYPE(in)) {
+ case TY_SHORT:
+ do i = 1, nl
+ call amovs (Mems[imgl2s(in,i)], Mems[impl2s(out,i)], nc)
+ default:
+ do i = 1, nl
+ call amovr (Memr[imgl2r(in,i)], Memr[impl2r(out,i)], nc)
+ }
+
+ call imunmap (in)
+ call imunmap (out)
+end
diff --git a/noao/imred/ccdred/src/ccddelete.x b/noao/imred/ccdred/src/ccddelete.x
new file mode 100644
index 00000000..90931135
--- /dev/null
+++ b/noao/imred/ccdred/src/ccddelete.x
@@ -0,0 +1,55 @@
+# CCDDELETE -- Delete an image by renaming it to a backup image.
+#
+# 1. Get the backup prefix which may be a path name.
+# 2. If no prefix is specified then delete the image without a backup.
+# 3. If there is a prefix then make a backup image name.
+# Rename the image to the backup image name.
+#
+# The backup image name is formed by prepending the backup prefix to the
+# image name. If a previous backup exist append integers to the backup
+# prefix until a nonexistant image name is created.
+
+procedure ccddelete (image)
+
+char image[ARB] # Image to delete (backup)
+
+int i, imaccess()
+pointer sp, prefix, backup
+errchk imdelete, imrename
+
+begin
+ call smark (sp)
+ call salloc (prefix, SZ_FNAME, TY_CHAR)
+ call salloc (backup, SZ_FNAME, TY_CHAR)
+
+ # Get the backup prefix.
+ call clgstr ("backup", Memc[prefix], SZ_FNAME)
+ call xt_stripwhite (Memc[prefix])
+
+ # If there is no prefix then simply delete the image.
+ if (Memc[prefix] == EOS)
+ call imdelete (image)
+
+ # Otherwise create a backup image name which does not exist and
+ # rename the image to the backup image.
+
+ else {
+ i = 0
+ repeat {
+ if (i == 0) {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%s")
+ call pargstr (Memc[prefix])
+ call pargstr (image)
+ } else {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%d%s")
+ call pargstr (Memc[prefix])
+ call pargi (i)
+ call pargstr (image)
+ }
+ i = i + 1
+ } until (imaccess (Memc[backup], READ_ONLY) == NO)
+ call imrename (image, Memc[backup])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/ccdflag.x b/noao/imred/ccdred/src/ccdflag.x
new file mode 100644
index 00000000..427365d2
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdflag.x
@@ -0,0 +1,27 @@
+# CCDFLAG -- Determine if a CCD processing flag is set. This is less
+# obvious than it sounds because the default keyword value is used to
+# indicate a false flag.
+
+bool procedure ccdflag (im, name)
+
+pointer im # IMIO pointer
+char name[ARB] # CCD flag name
+
+bool flag, strne()
+pointer sp, str1, str2
+
+begin
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the flag string value and the default value.
+ # The flag is true if the value and the default do not match.
+
+ call hdmgstr (im, name, Memc[str1], SZ_LINE)
+ call hdmgdef (name, Memc[str2], SZ_LINE)
+ flag = strne (Memc[str1], Memc[str2])
+
+ call sfree (sp)
+ return (flag)
+end
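
In other words, a flag counts as set exactly when the keyword value read from the image header differs from the default supplied by the instrument translation file. A tiny C sketch of that test (function name and sample values are illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* A processing flag is set when the header keyword value differs from
       the default value defined in the translation file. */
    static int ccdflag(const char *header_value, const char *default_value)
    {
        return strcmp(header_value, default_value) != 0;
    }

    int main(void)
    {
        printf("%d\n", ccdflag("Dec 12 1990", ""));   /* 1: flag is set */
        printf("%d\n", ccdflag("", ""));              /* 0: flag is not set */
        return 0;
    }
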
diff --git a/noao/imred/ccdred/src/ccdinst1.key b/noao/imred/ccdred/src/ccdinst1.key
new file mode 100644
index 00000000..2a3ef1d4
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdinst1.key
@@ -0,0 +1,27 @@
+ CCDINSTRUMENT COMMANDS
+
+? Print command summary
+help Print command summary
+imheader Page image header
+instrument Print current instrument translation file
+next Next image
+newimage Select a new image
+quit Quit
+read Read instrument translation file
+show Show current translations
+write Write instrument translation file
+
+translate Translate image string selected by the imagetyp parameter
+ to one of the CCDRED types given as an argument or queried:
+ object, zero, dark, flat, comp, illum, fringe, other
+
+The following are CCDRED parameters which may be translated. You are
+queried for the image keyword to use or it may be typed after the command.
+An optional default value (returned if the image does not contain the
+keyword) may be typed as the second argument of the command.
+
+ BASIC PARAMETERS
+imagetyp Image type parameter (see also translate)
+subset Subset or filter parameter
+exptime Exposure time
+darktime Dark time (may be same as the exposure time)
diff --git a/noao/imred/ccdred/src/ccdinst2.key b/noao/imred/ccdred/src/ccdinst2.key
new file mode 100644
index 00000000..bd909433
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdinst2.key
@@ -0,0 +1,39 @@
+ CCDINSTRUMENT COMMANDS
+
+? Print command summary
+help Print command summary
+imheader Page image header
+instrument Print current instrument translation file
+next Next image
+newimage Select a new image
+quit Quit
+read Read instrument translation file
+show Show current translations
+write Write instrument translation file
+
+translate Translate image string selected by the imagetyp parameter
+ to one of the CCDRED types given as an argument or queried:
+ object, zero, dark, flat, comp, illum, fringe, other
+
+The following are CCDRED parameters which may be translated. You are
+queried for the image keyword to use or it may be typed after the command.
+An optional default value (returned if the image does not contain the
+keyword) may be typed as the second argument of the command.
+
+ BASIC PARAMETERS
+imagetyp Image type parameter (see also translate)
+subset Subset or filter parameter
+exptime Exposure time
+darktime Dark time (may be same as the exposure time)
+
+ USEFUL DEFAULT GEOMETRY PARAMETERS
+biassec Bias section (often has a default value)
+trimsec Trim section (often has a default value)
+
+ COMMON PROCESSING FLAGS
+fixpix Bad pixel replacement flag
+overscan Overscan correction flag
+trim Trim flag
+zerocor Zero level correction flag
+darkcor Dark count correction flag
+flatcor Flat field correction flag
diff --git a/noao/imred/ccdred/src/ccdinst3.key b/noao/imred/ccdred/src/ccdinst3.key
new file mode 100644
index 00000000..7215aa67
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdinst3.key
@@ -0,0 +1,62 @@
+ CCDINSTRUMENT COMMANDS
+
+? Print command summary
+help Print command summary
+imheader Page image header
+instrument Print current instrument translation file
+next Next image
+newimage Select a new image
+quit Quit
+read Read instrument translation file
+show Show current translations
+write Write instrument translation file
+
+translate Translate image string selected by the imagetyp parameter
+ to one of the CCDRED types given as an argument or queried:
+ object, zero, dark, flat, comp, illum, fringe, other
+
+The following are CCDRED parameters which may be translated. You are
+queried for the image keyword to use or it may be typed after the command.
+An optional default value (returned if the image does not contain the
+keyword) may be typed as the second argument of the command.
+
+ BASIC PARAMETERS
+imagetyp Image type parameter (see also translate)
+subset Subset or filter parameter
+exptime Exposure time
+darktime Dark time (may be same as the exposure time)
+
+ USEFUL DEFAULT GEOMETRY PARAMETERS
+biassec Bias section (often has a default value)
+trimsec Trim section (often has a default value)
+
+ COMMON PROCESSING FLAGS
+fixpix Bad pixel replacement flag
+overscan Overscan correction flag
+trim Trim flag
+zerocor Zero level correction flag
+darkcor Dark count correction flag
+flatcor Flat field correction flag
+
+ RARELY TRANSLATED PARAMETERS
+ccdsec CCD section
+datasec Data section
+fixfile Bad pixel file
+
+fringcor Fringe correction flag
+illumcor	Illumination correction flag
+readcor		One-dimensional zero level readout correction flag
+scancor Scan mode correction flag
+
+illumflt	Illumination flat image
+mkfringe Fringe image
+mkillum Illumination image
+skyflat Sky flat image
+
+ccdmean Mean value
+fringscl Fringe scale factor
+ncombine Number of images combined
+date-obs Date of observations
+dec Declination
+ra Right Ascension
+title Image title
diff --git a/noao/imred/ccdred/src/ccdlog.x b/noao/imred/ccdred/src/ccdlog.x
new file mode 100644
index 00000000..48453704
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdlog.x
@@ -0,0 +1,46 @@
+include <imhdr.h>
+include <imset.h>
+
+# CCDLOG -- Log information about the processing with the image name.
+#
+# 1. If the package "verbose" parameter is set print the string preceded
+# by the image name.
+# 2. If the package "logfile" parameter is not null append the string,
+# preceded by the image name, to the file.
+
+procedure ccdlog (im, str)
+
+pointer im # IMIO pointer
+char str[ARB] # Log string
+
+int fd, open()
+bool clgetb()
+pointer sp, fname
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+
+ # Write to the standard error output if "verbose".
+ if (clgetb ("verbose")) {
+ call imstats (im, IM_IMAGENAME, Memc[fname], SZ_FNAME)
+ call eprintf ("%s: %s\n")
+ call pargstr (Memc[fname])
+ call pargstr (str)
+ }
+
+ # Append to the "logfile" if not null.
+ call clgstr ("logfile", Memc[fname], SZ_FNAME)
+ call xt_stripwhite (Memc[fname])
+ if (Memc[fname] != EOS) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call imstats (im, IM_IMAGENAME, Memc[fname], SZ_FNAME)
+ call fprintf (fd, "%s: %s\n")
+ call pargstr (Memc[fname])
+ call pargstr (str)
+ call close (fd)
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/ccdmean.x b/noao/imred/ccdred/src/ccdmean.x
new file mode 100644
index 00000000..d38ea97b
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdmean.x
@@ -0,0 +1,50 @@
+include <imhdr.h>
+
+
+# CCDMEAN -- Compute mean and add to header if needed.
+
+procedure ccdmean (input)
+
+char input[ARB] # Input image
+
+int i, nc, nl, hdmgeti()
+long time, clktime()
+bool clgetb()
+real mean, hdmgetr(), asumr()
+pointer in, immap(), imgl2r()
+errchk immap
+
+begin
+ # Check if this operation has been done.
+
+ in = immap (input, READ_WRITE, 0)
+ ifnoerr (mean = hdmgetr (in, "ccdmean")) {
+ iferr (time = hdmgeti (in, "ccdmeant"))
+ time = IM_MTIME(in)
+ if (time >= IM_MTIME(in)) {
+ call imunmap (in)
+ return
+ }
+ }
+
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Compute mean of image\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ # Compute and record the mean.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ mean = 0.
+ do i = 1, nl
+ mean = mean + asumr (Memr[imgl2r(in,i)], nc)
+ mean = mean / (nc * nl)
+ time = clktime (long(0))
+ call hdmputr (in, "ccdmean", mean)
+ call hdmputi (in, "ccdmeant", int (time))
+
+ call imunmap (in)
+end
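
CCDMEAN caches its result in the "ccdmean" keyword together with a "ccdmeant" time stamp and recomputes only when the image has been modified after the cached value was written (in the SPP code a missing "ccdmeant" defaults to the image modify time). A C sketch of that staleness test and the line-by-line accumulation; header access is mocked and the function names are illustrative:

    #include <stdio.h>
    #include <time.h>

    /* Recompute only if the cached value is missing or older than the image. */
    static int cache_is_valid(int have_mean, long meantime, long imagemtime)
    {
        return have_mean && meantime >= imagemtime;
    }

    /* Accumulate the mean line by line, as ccdmean does with asumr(). */
    static double image_mean(const float *pix, int nc, int nl)
    {
        double sum = 0.0;
        for (int l = 0; l < nl; l++)
            for (int c = 0; c < nc; c++)
                sum += pix[l * nc + c];
        return sum / ((double)nc * nl);
    }

    int main(void)
    {
        float pix[2 * 3] = { 1, 2, 3, 4, 5, 6 };
        if (!cache_is_valid(0, 0L, (long)time(NULL)))
            printf("mean = %g\n", image_mean(pix, 3, 2));   /* 3.5 */
        return 0;
    }
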
diff --git a/noao/imred/ccdred/src/ccdnscan.x b/noao/imred/ccdred/src/ccdnscan.x
new file mode 100644
index 00000000..3a9fbeba
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdnscan.x
@@ -0,0 +1,38 @@
+include "ccdtypes.h"
+
+
+# CCDNSCAN -- Return the number CCD scan rows.
+#
+# If not found in the header return the "nscan" parameter for objects and
+# 1 for calibration images.
+
+int procedure ccdnscan (im, ccdtype)
+
+pointer im #I Image
+int ccdtype #I CCD type
+int nscan #O Number of scan lines
+
+bool clgetb()
+char type, clgetc()
+int hdmgeti(), clgeti()
+
+begin
+ iferr (nscan = hdmgeti (im, "nscanrow")) {
+ switch (ccdtype) {
+ case ZERO, DARK, FLAT, ILLUM, FRINGE:
+ nscan = 1
+ default:
+ type = clgetc ("scantype")
+ if (type == 's')
+ nscan = clgeti ("nscan")
+ else {
+ if (clgetb ("scancor"))
+ nscan = INDEFI
+ else
+ nscan = 1
+ }
+ }
+ }
+
+ return (nscan)
+end
diff --git a/noao/imred/ccdred/src/ccdproc.x b/noao/imred/ccdred/src/ccdproc.x
new file mode 100644
index 00000000..1b2a133c
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdproc.x
@@ -0,0 +1,106 @@
+include <error.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# CCDPROC -- Process a CCD image of a specified CCD image type.
+#
+# The input image is corrected for bad pixels, overscan levels, zero
+# levels, dark counts, flat field, illumination, and fringing. It may also
+# be trimmed. The checking of whether to apply each correction, getting the
+# required parameters, and logging the operations is left to separate
+# procedures, one for each correction. The actual processing is done by
+# a specialized procedure designed to be very efficient. These
+# procedures may also process calibration images if necessary.
+# The specified image type overrides the image type in the image header.
+# There are two data type paths; one for short data types and one for
+# all other data types (usually real).
+
+procedure ccdproc (input, ccdtype)
+
+char input[ARB] # CCD image to process
+int ccdtype # CCD type of image (independent of header).
+
+pointer sp, output, str, in, out, ccd, immap()
+errchk immap, set_output, ccddelete
+errchk set_fixpix, set_zero, set_dark, set_flat, set_illum, set_fringe
+
+begin
+ call smark (sp)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Map the image, make a working output image and set the processing
+ # parameters.
+
+ in = immap (input, READ_ONLY, 0)
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ call set_output (in, out, Memc[output])
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+
+ # Set processing appropriate for the various image types.
+ switch (ccdtype) {
+ case ZERO:
+ case DARK:
+ call set_zero (ccd)
+ case FLAT:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ CORS(ccd, MINREP) = YES
+ case ILLUM:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ case OBJECT, COMP:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ default:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ }
+
+ # Do the processing if the COR flag is set.
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Replace the input by the output image.
+ call imunmap (in)
+ call imunmap (out)
+ iferr (call ccddelete (input)) {
+ call imdelete (Memc[output])
+ call error (1,
+ "Can't delete or make backup of original image")
+ }
+ call imrename (Memc[output], input)
+ } else {
+ # Delete the temporary output image leaving the input unchanged.
+ call imunmap (in)
+ iferr (call imunmap (out))
+ ;
+ iferr (call imdelete (Memc[output]))
+ ;
+ }
+ call free_proc (ccd)
+
+ # Do special processing for calibration images.
+ switch (ccdtype) {
+ case ZERO:
+ call readcor (input)
+ case FLAT:
+ call ccdmean (input)
+ }
+
+ call sfree (sp)
+end
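
The switch above enables a different subset of corrections for each CCD type: zeros get only the fixpix/overscan/trim steps set before the switch, darks add the zero correction, flats add zero and dark (plus the mean and minimum-replacement flags), and object or comparison frames get everything through illumination and fringing. A C sketch of that type-to-correction mapping, reduced to the five calibration flags; the table is illustrative, not a substitute for the SPP switch:

    #include <stdio.h>

    enum ccdtype { ZERO, DARK, FLAT, ILLUM, OBJECT };

    /* Correction flags, in the spirit of the CORS array in ccdred.h. */
    struct corrections { int zerocor, darkcor, flatcor, illumcor, fringecor; };

    static struct corrections corrections_for(enum ccdtype t)
    {
        struct corrections c = {0, 0, 0, 0, 0};
        switch (t) {
        case ZERO:                                          break;
        case DARK:   c.zerocor = 1;                         break;
        case FLAT:   c.zerocor = c.darkcor = 1;             break;
        case ILLUM:  c.zerocor = c.darkcor = c.flatcor = 1; break;
        default:     c.zerocor = c.darkcor = c.flatcor =
                     c.illumcor = c.fringecor = 1;          break;
        }
        return c;
    }

    int main(void)
    {
        struct corrections c = corrections_for(FLAT);
        printf("flat: zero=%d dark=%d flat=%d\n",
               c.zerocor, c.darkcor, c.flatcor);
        return 0;
    }
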
diff --git a/noao/imred/ccdred/src/ccdred.h b/noao/imred/ccdred/src/ccdred.h
new file mode 100644
index 00000000..2d370d86
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdred.h
@@ -0,0 +1,150 @@
+# CCDRED Data Structures and Definitions
+
+# The CCD structure: This structure is used to communicate processing
+# parameters between the package procedures. It contains pointers to
+# data, calibration image IMIO pointers, scaling parameters, and the
+# correction flags. The corrections flags indicate which processing
+# operations are to be performed. The subsection parameters do not
+# include a step size; a step size of one is assumed. If arbitrary subsampling
+# is desired this would be the next generalization.
+
+define LEN_CCD 131 # Length of CCD structure
+
+# CCD data coordinates
+define CCD_C1 Memi[$1] # CCD starting column
+define CCD_C2 Memi[$1+1] # CCD ending column
+define CCD_L1 Memi[$1+2] # CCD starting line
+define CCD_L2 Memi[$1+3] # CCD ending line
+
+# Input data
+define IN_IM Memi[$1+10] # Input image pointer
+define IN_C1 Memi[$1+11] # Input data starting column
+define IN_C2 Memi[$1+12] # Input data ending column
+define IN_L1 Memi[$1+13] # Input data starting line
+define IN_L2 Memi[$1+14] # Input data ending line
+
+# Output data
+define OUT_IM Memi[$1+20] # Output image pointer
+define OUT_C1 Memi[$1+21] # Output data starting column
+define OUT_C2 Memi[$1+22] # Output data ending column
+define OUT_L1 Memi[$1+23] # Output data starting line
+define OUT_L2 Memi[$1+24] # Output data ending line
+
+# Mask data
+define MASK_IM Memi[$1+30] # Mask image pointer
+define MASK_C1 Memi[$1+31] # Mask data starting column
+define MASK_C2 Memi[$1+32] # Mask data ending column
+define MASK_L1 Memi[$1+33] # Mask data starting line
+define MASK_L2 Memi[$1+34] # Mask data ending line
+define MASK_PM Memi[$1+35] # Mask pointer
+define MASK_FP Memi[$1+36] # Mask fixpix data
+
+# Zero level data
+define ZERO_IM Memi[$1+40] # Zero level image pointer
+define ZERO_C1 Memi[$1+41] # Zero level data starting column
+define ZERO_C2 Memi[$1+42] # Zero level data ending column
+define ZERO_L1 Memi[$1+43] # Zero level data starting line
+define ZERO_L2 Memi[$1+44] # Zero level data ending line
+
+# Dark count data
+define DARK_IM Memi[$1+50] # Dark count image pointer
+define DARK_C1 Memi[$1+51] # Dark count data starting column
+define DARK_C2 Memi[$1+52] # Dark count data ending column
+define DARK_L1 Memi[$1+53] # Dark count data starting line
+define DARK_L2 Memi[$1+54] # Dark count data ending line
+
+# Flat field data
+define FLAT_IM Memi[$1+60] # Flat field image pointer
+define FLAT_C1 Memi[$1+61] # Flat field data starting column
+define FLAT_C2 Memi[$1+62] # Flat field data ending column
+define FLAT_L1 Memi[$1+63] # Flat field data starting line
+define FLAT_L2 Memi[$1+64] # Flat field data ending line
+
+# Illumination data
+define ILLUM_IM Memi[$1+70] # Illumination image pointer
+define ILLUM_C1 Memi[$1+71] # Illumination data starting column
+define ILLUM_C2 Memi[$1+72] # Illumination data ending column
+define ILLUM_L1 Memi[$1+73] # Illumination data starting line
+define ILLUM_L2 Memi[$1+74] # Illumination data ending line
+
+# Fringe data
+define FRINGE_IM Memi[$1+80] # Fringe image pointer
+define FRINGE_C1 Memi[$1+81] # Fringe data starting column
+define FRINGE_C2 Memi[$1+82] # Fringe data ending column
+define FRINGE_L1 Memi[$1+83] # Fringe data starting line
+define FRINGE_L2 Memi[$1+84] # Fringe data ending line
+
+# Trim section
+define TRIM_C1 Memi[$1+90] # Trim starting column
+define TRIM_C2 Memi[$1+91] # Trim ending column
+define TRIM_L1 Memi[$1+92] # Trim starting line
+define TRIM_L2 Memi[$1+93] # Trim ending line
+
+# Bias section
+define BIAS_C1 Memi[$1+100] # Bias starting column
+define BIAS_C2 Memi[$1+101] # Bias ending column
+define BIAS_L1 Memi[$1+102] # Bias starting line
+define BIAS_L2 Memi[$1+103] # Bias ending line
+
+define READAXIS Memi[$1+110] # Read out axis (1=cols, 2=lines)
+define CALCTYPE Memi[$1+111] # Calculation data type
+define OVERSCAN_TYPE Memi[$1+112] # Overscan type
+define OVERSCAN_VEC Memi[$1+113] # Pointer to overscan vector
+define DARKSCALE Memr[P2R($1+114)] # Dark count scale factor
+define FRINGESCALE Memr[P2R($1+115)] # Fringe scale factor
+define FLATSCALE Memr[P2R($1+116)] # Flat field scale factor
+define ILLUMSCALE Memr[P2R($1+117)] # Illumination scale factor
+define MINREPLACE Memr[P2R($1+118)] # Minimum replacement value
+define MEAN Memr[P2R($1+119)] # Mean of output image
+define COR Memi[$1+120] # Overall correction flag
+define CORS Memi[$1+121+($2-1)] # Individual correction flags
+
+# The correction array contains the following elements with array indices
+# given by the macro definitions.
+
+define NCORS 10 # Number of corrections
+
+define FIXPIX 1 # Fix bad pixels
+define TRIM 2 # Trim image
+define OVERSCAN 3 # Apply overscan correction
+define ZEROCOR 4 # Apply zero level correction
+define DARKCOR 5 # Apply dark count correction
+define FLATCOR 6 # Apply flat field correction
+define ILLUMCOR 7 # Apply illumination correction
+define FRINGECOR 8 # Apply fringe correction
+define FINDMEAN 9 # Find the mean of the output image
+define MINREP 10 # Check and replace minimum value
+
+# The following definitions identify the correction values in the correction
+# array. They are defined in terms of bit fields so that it is possible to
+# add corrections to form unique combination corrections. Some of
+# these combinations are implemented as compound operations for efficiency.
+
+define O 001B # overscan
+define Z 002B # zero level
+define D 004B # dark count
+define F 010B # flat field
+define I 020B # Illumination
+define Q 040B # Fringe
+
+# The following correction combinations are recognized.
+
+define ZO 003B # zero level + overscan
+define DO 005B # dark count + overscan
+define DZ 006B # dark count + zero level
+define DZO 007B # dark count + zero level + overscan
+define FO 011B # flat field + overscan
+define FZ 012B # flat field + zero level
+define FZO 013B # flat field + zero level + overscan
+define FD 014B # flat field + dark count
+define FDO 015B # flat field + dark count + overscan
+define FDZ 016B # flat field + dark count + zero level
+define FDZO 017B # flat field + dark count + zero level + overscan
+define QI 060B # fringe + illumination
+
+# The following overscan functions are recognized.
+define OVERSCAN_TYPES "|mean|median|minmax|chebyshev|legendre|spline3|spline1|"
+define OVERSCAN_MEAN 1 # Mean of overscan
+define OVERSCAN_MEDIAN 2 # Median of overscan
+define OVERSCAN_MINMAX 3 # Minmax of overscan
+define OVERSCAN_FIT 4 # Following codes are function fits
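
The single-letter correction codes above are octal bit masks, so each combination code is literally the bitwise OR of its components (for example FZO = F|Z|O = 013B). A short C check of those identities, with the values copied from the header:

    #include <stdio.h>

    #define O  001   /* overscan     */
    #define Z  002   /* zero level   */
    #define D  004   /* dark count   */
    #define F  010   /* flat field   */
    #define I  020   /* illumination */
    #define Q  040   /* fringe       */

    int main(void)
    {
        /* The combination codes in ccdred.h are just ORs of the single bits. */
        printf("ZO   = %03o\n", Z | O);          /* 003 */
        printf("DZO  = %03o\n", D | Z | O);      /* 007 */
        printf("FDZO = %03o\n", F | D | Z | O);  /* 017 */
        printf("QI   = %03o\n", Q | I);          /* 060 */
        return 0;
    }
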
diff --git a/noao/imred/ccdred/src/ccdsection.x b/noao/imred/ccdred/src/ccdsection.x
new file mode 100644
index 00000000..aced216a
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdsection.x
@@ -0,0 +1,100 @@
+include <ctype.h>
+
+# CCD_SECTION -- Parse a 2D image section into its elements.
+# 1. The default values must be set by the caller.
+# 2. A null image section is OK.
+# 3. The first nonwhitespace character must be '['.
+# 4. The last interpreted character must be ']'.
+#
+# This procedure should be replaced with an IMIO procedure at some
+# point.
+
+procedure ccd_section (section, x1, x2, xstep, y1, y2, ystep)
+
+char section[ARB] # Image section
+int x1, x2, xstep # X image section parameters
+int	y1, y2, ystep			# Y image section parameters
+
+int i, ip, a, b, c, temp, ctoi()
+define error_ 99
+
+begin
+ # Decode the section string.
+ ip = 1
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == '[')
+ ip = ip + 1
+ else if (section[ip] == EOS)
+ return
+ else
+ goto error_
+
+ do i = 1, 2 {
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+
+ # Default values
+ if (i == 1) {
+ a = x1
+ b = x2
+ c = xstep
+ } else {
+ a = y1
+ b = y2
+ c = ystep
+ }
+
+ # Get a:b:c. Allow notation such as "-*:c"
+ # (or even "-:c") where the step is obviously negative.
+
+ if (ctoi (section, ip, temp) > 0) { # a
+ a = temp
+ if (section[ip] == ':') {
+ ip = ip + 1
+ if (ctoi (section, ip, b) == 0) # a:b
+ goto error_
+ } else
+ b = a
+ } else if (section[ip] == '-') { # -*
+ temp = a
+ a = b
+ b = temp
+ ip = ip + 1
+ if (section[ip] == '*')
+ ip = ip + 1
+ } else if (section[ip] == '*') # *
+ ip = ip + 1
+ if (section[ip] == ':') { # ..:step
+ ip = ip + 1
+ if (ctoi (section, ip, c) == 0)
+ goto error_
+ else if (c == 0)
+ goto error_
+ }
+ if (a > b && c > 0)
+ c = -c
+
+ if (i == 1) {
+ x1 = a
+ x2 = b
+ xstep = c
+ } else {
+ y1 = a
+ y2 = b
+ ystep = c
+ }
+
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ',')
+ ip = ip + 1
+ }
+
+ if (section[ip] != ']')
+ goto error_
+
+ return
+error_
+ call error (0, "Error in image section specification")
+end
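
CCD_SECTION turns strings such as "[1:512,1:512]" (optionally with ":step", "*" or "-*") into start, end and step values for each axis, leaving the caller's defaults in place for a null section. A reduced C sketch of the same idea for the common "[x1:x2,y1:y2]" form; it deliberately omits the "*" and step handling of the SPP routine, and the function name is illustrative:

    #include <stdio.h>

    /* Parse "[x1:x2,y1:y2]"; leave the defaults untouched for a null string.
       Returns 0 on success, -1 on a malformed section. */
    static int ccd_section(const char *s, int *x1, int *x2, int *y1, int *y2)
    {
        int a, b, c, d;
        if (s == NULL || *s == '\0')
            return 0;                               /* null section is OK */
        if (sscanf(s, " [%d:%d,%d:%d]", &a, &b, &c, &d) != 4)
            return -1;                              /* malformed section */
        *x1 = a; *x2 = b; *y1 = c; *y2 = d;
        return 0;
    }

    int main(void)
    {
        int x1 = 1, x2 = 100, y1 = 1, y2 = 100;     /* caller defaults */
        if (ccd_section("[3:510,2:511]", &x1, &x2, &y1, &y2) == 0)
            printf("[%d:%d,%d:%d]\n", x1, x2, y1, y2);
        return 0;
    }
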
diff --git a/noao/imred/ccdred/src/ccdsubsets.x b/noao/imred/ccdred/src/ccdsubsets.x
new file mode 100644
index 00000000..528b0223
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdsubsets.x
@@ -0,0 +1,93 @@
+include <ctype.h>
+
+
+# CCDSUBSET -- Return the CCD subset identifier.
+#
+# 1. Get the subset string and search the subset record file for the ID string.
+# 2. If the subset string is not in the record file define a default ID string
+# based on the first word of the subset string. If the first word is not
+#    unique, append an integer to the first word until it is unique.
+# 3. Add the new subset string and identifier to the record file.
+# 4. Since the ID string is used to generate image names replace all
+# nonimage name characters with '_'.
+#
+# It is an error if the record file cannot be created or written when needed.
+
+procedure ccdsubset (im, subset, sz_name)
+
+pointer im # Image
+char subset[sz_name] # CCD subset identifier
+int sz_name # Size of subset string
+
+bool streq()
+int i, fd, ctowrd(), open(), fscan()
+pointer sp, fname, str1, str2, subset1, subset2, subset3
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+ call salloc (subset1, SZ_LINE, TY_CHAR)
+ call salloc (subset2, SZ_LINE, TY_CHAR)
+ call salloc (subset3, SZ_LINE, TY_CHAR)
+
+ # Get the subset record file and the subset string.
+ call clgstr ("ssfile", Memc[fname], SZ_LINE)
+ call hdmgstr (im, "subset", Memc[str1], SZ_LINE)
+
+ # The default subset identifier is the first word of the subset string.
+ i = 1
+ i = ctowrd (Memc[str1], i, Memc[subset1], SZ_LINE)
+
+ # A null subset string is ok. If not null check for conflict
+ # with previous subset IDs.
+ if (Memc[str1] != EOS) {
+ call strcpy (Memc[subset1], Memc[subset3], SZ_LINE)
+
+ # Search the subset record file for the same subset string.
+ # If found use the ID string. If the subset ID has been
+ # used for another subset string then increment an integer
+ # suffix to the default ID and check the list again.
+
+ i = 1
+ ifnoerr (fd = open (Memc[fname], READ_ONLY, TEXT_FILE)) {
+ while (fscan (fd) != EOF) {
+ call gargwrd (Memc[str2], SZ_LINE)
+ call gargwrd (Memc[subset2], SZ_LINE)
+ if (streq (Memc[str1], Memc[str2])) {
+ i = 0
+ call strcpy (Memc[subset2], Memc[subset1], SZ_LINE)
+ break
+		} else if (streq (Memc[subset1], Memc[subset2])) {
+ call sprintf (Memc[subset1], SZ_LINE, "%s%d")
+ call pargstr (Memc[subset3])
+ call pargi (i)
+ i = i + 1
+ call seek (fd, BOF)
+ }
+ }
+ call close (fd)
+ }
+
+ # If the subset is not in the record file add it.
+ if (i > 0) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call fprintf (fd, "'%s'\t%s\n")
+ call pargstr (Memc[str1])
+ call pargstr (Memc[subset1])
+ call close (fd)
+ }
+ }
+
+ # Set the subset ID string and replace magic characters by '_'
+ # since the subset ID is used in forming image names.
+
+ call strcpy (Memc[subset1], subset, sz_name)
+ for (i=1; subset[i]!=EOS; i=i+1)
+ if (!(IS_ALNUM(subset[i])||subset[i]=='.'))
+ subset[i] = '_'
+
+ call sfree (sp)
+end
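
CCDSUBSET reduces an arbitrary subset (filter) string to a short identifier: the first word of the string, with an integer appended if that word already names a different subset, and with every character that is not alphanumeric or '.' replaced by '_' so the result can be embedded in image names. A C sketch of the final sanitizing step (the record-file bookkeeping is omitted, and the function name is illustrative):

    #include <stdio.h>
    #include <ctype.h>

    /* Replace characters that are not alphanumeric or '.' with '_',
       as ccdsubset does before the ID is used in image names. */
    static void sanitize_subset(char *id)
    {
        for (char *p = id; *p != '\0'; p++)
            if (!(isalnum((unsigned char)*p) || *p == '.'))
                *p = '_';
    }

    int main(void)
    {
        char id[] = "V 600/8.6";
        sanitize_subset(id);
        printf("%s\n", id);   /* V_600_8.6 */
        return 0;
    }
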
diff --git a/noao/imred/ccdred/src/ccdtypes.h b/noao/imred/ccdred/src/ccdtypes.h
new file mode 100644
index 00000000..0d5d4caf
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdtypes.h
@@ -0,0 +1,14 @@
+# Standard CCD image types.
+
+define CCDTYPES "|object|zero|dark|flat|illum|fringe|other|comp|"
+
+define NONE -1
+define UNKNOWN 0
+define OBJECT 1
+define ZERO 2
+define DARK 3
+define FLAT 4
+define ILLUM 5
+define FRINGE 6
+define OTHER 7
+define COMP 8
diff --git a/noao/imred/ccdred/src/ccdtypes.x b/noao/imred/ccdred/src/ccdtypes.x
new file mode 100644
index 00000000..bf6d29e2
--- /dev/null
+++ b/noao/imred/ccdred/src/ccdtypes.x
@@ -0,0 +1,72 @@
+include "ccdtypes.h"
+
+# CCDTYPES -- Return the CCD type name string.
+# CCDTYPEI -- Return the CCD type code.
+
+
+# CCDTYPES -- Return the CCD type name string.
+
+procedure ccdtypes (im, name, sz_name)
+
+pointer im # Image
+char name[sz_name] # CCD type name
+int sz_name # Size of name string
+
+int strdic()
+pointer sp, str
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the image type string. If none then return "none".
+ # Otherwise get the corresponding package image type string.
+ # If the image type is unknown return "unknown" otherwise return
+ # the package name.
+
+ call hdmgstr (im, "imagetyp", Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call strcpy ("none", name, sz_name)
+ } else {
+ call hdmname (Memc[str], name, sz_name)
+ if (name[1] == EOS)
+ call strcpy (Memc[str], name, sz_name)
+ if (strdic (name, name, sz_name, CCDTYPES) == UNKNOWN)
+ call strcpy ("unknown", name, sz_name)
+ }
+
+ call sfree (sp)
+end
+
+
+# CCDTYPEI -- Return the CCD type code.
+
+int procedure ccdtypei (im)
+
+pointer im # Image
+int ccdtype # CCD type (returned)
+
+pointer sp, str1, str2
+int strdic()
+
+begin
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the image type and if there is none then return the NONE code.
+ call hdmgstr (im, "imagetyp", Memc[str1], SZ_LINE)
+ if (Memc[str1] == EOS) {
+ ccdtype = NONE
+
+ # Otherwise get the package type and convert to an image type code.
+ } else {
+ call hdmname (Memc[str1], Memc[str2], SZ_LINE)
+ if (Memc[str2] == EOS)
+ call strcpy (Memc[str1], Memc[str2], SZ_LINE)
+ ccdtype = strdic (Memc[str2], Memc[str2], SZ_LINE, CCDTYPES)
+ }
+
+ call sfree (sp)
+ return (ccdtype)
+end
diff --git a/noao/imred/ccdred/src/combine/generic/icaclip.x b/noao/imred/ccdred/src/combine/generic/icaclip.x
new file mode 100644
index 00000000..1530145c
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icaclip.x
@@ -0,0 +1,1102 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for this algorithm
+
+
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclips (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Since the unweighted average is computed here possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_mavsigclips (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+real med, low, high, r, s, s1, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Mems[d[1]+k]
+ else {
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Mems[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+ else
+ return
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Mems[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Mems[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclipr (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Since the unweighted average is computed here possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_mavsigclipr (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+real med, low, high, r, s, s1, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Memr[d[1]+k]
+ else {
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Memr[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+ else
+ return
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Memr[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Memr[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
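
Both procedures above share the same core: a single sigma is estimated for the whole line from all pixels, normalized by the square root of the expected (Poisson-like) level, and pixels are then rejected when their residual from the line average or median exceeds lsigma or hsigma times that scaled sigma, iterating until nothing more is rejected. A condensed C sketch of one rejection pass for a single pixel stack, without the scale/zero corrections, image-ID bookkeeping, or the nkeep restore logic (names are illustrative):

    #include <stdio.h>
    #include <math.h>

    /* One avsigclip-style pass on a single stack of pixel values:
       sigma is the normalized average sigma for the whole line, scaled
       here by sqrt(max(1, mean)) as in the unscaled branch above. */
    static int reject_pass(float *d, int n, float a, float sigma,
                           float lsigma, float hsigma)
    {
        float s1 = sigma * sqrtf(fmaxf(1.0f, a));
        int n1 = n;
        for (int j = 0; j < n1; j++) {
            float r = (d[j] - a) / s1;
            if (r < -lsigma || r > hsigma) {
                /* Swap the rejected pixel to the end and shrink the stack;
                   the SPP code does the same so rejects can be restored. */
                float tmp = d[n1 - 1];
                d[n1 - 1] = d[j];
                d[j] = tmp;
                n1--;
                j--;
            }
        }
        return n1;             /* number of pixels kept */
    }

    int main(void)
    {
        float d[] = { 10, 11, 9, 10, 40 };   /* one clear outlier */
        int n = 5;
        float mean = (10 + 11 + 9 + 10 + 40) / 5.0f;
        n = reject_pass(d, n, mean, 1.0f, 3.0f, 3.0f);
        printf("kept %d pixels\n", n);       /* 4 */
        return 0;
    }
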
diff --git a/noao/imred/ccdred/src/combine/generic/icaverage.x b/noao/imred/ccdred/src/combine/generic/icaverage.x
new file mode 100644
index 00000000..3646b725
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icaverage.x
@@ -0,0 +1,163 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weight average.
+
+procedure ic_averages (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average (returned)
+
+int i, j, k
+real sumwt, wt
+real sum
+
+include "../icombine.com"
+
+begin
+ # If no data has been excluded do the average without checking the
+ # number of points and using the fact that the weights are normalized.
+ # If all the data has been excluded set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mems[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mems[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Mems[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mems[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mems[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Mems[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
+
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weight average.
+
+procedure ic_averager (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average (returned)
+
+int i, j, k
+real sumwt, wt
+real sum
+
+include "../icombine.com"
+
+begin
+ # If no data has been excluded do the average without checking the
+ # number of points and using the fact that the weights are normalized.
+ # If all the data has been excluded set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Memr[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Memr[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Memr[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Memr[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Memr[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Memr[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
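
The two branches above differ only in bookkeeping: when no pixels have been rejected the weights are already normalized, so the weighted sum is itself the average, while with rejections the surviving weights must be re-summed and divided out. A short C sketch of both cases (weights assumed normalized on input, as in the SPP code; the blank value used for empty stacks is replaced by 0.0 here, and the function name is illustrative):

    #include <stdio.h>

    /* Weighted average of n values. If all pixels are present and the
       weights sum to one, the plain weighted sum is already the average;
       otherwise renormalize by the sum of the surviving weights. */
    static double weighted_average(const double *d, const double *w, int n,
                                   int all_present)
    {
        double sum = 0.0, sumwt = 0.0;
        for (int j = 0; j < n; j++) {
            sum += d[j] * w[j];
            sumwt += w[j];
        }
        if (all_present)
            return sum;                /* weights already normalized */
        return n > 0 ? sum / sumwt : 0.0;
    }

    int main(void)
    {
        double d[] = { 10.0, 20.0, 30.0 };
        double w[] = { 0.5, 0.3, 0.2 };               /* normalized weights */
        printf("%g\n", weighted_average(d, w, 3, 1)); /* 17 */
        printf("%g\n", weighted_average(d, w, 2, 0)); /* (5+6)/0.8 = 13.75 */
        return 0;
    }
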
diff --git a/noao/imred/ccdred/src/combine/generic/iccclip.x b/noao/imred/ccdred/src/combine/generic/iccclip.x
new file mode 100644
index 00000000..57709064
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/iccclip.x
@@ -0,0 +1,898 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define	MINCLIP		2		# Minimum number of images for algorithm
+
+
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
+
+procedure ic_accdclips (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+	# If there are insufficient pixels go on to the combining. Since the
+	# unweighted average is computed here possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Mems[d[1]+k]
+ sum = sum + Mems[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
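
Unlike the avsigclip routines, the ccdclip rejection predicts sigma from a CCD noise model: for an expected level d the SPP code uses sqrt(nm[1] + d/nm[2] + (d*nm[3])**2) divided by the image scale, where nm[1] acts as an additive (read-noise) variance term, nm[2] as an effective gain, and nm[3] as a multiplicative sensitivity-noise term. That reading of the parameters is inferred from the code here, not quoted from the CCDCLIP documentation. A C sketch of the formula with illustrative numbers:

    #include <stdio.h>
    #include <math.h>

    /* CCD noise model used by the ccdclip rejection:
       sigma(d) = sqrt(nm1 + d/nm2 + (d*nm3)^2) / scale,
       with the expected level d clipped at zero as in the SPP code. */
    static double ccd_sigma(double level, double scale, double zero,
                            double nm1, double nm2, double nm3)
    {
        double d = scale * (level + zero);
        if (d < 0.0)
            d = 0.0;
        return sqrt(nm1 + d / nm2 + (d * nm3) * (d * nm3)) / scale;
    }

    int main(void)
    {
        /* Illustrative parameter values only. */
        printf("sigma at 1000 ADU: %.2f\n",
               ccd_sigma(1000.0, 1.0, 0.0, 25.0, 2.0, 0.0));
        return 0;
    }
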
+
+
+# IC_CCDCLIP -- Reject pixels using CCD noise parameters about the median
+
+procedure ic_mccdclips (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+real med, zero
+data zero /0.0/
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Mems[d[n3-1]+k]
+ med = (med + Mems[d[n3]+k]) / 2.
+ } else
+ med = Mems[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
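
The median variant above works on the sorted stack, trimming pixels inward from both ends while their residuals (in units of the model sigma) exceed lsigma or hsigma, and restoring the smallest residuals if fewer than maxkeep pixels survive. A hedged NumPy sketch of the core idea for one output pixel (function names are ours; the restore-back step is only noted in a comment):

    import numpy as np

    def mccdclip_pixel(values, sigma_fn, lsigma, hsigma, nkeep):
        # values   -- pixel values from the image stack at one output position
        # sigma_fn -- maps a signal level to a model sigma (e.g. the ccd_sigma sketch above)
        v = np.sort(np.asarray(values, dtype=float))
        while v.size > max(2, nkeep):
            med = np.median(v)
            r = (v - med) / sigma_fn(med)
            keep = (r >= -lsigma) & (r <= hsigma)
            if keep.all():
                break
            if keep.sum() < nkeep:
                # the SPP code adds back the pixels with the smallest residuals;
                # this sketch simply stops clipping instead
                break
            v = v[keep]
        return v, (np.median(v) if v.size else float("nan"))

For example, mccdclip_pixel(stack_column, lambda x: ccd_sigma(x, 5.0, 2.5, 0.0), 3.0, 3.0, 1) would return the surviving values and their median.
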
+
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
+
+procedure ic_accdclipr (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are no pixels go on to the combining. Since the unweighted
+	# average is computed here, possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Memr[d[1]+k]
+ sum = sum + Memr[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MCCDCLIP -- Reject pixels using CCD noise parameters about the median
+
+procedure ic_mccdclipr (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+real med, zero
+data zero /0.0/
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Memr[d[n3-1]+k]
+ med = (med + Memr[d[n3]+k]) / 2.
+ } else
+ med = Memr[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/combine/generic/icgdata.x b/noao/imred/ccdred/src/combine/generic/icgdata.x
new file mode 100644
index 00000000..5c6ac18c
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icgdata.x
@@ -0,0 +1,459 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers. If required, the connection to the original
+# image index is kept in the returned m data pointers.
+
+procedure ic_gdatas (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int	npts			# Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnls()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnls (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnls (in[i], buf, v2)
+ call amovs (Mems[buf], Mems[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Mems[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Mems[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Mems[d[k]+j-1] = Mems[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Mems[d[k]+j-1] = Mems[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_SHORT)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sorts (d, Mems[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sorts (d, Mems[dp], n, npts)
+ call mfree (dp, TY_SHORT)
+ }
+end
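
Conceptually, ic_gdata flags out-of-threshold pixels, applies value/scale - zero to the survivors, and compacts the good values at each output position together with a per-position count. A rough NumPy sketch of that preparation step (illustrative only; the array layout is ours, not the compacted pointer scheme used above):

    import numpy as np

    def gdata_line(stack, lthresh, hthresh, scales, zeros):
        # stack  -- 2-D array (nimages, npts), one input line per image
        # scales -- per-image scale factors; zeros -- per-image zero offsets
        stack = np.asarray(stack, dtype=float)
        scales = np.asarray(scales, dtype=float)[:, None]
        zeros = np.asarray(zeros, dtype=float)[:, None]
        bad = (stack < lthresh) | (stack > hthresh)     # threshold mask
        scaled = stack / scales - zeros                 # same sense as d/scale + (-zero)
        good = [scaled[~bad[:, j], j] for j in range(stack.shape[1])]
        n = np.array([g.size for g in good])            # good-pixel count per output point
        return good, n
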
+
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers. If required, the connection to the original
+# image index is kept in the returned m data pointers.
+
+procedure ic_gdatar (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int	npts			# Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnlr()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnlr (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnlr (in[i], buf, v2)
+ call amovr (Memr[buf], Memr[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Memr[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Memr[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Memr[d[k]+j-1] = Memr[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Memr[d[k]+j-1] = Memr[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_REAL)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sortr (d, Memr[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sortr (d, Memr[dp], n, npts)
+ call mfree (dp, TY_REAL)
+ }
+end
+
diff --git a/noao/imred/ccdred/src/combine/generic/icgrow.x b/noao/imred/ccdred/src/combine/generic/icgrow.x
new file mode 100644
index 00000000..b94e1cbc
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icgrow.x
@@ -0,0 +1,148 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids
+# so that the grow does not propagate from them. The unweighted average
+# can be updated, but any rejected pixel requires the median to be
+# recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals and so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
+
+procedure ic_grows (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Mems[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Mems[d[j2]+k2] = Mems[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
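
A hedged sketch of the grow step above, using a plain boolean-mask representation (ours) instead of the compacted pointer layout: every originally rejected pixel also rejects its own image's pixels within grow columns, except where that would leave fewer than nkeep pixels at a column.

    import numpy as np

    def grow_reject(rejected, grow, nkeep):
        # rejected -- boolean array (nimages, npts), True where the preceding
        #             clipping step rejected a pixel
        rejected = np.asarray(rejected, dtype=bool)
        npts = rejected.shape[1]
        grown = rejected.copy()
        for i, j in zip(*np.nonzero(rejected)):          # grow only from original rejections
            for jj in range(max(0, j - grow), min(npts, j + grow + 1)):
                if jj == j:
                    continue
                if (~grown[:, jj]).sum() > nkeep:        # stop once only nkeep pixels remain
                    grown[i, jj] = True
        return grown
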
+
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids
+# so that the grow does not propagate from them. The unweighted average
+# can be updated, but any rejected pixel requires the median to be
+# recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals and so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
+
+procedure ic_growr (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Memr[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Memr[d[j2]+k2] = Memr[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/combine/generic/icmedian.x b/noao/imred/ccdred/src/combine/generic/icmedian.x
new file mode 100644
index 00000000..ec0166ba
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icmedian.x
@@ -0,0 +1,343 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_MEDIAN -- Median of lines
+
+procedure ic_medians (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+real val1, val2, val3
+short temp, wtemp
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Mems[d[j1]+k]
+ val2 = Mems[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mems[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Mems[d[j1]+k]
+ val2 = Mems[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mems[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+	    # If there are more than 3 points use the Wirth algorithm. This
+	    # is the same as vops$amed.gx except that for an even number of
+	    # points it selects the middle two and averages them.
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mems[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Mems[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Mems[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mems[d[lo1]+k]
+ Mems[d[lo1]+k] = Mems[d[up1]+k]
+ Mems[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Mems[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mems[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Mems[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Mems[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mems[d[lo1]+k]
+ Mems[d[lo1]+k] = Mems[d[up1]+k]
+ Mems[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Mems[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ val1 = Mems[d[1]+k]
+ val2 = Mems[d[2]+k]
+ val3 = Mems[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Mems[d[1]+k]
+ val2 = Mems[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Mems[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
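
The Wirth-style selection above can be mimicked with a partial sort: pick the middle element, and for an even count also pick the next one and average the pair, as the comment describes. A minimal sketch (ours, not the IRAF code):

    import numpy as np

    def median_by_selection(values):
        # Median via partial selection, averaging the middle two for even counts.
        v = np.asarray(values, dtype=float)
        n = v.size
        if n == 0:
            return float("nan")
        j = (n - 1) // 2                       # 0-based index of the lower middle
        if n % 2:
            return np.partition(v, j)[j]
        part = np.partition(v, [j, j + 1])     # selects both middle elements
        return (part[j] + part[j + 1]) / 2.0
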
+
+# IC_MEDIAN -- Median of lines
+
+procedure ic_medianr (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+real val1, val2, val3
+real temp, wtemp
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Memr[d[j1]+k]
+ val2 = Memr[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Memr[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Memr[d[j1]+k]
+ val2 = Memr[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Memr[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+	    # If there are more than 3 points use the Wirth algorithm. This
+	    # is the same as vops$amed.gx except that for an even number of
+	    # points it selects the middle two and averages them.
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Memr[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Memr[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Memr[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Memr[d[lo1]+k]
+ Memr[d[lo1]+k] = Memr[d[up1]+k]
+ Memr[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Memr[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Memr[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Memr[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Memr[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Memr[d[lo1]+k]
+ Memr[d[lo1]+k] = Memr[d[up1]+k]
+ Memr[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Memr[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ val1 = Memr[d[1]+k]
+ val2 = Memr[d[2]+k]
+ val3 = Memr[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Memr[d[1]+k]
+ val2 = Memr[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Memr[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
+
diff --git a/noao/imred/ccdred/src/combine/generic/icmm.x b/noao/imred/ccdred/src/combine/generic/icmm.x
new file mode 100644
index 00000000..259759bd
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icmm.x
@@ -0,0 +1,300 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_MM -- Reject a specified number of high and low pixels
+
+procedure ic_mms (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+short d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Mems[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Mems[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Mems[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Mems[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Mems[kmax] = d2
+ else
+ Mems[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Mems[kmin] = d1
+ else
+ Mems[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Mems[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Mems[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Mems[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Mems[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
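
The minmax rejection above reduces to discarding the flow fraction of low pixels and the fhigh fraction of high pixels from each stack. A hedged one-pixel NumPy equivalent (the SPP code avoids a full sort; this sketch does not):

    import numpy as np

    def minmax_reject(values, flow, fhigh):
        # Drop the lowest flow and highest fhigh fractions of the stack.
        v = np.sort(np.asarray(values, dtype=float))
        n = v.size
        nlow = int(flow * n + 0.001)           # same rounding fudge as the SPP code
        nhigh = int(fhigh * n + 0.001)
        return v[nlow:n - nhigh]
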
+
+# IC_MM -- Reject a specified number of high and low pixels
+
+procedure ic_mmr (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+real d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Memr[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Memr[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Memr[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Memr[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Memr[kmax] = d2
+ else
+ Memr[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Memr[kmin] = d1
+ else
+ Memr[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Memr[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Memr[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Memr[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Memr[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
diff --git a/noao/imred/ccdred/src/combine/generic/icombine.x b/noao/imred/ccdred/src/combine/generic/icombine.x
new file mode 100644
index 00000000..b4ff60be
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icombine.x
@@ -0,0 +1,607 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include <syserr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# ICOMBINE -- Combine images
+#
+# The memory and open file descriptor limits are checked and an attempt
+# to recover is made either by setting the image pixel files to be
+# closed after I/O or by notifying the calling program that memory
+# ran out and the IMIO buffer size should be reduced. After the checks
+# a procedure for the selected combine option is called.
+# Because there may be several failure modes when reaching the file
+# limits, we first assume an error is due to the file limit (except for
+# out of memory) and close some pixel files.  If the error then repeats
+# on accessing the pixels, the error is passed back.
+
+
+procedure icombines (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1s(), impl1i()
+errchk stropen, imgl1s, impl1i
+pointer impl1r()
+errchk impl1r
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned use the IMIO buffer otherwise we need vectors of
+ # output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_SHORT)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1s (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1s (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combines (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
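
The descriptor-limit recovery above is specific to IMIO, but the pattern is generic: assume a pixel-access failure is the open-file limit, release descriptors held by earlier inputs, and retry once before passing the error back. A rough Python analogue under that assumption (opener, closer and cache are placeholders supplied by the caller, not IRAF calls):

    import errno

    def open_with_fd_fallback(paths, opener, closer, cache):
        # paths  -- inputs to open; opener/closer -- callables supplied by the caller
        # cache  -- list of previously opened handles whose descriptors can be released
        handles = []
        for p in paths:
            try:
                handles.append(opener(p))
            except OSError as exc:
                if exc.errno not in (errno.EMFILE, errno.ENFILE):
                    raise                      # other errors are passed back unchanged
                for h in cache:                # free descriptors held by earlier inputs
                    closer(h)
                cache.clear()
                handles.append(opener(p))      # a second failure propagates
        return handles
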
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combines (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+pointer impnlr()
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdatas (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclips (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclips (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mms (d, id, n, npts)
+ case PCLIP:
+ call ic_pclips (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclips (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclips (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclips (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclips (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grows (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_averages (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_medians (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigmas (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+
+ call sfree (sp)
+end
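
Per output line the driver above amounts to: prepare the stack, apply the selected rejection, optionally grow, then average or median the survivors, recording per-point rejection counts and sigmas. A compressed, hedged outline reusing the grow_reject sketch above (NaN masking and np.nanstd stand in for the compacted buffers and ic_sigma; names are ours):

    import numpy as np

    def combine_line(stack, reject_fn, combine="average", grow=0, nkeep=1):
        # stack     -- 2-D array (nimages, npts) of already scaled pixel values
        # reject_fn -- returns a boolean keep mask (ndarray) with the same shape as stack
        keep = reject_fn(stack)                            # rejection step
        if grow > 0:                                       # optional neighbour growing
            keep &= ~grow_reject(~keep, grow, nkeep)
        data = np.where(keep, stack, np.nan)               # columns with no survivors give NaN
        if combine == "average":
            out = np.nanmean(data, axis=0)
        else:
            out = np.nanmedian(data, axis=0)
        nrej = (~keep).sum(axis=0)                         # per-point rejected count
        sig = np.nanstd(data, axis=0)                      # crude stand-in for ic_sigma
        return out, nrej, sig
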
+
+procedure icombiner (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1r(), impl1i()
+errchk stropen, imgl1r, impl1i
+pointer impl1r()
+errchk impl1r
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned use the IMIO buffer otherwise we need vectors of
+ # output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_REAL)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1r (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1r (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combiner (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combiner (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+pointer impnlr()
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdatar (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclipr (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclipr (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mmr (d, id, n, npts)
+ case PCLIP:
+ call ic_pclipr (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclipr (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclipr (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclipr (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclipr (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_growr (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_averager (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_medianr (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigmar (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+
+ call sfree (sp)
+end
+
diff --git a/noao/imred/ccdred/src/combine/generic/icpclip.x b/noao/imred/ccdred/src/combine/generic/icpclip.x
new file mode 100644
index 00000000..da09bb75
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icpclip.x
@@ -0,0 +1,442 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number for clipping
+
+
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
+
+procedure ic_pclips (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+real med
+
+include "../icombine.com"
+
+begin
+ # There must be at least MINCLIP and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Mems[d[n2-1]+j]
+ med = (med + Mems[d[n2]+j]) / 2.
+ } else
+ med = Mems[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Mems[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Mems[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Mems[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Mems[d[n5-1]+j]
+ med = (med + Mems[d[n5]+j]) / 2.
+ } else
+ med = Mems[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+	    # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+j] = Mems[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+j] = Mems[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
+
+procedure ic_pclipr (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+real med
+
+include "../icombine.com"
+
+begin
+	# There must be at least MINCLIP pixels and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Memr[d[n2-1]+j]
+ med = (med + Memr[d[n2]+j]) / 2.
+ } else
+ med = Memr[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Memr[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Memr[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Memr[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Memr[d[n5-1]+j]
+ med = (med + Memr[d[n5]+j]) / 2.
+ } else
+ med = Memr[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+	    # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+j] = Memr[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+j] = Memr[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
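
Outside of SPP, the percentile-clipping step implemented twice above (ic_pclips for short pixels, ic_pclipr for reals) boils down to: sort the stack at each output point, take the median, measure the offset to the pixel "pclip" positions away, and treat that offset as the clipping sigma. The NumPy sketch below illustrates only that core step under simplified index handling; it omits the nkeep add-back logic and the in-place reordering, and the function name and defaults are illustrative rather than part of the package.

    import numpy as np

    def pclip_point(values, pclip, lsigma=3.0, hsigma=3.0):
        # Percentile clip of a single output point.  The "sigma" is the
        # absolute difference between the median and the pixel 'pclip'
        # positions away from it in the sorted stack (negative = below).
        x = np.sort(np.asarray(values, dtype=float))
        med = float(np.median(x))

        n3 = int(round(x.size // 2 + pclip))     # percentile pixel index
        n3 = min(x.size - 1, max(0, n3))
        sigma = abs(x[n3] - med)
        if sigma == 0.0:
            return x, med                        # nothing to clip against

        r = (x - med) / sigma                    # residuals in "sigma" units
        keep = (r > -lsigma) & (r < hsigma)
        return x[keep], float(np.median(x[keep]))

For example, pclip_point([3, 5, 6, 7, 50], pclip=-1) measures sigma = 1 from the pixel just below the median, rejects both 3 and 50 (the strict comparisons match the rejection loops above), and returns a median of 6.
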
diff --git a/noao/imred/ccdred/src/combine/generic/icsclip.x b/noao/imred/ccdred/src/combine/generic/icsclip.x
new file mode 100644
index 00000000..d7ccfd84
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icsclip.x
@@ -0,0 +1,964 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define	MINCLIP		3		# Minimum number of images for algorithm
+
+
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average rejects the high and low pixels. A correction for
+# different scalings of the images may be made. Weights are not used.
+
+procedure ic_asigclips (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mems[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
+
+procedure ic_msigclips (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+real med, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Mems[d[n3-1]+k] + Mems[d[n3]+k]) / 2.
+ else
+ med = Mems[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Mems[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Mems[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average rejects the high and low pixels. A correction for
+# different scalings of the images may be made. Weights are not used.
+
+procedure ic_asigclipr (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Memr[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
+
+procedure ic_msigclipr (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+real med, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Memr[d[n3-1]+k] + Memr[d[n3]+k]) / 2.
+ else
+ med = Memr[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Memr[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Memr[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
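
Ignoring the scale and zero corrections, the image-id bookkeeping, and the step that restores pixels when more than nkeep have been rejected, the iteration in ic_asigclip* is ordinary sigma clipping about a min/max-trimmed first average; the ic_msigclip* variants do the same thing centred on the median and peel pixels off the sorted ends instead. A rough NumPy sketch of the average-centred loop, with illustrative names:

    import numpy as np

    def asigclip(values, lsigma=3.0, hsigma=3.0, nkeep=1):
        # Iterative sigma clip about the average.  The first average excludes
        # the single lowest and highest pixel, as the original does; sigma is
        # recomputed from the surviving pixels on every pass.
        x = np.asarray(values, dtype=float)
        if x.size < 3:
            return x, (x.mean() if x.size else np.nan)

        a = np.sort(x)[1:-1].mean()              # trimmed first estimate
        while True:
            n_before = x.size
            s = np.sqrt(((x - a) ** 2).sum() / (x.size - 1))
            if s <= 0.0:
                break
            r = (x - a) / s
            x = x[(r >= -lsigma) & (r <= hsigma)]
            if x.size == n_before or x.size <= max(2, nkeep):
                break
            a = x.mean()
        return x, x.mean()
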
diff --git a/noao/imred/ccdred/src/combine/generic/icsigma.x b/noao/imred/ccdred/src/combine/generic/icsigma.x
new file mode 100644
index 00000000..bc0d9788
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icsigma.x
@@ -0,0 +1,205 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
+
+procedure ic_sigmas (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+real a, sum
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mems[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mems[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Mems[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mems[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+		    sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mems[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mems[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Mems[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mems[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
+
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
+
+procedure ic_sigmar (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+real a, sum
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Memr[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Memr[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Memr[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Memr[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+		    sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Memr[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Memr[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Memr[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Memr[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
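
The sigma line computed above is a (possibly weighted) RMS of the stack about the already-combined value, with a small-sample correction. A hedged NumPy sketch of the same formula follows; unlike the D_ALL branch it divides by the weight sum explicitly (equivalent when the weights are pre-normalized to unit sum), and it ignores masks and the blank value.

    import numpy as np

    def sigma_line(stack, average, weights=None):
        # stack:   (nimages, npts) pixel values
        # average: (npts,) combined value already computed at each point
        # weights: optional (nimages,) weight per input image
        d = np.asarray(stack, dtype=float)
        a = np.asarray(average, dtype=float)
        n = d.shape[0]

        if weights is None:
            # sqrt( sum((d - a)^2) / (n - 1) )
            return np.sqrt(((d - a) ** 2).sum(axis=0) / max(n - 1, 1))

        # sqrt( sum(w (d - a)^2) / sum(w) * n / (n - 1) )
        w = np.asarray(weights, dtype=float)[:, None]
        sigcor = n / (n - 1.0) if n > 1 else 1.0
        return np.sqrt((w * (d - a) ** 2).sum(axis=0) / w.sum() * sigcor)
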
diff --git a/noao/imred/ccdred/src/combine/generic/icsort.x b/noao/imred/ccdred/src/combine/generic/icsort.x
new file mode 100644
index 00000000..a39b68e2
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icsort.x
@@ -0,0 +1,550 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+define LOGPTR 32 # log2(maxpts) (4e9)
+
+
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
+
+procedure ic_sorts (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+short b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+short pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Mems[a[i]+l]
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ do i = 1, npix
+ b[i] = Mems[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Mems[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second integer set of
+# vectors is sorted.
+
+procedure ic_2sorts (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+short b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+short pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Mems[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Mems[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
+
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
+
+procedure ic_sortr (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+real b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+real pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Memr[a[i]+l]
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ do i = 1, npix
+ b[i] = Memr[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Memr[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second integer set of
+# vectors is sorted.
+
+procedure ic_2sortr (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+real b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+real pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Memr[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Memr[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
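
ic_sort* and ic_2sort* order the stack of pixel values at every output point independently, and the 2sort variants drag the image-id vector through the same permutation so that later rejection code still knows which image each pixel came from. Vectorized, that is a column-wise argsort; a small NumPy sketch with illustrative names:

    import numpy as np

    def sort_stack_with_ids(data, ids):
        # data, ids: (nimages, npts) arrays.  Every column of 'data' is
        # sorted and the matching column of 'ids' gets the same permutation.
        d, m = np.asarray(data), np.asarray(ids)
        order = np.argsort(d, axis=0, kind="stable")
        return (np.take_along_axis(d, order, axis=0),
                np.take_along_axis(m, order, axis=0))

    vals = np.array([[7, 1], [3, 5], [5, 2]])
    imid = np.array([[1, 1], [2, 2], [3, 3]])
    sv, si = sort_stack_with_ids(vals, imid)
    # sv -> [[3, 1], [5, 2], [7, 5]],  si -> [[2, 1], [3, 3], [1, 2]]
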
diff --git a/noao/imred/ccdred/src/combine/generic/icstat.x b/noao/imred/ccdred/src/combine/generic/icstat.x
new file mode 100644
index 00000000..41512ccb
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/icstat.x
@@ -0,0 +1,444 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+define NMAX 10000 # Maximum number of pixels to sample
+
+
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
+
+procedure ic_stats (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnls()
+short ic_modes()
+real asums()
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_SHORT)
+ dp = data
+ while (imgnls (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Mems[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mems[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Mems[dp] = Mems[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Mems[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mems[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Mems[dp] = Mems[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrts (Mems[data], Mems[data], n)
+ mode = ic_modes (Mems[data], n)
+ median = Mems[data+n/2-1]
+ }
+ if (domean)
+ mean = asums (Mems[data], n) / n
+
+ call sfree (sp)
+end
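
Before any statistics are taken, ic_stat* subsamples the section: it tries a common step of 1 through 10 along every axis and uses the first step that brings the number of sampled pixels under NMAX, settling for 10 otherwise. A hedged Python sketch of just that step-selection loop, with an illustrative name:

    def sample_step(lengths, nmax=10000):
        # Try steps 1..10 and stop at the first one that samples fewer
        # than nmax points; like the original, give up at a step of 10.
        for step in range(1, 11):
            npts = 1
            for length in lengths:
                npts *= max(1, (length - 1) // step + 1)
            if npts < nmax:
                break
        return step
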
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+short procedure ic_modes (a, n)
+
+short a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+short mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+ zstep = max (1., zstep)
+ zbin = max (1., zbin)
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
+
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
+
+procedure ic_statr (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnlr()
+real ic_moder()
+real asumr()
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_REAL)
+ dp = data
+ while (imgnlr (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Memr[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Memr[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Memr[dp] = Memr[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Memr[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Memr[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Memr[dp] = Memr[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrtr (Memr[data], Memr[data], n)
+ mode = ic_moder (Memr[data], n)
+ median = Memr[data+n/2-1]
+ }
+ if (domean)
+ mean = asumr (Memr[data], n) / n
+
+ call sfree (sp)
+end
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+real procedure ic_moder (a, n)
+
+real a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+real mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
+
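
The mode estimator duplicated above for the two pixel types slides a coarse bin across the central ZRANGE fraction of the sorted sample, stepping by ZSTEP of that range, and reports the data value at the middle of the fullest bin (falling back to the median for fewer than NMIN points). A rough Python transcription of that search, without the floor of one data unit that the short-integer version applies to the bin and step sizes:

    import numpy as np

    NMIN, ZRANGE, ZSTEP, ZBIN = 10, 0.8, 0.01, 0.1

    def mode_binned(sorted_values):
        # 'sorted_values' must already be sorted, as in ic_mode*.
        a = np.asarray(sorted_values, dtype=float)
        n = a.size
        if n < NMIN:
            return a[n // 2] if n > 0 else float("nan")

        i = int(n * (1.0 - ZRANGE) / 2.0)
        j = int(n * (1.0 + ZRANGE) / 2.0)
        z1, z2 = a[i], a[min(j, n - 1)]
        if z1 == z2:
            return z1

        zstep, zbin = ZSTEP * (z2 - z1), ZBIN * (z2 - z1)
        k, nmax, mode = i, 0, a[n // 2]
        z1 -= zstep
        while k < j:                      # slide the bin until it passes a[j]
            z1 += zstep
            zhi = z1 + zbin
            while i < j and a[i] < z1:    # advance the low edge
                i += 1
            while k < j and a[k] < zhi:   # advance the high edge
                k += 1
            if k - i > nmax:              # fullest bin so far
                nmax, mode = k - i, a[(i + k) // 2]
        return mode
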
diff --git a/noao/imred/ccdred/src/combine/generic/mkpkg b/noao/imred/ccdred/src/combine/generic/mkpkg
new file mode 100644
index 00000000..63695459
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/generic/mkpkg
@@ -0,0 +1,23 @@
+# Make CCDRED Package.
+
+$checkout libpkg.a ../../..
+$update libpkg.a
+$checkin libpkg.a ../../..
+$exit
+
+libpkg.a:
+ icaclip.x ../icombine.com ../icombine.h
+ icaverage.x ../icombine.com ../icombine.h <imhdr.h>
+ iccclip.x ../icombine.com ../icombine.h
+ icgdata.x ../icombine.com ../icombine.h <imhdr.h> <mach.h>
+ icgrow.x ../icombine.com ../icombine.h
+ icmedian.x ../icombine.com ../icombine.h
+ icmm.x ../icombine.com ../icombine.h
+ icombine.x ../icombine.com ../icombine.h <error.h> <syserr.h>\
+ <imhdr.h> <imset.h> <mach.h>
+ icpclip.x ../icombine.com ../icombine.h
+ icsclip.x ../icombine.com ../icombine.h
+ icsigma.x ../icombine.com ../icombine.h <imhdr.h>
+ icsort.x
+ icstat.x ../icombine.com ../icombine.h <imhdr.h>
+ ;
diff --git a/noao/imred/ccdred/src/combine/icaclip.gx b/noao/imred/ccdred/src/combine/icaclip.gx
new file mode 100644
index 00000000..bb592542
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icaclip.gx
@@ -0,0 +1,573 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for this algorithm
+
+$for (sr)
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclip$t (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+$else
+PIXEL d1, low, high, sum, a, s, s1, r, one
+data one /1$f/
+$endif
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+	# Since the unweighted average is computed here, possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_mavsigclip$t (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med, low, high, r, s, s1, one
+data one /1.0/
+$else
+PIXEL med, low, high, r, s, s1, one
+data one /1$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Mem$t[d[1]+k]
+ else {
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Mem$t[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+ else
+ return
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Mem$t[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Mem$t[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
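
A minimal sketch of the avsigclip idea implemented above, written in numpy rather
than SPP: a single Poisson-scaled sigma is estimated from every column of a line
and then scaled by the square root of the local level when clipping each column.
The (nimages, npts) layout, the function name, and the use of masked arrays in
place of the compacted data/id buffers are assumptions made for illustration only.

    import numpy as np

    def avsigclip(stack, lsigma=3.0, hsigma=3.0, niter=5):
        # stack: (nimages, npts) scaled pixel values for one output line.
        data = np.ma.masked_invalid(np.asarray(stack, dtype=float))
        center = np.ma.median(data, axis=0)
        # One sigma for the whole line, expressed per unit of signal.
        scale = np.maximum(center, 1.0)
        s2 = ((data - center) ** 2 / scale).sum() / max(data.count() - 1, 1)
        s = np.sqrt(s2)
        for _ in range(niter):
            sigma = s * np.sqrt(np.maximum(center, 1.0))   # sigma at each column
            resid = data - center
            reject = np.ma.filled((resid < -lsigma * sigma) |
                                  (resid > hsigma * sigma), False)
            if not reject.any():
                break
            data[reject] = np.ma.masked                    # drop clipped pixels
            center = np.ma.median(data, axis=0)            # recompute the center
        return data
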
diff --git a/noao/imred/ccdred/src/combine/icaverage.gx b/noao/imred/ccdred/src/combine/icaverage.gx
new file mode 100644
index 00000000..c145bb33
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icaverage.gx
@@ -0,0 +1,93 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weight average.
+
+procedure ic_average$t (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average (returned)
+$else
+PIXEL average[npts] # Average (returned)
+$endif
+
+int i, j, k
+real sumwt, wt
+$if (datatype == sil)
+real sum
+$else
+PIXEL sum
+$endif
+
+include "../icombine.com"
+
+begin
+ # If no data has been excluded do the average without checking the
+ # number of points and using the fact that the weights are normalized.
+ # If all the data has been excluded set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mem$t[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mem$t[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Mem$t[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mem$t[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mem$t[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Mem$t[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
+$endfor
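
The branches above reduce to a weighted or unweighted mean per column, with the
blank value substituted where every pixel has been excluded. A rough numpy
equivalent, with NaN standing in for excluded pixels and the function name chosen
here for illustration:

    import numpy as np

    def ic_average(stack, weights=None, blank=0.0):
        # stack: (nimages, npts); NaN marks an excluded pixel in this sketch.
        data = np.ma.masked_invalid(np.asarray(stack, dtype=float))
        if weights is not None:
            avg = np.ma.average(data, axis=0, weights=weights)
        else:
            avg = data.mean(axis=0)
        return np.ma.filled(avg, blank)    # fully excluded columns get 'blank'
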
diff --git a/noao/imred/ccdred/src/combine/iccclip.gx b/noao/imred/ccdred/src/combine/iccclip.gx
new file mode 100644
index 00000000..69df984c
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/iccclip.gx
@@ -0,0 +1,471 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 2 # Minimum number of images for algorithm
+
+$for (sr)
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
+
+procedure ic_accdclip$t (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+$else
+PIXEL d1, low, high, sum, a, s, r, zero
+data zero /0$f/
+$endif
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are no pixels go on to the combining. Since the unweighted
+ # average is computed here possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Mem$t[d[1]+k]
+ sum = sum + Mem$t[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MCCDCLIP -- Reject pixels using CCD noise parameters about the median
+
+procedure ic_mccdclip$t (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med, zero
+data zero /0.0/
+$else
+PIXEL med, zero
+data zero /0$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Mem$t[d[n3-1]+k]
+ med = (med + Mem$t[d[n3]+k]) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
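
Both CCD-clipping procedures above predict the sigma of a pixel from a noise
model evaluated at the current average or median. A hedged sketch of that model
in plain Python follows; nm1..nm3 are the per-image noise-model terms filled in
elsewhere in the package (roughly a read-noise variance term, a gain, and a
sensitivity-noise fraction), and that interpretation is an assumption taken from
the way the terms are combined above.

    import math

    def ccd_sigma(level, nm1, nm2, nm3, scale=1.0, zero=0.0):
        # Predicted sigma of a pixel whose scaled level is 'level'.
        counts = max(0.0, scale * (level + zero))        # back to raw counts
        var = nm1 + counts / nm2 + (counts * nm3) ** 2   # read + Poisson + sens.
        return math.sqrt(var) / scale                    # back to scaled units

    # A pixel is rejected when (value - center) / ccd_sigma(center, ...) falls
    # outside the interval [-lsigma, +hsigma].
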
diff --git a/noao/imred/ccdred/src/combine/icgdata.gx b/noao/imred/ccdred/src/combine/icgdata.gx
new file mode 100644
index 00000000..41cf5810
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icgdata.gx
@@ -0,0 +1,233 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <mach.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers. If it is required, the connection to the original
+# image index is keeped in the returned m data pointers.
+
+procedure ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int npts # Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnl$t()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnl$t (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnl$t (in[i], buf, v2)
+ call amov$t (Mem$t[buf], Mem$t[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Mem$t[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Mem$t[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Mem$t[d[k]+j-1] = Mem$t[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Mem$t[d[k]+j-1] = Mem$t[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_PIXEL)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sort$t (d, Mem$t[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sort$t (d, Mem$t[dp], n, npts)
+ call mfree (dp, TY_PIXEL)
+ }
+end
+$endfor
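
The data preparation above thresholds the raw values, applies the scale and zero
corrections, and then compacts each column down to its good pixels. A simplified
numpy sketch of the same steps (the 2-D layout and function name are assumptions;
the SPP code works in place on pointer buffers and keeps image ids):

    import numpy as np

    def prepare_line(stack, scales, zeros, lthresh=-np.inf, hthresh=np.inf):
        # stack: (nimages, npts) raw pixel values for one output line.
        data = np.asarray(stack, dtype=float)
        bad = (data < lthresh) | (data > hthresh)        # threshold on raw data
        scales = np.asarray(scales, dtype=float)[:, None]
        zeros = np.asarray(zeros, dtype=float)[:, None]
        data = np.ma.masked_array(data / scales - zeros, mask=bad)
        n = data.count(axis=0)                           # good pixels per column
        return data, n
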
diff --git a/noao/imred/ccdred/src/combine/icgrow.gx b/noao/imred/ccdred/src/combine/icgrow.gx
new file mode 100644
index 00000000..e3cf6228
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icgrow.gx
@@ -0,0 +1,81 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids
+# to avoid growing from the pixels rejected here. The unweighted average
+# can be updated, but any rejected pixel requires the median to be
+# recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals and so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
+
+procedure ic_grow$t (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Mem$t[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Mem$t[d[j2]+k2] = Mem$t[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
+$endfor
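
A simplified sketch of the grow step above: once image l has been rejected at
column i, the same image is also rejected at columns within +/- grow of i. The
boolean-mask representation and function name are assumptions; the SPP code works
on the compacted id buffers and also honors the nkeep floor and updates the
running average, which this sketch omits.

    import numpy as np

    def grow_rejection(rejected, grow):
        # rejected: bool array (nimages, npts) of already-rejected pixels.
        grown = rejected.copy()
        nim, npts = rejected.shape
        for l, i in zip(*np.nonzero(rejected)):
            lo, hi = max(0, i - grow), min(npts, i + grow + 1)
            grown[l, lo:hi] = True                  # reject the neighbors too
        return grown
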
diff --git a/noao/imred/ccdred/src/combine/icimstack.x b/noao/imred/ccdred/src/combine/icimstack.x
new file mode 100644
index 00000000..2a19751d
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icimstack.x
@@ -0,0 +1,125 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <error.h>
+include <imhdr.h>
+
+
+# IC_IMSTACK -- Stack images into a single image of higher dimension.
+
+procedure ic_imstack (images, nimages, output)
+
+char images[SZ_FNAME-1, nimages] #I Input images
+int nimages #I Number of images
+char output #I Name of output image
+
+int i, j, npix
+long line_in[IM_MAXDIM], line_out[IM_MAXDIM]
+pointer sp, key, in, out, buf_in, buf_out, ptr
+
+int imgnls(), imgnli(), imgnll(), imgnlr(), imgnld(), imgnlx()
+int impnls(), impnli(), impnll(), impnlr(), impnld(), impnlx()
+pointer immap()
+errchk immap
+
+begin
+ call smark (sp)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+
+ iferr {
+ # Add each input image to the output image.
+ out = NULL
+ do i = 1, nimages {
+ in = NULL
+ ptr = immap (images[1,i], READ_ONLY, 0)
+ in = ptr
+
+ # For the first input image map the output image as a copy
+ # and increment the dimension. Set the output line counter.
+
+ if (i == 1) {
+ ptr = immap (output, NEW_COPY, in)
+ out = ptr
+ IM_NDIM(out) = IM_NDIM(out) + 1
+ IM_LEN(out, IM_NDIM(out)) = nimages
+ npix = IM_LEN(out, 1)
+ call amovkl (long(1), line_out, IM_MAXDIM)
+ }
+
+ # Check next input image for consistency with the output image.
+ if (IM_NDIM(in) != IM_NDIM(out) - 1)
+ call error (0, "Input images not consistent")
+ do j = 1, IM_NDIM(in) {
+ if (IM_LEN(in, j) != IM_LEN(out, j))
+ call error (0, "Input images not consistent")
+ }
+
+ call sprintf (Memc[key], SZ_FNAME, "stck%04d")
+ call pargi (i)
+ call imastr (out, Memc[key], images[1,i])
+
+ # Copy the input lines from the image to the next lines of
+ # the output image. Switch on the output data type to optimize
+ # IMIO.
+
+ call amovkl (long(1), line_in, IM_MAXDIM)
+ switch (IM_PIXTYPE (out)) {
+ case TY_SHORT:
+ while (imgnls (in, buf_in, line_in) != EOF) {
+ if (impnls (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovs (Mems[buf_in], Mems[buf_out], npix)
+ }
+ case TY_INT:
+ while (imgnli (in, buf_in, line_in) != EOF) {
+ if (impnli (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovi (Memi[buf_in], Memi[buf_out], npix)
+ }
+ case TY_USHORT, TY_LONG:
+ while (imgnll (in, buf_in, line_in) != EOF) {
+ if (impnll (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovl (Meml[buf_in], Meml[buf_out], npix)
+ }
+ case TY_REAL:
+ while (imgnlr (in, buf_in, line_in) != EOF) {
+ if (impnlr (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovr (Memr[buf_in], Memr[buf_out], npix)
+ }
+ case TY_DOUBLE:
+ while (imgnld (in, buf_in, line_in) != EOF) {
+ if (impnld (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovd (Memd[buf_in], Memd[buf_out], npix)
+ }
+ case TY_COMPLEX:
+ while (imgnlx (in, buf_in, line_in) != EOF) {
+ if (impnlx (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovx (Memx[buf_in], Memx[buf_out], npix)
+ }
+ default:
+ while (imgnlr (in, buf_in, line_in) != EOF) {
+ if (impnlr (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovr (Memr[buf_in], Memr[buf_out], npix)
+ }
+ }
+ call imunmap (in)
+ }
+ } then {
+ if (out != NULL) {
+ call imunmap (out)
+ call imdelete (out)
+ }
+ if (in != NULL)
+ call imunmap (in)
+ call sfree (sp)
+ call erract (EA_ERROR)
+ }
+
+ # Finish up.
+ call imunmap (out)
+ call sfree (sp)
+end
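
Conceptually IC_IMSTACK turns N same-sized images into one image of one higher
dimension and records where each plane came from in stckNNNN keywords. A numpy
analogue, with in-memory arrays standing in for IMIO line-by-line I/O:

    import numpy as np

    def imstack(arrays, names):
        if len({a.shape for a in arrays}) != 1:
            raise ValueError("Input images not consistent")
        cube = np.stack(arrays, axis=0)            # new highest dimension
        header = {"stck%04d" % (i + 1): name for i, name in enumerate(names)}
        return cube, header
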
diff --git a/noao/imred/ccdred/src/combine/iclog.x b/noao/imred/ccdred/src/combine/iclog.x
new file mode 100644
index 00000000..82135866
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/iclog.x
@@ -0,0 +1,378 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <mach.h>
+include "icombine.h"
+include "icmask.h"
+
+# IC_LOG -- Output log information if a log file has been specified.
+
+procedure ic_log (in, out, ncombine, exptime, sname, zname, wname,
+ mode, median, mean, scales, zeros, wts, offsets, nimages,
+ dozero, nout, expname, exposure)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int ncombine[nimages] # Number of previous combined images
+real exptime[nimages] # Exposure times
+char sname[ARB] # Scale name
+char zname[ARB] # Zero name
+char wname[ARB] # Weight name
+real mode[nimages] # Modes
+real median[nimages] # Medians
+real mean[nimages] # Means
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero or sky levels
+real wts[nimages] # Weights
+int offsets[nimages,ARB] # Image offsets
+int nimages # Number of images
+bool dozero # Zero flag
+int nout # Number of images combined in output
+char expname[ARB] # Exposure name
+real exposure # Output exposure
+
+int i, j, stack, ctor()
+real rval, imgetr()
+long clktime()
+bool prncombine, prexptime, prmode, prmedian, prmean, prmask
+bool prrdn, prgain, prsn
+pointer sp, fname, key
+errchk imgetr
+
+include "icombine.com"
+
+begin
+ if (logfd == NULL)
+ return
+
+ call smark (sp)
+ call salloc (fname, SZ_LINE, TY_CHAR)
+
+ stack = NO
+ if (project) {
+ ifnoerr (call imgstr (in[1], "stck0001", Memc[fname], SZ_LINE))
+ stack = YES
+ }
+ if (stack == YES)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+
+ # Time stamp the log and print parameter information.
+
+ call cnvdate (clktime(0), Memc[fname], SZ_LINE)
+ call fprintf (logfd, "\n%s: IMCOMBINE\n")
+ call pargstr (Memc[fname])
+ switch (combine) {
+ case AVERAGE:
+ call fprintf (logfd, " combine = average, ")
+ case MEDIAN:
+ call fprintf (logfd, " combine = median, ")
+ }
+ call fprintf (logfd, "scale = %s, zero = %s, weight = %s\n")
+ call pargstr (sname)
+ call pargstr (zname)
+ call pargstr (wname)
+
+ switch (reject) {
+ case MINMAX:
+ call fprintf (logfd, " reject = minmax, nlow = %d, nhigh = %d\n")
+ call pargi (nint (flow * nimages))
+ call pargi (nint (fhigh * nimages))
+ case CCDCLIP:
+ call fprintf (logfd, " reject = ccdclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd,
+ " rdnoise = %s, gain = %s, snoise = %s, sigma = %g, hsigma = %g\n")
+ call pargstr (Memc[rdnoise])
+ call pargstr (Memc[gain])
+ call pargstr (Memc[snoise])
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case CRREJECT:
+ call fprintf (logfd,
+ " reject = crreject, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd,
+ " rdnoise = %s, gain = %s, snoise = %s, hsigma = %g\n")
+ call pargstr (Memc[rdnoise])
+ call pargstr (Memc[gain])
+ call pargstr (Memc[snoise])
+ call pargr (hsigma)
+ case PCLIP:
+ call fprintf (logfd, " reject = pclip, nkeep = %d\n")
+ call pargi (nkeep)
+ call fprintf (logfd, " pclip = %g, lsigma = %g, hsigma = %g\n")
+ call pargr (pclip)
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case SIGCLIP:
+ call fprintf (logfd, " reject = sigclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd, " lsigma = %g, hsigma = %g\n")
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case AVSIGCLIP:
+ call fprintf (logfd,
+ " reject = avsigclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd, " lsigma = %g, hsigma = %g\n")
+ call pargr (lsigma)
+ call pargr (hsigma)
+ }
+ if (reject != NONE && grow > 0) {
+ call fprintf (logfd, " grow = %d\n")
+ call pargi (grow)
+ }
+ if (dothresh) {
+ if (lthresh > -MAX_REAL && hthresh < MAX_REAL) {
+ call fprintf (logfd, " lthreshold = %g, hthreshold = %g\n")
+ call pargr (lthresh)
+ call pargr (hthresh)
+ } else if (lthresh > -MAX_REAL) {
+ call fprintf (logfd, " lthreshold = %g\n")
+ call pargr (lthresh)
+ } else {
+ call fprintf (logfd, " hthreshold = %g\n")
+ call pargr (hthresh)
+ }
+ }
+ call fprintf (logfd, " blank = %g\n")
+ call pargr (blank)
+ call clgstr ("statsec", Memc[fname], SZ_LINE)
+ if (Memc[fname] != EOS) {
+ call fprintf (logfd, " statsec = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ if (ICM_TYPE(icm) != M_NONE) {
+ switch (ICM_TYPE(icm)) {
+ case M_BOOLEAN, M_GOODVAL:
+ call fprintf (logfd, " masktype = goodval, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_BADVAL:
+ call fprintf (logfd, " masktype = badval, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_GOODBITS:
+ call fprintf (logfd, " masktype = goodbits, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_BADBITS:
+ call fprintf (logfd, " masktype = badbits, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ }
+ }
+
+ # Print information pertaining to individual images as a set of
+ # columns with the image name being the first column. Determine
+ # what information is relevant and print the appropriate header.
+
+ prncombine = false
+ prexptime = false
+ prmode = false
+ prmedian = false
+ prmean = false
+ prmask = false
+ prrdn = false
+ prgain = false
+ prsn = false
+ do i = 1, nimages {
+ if (ncombine[i] != ncombine[1])
+ prncombine = true
+ if (exptime[i] != exptime[1])
+ prexptime = true
+ if (mode[i] != mode[1])
+ prmode = true
+ if (median[i] != median[1])
+ prmedian = true
+ if (mean[i] != mean[1])
+ prmean = true
+ if (ICM_TYPE(icm) != M_NONE && Memi[ICM_PMS(icm)+i-1] != NULL)
+ prmask = true
+ if (reject == CCDCLIP || reject == CRREJECT) {
+ j = 1
+ if (ctor (Memc[rdnoise], j, rval) == 0)
+ prrdn = true
+ j = 1
+ if (ctor (Memc[gain], j, rval) == 0)
+ prgain = true
+ j = 1
+ if (ctor (Memc[snoise], j, rval) == 0)
+ prsn = true
+ }
+ }
+
+ call fprintf (logfd, " %20s ")
+ call pargstr ("Images")
+ if (prncombine) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("N")
+ }
+ if (prexptime) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Exp")
+ }
+ if (prmode) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Mode")
+ }
+ if (prmedian) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Median")
+ }
+ if (prmean) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Mean")
+ }
+ if (prrdn) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Rdnoise")
+ }
+ if (prgain) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Gain")
+ }
+ if (prsn) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Snoise")
+ }
+ if (doscale) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Scale")
+ }
+ if (dozero) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Zero")
+ }
+ if (dowts) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Weight")
+ }
+ if (!aligned) {
+ call fprintf (logfd, " %9s")
+ call pargstr ("Offsets")
+ }
+ if (prmask) {
+ call fprintf (logfd, " %s")
+ call pargstr ("Maskfile")
+ }
+ call fprintf (logfd, "\n")
+
+ do i = 1, nimages {
+ if (stack == YES) {
+ call sprintf (Memc[key], SZ_FNAME, "stck%04d")
+ call pargi (i)
+ ifnoerr (call imgstr (in[i], Memc[key], Memc[fname], SZ_LINE)) {
+ call fprintf (logfd, " %21s")
+ call pargstr (Memc[fname])
+ } else {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %16s[%3d]")
+ call pargstr (Memc[fname])
+ call pargi (i)
+ }
+ } else if (project) {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %16s[%3d]")
+ call pargstr (Memc[fname])
+ call pargi (i)
+ } else {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %21s")
+ call pargstr (Memc[fname])
+ }
+ if (prncombine) {
+ call fprintf (logfd, " %6d")
+ call pargi (ncombine[i])
+ }
+ if (prexptime) {
+ call fprintf (logfd, " %6.1f")
+ call pargr (exptime[i])
+ }
+ if (prmode) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (mode[i])
+ }
+ if (prmedian) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (median[i])
+ }
+ if (prmean) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (mean[i])
+ }
+ if (prrdn) {
+ rval = imgetr (in[i], Memc[rdnoise])
+ call fprintf (logfd, " %7g")
+ call pargr (rval)
+ }
+ if (prgain) {
+ rval = imgetr (in[i], Memc[gain])
+ call fprintf (logfd, " %6g")
+ call pargr (rval)
+ }
+ if (prsn) {
+ rval = imgetr (in[i], Memc[snoise])
+ call fprintf (logfd, " %6g")
+ call pargr (rval)
+ }
+ if (doscale) {
+ call fprintf (logfd, " %6.3f")
+ call pargr (1./scales[i])
+ }
+ if (dozero) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (-zeros[i])
+ }
+ if (dowts) {
+ call fprintf (logfd, " %6.3f")
+ call pargr (wts[i])
+ }
+ if (!aligned) {
+ if (IM_NDIM(out[1]) == 1) {
+ call fprintf (logfd, " %9d")
+ call pargi (offsets[i,1])
+ } else {
+ do j = 1, IM_NDIM(out[1]) {
+ call fprintf (logfd, " %4d")
+ call pargi (offsets[i,j])
+ }
+ }
+ }
+ if (prmask && Memi[ICM_PMS(icm)+i-1] != NULL) {
+ call imgstr (in[i], "BPM", Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %s")
+ call pargstr (Memc[fname])
+ }
+ call fprintf (logfd, "\n")
+ }
+
+ # Log information about the output images.
+ call imstats (out[1], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, "\n Output image = %s, ncombine = %d")
+ call pargstr (Memc[fname])
+ call pargi (nout)
+ if (expname[1] != EOS) {
+ call fprintf (logfd, ", %s = %g")
+ call pargstr (expname)
+ call pargr (exposure)
+ }
+ call fprintf (logfd, "\n")
+
+ if (out[2] != NULL) {
+ call imstats (out[2], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " Pixel list image = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ if (out[3] != NULL) {
+ call imstats (out[3], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " Sigma image = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ call flush (logfd)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/combine/icmask.com b/noao/imred/ccdred/src/combine/icmask.com
new file mode 100644
index 00000000..baba6f6a
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icmask.com
@@ -0,0 +1,8 @@
+# IMCMASK -- Common for IMCOMBINE mask interface.
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+common /imcmask/ mtype, mvalue, bufs, pms
diff --git a/noao/imred/ccdred/src/combine/icmask.h b/noao/imred/ccdred/src/combine/icmask.h
new file mode 100644
index 00000000..b2d30530
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icmask.h
@@ -0,0 +1,7 @@
+# ICMASK -- Data structure for IMCOMBINE mask interface.
+
+define ICM_LEN 4 # Structure length
+define ICM_TYPE Memi[$1] # Mask type
+define ICM_VALUE Memi[$1+1] # Mask value
+define ICM_BUFS Memi[$1+2] # Pointer to data line buffers
+define ICM_PMS Memi[$1+3] # Pointer to array of PMIO pointers
diff --git a/noao/imred/ccdred/src/combine/icmask.x b/noao/imred/ccdred/src/combine/icmask.x
new file mode 100644
index 00000000..ba448b68
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icmask.x
@@ -0,0 +1,354 @@
+include <imhdr.h>
+include <pmset.h>
+include "icombine.h"
+include "icmask.h"
+
+# IC_MASK -- ICOMBINE mask interface
+#
+# IC_MOPEN -- Open masks
+# IC_MCLOSE -- Close the mask interface
+# IC_MGET -- Get lines of mask pixels for all the images
+# IC_MGET1 -- Get a line of mask pixels for the specified image
+
+
+# IC_MOPEN -- Open masks.
+# Parse and interpret the mask selection parameters.
+
+procedure ic_mopen (in, out, nimages)
+
+pointer in[nimages] #I Input images
+pointer out[ARB] #I Output images
+int nimages #I Number of images
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, npix, npms, clgwrd()
+real clgetr()
+pointer sp, fname, title, pm, pm_open()
+bool invert, pm_empty()
+errchk calloc, pm_open, pm_loadf
+
+include "icombine.com"
+
+begin
+ icm = NULL
+ if (IM_NDIM(out[1]) == 0)
+ return
+
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (title, SZ_FNAME, TY_CHAR)
+
+ # Determine the mask parameters and allocate memory.
+ # The mask buffers are initialized to all excluded so that
+ # output points outside the input data are always excluded
+ # and don't need to be set on a line-by-line basis.
+
+ mtype = clgwrd ("masktype", Memc[title], SZ_FNAME, MASKTYPES)
+ mvalue = clgetr ("maskvalue")
+ npix = IM_LEN(out[1],1)
+ call calloc (pms, nimages, TY_POINTER)
+ call calloc (bufs, nimages, TY_POINTER)
+ do i = 1, nimages {
+ call malloc (Memi[bufs+i-1], npix, TY_INT)
+ call amovki (1, Memi[Memi[bufs+i-1]], npix)
+ }
+
+ # Check for special cases. The BOOLEAN type is used when only
+ # zero and nonzero are significant; i.e. the actual mask values are
+ # not important. The invert flag is used to indicate that
+ # empty masks are all bad rather than all good.
+
+ if (mtype == 0)
+ mtype = M_NONE
+ if (mtype == M_BADBITS && mvalue == 0)
+ mtype = M_NONE
+ if (mvalue == 0 && (mtype == M_GOODVAL || mtype == M_GOODBITS))
+ mtype = M_BOOLEAN
+ if ((mtype == M_BADVAL && mvalue == 0) ||
+ (mtype == M_GOODVAL && mvalue != 0) ||
+ (mtype == M_GOODBITS && mvalue == 0))
+ invert = true
+ else
+ invert = false
+
+ # If mask images are to be used, get the mask name from the image
+ # header and open it saving the descriptor in the pms array.
+ # Empty masks (all good) are treated as if there was no mask image.
+
+ npms = 0
+ do i = 1, nimages {
+ if (mtype != M_NONE) {
+ ifnoerr (call imgstr (in[i], "BPM", Memc[fname], SZ_FNAME)) {
+ pm = pm_open (NULL)
+ call pm_loadf (pm, Memc[fname], Memc[title], SZ_FNAME)
+ call pm_seti (pm, P_REFIM, in[i])
+ if (pm_empty (pm) && !invert)
+ call pm_close (pm)
+ else {
+ if (project) {
+ npms = nimages
+ call amovki (pm, Memi[pms], nimages)
+ } else {
+ npms = npms + 1
+ Memi[pms+i-1] = pm
+ }
+ }
+ if (project)
+ break
+ }
+ }
+ }
+
+ # If no mask images are found and the mask parameters imply that
+ # good values are 0 then use the special case of no masks.
+
+ if (npms == 0) {
+ if (!invert)
+ mtype = M_NONE
+ }
+
+ # Set up mask structure.
+ call calloc (icm, ICM_LEN, TY_STRUCT)
+ ICM_TYPE(icm) = mtype
+ ICM_VALUE(icm) = mvalue
+ ICM_BUFS(icm) = bufs
+ ICM_PMS(icm) = pms
+
+ call sfree (sp)
+end
+
+
+# IC_MCLOSE -- Close the mask interface.
+
+procedure ic_mclose (nimages)
+
+int nimages # Number of images
+
+int i
+include "icombine.com"
+
+begin
+ if (icm == NULL)
+ return
+
+ do i = 1, nimages
+ call mfree (Memi[ICM_BUFS(icm)+i-1], TY_INT)
+ do i = 1, nimages {
+ if (Memi[ICM_PMS(icm)+i-1] != NULL)
+ call pm_close (Memi[ICM_PMS(icm)+i-1])
+ if (project)
+ break
+ }
+ call mfree (ICM_BUFS(icm), TY_POINTER)
+ call mfree (ICM_PMS(icm), TY_POINTER)
+ call mfree (icm, TY_STRUCT)
+end
+
+
+# IC_MGET -- Get lines of mask pixels in the output coordinate system.
+# This converts the mask format to an array where zero is good and nonzero
+# is bad. This has special cases for optimization.
+
+procedure ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+
+pointer in[nimages] # Input image pointers
+pointer out[ARB] # Output image pointer
+int offsets[nimages,ARB] # Offsets to output image
+long v1[IM_MAXDIM] # Data vector desired in output image
+long v2[IM_MAXDIM] # Data vector in input image
+pointer m[nimages] # Pointer to mask pointers
+int lflag[nimages] # Line flags
+int nimages # Number of images
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, j, ndim, nout, npix
+pointer buf, pm
+bool pm_linenotempty()
+errchk pm_glpi
+
+include "icombine.com"
+
+begin
+ # Determine if masks are needed at all. Note that the threshold
+ # is applied by simulating mask values so the mask pointers have to
+ # be set.
+
+ dflag = D_ALL
+ if (icm == NULL)
+ return
+ if (ICM_TYPE(icm) == M_NONE && aligned && !dothresh)
+ return
+
+ mtype = ICM_TYPE(icm)
+ mvalue = ICM_VALUE(icm)
+ bufs = ICM_BUFS(icm)
+ pms = ICM_PMS(icm)
+
+ # Set the mask pointers and line flags and apply offsets if needed.
+
+ ndim = IM_NDIM(out[1])
+ nout = IM_LEN(out[1],1)
+ do i = 1, nimages {
+ npix = IM_LEN(in[i],1)
+ j = offsets[i,1]
+ m[i] = Memi[bufs+i-1]
+ buf = Memi[bufs+i-1] + j
+ pm = Memi[pms+i-1]
+ if (npix == nout)
+ lflag[i] = D_ALL
+ else
+ lflag[i] = D_MIX
+
+ v2[1] = v1[1]
+ do j = 2, ndim {
+ v2[j] = v1[j] - offsets[i,j]
+ if (v2[j] < 1 || v2[j] > IM_LEN(in[i],j)) {
+ lflag[i] = D_NONE
+ break
+ }
+ }
+ if (project)
+ v2[ndim+1] = i
+
+ if (lflag[i] == D_NONE)
+ next
+
+ if (pm == NULL) {
+ call aclri (Memi[buf], npix)
+ next
+ }
+
+ # Do mask I/O and convert to appropriate values in order of
+ # expected usage.
+
+ if (pm_linenotempty (pm, v2)) {
+ call pm_glpi (pm, v2, Memi[buf], 32, npix, 0)
+
+ if (mtype == M_BOOLEAN)
+ ;
+ else if (mtype == M_BADBITS)
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_BADVAL)
+ call abeqki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_GOODBITS) {
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ call abeqki (Memi[buf], 0, Memi[buf], npix)
+ } else if (mtype == M_GOODVAL)
+ call abneki (Memi[buf], mvalue, Memi[buf], npix)
+
+ lflag[i] = D_NONE
+ do j = 1, npix
+ if (Memi[buf+j-1] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ } else {
+ if (mtype == M_BOOLEAN || mtype == M_BADBITS) {
+ call aclri (Memi[buf], npix)
+ } else if ((mtype == M_BADVAL && mvalue != 0) ||
+ (mtype == M_GOODVAL && mvalue == 0)) {
+ call aclri (Memi[buf], npix)
+ } else {
+ call amovki (1, Memi[buf], npix)
+ lflag[i] = D_NONE
+ }
+ }
+ }
+
+ # Set overall data flag
+ dflag = lflag[1]
+ do i = 2, nimages {
+ if (lflag[i] != dflag) {
+ dflag = D_MIX
+ break
+ }
+ }
+end
+
+
+# IC_MGET1 -- Get line of mask pixels from a specified image.
+# This is used by the IC_STAT procedure. This procedure converts the
+# stored mask format to an array where zero is good and nonzero is bad.
+# The data vector and returned mask array are in the input image pixel system.
+
+procedure ic_mget1 (in, image, offset, v, m)
+
+pointer in # Input image pointer
+int image # Image index
+int offset # Column offset
+long v[IM_MAXDIM] # Data vector desired
+pointer m # Pointer to mask
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, npix
+pointer buf, pm
+bool pm_linenotempty()
+errchk pm_glpi
+
+include "icombine.com"
+
+begin
+ dflag = D_ALL
+ if (icm == NULL)
+ return
+ if (ICM_TYPE(icm) == M_NONE)
+ return
+
+ mtype = ICM_TYPE(icm)
+ mvalue = ICM_VALUE(icm)
+ bufs = ICM_BUFS(icm)
+ pms = ICM_PMS(icm)
+
+ npix = IM_LEN(in,1)
+ m = Memi[bufs+image-1] + offset
+ pm = Memi[pms+image-1]
+ if (pm == NULL)
+ return
+
+ # Do mask I/O and convert to appropriate values in order of
+ # expected usage.
+
+ buf = m
+ if (pm_linenotempty (pm, v)) {
+ call pm_glpi (pm, v, Memi[buf], 32, npix, 0)
+
+ if (mtype == M_BOOLEAN)
+ ;
+ else if (mtype == M_BADBITS)
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_BADVAL)
+ call abeqki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_GOODBITS) {
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ call abeqki (Memi[buf], 0, Memi[buf], npix)
+ } else if (mtype == M_GOODVAL)
+ call abneki (Memi[buf], mvalue, Memi[buf], npix)
+
+ dflag = D_NONE
+ do i = 1, npix
+ if (Memi[buf+i-1] == 0) {
+ dflag = D_MIX
+ break
+ }
+ } else {
+ if (mtype == M_BOOLEAN || mtype == M_BADBITS) {
+ ;
+ } else if ((mtype == M_BADVAL && mvalue != 0) ||
+ (mtype == M_GOODVAL && mvalue == 0)) {
+ ;
+ } else
+ dflag = D_NONE
+ }
+end
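
Whatever convention the input mask uses (good value, bad value, good bits, bad
bits), the routines above normalize it to 0 = good, nonzero = bad before the
combining code sees it. A compact sketch of that conversion; the string mask
types and function name are illustrative stand-ins for the M_* codes:

    import numpy as np

    def normalize_mask(mask, mtype, mvalue):
        m = np.asarray(mask, dtype=int)
        if mtype == "badbits":
            return m & mvalue
        if mtype == "badval":
            return (m == mvalue).astype(int)
        if mtype == "goodbits":
            return ((m & mvalue) == 0).astype(int)
        if mtype == "goodval":
            return (m != mvalue).astype(int)
        return m        # "boolean": nonzero already means bad
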
diff --git a/noao/imred/ccdred/src/combine/icmedian.gx b/noao/imred/ccdred/src/combine/icmedian.gx
new file mode 100644
index 00000000..dc8488d9
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icmedian.gx
@@ -0,0 +1,228 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_MEDIAN -- Median of lines
+
+procedure ic_median$t (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+$if (datatype == silx)
+real val1, val2, val3
+$else
+PIXEL val1, val2, val3
+$endif
+PIXEL temp, wtemp
+$if (datatype == x)
+real abs_temp
+$endif
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Mem$t[d[j1]+k]
+ val2 = Mem$t[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mem$t[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Mem$t[d[j1]+k]
+ val2 = Mem$t[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mem$t[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+ # If there are more than 3 points use Wirth algorithm. This
+ # is the same as vops$amed.gx except for an even number of
+ # points it selects the middle two and averages.
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mem$t[d[j]+k]; lo1 = lo; up1 = up
+ $if (datatype == x)
+ abs_temp = abs (temp)
+ $endif
+
+ repeat {
+ $if (datatype == x)
+ while (abs (Mem$t[d[lo1]+k]) < abs_temp)
+ $else
+ while (Mem$t[d[lo1]+k] < temp)
+ $endif
+ lo1 = lo1 + 1
+ $if (datatype == x)
+ while (abs_temp < abs (Mem$t[d[up1]+k]))
+ $else
+ while (temp < Mem$t[d[up1]+k])
+ $endif
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mem$t[d[lo1]+k]
+ Mem$t[d[lo1]+k] = Mem$t[d[up1]+k]
+ Mem$t[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Mem$t[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mem$t[d[j]+k]; lo1 = lo; up1 = up
+ $if (datatype == x)
+ abs_temp = abs (temp)
+ $endif
+
+ repeat {
+ $if (datatype == x)
+ while (abs (Mem$t[d[lo1]+k]) < abs_temp)
+ $else
+ while (Mem$t[d[lo1]+k] < temp)
+ $endif
+ lo1 = lo1 + 1
+ $if (datatype == x)
+ while (abs_temp < abs (Mem$t[d[up1]+k]))
+ $else
+ while (temp < Mem$t[d[up1]+k])
+ $endif
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mem$t[d[lo1]+k]
+ Mem$t[d[lo1]+k] = Mem$t[d[up1]+k]
+ Mem$t[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Mem$t[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ $if (datatype == x)
+ val1 = abs (Mem$t[d[1]+k])
+ val2 = abs (Mem$t[d[2]+k])
+ val3 = abs (Mem$t[d[3]+k])
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = Mem$t[d[2]+k]
+ else if (val1 < val3) # acb
+ median[i] = Mem$t[d[3]+k]
+ else # cab
+ median[i] = Mem$t[d[1]+k]
+ } else {
+ if (val2 > val3) # cba
+ median[i] = Mem$t[d[2]+k]
+ else if (val1 > val3) # bca
+ median[i] = Mem$t[d[3]+k]
+ else # bac
+ median[i] = Mem$t[d[1]+k]
+ }
+ $else
+ val1 = Mem$t[d[1]+k]
+ val2 = Mem$t[d[2]+k]
+ val3 = Mem$t[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+ $endif
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Mem$t[d[1]+k]
+ val2 = Mem$t[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Mem$t[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
+$endfor
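
The median above is found by in-place selection (Wirth's algorithm) rather than a
full sort, and for an even number of pixels the two middle values are averaged.
The same convention in a short numpy sketch using partial sorting; the NaN return
stands in for the 'blank' substitution done by the caller:

    import numpy as np

    def line_median(values):
        v = np.asarray(values, dtype=float)
        n = v.size
        if n == 0:
            return np.nan                     # caller substitutes 'blank'
        mid = n // 2
        if n % 2:
            return np.partition(v, mid)[mid]
        part = np.partition(v, [mid - 1, mid])
        return (part[mid - 1] + part[mid]) / 2.0
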
diff --git a/noao/imred/ccdred/src/combine/icmm.gx b/noao/imred/ccdred/src/combine/icmm.gx
new file mode 100644
index 00000000..90837ae5
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icmm.gx
@@ -0,0 +1,177 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_MM -- Reject a specified number of high and low pixels
+
+procedure ic_mm$t (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+PIXEL d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Mem$t[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Mem$t[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Mem$t[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Mem$t[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Mem$t[kmax] = d2
+ else
+ Mem$t[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Mem$t[kmin] = d1
+ else
+ Mem$t[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Mem$t[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Mem$t[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Mem$t[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Mem$t[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
+$endfor
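
The minmax rejection above drops a fixed fraction of the lowest and highest
values in each column before combining. The net effect, ignoring the id
bookkeeping, is captured by this short sketch:

    import numpy as np

    def minmax_reject(stack, flow, fhigh):
        data = np.sort(np.asarray(stack, dtype=float), axis=0)  # (nimages, npts)
        n = data.shape[0]
        nlow = int(flow * n + 0.001)          # same rounding as the SPP code
        nhigh = int(fhigh * n + 0.001)
        return data[nlow:n - nhigh, :]
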
diff --git a/noao/imred/ccdred/src/combine/icombine.com b/noao/imred/ccdred/src/combine/icombine.com
new file mode 100644
index 00000000..cb826d58
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icombine.com
@@ -0,0 +1,40 @@
+# ICOMBINE Common
+
+int combine # Combine algorithm
+int reject # Rejection algorithm
+bool project # Combine across the highest dimension?
+real blank # Blank value
+pointer rdnoise # CCD read noise
+pointer gain # CCD gain
+pointer snoise # CCD sensitivity noise
+real lthresh # Low threshold
+real hthresh # High threshold
+int nkeep # Minimum to keep
+real lsigma # Low sigma cutoff
+real hsigma # High sigma cutoff
+real pclip # Number or fraction of pixels from median
+real flow # Fraction of low pixels to reject
+real fhigh # Fraction of high pixels to reject
+int grow # Grow radius
+bool mclip # Use median in sigma clipping?
+real sigscale # Sigma scaling tolerance
+int logfd # Log file descriptor
+
+# These flags allow special conditions to be optimized.
+
+int dflag # Data flag (D_ALL, D_NONE, D_MIX)
+bool aligned # Are the images aligned?
+bool doscale # Do the images have to be scaled?
+bool doscale1 # Do the sigma calculations have to be scaled?
+bool dothresh # Check pixels outside specified thresholds?
+bool dowts # Does the final average have to be weighted?
+bool keepids # Keep track of the image indices?
+bool docombine # Call the combine procedure?
+bool sort # Sort data?
+
+pointer icm # Mask data structure
+
+common /imccom/ combine, reject, blank, rdnoise, gain, snoise, lsigma, hsigma,
+ lthresh, hthresh, nkeep, pclip, flow, fhigh, grow, logfd,
+ dflag, sigscale, project, mclip, aligned, doscale, doscale1,
+ dothresh, dowts, keepids, docombine, sort, icm
diff --git a/noao/imred/ccdred/src/combine/icombine.gx b/noao/imred/ccdred/src/combine/icombine.gx
new file mode 100644
index 00000000..d6e93ef0
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icombine.gx
@@ -0,0 +1,395 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include <syserr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# ICOMBINE -- Combine images
+#
+# The memory and open file descriptor limits are checked and an attempt
+# to recover is made either by setting the image pixel files to be
+# closed after I/O or by notifying the calling program that memory
+# ran out and the IMIO buffer size should be reduced. After the checks
+# a procedure for the selected combine option is called.
+# Because there may be several failure modes when the file limits are
+# reached, we first assume an error is due to the file limit (except for
+# out of memory) and close some pixel files.  If the error then repeats
+# when the pixels are accessed, the error is passed back.
+
+$for (sr)
+procedure icombine$t (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1$t(), impl1i()
+errchk stropen, imgl1$t, impl1i
+$if (datatype == sil)
+pointer impl1r()
+errchk impl1r
+$else
+pointer impl1$t()
+errchk impl1$t
+$endif
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned, use the IMIO buffer; otherwise we need vectors of
+ # the output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_PIXEL)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ $if (datatype == sil)
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ $else
+ buf = impl1$t (out[1])
+ call aclr$t (Mem$t[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1$t (out[3])
+ call aclr$t (Mem$t[buf], npts)
+ }
+ $endif
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1$t (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1$t (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combine$t (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combine$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+$if (datatype == sil)
+pointer impnlr()
+$else
+pointer impnl$t()
+$endif
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ $if (datatype == sil)
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mm$t (d, id, n, npts)
+ case PCLIP:
+ call ic_pclip$t (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grow$t (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_average$t (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_median$t (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigma$t (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+ $else
+ while (impnl$t (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Mem$t[outdata])
+ else
+ call ic_accdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Mem$t[outdata])
+ case MINMAX:
+ call ic_mm$t (d, id, n, npts)
+ case PCLIP:
+ call ic_pclip$t (d, id, n, nimages, npts, Mem$t[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Mem$t[outdata])
+ else
+ call ic_asigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Mem$t[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Mem$t[outdata])
+ else
+ call ic_aavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Mem$t[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grow$t (d, id, n, nimages, npts, Mem$t[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_average$t (d, id, n, wts, npts, Mem$t[outdata])
+ case MEDIAN:
+ call ic_median$t (d, n, npts, Mem$t[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnl$t (out[3], buf, Meml[v1])
+ call ic_sigma$t (d, id, n, wts, npts, Mem$t[outdata],
+ Mem$t[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+ $endif
+
+ call sfree (sp)
+end
+$endfor
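The generated driver above is dominated by IMIO bookkeeping; the per-line work it dispatches reduces to gathering one line from every input, running the selected rejection, and then averaging or medianing what survives. A minimal Python sketch of that loop follows; the reject callable stands in for the ic_* rejection routines and is an assumption of this sketch, not an IRAF API.

    def combine_line(stack, reject=None, method="average", blank=0.0):
        """stack: one list per input image, all the same length (one output line)."""
        out, nrej = [], []
        for col in zip(*stack):
            kept = reject(list(col)) if reject else list(col)
            nrej.append(len(col) - len(kept))
            if not kept:
                out.append(blank)
            elif method == "median":
                s = sorted(kept)
                m = len(s)
                out.append(s[m // 2] if m % 2 else 0.5 * (s[m // 2 - 1] + s[m // 2]))
            else:
                out.append(sum(kept) / len(kept))
        return out, nrej

    line, nrej = combine_line([[1, 2, 3], [2, 2, 9], [3, 2, 3]], method="median")
    print(line, nrej)   # -> [2, 2, 3] [0, 0, 0]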
diff --git a/noao/imred/ccdred/src/combine/icombine.h b/noao/imred/ccdred/src/combine/icombine.h
new file mode 100644
index 00000000..13b77117
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icombine.h
@@ -0,0 +1,52 @@
+# ICOMBINE Definitions
+
+# Memory management parameters:
+define DEFBUFSIZE 65536 # default IMIO buffer size
+define FUDGE 0.8 # fudge factor
+
+# Rejection options:
+define REJECT "|none|ccdclip|crreject|minmax|pclip|sigclip|avsigclip|"
+define NONE 1 # No rejection algorithm
+define CCDCLIP 2 # CCD noise function clipping
+define CRREJECT 3 # CCD noise function clipping
+define MINMAX 4 # Minmax rejection
+define PCLIP 5 # Percentile clip
+define SIGCLIP 6 # Sigma clip
+define AVSIGCLIP 7 # Sigma clip with average poisson sigma
+
+# Combine options:
+define COMBINE "|average|median|"
+define AVERAGE 1
+define MEDIAN 2
+
+# Scaling options:
+define STYPES "|none|mode|median|mean|exposure|"
+define ZTYPES "|none|mode|median|mean|"
+define WTYPES "|none|mode|median|mean|exposure|"
+define S_NONE 1
+define S_MODE 2
+define S_MEDIAN 3
+define S_MEAN 4
+define S_EXPOSURE 5
+define S_FILE 6
+define S_KEYWORD 7
+define S_SECTION "|input|output|overlap|"
+define S_INPUT 1
+define S_OUTPUT 2
+define S_OVERLAP 3
+
+# Mask options
+define MASKTYPES "|none|goodvalue|badvalue|goodbits|badbits|"
+define M_NONE 1 # Don't use mask images
+define M_GOODVAL 2 # Value selecting good pixels
+define M_BADVAL 3 # Value selecting bad pixels
+define M_GOODBITS 4 # Bits selecting good pixels
+define M_BADBITS 5 # Bits selecting bad pixels
+define M_BOOLEAN -1 # Ignore mask values
+
+# Data flag
+define D_ALL 0 # All pixels are good
+define D_NONE 1 # All pixels are bad or rejected
+define D_MIX 2 # Mixture of good and bad pixels
+
+define TOL 0.001 # Tolerance for equal residuals
diff --git a/noao/imred/ccdred/src/combine/icpclip.gx b/noao/imred/ccdred/src/combine/icpclip.gx
new file mode 100644
index 00000000..223396c3
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icpclip.gx
@@ -0,0 +1,233 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number for clipping
+
+$for (sr)
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
+
+procedure ic_pclip$t (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med
+$else
+PIXEL med
+$endif
+
+include "../icombine.com"
+
+begin
+ # There must be at least MINCLIP and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Mem$t[d[n2-1]+j]
+ med = (med + Mem$t[d[n2]+j]) / 2.
+ } else
+ med = Mem$t[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Mem$t[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Mem$t[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Mem$t[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Mem$t[d[n5-1]+j]
+ med = (med + Mem$t[d[n5]+j]) / 2.
+ } else
+ med = Mem$t[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+ # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+j] = Mem$t[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+j] = Mem$t[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
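The idea in ic_pclip is easiest to see stripped of the nkeep restore pass and the reordering logic: estimate a "sigma" from the spread between the median and a pixel a fixed number of ranks away, then clip residuals against that estimate. A hedged Python sketch (function and parameter names are local to this sketch):

    def pclip(values, pclip_offset, lsigma, hsigma):
        """pclip_offset: signed number of ranks from the median used for the
        sigma estimate."""
        data = sorted(values)
        n = len(data)
        n2 = n // 2
        med = data[n2] if n % 2 else 0.5 * (data[n2 - 1] + data[n2])
        k = min(n - 1, max(0, n2 + pclip_offset))
        sigma = abs(data[k] - med)
        if sigma == 0.0:
            return data, med
        kept = [v for v in data if -lsigma < (v - med) / sigma < hsigma]
        return kept, med

    kept, med = pclip([1.0, 2.0, 2.5, 3.0, 50.0], pclip_offset=1,
                      lsigma=3.0, hsigma=3.0)
    print(kept, med)   # -> [2.0, 2.5, 3.0] 2.5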
diff --git a/noao/imred/ccdred/src/combine/icscale.x b/noao/imred/ccdred/src/combine/icscale.x
new file mode 100644
index 00000000..fc4efb2f
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icscale.x
@@ -0,0 +1,376 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include "icombine.h"
+
+# IC_SCALE -- Get the scale factors for the images.
+# 1. This procedure does CLIO to determine the type of scaling desired.
+# 2. The output header parameters for exposure time and NCOMBINE are set.
+
+procedure ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero or sky levels
+real wts[nimages] # Weights
+int nimages # Number of images
+
+int stype, ztype, wtype
+int i, j, k, l, nout
+real mode, median, mean, exposure, zmean, darktime, dark
+pointer sp, ncombine, exptime, modes, medians, means
+pointer section, str, sname, zname, wname, imref
+bool domode, domedian, domean, dozero, snorm, znorm, wflag
+
+bool clgetb()
+int hdmgeti(), strdic(), ic_gscale()
+real hdmgetr(), asumr(), asumi()
+errchk ic_gscale, ic_statr
+
+include "icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (ncombine, nimages, TY_INT)
+ call salloc (exptime, nimages, TY_REAL)
+ call salloc (modes, nimages, TY_REAL)
+ call salloc (medians, nimages, TY_REAL)
+ call salloc (means, nimages, TY_REAL)
+ call salloc (section, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (sname, SZ_FNAME, TY_CHAR)
+ call salloc (zname, SZ_FNAME, TY_CHAR)
+ call salloc (wname, SZ_FNAME, TY_CHAR)
+
+ # Set the defaults.
+ call amovki (1, Memi[ncombine], nimages)
+ call amovkr (0., Memr[exptime], nimages)
+ call amovkr (INDEF, Memr[modes], nimages)
+ call amovkr (INDEF, Memr[medians], nimages)
+ call amovkr (INDEF, Memr[means], nimages)
+ call amovkr (1., scales, nimages)
+ call amovkr (0., zeros, nimages)
+ call amovkr (1., wts, nimages)
+
+ # Get the number of images previously combined and the exposure times.
+ # The default combine number is 1 and the default exposure is 0.
+
+ do i = 1, nimages {
+ iferr (Memi[ncombine+i-1] = hdmgeti (in[i], "ncombine"))
+ Memi[ncombine+i-1] = 1
+ iferr (Memr[exptime+i-1] = hdmgetr (in[i], "exptime"))
+ Memr[exptime+i-1] = 0.
+ if (project) {
+ call amovki (Memi[ncombine], Memi[ncombine], nimages)
+ call amovkr (Memr[exptime], Memr[exptime], nimages)
+ break
+ }
+ }
+
+ # Set scaling factors.
+
+ stype = ic_gscale ("scale", Memc[sname], STYPES, in, Memr[exptime],
+ scales, nimages)
+ ztype = ic_gscale ("zero", Memc[zname], ZTYPES, in, Memr[exptime],
+ zeros, nimages)
+ wtype = ic_gscale ("weight", Memc[wname], WTYPES, in, Memr[exptime],
+ wts, nimages)
+
+ # Get image statistics only if needed.
+ domode = ((stype==S_MODE)||(ztype==S_MODE)||(wtype==S_MODE))
+ domedian = ((stype==S_MEDIAN)||(ztype==S_MEDIAN)||(wtype==S_MEDIAN))
+ domean = ((stype==S_MEAN)||(ztype==S_MEAN)||(wtype==S_MEAN))
+ if (domode || domedian || domean) {
+ Memc[section] = EOS
+ Memc[str] = EOS
+ call clgstr ("statsec", Memc[section], SZ_FNAME)
+ call sscan (Memc[section])
+ call gargwrd (Memc[section], SZ_FNAME)
+ call gargwrd (Memc[str], SZ_LINE)
+
+ i = strdic (Memc[section], Memc[section], SZ_FNAME, S_SECTION)
+ switch (i) {
+ case S_INPUT:
+ call strcpy (Memc[str], Memc[section], SZ_FNAME)
+ imref = NULL
+ case S_OUTPUT:
+ call strcpy (Memc[str], Memc[section], SZ_FNAME)
+ imref = out[1]
+ case S_OVERLAP:
+ call strcpy ("[", Memc[section], SZ_FNAME)
+ do i = 1, IM_NDIM(out[1]) {
+ k = offsets[1,i] + 1
+ l = offsets[1,i] + IM_LEN(in[1],i)
+ do j = 2, nimages {
+ k = max (k, offsets[j,i]+1)
+ l = min (l, offsets[j,i]+IM_LEN(in[j],i))
+ }
+ if (i < IM_NDIM(out[1]))
+ call sprintf (Memc[str], SZ_LINE, "%d:%d,")
+ else
+ call sprintf (Memc[str], SZ_LINE, "%d:%d]")
+ call pargi (k)
+ call pargi (l)
+ call strcat (Memc[str], Memc[section], SZ_FNAME)
+ }
+ imref = out[1]
+ default:
+ imref = NULL
+ }
+
+ do i = 1, nimages {
+ if (imref != out[1])
+ imref = in[i]
+ call ic_statr (in[i], imref, Memc[section], offsets,
+ i, nimages, domode, domedian, domean, mode, median, mean)
+ if (domode) {
+ Memr[modes+i-1] = mode
+ if (stype == S_MODE)
+ scales[i] = mode
+ if (ztype == S_MODE)
+ zeros[i] = mode
+ if (wtype == S_MODE)
+ wts[i] = mode
+ }
+ if (domedian) {
+ Memr[medians+i-1] = median
+ if (stype == S_MEDIAN)
+ scales[i] = median
+ if (ztype == S_MEDIAN)
+ zeros[i] = median
+ if (wtype == S_MEDIAN)
+ wts[i] = median
+ }
+ if (domean) {
+ Memr[means+i-1] = mean
+ if (stype == S_MEAN)
+ scales[i] = mean
+ if (ztype == S_MEAN)
+ zeros[i] = mean
+ if (wtype == S_MEAN)
+ wts[i] = mean
+ }
+ }
+ }
+
+ do i = 1, nimages
+ if (scales[i] <= 0.) {
+ call eprintf ("WARNING: Negative scale factors")
+ call eprintf (" -- ignoring scaling\n")
+ call amovkr (1., scales, nimages)
+ break
+ }
+
+ # Convert to relative factors if needed.
+ snorm = (stype == S_FILE || stype == S_KEYWORD)
+ znorm = (ztype == S_FILE || ztype == S_KEYWORD)
+ wflag = (wtype == S_FILE || wtype == S_KEYWORD)
+ if (snorm)
+ call arcpr (1., scales, scales, nimages)
+ else {
+ mean = asumr (scales, nimages) / nimages
+ call adivkr (scales, mean, scales, nimages)
+ }
+ call adivr (zeros, scales, zeros, nimages)
+ zmean = asumr (zeros, nimages) / nimages
+
+ if (wtype != S_NONE) {
+ do i = 1, nimages {
+ if (wts[i] <= 0.) {
+ call eprintf ("WARNING: Negative weights")
+ call eprintf (" -- using only NCOMBINE weights\n")
+ do j = 1, nimages
+ wts[j] = Memi[ncombine+j-1]
+ break
+ }
+ if (ztype == S_NONE || znorm || wflag)
+ wts[i] = Memi[ncombine+i-1] * wts[i]
+ else {
+ if (zeros[i] <= 0.) {
+ call eprintf ("WARNING: Negative zero offsets")
+ call eprintf (" -- ignoring zero weight adjustments\n")
+ do j = 1, nimages
+ wts[j] = Memi[ncombine+j-1] * wts[j]
+ break
+ }
+ wts[i] = Memi[ncombine+i-1] * wts[i] * zmean / zeros[i]
+ }
+ }
+ }
+
+ if (znorm)
+ call anegr (zeros, zeros, nimages)
+ else {
+ # Because of finite arithmetic it is possible for the zero offsets
+ # to be nonzero even when they are all equal.  Just for the sake of
+ # a clean log, clear the zero offsets in that case.
+
+ call asubkr (zeros, zmean, zeros, nimages)
+ for (i=2; (i<=nimages)&&(zeros[i]==zeros[1]); i=i+1)
+ ;
+ if (i > nimages)
+ call aclrr (zeros, nimages)
+ }
+ mean = asumr (wts, nimages)
+ call adivkr (wts, mean, wts, nimages)
+
+ # Set flags for scaling, zero offsets, sigma scaling, weights.
+ # Sigma scaling may be suppressed if the scales or zeros are
+ # different by a specified tolerance.
+
+ doscale = false
+ dozero = false
+ doscale1 = false
+ dowts = false
+ do i = 2, nimages {
+ if (snorm || scales[i] != scales[1])
+ doscale = true
+ if (znorm || zeros[i] != zeros[1])
+ dozero = true
+ if (wts[i] != wts[1])
+ dowts = true
+ }
+ if (doscale && sigscale != 0.) {
+ do i = 1, nimages {
+ if (abs (scales[i] - 1) > sigscale) {
+ doscale1 = true
+ break
+ }
+ }
+ if (!doscale1 && zmean > 0.) {
+ do i = 1, nimages {
+ if (abs (zeros[i] / zmean) > sigscale) {
+ doscale1 = true
+ break
+ }
+ }
+ }
+ }
+
+ # Set the output header parameters.
+ nout = asumi (Memi[ncombine], nimages)
+ call hdmputi (out[1], "ncombine", nout)
+ exposure = 0.
+ darktime = 0.
+ mean = 0.
+ do i = 1, nimages {
+ exposure = exposure + wts[i] * Memr[exptime+i-1] / scales[i]
+ ifnoerr (dark = hdmgetr (in[i], "darktime"))
+ darktime = darktime + wts[i] * dark / scales[i]
+ else
+ darktime = darktime + wts[i] * Memr[exptime+i-1] / scales[i]
+ ifnoerr (mode = hdmgetr (in[i], "ccdmean"))
+ mean = mean + wts[i] * mode / scales[i]
+ }
+ call hdmputr (out[1], "exptime", exposure)
+ call hdmputr (out[1], "darktime", darktime)
+ ifnoerr (mode = hdmgetr (out[1], "ccdmean")) {
+ call hdmputr (out[1], "ccdmean", mean)
+ iferr (call imdelf (out[1], "ccdmeant"))
+ ;
+ }
+ if (out[2] != NULL) {
+ call imstats (out[2], IM_IMAGENAME, Memc[str], SZ_FNAME)
+ call imastr (out[1], "BPM", Memc[str])
+ }
+
+ # Start the log here since much of the info is only available here.
+ if (clgetb ("verbose")) {
+ i = logfd
+ logfd = STDOUT
+ call ic_log (in, out, Memi[ncombine], Memr[exptime], Memc[sname],
+ Memc[zname], Memc[wname], Memr[modes], Memr[medians],
+ Memr[means], scales, zeros, wts, offsets, nimages, dozero,
+ nout, "", exposure)
+
+ logfd = i
+ }
+ call ic_log (in, out, Memi[ncombine], Memr[exptime], Memc[sname],
+ Memc[zname], Memc[wname], Memr[modes], Memr[medians], Memr[means],
+ scales, zeros, wts, offsets, nimages, dozero, nout,
+ "", exposure)
+
+ doscale = (doscale || dozero)
+
+ call sfree (sp)
+end
+
+
+# IC_GSCALE -- Get scale values as directed by CL parameter
+# The values can be one of those in the dictionary, from a file specified
+# with a @ prefix, or from an image header keyword specified by a ! prefix.
+
+int procedure ic_gscale (param, name, dic, in, exptime, values, nimages)
+
+char param[ARB] #I CL parameter name
+char name[SZ_FNAME] #O Parameter value
+char dic[ARB] #I Dictionary string
+pointer in[nimages] #I IMIO pointers
+real exptime[nimages] #I Exposure times
+real values[nimages] #O Values
+int nimages #I Number of images
+
+int type #O Type of value
+
+int fd, i, nowhite(), open(), fscan(), nscan(), strdic()
+real rval, hdmgetr()
+pointer errstr
+errchk open, hdmgetr()
+
+include "icombine.com"
+
+begin
+ call clgstr (param, name, SZ_FNAME)
+ if (nowhite (name, name, SZ_FNAME) == 0)
+ type = S_NONE
+ else if (name[1] == '@') {
+ type = S_FILE
+ fd = open (name[2], READ_ONLY, TEXT_FILE)
+ i = 0
+ while (fscan (fd) != EOF) {
+ call gargr (rval)
+ if (nscan() != 1)
+ next
+ if (i == nimages) {
+ call eprintf (
+ "Warning: Ignoring additional %s values in %s\n")
+ call pargstr (param)
+ call pargstr (name[2])
+ break
+ }
+ i = i + 1
+ values[i] = rval
+ }
+ call close (fd)
+ if (i < nimages) {
+ call salloc (errstr, SZ_LINE, TY_CHAR)
+ call sprintf (Memc[errstr], SZ_FNAME,
+ "Insufficient %s values in %s")
+ call pargstr (param)
+ call pargstr (name[2])
+ call error (1, Memc[errstr])
+ }
+ } else if (name[1] == '!') {
+ type = S_KEYWORD
+ do i = 1, nimages {
+ values[i] = hdmgetr (in[i], name[2])
+ if (project) {
+ call amovkr (values, values, nimages)
+ break
+ }
+ }
+ } else {
+ type = strdic (name, name, SZ_FNAME, dic)
+ if (type == 0)
+ call error (1, "Unknown scale, zero, or weight type")
+ if (type==S_EXPOSURE)
+ do i = 1, nimages
+ values[i] = max (0.001, exptime[i])
+ }
+
+ return (type)
+end
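The scaling arithmetic at the heart of ic_scale, once the mode/median/mean or header values have been obtained, is a small normalization step. The sketch below shows that step only, for the common case where the zero levels are positive sky values; the names and the simplified weighting are assumptions of this sketch, not the full set of cases handled above.

    def normalize(scales, zeros, ncombine):
        n = len(scales)
        mean_scale = sum(scales) / n
        scales = [s / mean_scale for s in scales]          # relative scales
        zeros = [z / s for z, s in zip(zeros, scales)]     # zero levels in scaled units
        zmean = sum(zeros) / n
        # Weight by previous number combined and by relative sky level.
        wts = [nc * zmean / z for nc, z in zip(ncombine, zeros)]
        wsum = sum(wts)
        wts = [w / wsum for w in wts]
        zeros = [z - zmean for z in zeros]                 # offsets about the mean level
        return scales, zeros, wts

    # Equal scales, the second image has twice the sky: it gets the lower weight.
    print(normalize([1.0, 1.0], [10.0, 20.0], [1, 1]))
    # -> ([1.0, 1.0], [-5.0, 5.0], [0.666..., 0.333...])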
diff --git a/noao/imred/ccdred/src/combine/icsclip.gx b/noao/imred/ccdred/src/combine/icsclip.gx
new file mode 100644
index 00000000..f70611aa
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icsclip.gx
@@ -0,0 +1,504 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for the algorithm
+
+$for (sr)
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average rejects the high and low pixels. A correction for
+# different scalings of the images may be made. Weights are not used.
+
+procedure ic_asigclip$t (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+$else
+PIXEL d1, low, high, sum, a, s, r, one
+data one /1$f/
+$endif
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
+
+procedure ic_msigclip$t (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+$if (datatype == sil)
+real med, one
+data one /1.0/
+$else
+PIXEL med, one
+data one /1$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Mem$t[d[n3-1]+k] + Mem$t[d[n3]+k]) / 2.
+ else
+ med = Mem$t[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Mem$t[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Mem$t[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
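The control flow of ic_asigclip, minus the scaling correction, the nkeep restore pass, and the in-place compaction, is a familiar iteration: start from an average with the single lowest and highest pixel excluded, clip at lsigma/hsigma, and repeat until nothing more is rejected. A minimal Python sketch under those simplifications:

    import math

    def sigclip_average(values, lsigma, hsigma):
        data = sorted(values)
        if len(data) < 3:
            return sum(data) / len(data), data
        avg = sum(data[1:-1]) / (len(data) - 2)   # drop the single low and high
        while len(data) > 2:
            n = len(data)
            sigma = math.sqrt(sum((v - avg) ** 2 for v in data) / (n - 1))
            if sigma <= 0.0:
                break
            kept = [v for v in data if -lsigma <= (v - avg) / sigma <= hsigma]
            if not kept or len(kept) == n:
                break
            data = kept
            avg = sum(data) / len(data)
        return avg, data

    avg, kept = sigclip_average([10, 11, 9, 10, 10, 10, 12, 8, 10, 60],
                                lsigma=2.5, hsigma=2.5)
    print(avg, len(kept))   # -> 10.0 9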
diff --git a/noao/imred/ccdred/src/combine/icsection.x b/noao/imred/ccdred/src/combine/icsection.x
new file mode 100644
index 00000000..746c1f51
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icsection.x
@@ -0,0 +1,94 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <ctype.h>
+
+# IC_SECTION -- Parse an image section into its elements.
+# 1. The default values must be set by the caller.
+# 2. A null image section is OK.
+# 3. The first nonwhitespace character must be '['.
+# 4. The last interpreted character must be ']'.
+#
+# This procedure should be replaced with an IMIO procedure at some
+# point.
+
+procedure ic_section (section, x1, x2, xs, ndim)
+
+char section[ARB] # Image section
+int x1[ndim] # Starting pixel
+int x2[ndim] # Ending pixel
+int xs[ndim] # Step
+int ndim # Number of dimensions
+
+int i, ip, a, b, c, temp, ctoi()
+define error_ 99
+
+begin
+ # Decode the section string.
+ ip = 1
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == '[')
+ ip = ip + 1
+ else if (section[ip] == EOS)
+ return
+ else
+ goto error_
+
+ do i = 1, ndim {
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ']')
+ break
+
+ # Default values
+ a = x1[i]
+ b = x2[i]
+ c = xs[i]
+
+ # Get a:b:c. Allow notation such as "-*:c"
+ # (or even "-:c") where the step is obviously negative.
+
+ if (ctoi (section, ip, temp) > 0) { # a
+ a = temp
+ if (section[ip] == ':') {
+ ip = ip + 1
+ if (ctoi (section, ip, b) == 0) # a:b
+ goto error_
+ } else
+ b = a
+ } else if (section[ip] == '-') { # -*
+ temp = a
+ a = b
+ b = temp
+ ip = ip + 1
+ if (section[ip] == '*')
+ ip = ip + 1
+ } else if (section[ip] == '*') # *
+ ip = ip + 1
+ if (section[ip] == ':') { # ..:step
+ ip = ip + 1
+ if (ctoi (section, ip, c) == 0)
+ goto error_
+ else if (c == 0)
+ goto error_
+ }
+ if (a > b && c > 0)
+ c = -c
+
+ x1[i] = a
+ x2[i] = b
+ xs[i] = c
+
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ',')
+ ip = ip + 1
+ }
+
+ if (section[ip] != ']')
+ goto error_
+
+ return
+error_
+ call error (0, "Error in image section specification")
+end
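A rough Python counterpart of ic_section may make the "[x1:x2:step, ...]" grammar easier to follow. Defaults are supplied per axis, "*" (or an empty field) keeps the default range, and a reversed range forces a negative step; error handling collapses to a ValueError. This is a sketch, not the IRAF parser.

    def parse_section(section, defaults):
        """defaults: one (x1, x2, step) tuple per axis; returns lists in the
        same order."""
        result = [list(d) for d in defaults]
        section = section.strip()
        if not section:
            return result
        if not (section.startswith("[") and section.endswith("]")):
            raise ValueError("Error in image section specification")
        for i, field in enumerate(section[1:-1].split(",")):
            if i >= len(result):
                break
            parts = [p.strip() for p in field.split(":")]
            x1, x2, step = result[i]
            if parts[0] not in ("", "*", "-*", "-"):
                x1 = int(parts[0])
                x2 = x1 if len(parts) < 2 else int(parts[1])
            elif parts[0] in ("-*", "-"):
                x1, x2 = x2, x1                   # reversed default range
            if len(parts) == 3 or (parts[0] in ("", "*", "-*", "-") and len(parts) == 2):
                step = int(parts[-1])
            if x1 > x2 and step > 0:
                step = -step
            result[i] = [x1, x2, step]
        return result

    # Axis defaults 1..512 with step 1.
    print(parse_section("[100:200, *:2]", [(1, 512, 1), (1, 512, 1)]))
    # -> [[100, 200, 1], [1, 512, 2]]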
diff --git a/noao/imred/ccdred/src/combine/icsetout.x b/noao/imred/ccdred/src/combine/icsetout.x
new file mode 100644
index 00000000..bd1d75ec
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icsetout.x
@@ -0,0 +1,193 @@
+include <imhdr.h>
+include <mwset.h>
+
+# IC_SETOUT -- Set output image size and offsets of input images.
+
+procedure ic_setout (in, out, offsets, nimages)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Offsets
+int nimages # Number of images
+
+int i, j, indim, outdim, mwdim, a, b, amin, bmax, fd
+real val
+bool reloff, streq()
+pointer sp, fname, lref, wref, cd, coord, shift, axno, axval
+pointer mw, ct, mw_openim(), mw_sctran()
+int open(), fscan(), nscan(), mw_stati()
+errchk mw_openim, mw_gwtermd, mw_gltermd, mw_gaxmap
+errchk mw_sctran, mw_ctrand, open
+
+include "icombine.com"
+define newscan_ 10
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (lref, IM_MAXDIM, TY_DOUBLE)
+ call salloc (wref, IM_MAXDIM, TY_DOUBLE)
+ call salloc (cd, IM_MAXDIM*IM_MAXDIM, TY_DOUBLE)
+ call salloc (coord, IM_MAXDIM, TY_DOUBLE)
+ call salloc (shift, IM_MAXDIM, TY_REAL)
+ call salloc (axno, IM_MAXDIM, TY_INT)
+ call salloc (axval, IM_MAXDIM, TY_INT)
+
+ # Check and set the image dimensionality.
+ indim = IM_NDIM(in[1])
+ outdim = IM_NDIM(out[1])
+ if (project) {
+ outdim = indim - 1
+ IM_NDIM(out[1]) = outdim
+ } else {
+ do i = 1, nimages
+ if (IM_NDIM(in[i]) != outdim) {
+ call sfree (sp)
+ call error (1, "Image dimensions are not the same")
+ }
+ }
+
+ # Set the reference point to that of the first image.
+ mw = mw_openim (in[1])
+ mwdim = mw_stati (mw, MW_NPHYSDIM)
+ call mw_gwtermd (mw, Memd[lref], Memd[wref], Memd[cd], mwdim)
+ ct = mw_sctran (mw, "world", "logical", 0)
+ call mw_ctrand (ct, Memd[wref], Memd[lref], mwdim)
+ call mw_ctfree (ct)
+ if (project)
+ Memd[lref+outdim] = 1
+
+ # Parse the user offset string. If "none" then there are no offsets.
+ # If "wcs" then set the offsets based on the image WCS.
+ # If "grid" then set the offsets based on the input grid parameters.
+ # If a file scan it.
+
+ call clgstr ("offsets", Memc[fname], SZ_FNAME)
+ call sscan (Memc[fname])
+ call gargwrd (Memc[fname], SZ_FNAME)
+ if (nscan() == 0 || streq (Memc[fname], "none")) {
+ call aclri (offsets, outdim*nimages)
+ reloff = true
+ } else if (streq (Memc[fname], "wcs")) {
+ do j = 1, outdim
+ offsets[1,j] = 0
+ if (project) {
+ ct = mw_sctran (mw, "world", "logical", 0)
+ do i = 2, nimages {
+ Memd[wref+outdim] = i
+ call mw_ctrand (ct, Memd[wref], Memd[coord], indim)
+ do j = 1, outdim
+ offsets[i,j] = nint (Memd[lref+j-1] - Memd[coord+j-1])
+ }
+ call mw_ctfree (ct)
+ call mw_close (mw)
+ } else {
+ do i = 2, nimages {
+ call mw_close (mw)
+ mw = mw_openim (in[i])
+ ct = mw_sctran (mw, "world", "logical", 0)
+ call mw_ctrand (ct, Memd[wref], Memd[coord], indim)
+ do j = 1, outdim
+ offsets[i,j] = nint (Memd[lref+j-1] - Memd[coord+j-1])
+ call mw_ctfree (ct)
+ }
+ }
+ reloff = true
+ } else if (streq (Memc[fname], "grid")) {
+ amin = 1
+ do j = 1, outdim {
+ call gargi (a)
+ call gargi (b)
+ if (nscan() < 1+2*j)
+ break
+ do i = 1, nimages
+ offsets[i,j] = mod ((i-1)/amin, a) * b
+ amin = amin * a
+ }
+ reloff = true
+ } else {
+ reloff = true
+ fd = open (Memc[fname], READ_ONLY, TEXT_FILE)
+ do i = 1, nimages {
+newscan_ if (fscan (fd) == EOF)
+ call error (1, "IMCOMBINE: Offset list too short")
+ call gargwrd (Memc[fname], SZ_FNAME)
+ if (Memc[fname] == '#') {
+ call gargwrd (Memc[fname], SZ_FNAME)
+ call strlwr (Memc[fname])
+ if (streq (Memc[fname], "absolute"))
+ reloff = false
+ else if (streq (Memc[fname], "relative"))
+ reloff = true
+ goto newscan_
+ }
+ call reset_scan ()
+ do j = 1, outdim {
+ call gargr (val)
+ offsets[i,j] = nint (val)
+ }
+ if (nscan() < outdim)
+ call error (1, "IMCOMBINE: Error in offset list")
+ }
+ call close (fd)
+ }
+
+ # Set the output image size and the aligned flag
+ aligned = true
+ do j = 1, outdim {
+ a = offsets[1,j]
+ b = IM_LEN(in[1],j) + a
+ amin = a
+ bmax = b
+ do i = 2, nimages {
+ a = offsets[i,j]
+ b = IM_LEN(in[i],j) + a
+ if (a != amin || b != bmax || !reloff)
+ aligned = false
+ amin = min (a, amin)
+ bmax = max (b, bmax)
+ }
+ IM_LEN(out[1],j) = bmax
+ if (reloff || amin < 0) {
+ do i = 1, nimages
+ offsets[i,j] = offsets[i,j] - amin
+ IM_LEN(out[1],j) = IM_LEN(out[1],j) - amin
+ }
+ }
+
+ # Update the WCS.
+ if (project || !aligned || !reloff) {
+ call mw_close (mw)
+ mw = mw_openim (out[1])
+ mwdim = mw_stati (mw, MW_NPHYSDIM)
+ call mw_gaxmap (mw, Memi[axno], Memi[axval], mwdim)
+ if (!aligned || !reloff) {
+ call mw_gltermd (mw, Memd[cd], Memd[lref], mwdim)
+ do i = 1, mwdim {
+ j = Memi[axno+i-1]
+ if (j > 0 && j <= indim)
+ Memd[lref+i-1] = Memd[lref+i-1] + offsets[1,j]
+ }
+ call mw_sltermd (mw, Memd[cd], Memd[lref], mwdim)
+ }
+ if (project) {
+ # Apply dimensional reduction.
+ do i = 1, mwdim {
+ j = Memi[axno+i-1]
+ if (j <= outdim)
+ next
+ else if (j > outdim+1)
+ Memi[axno+i-1] = j - 1
+ else {
+ Memi[axno+i-1] = 0
+ Memi[axval+i-1] = 0
+ }
+ }
+ call mw_saxmap (mw, Memi[axno], Memi[axval], mwdim)
+ }
+ call mw_saveim (mw, out)
+ }
+ call mw_close (mw)
+
+ call sfree (sp)
+end
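Along a single axis the bookkeeping in ic_setout amounts to spanning all the inputs, shifting relative offsets so the smallest becomes zero, and calling the stack aligned only when every input covers the same range. A small illustrative helper (the name and the per-axis interface are assumptions of this sketch):

    def set_axis(offsets, lengths, relative=True):
        lo = min(offsets)
        hi = max(o + l for o, l in zip(offsets, lengths))
        aligned = relative and all(o == offsets[0] and l == lengths[0]
                                   for o, l in zip(offsets, lengths))
        if relative or lo < 0:
            offsets = [o - lo for o in offsets]
            out_len = hi - lo
        else:
            out_len = hi
        return offsets, out_len, aligned

    # Two 100-pixel images shifted by 10 pixels -> a 110-pixel output axis.
    print(set_axis([0, 10], [100, 100]))
    # -> ([0, 10], 110, False)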
diff --git a/noao/imred/ccdred/src/combine/icsigma.gx b/noao/imred/ccdred/src/combine/icsigma.gx
new file mode 100644
index 00000000..d0ae28d4
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icsigma.gx
@@ -0,0 +1,115 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
+
+procedure ic_sigma$t (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+$else
+PIXEL average[npts] # Average
+PIXEL sigma[npts] # Sigma line (returned)
+$endif
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+$if (datatype == sil)
+real a, sum
+$else
+PIXEL a, sum
+$endif
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mem$t[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Mem$t[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mem$t[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Mem$t[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
+$endfor
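At one output pixel the weighted branch of ic_sigma evaluates a weighted scatter about the combined value with the n/(n-1) finite-population correction, which can be written compactly as below. The function name is local to this sketch, and the normalization follows the branch used when some pixels have been rejected.

    import math

    def sigma_pixel(values, weights, combined):
        n = len(values)
        if n <= 1:
            return 0.0
        sumwt = sum(weights)
        ss = sum(w * (v - combined) ** 2 for v, w in zip(values, weights))
        return math.sqrt(ss / sumwt * n / (n - 1))

    print(sigma_pixel([10.0, 12.0, 11.0], [1/3, 1/3, 1/3], combined=11.0))
    # -> 1.0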
diff --git a/noao/imred/ccdred/src/combine/icsort.gx b/noao/imred/ccdred/src/combine/icsort.gx
new file mode 100644
index 00000000..2235dbd0
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icsort.gx
@@ -0,0 +1,386 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+define LOGPTR 32 # log2(maxpts) (4e9)
+
+$for (sr)
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
+
+procedure ic_sort$t (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+PIXEL b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+PIXEL pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Mem$t[a[i]+l]
+
+ # Special cases
+ $if (datatype == x)
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (abs (temp) < abs (pivot)) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (abs (temp) < abs (pivot)) { # bac|bca|cba
+ if (abs (temp) < abs (temp3)) { # bac|bca
+ b[1] = temp
+ if (abs (pivot) < abs (temp3)) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (abs (temp3) < abs (temp)) { # acb|cab
+ b[3] = temp
+ if (abs (pivot) < abs (temp3)) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $else
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $endif
+
+ # General case
+ do i = 1, npix
+ b[i] = Mem$t[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ $if (datatype == x)
+ for (i=i+1; abs(b[i]) < abs(pivot); i=i+1)
+ $else
+ for (i=i+1; b[i] < pivot; i=i+1)
+ $endif
+ ;
+ for (j=j-1; j > i; j=j-1)
+ $if (datatype == x)
+ if (abs(b[j]) <= abs(pivot))
+ $else
+ if (b[j] <= pivot)
+ $endif
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Mem$t[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second integer set of
+# vectors is sorted.
+
+procedure ic_2sort$t (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+PIXEL b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+PIXEL pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Mem$t[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ $if (datatype == x)
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (abs (temp) < abs (pivot)) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (abs (temp) < abs (pivot)) { # bac|bca|cba
+ if (abs (temp) < abs (temp3)) { # bac|bca
+ b[1] = temp
+ if (abs (pivot) < abs (temp3)) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (abs (temp3) < abs (temp)) { # acb|cab
+ b[3] = temp
+ if (abs (pivot) < abs (temp3)) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $else
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $endif
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ $if (datatype == x)
+ for (i=i+1; abs(b[i]) < abs(pivot); i=i+1)
+ $else
+ for (i=i+1; b[i] < pivot; i=i+1)
+ $endif
+ ;
+ for (j=j-1; j > i; j=j-1)
+ $if (datatype == x)
+ if (abs(b[j]) <= abs(pivot))
+ $else
+ if (b[j] <= pivot)
+ $endif
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Mem$t[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
+$endfor
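Functionally, ic_sort sorts the stack of image lines column by column, honoring a per-column count of valid pixels; the explicit quicksort and the special 2- and 3-pixel cases exist only to avoid copies in the generic code. The effect is equivalent to this short sketch (names local to the sketch):

    def sort_columns(lines, nvalid):
        """lines: equal-length lists, one per image; nvalid[i] is the number of
        good pixels at column i, stored in the first nvalid[i] lines."""
        for col, n in enumerate(nvalid):
            if n <= 1:
                continue
            column = sorted(lines[j][col] for j in range(n))
            for j in range(n):
                lines[j][col] = column[j]

    lines = [[5, 2], [1, 9], [3, 4]]
    sort_columns(lines, [3, 2])
    print(lines)   # -> [[1, 2], [3, 9], [5, 4]]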
diff --git a/noao/imred/ccdred/src/combine/icstat.gx b/noao/imred/ccdred/src/combine/icstat.gx
new file mode 100644
index 00000000..099ddf5e
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/icstat.gx
@@ -0,0 +1,237 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+define NMAX 10000 # Maximum number of pixels to sample
+
+$for (sr)
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
+
+procedure ic_stat$t (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnl$t()
+PIXEL ic_mode$t()
+$if (datatype == irs)
+real asum$t()
+$endif
+$if (datatype == dl)
+double asum$t()
+$endif
+$if (datatype == x)
+complex asum$t()
+$endif
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_PIXEL)
+ dp = data
+ while (imgnl$t (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Mem$t[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mem$t[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Mem$t[dp] = Mem$t[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Mem$t[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mem$t[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Mem$t[dp] = Mem$t[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrt$t (Mem$t[data], Mem$t[data], n)
+ mode = ic_mode$t (Mem$t[data], n)
+ median = Mem$t[data+n/2-1]
+ }
+ if (domean)
+ mean = asum$t (Mem$t[data], n) / n
+
+ call sfree (sp)
+end
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+PIXEL procedure ic_mode$t (a, n)
+
+PIXEL a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+PIXEL mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+ $if (datatype == sil)
+ zstep = max (1., zstep)
+ zbin = max (1., zbin)
+ $endif
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
+$endfor
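
The IC_MODE comment above describes a binned mode estimate on a sorted array.  A minimal Python sketch of the same idea follows; it is not part of the IRAF sources, the constants simply echo the ZRANGE/ZSTEP/ZBIN/NMIN defines, and the small-sample fallback is the median:

    ZRANGE, ZSTEP, ZBIN, NMIN = 0.8, 0.01, 0.1, 10

    def binned_mode(a):
        # a must be sorted; consider only the central ZRANGE fraction about
        # the median, slide a bin of width ZBIN*(range) in ZSTEP*(range)
        # steps, and return the middle value of the most populated bin.
        n = len(a)
        if n < NMIN:
            return a[n // 2]
        i = int(n * (1.0 - ZRANGE) / 2.0)
        j = int(n * (1.0 + ZRANGE) / 2.0) - 1
        z1, z2 = a[i], a[j]
        if z1 == z2:
            return z1
        step, width = ZSTEP * (z2 - z1), ZBIN * (z2 - z1)
        k, nmax, mode = i, 0, a[(i + j) // 2]
        lo = z1 - step
        while k < j:
            lo += step
            hi = lo + width
            while i < j and a[i] < lo:
                i += 1
            while k < j and a[k] < hi:
                k += 1
            if k - i > nmax:
                nmax, mode = k - i, a[(i + k) // 2]
        return mode

    data = sorted([1.0, 2.0, 2.1, 2.2, 2.3, 2.4, 5.0, 7.0, 9.0, 12.0, 20.0])
    print(binned_mode(data))    # 2.2, the clump near 2
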
diff --git a/noao/imred/ccdred/src/combine/mkpkg b/noao/imred/ccdred/src/combine/mkpkg
new file mode 100644
index 00000000..2c5c0795
--- /dev/null
+++ b/noao/imred/ccdred/src/combine/mkpkg
@@ -0,0 +1,51 @@
+# Make CCDRED Package.
+
+$checkout libpkg.a ../..
+$update libpkg.a
+$checkin libpkg.a ../..
+$exit
+
+generic:
+ $set GEN = "$$generic -k"
+
+ $ifolder (generic/icaclip.x, icaclip.gx)
+ $(GEN) icaclip.gx -o generic/icaclip.x $endif
+ $ifolder (generic/icaverage.x, icaverage.gx)
+ $(GEN) icaverage.gx -o generic/icaverage.x $endif
+ $ifolder (generic/iccclip.x, iccclip.gx)
+ $(GEN) iccclip.gx -o generic/iccclip.x $endif
+ $ifolder (generic/icgdata.x, icgdata.gx)
+ $(GEN) icgdata.gx -o generic/icgdata.x $endif
+ $ifolder (generic/icgrow.x, icgrow.gx)
+ $(GEN) icgrow.gx -o generic/icgrow.x $endif
+ $ifolder (generic/icmedian.x, icmedian.gx)
+ $(GEN) icmedian.gx -o generic/icmedian.x $endif
+ $ifolder (generic/icmm.x, icmm.gx)
+ $(GEN) icmm.gx -o generic/icmm.x $endif
+ $ifolder (generic/icombine.x, icombine.gx)
+ $(GEN) icombine.gx -o generic/icombine.x $endif
+ $ifolder (generic/icpclip.x, icpclip.gx)
+ $(GEN) icpclip.gx -o generic/icpclip.x $endif
+ $ifolder (generic/icsclip.x, icsclip.gx)
+ $(GEN) icsclip.gx -o generic/icsclip.x $endif
+ $ifolder (generic/icsigma.x, icsigma.gx)
+ $(GEN) icsigma.gx -o generic/icsigma.x $endif
+ $ifolder (generic/icsort.x, icsort.gx)
+ $(GEN) icsort.gx -o generic/icsort.x $endif
+ $ifolder (generic/icstat.x, icstat.gx)
+ $(GEN) icstat.gx -o generic/icstat.x $endif
+ ;
+
+libpkg.a:
+ $ifeq (USE_GENERIC, yes) $call generic $endif
+ @generic
+
+ icimstack.x <error.h> <imhdr.h>
+ iclog.x icmask.h icombine.com icombine.h <imhdr.h> <imset.h>\
+ <mach.h>
+ icmask.x icmask.h icombine.com icombine.h icombine.com <imhdr.h>\
+ <pmset.h>
+ icscale.x icombine.com icombine.h <error.h> <imhdr.h> <imset.h>
+ icsection.x <ctype.h>
+ icsetout.x icombine.com <imhdr.h> <mwset.h>
+ ;
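
The generic: rules above regenerate a type-expanded generic/*.x file only when it is older than its *.gx template.  A rough Python equivalent of that check is sketched below; it is illustrative only (the file list is a sample, and running the expansion requires IRAF's generic preprocessor on the path):

    import os
    import subprocess

    templates = ["icaclip.gx", "icmedian.gx", "icsort.gx", "icstat.gx"]

    for gx in templates:
        out = os.path.join("generic", gx[:-3] + ".x")
        # Regenerate only when the output is missing or older than its
        # template, which is what the $ifolder rules express.
        if not os.path.exists(out) or os.path.getmtime(out) < os.path.getmtime(gx):
            subprocess.run(["generic", "-k", gx, "-o", out], check=True)
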
diff --git a/noao/imred/ccdred/src/cor.gx b/noao/imred/ccdred/src/cor.gx
new file mode 100644
index 00000000..189f9437
--- /dev/null
+++ b/noao/imred/ccdred/src/cor.gx
@@ -0,0 +1,362 @@
+include "ccdred.h"
+
+
+.help cor Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+cor -- Process CCD image lines
+
+These procedures are the heart of the CCD processing. They do the desired
+set of processing operations on the image line data as efficiently as
+possible. They are called by the PROC procedures. There are four procedures
+one for each readout axis and one for short and real image data.
+Some sets of operations are coded as single compound operations for efficiency.
+To keep the number of combinations managable only the most common
+combinations are coded as compound operations. The combinations
+consist of any set of line overscan, column overscan, zero level, dark
+count, and flat field and any set of illumination and fringe
+correction. The corrections are applied in place to the output vector.
+
+The column readout procedure is more complicated in order to handle
+zero level and flat field corrections specified as one dimensional
+readout corrections instead of two dimensional calibration images.
+Column readout format is probably extremely rare and the 1D readout
+corrections are used only for special types of data.
+.ih
+SEE ALSO
+proc, ccdred.h
+.endhelp -----------------------------------------------------------------------
+
+$for (sr)
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1$t (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+PIXEL out[n] # Output data
+real overscan # Overscan value
+PIXEL zero[n] # Zero level correction
+PIXEL dark[n] # Dark count correction
+PIXEL flat[n] # Flat field correction
+PIXEL illum[n] # Illumination correction
+PIXEL fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2$t (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+PIXEL out[n] # Output data
+real overscan[n] # Overscan value
+PIXEL zero[n] # Zero level correction
+PIXEL dark[n] # Dark count correction
+PIXEL flat[n] # Flat field correction
+PIXEL illum[n] # Illumination correction
+PIXEL fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+PIXEL zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+$endfor
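
The compound cases above fold the enabled corrections into a single per-pixel expression.  The NumPy sketch below shows the net arithmetic for one image line; it is not part of the IRAF sources, every correction is optional, and the 1D zero/flat handling of the column-readout case is omitted:

    import numpy as np

    def correct_line(out, overscan=None, zero=None, dark=None, flat=None,
                     illum=None, fringe=None, darkscale=1.0, flatscale=1.0,
                     illumscale=1.0, frgscale=1.0):
        out = out.astype(float, copy=True)
        if overscan is not None:
            out -= overscan                  # overscan level
        if zero is not None:
            out -= zero                      # zero (bias) correction
        if dark is not None:
            out -= darkscale * dark          # scaled dark count
        if flat is not None:
            out *= flatscale / flat          # flat field normalization
        if illum is not None:
            out *= illumscale / illum        # illumination correction
        if fringe is not None:
            out -= frgscale * fringe         # fringe subtraction
        return out

    line = np.array([1100.0, 1200.0, 1300.0])
    flat = np.array([0.9, 1.0, 1.1])
    print(correct_line(line, overscan=100.0, flat=flat))
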
diff --git a/noao/imred/ccdred/src/cosmic/cosmicrays.hlp b/noao/imred/ccdred/src/cosmic/cosmicrays.hlp
new file mode 100644
index 00000000..bfb56e9c
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/cosmicrays.hlp
@@ -0,0 +1,338 @@
+.help cosmicrays Dec87 noao.imred.ccdred
+.ih
+NAME
+cosmicrays -- Detect and replace cosmic rays
+.ih
+USAGE
+cosmicrays input output
+.ih
+PARAMETERS
+.ls input
+List of input images in which to detect cosmic rays.
+.le
+.ls output
+List of output images in which the detected cosmic rays will be replaced
+by an average of neighboring pixels. If the output image name differs
+from the input image name then a copy of the input image is made with
+the detected cosmic rays replaced. If no output images are specified
+then the input images are modified in place. In place modification of
+an input image also occurs when the output image name is the same as
+the input image name.
+.le
+.ls badpix = ""
+List of bad pixel files to be created, one for each input image. If no
+file names are given then no bad pixel file is created. The bad pixel
+file is a simple list of pixel coordinates for each replaced cosmic ray.
+This file may be used in conjunction with \fBbadpiximage\fR to create
+a mask image.
+.le
+
+.ls ccdtype = ""
+If specified only the input images of the desired CCD image type will be
+selected.
+.le
+.ls threshold = 25.
+Detection threshold above the mean of the surrounding pixels for cosmic
+rays. The threshold will depend on the noise characteristics of the
+image and how weak the cosmic rays may be for detection. A typical value
+is 5 or more times the sigma of the background.
+.le
+.ls fluxratio = 2.
+The ratio (as a percent) of the mean neighboring pixel flux to the candidate
+cosmic ray pixel for rejection. The value depends on the seeing and the
+characteristics of the cosmic rays. Typical values are in the range
+2 to 10 percent. This value may be reset interactively from a plot
+or defined by identifying selected objects as stars or cosmic rays.
+.le
+.ls npasses = 5
+Number of cosmic ray detection passes. Since only the locally strongest
+pixel is considered a cosmic ray, multiple detection passes are needed to
+detect and replace multiple pixel cosmic ray events.
+.le
+.ls window = 5
+Size of cosmic ray detection window. A square window of either 5 by 5 or
+7 by 7 is used to detect cosmic rays. The smaller window allows detection
+in the presence of greater background gradients but is less sensitive at
+discriminating multiple event cosmic rays from stars. It is also marginally
+faster.
+.le
+.ls interactive = yes
+Examine parameters interactively?  A graph of the mean flux within the
+detection window (x100) vs the flux ratio (x100) is drawn and the user may
+set the flux ratio threshold, delete and undelete specific events, and
+examine specific events. This is useful for new data in which one is
+uncertain of an appropriate flux ratio threshold.  Once determined, the
+task need not be run interactively.
+.le
+.ls train = no
+Define the flux ratio threshold by using a set of objects identified
+as stars (or other astronomical objects) or cosmic rays?
+.le
+.ls objects = ""
+Cursor list of coordinates of training objects. If null (the null string "")
+then the image display cursor will be read. The user is responsible for first
+displaying the image. Otherwise a file containing cursor coordinates
+may be given. The format of the cursor file is "x y wcs key" where
+x and y are the pixel coordinates, wcs is an arbitrary number such as 1,
+and key may be 's' for star or 'c' for cosmic ray.
+.le
+.ls savefile = ""
+File to save (by appending) the training object coordinates. This is of
+use when the objects are identified using the image display cursor. The
+saved file can then be input as the object cursor list for repeating the
+execution.
+.le
+.ls answer
+This parameter is used for interactive queries when processing a list of
+images. The responses may be "no", "yes", "NO", or "YES". The upper case
+responses permanently enable or disable the interactive review while
+the lower case responses allow selective examination of certain input
+images. \fIThis parameter should not be specified on the command line.
+If it is then the value will be ignored and the task will act as if
+the answer "yes" is given for each image; i.e. it will enter the interactive
+phase without prompting.\fR
+.le
+.ih
+OTHER PARAMETERS
+There are other parameters which may be defined by the package, as is the
+case with \fBccdred\fR, or as part of the task, as is the case with
+the standalone version in the \fBgeneric\fR package.
+
+.ls verbose
+If yes then a time stamped log of the operation is printed on the standard
+output.
+.le
+.ls logfile
+If a log file is specified then a time stamped log of the operation is
+recorded.
+.le
+.ls plotfile
+If a plot file is specified then the graph of the flux ratio (x100) vs
+the mean flux (x100) is recorded as metacode. This may be spooled or examined
+later.
+.le
+.ls graphics = "stdgraph"
+Interactive graphic output device for interactive examination of the
+detection parameters.
+.le
+.ls cursor = ""
+Interactive graphics cursor input. If null the graphics display cursor
+is used, otherwise a file containing cursor input may be specified.
+.le
+.ls instrument
+The \fBccdred\fR instrument file is used for mapping header keywords and
+CCD image types.
+.le
+.ih
+IMAGE CURSOR COMMANDS
+
+.nf
+? Help
+c Identify the object as a cosmic ray
+s Identify the object as a star
+g Switch to the graphics plot
+q Quit and continue with the cleaning
+.fi
+
+GRAPHICS CURSOR COMMANDS
+
+.nf
+? Help
+a Toggle between showing all candidates and only the training points
+d Mark candidate for replacement (applies to '+' points)
+q Quit and return to image cursor or replace the selected pixels
+r Redraw the graph
+s Make a surface plot for the candidate nearest the cursor
+t Set the flux ratio threshold at the y cursor position
+u Mark candidate to not be replaced (applies to 'x' points)
+w Adjust the graph window (see \fBgtools\fR)
+<space> Print the pixel coordinates
+.fi
+
+There are no colon commands except those for the windowing options (type
+:/help or see \fBgtools\fR).
+.ih
+DESCRIPTION
+Cosmic ray events in each input image are detected and replaced by the
+average of the four neighbors. The replacement may be performed
+directly on the input image if no output image is specified or if the
+output image name is the same as the input image name. If a new image
+is created it is a copy of the input image except for the replaced
+pixels. The processing keyword CRCOR is added to the output image
+header. Optional output includes a log file to which a processing log
+is appended, a verbose log output to the standard output (the same as
+that in the log file), a plot file showing the parameters of the
+detected cosmic ray candidates and the flux ratio threshold used, a
+bad pixel file containing the coordinates of the replaced pixels, and
+a file of training objects marked with the image display cursor. The
+bad pixel file may be used for plotting purposes or to create a mask
+image for display and analysis using the task \fBbadpiximage\fR. This
+bad pixel file will be replaced by the IRAF bad pixel facility when it
+becomes available.  If one wants more than a simple mask image then,
+by creating a different output image, a difference image between the
+original and the modified image may be made using \fBimarith\fR.
+
+This task may be applied to an image previously processed to detect
+additional cosmic rays. A warning will be given (because of the
+CRCOR header parameter) and the previous processing header keyword will
+be overwritten.
+
+The cosmic ray detection algorithm consists of the following steps.
+First a pixel must be the brightest pixel within the specified
+detection window (either 5x5 or 7x7). The mean flux in the surrounding
+pixels with the second brightest pixel excluded (which may also be a
+cosmic ray event) is computed and the candidate pixel must exceed this
+mean by the amount specified by the parameter \fIthreshold\fR. A plane
+is fit to the border pixels of the window and the fitted background is
+subtracted. The mean flux (now background subtracted) and the ratio of
+this mean to the cosmic ray candidate (the brightest pixel) are
+computed. The mean flux (x100) and the ratio (x100) are recorded for
+interactive examination if desired.
+
+Once the list of cosmic ray candidates has been created and a threshold for
+the flux ratio established (by the parameter \fIfluxratio\fR, by the
+"training" method, or by using the graphics cursor in the interactive plot)
+the pixels with ratios below the threshold are replaced in the output by
+the average of the four neighboring pixels (with the second strongest pixel
+in the detection window excluded if it is one of these pixels).  Additional
+pixels may then be detected and replaced in further passes as specified by
+the parameter \fInpasses\fR. Note that only pixels in the vicinity of
+replaced pixels need be considered in further passes.
+
+The division between the peaks of real objects and cosmic rays is made
+based on the flux ratio between the mean flux (excluding the center
+pixel and the second strongest pixel) and the candidate pixel. This
+threshold depends on the point spread function and the distribution of
+multiple cosmic ray events and any additional neighboring light caused
+by the events. This threshold is not strongly coupled to small changes
+in the data so that once it is set for a new type of image data it may
+be used for similar images. To set it initially one may examine the
+scatter plot of the flux ratio as a function of the mean flux. This
+may be done interactively or from the optional plot file produced.
+
+After the initial list of cosmic ray candidates has been created and before
+the final replacement of the cosmic rays there are two optional steps to allow
+examining the candidates and setting the flux ratio threshold dividing
+cosmic rays from real objects.  The first optional step is to define the flux
+ratio boundary by reference to user specified classifications; that is
+"training". To do this step the \fItrain\fR parameter must be set to yes.
+The user classified objects are specified by a cursor input list. This
+list can be an actual file or the image display cursor as defined by the
+\fIobjects\fR parameter. The \fIsavefile\fR parameter is also used during
+the training to record the objects specified.  The parameter specifies a
+file to which the selected objects are appended.  This is useful when the
+objects are defined with the interactive image cursor and does not make
+much sense when using
+an input list.
+
+If the \fIobjects\fR parameter is specified as a null string then
+the image display cursor will be repeatedly read until a 'q' is
+entered. The user first displays the image and then when the task
+reads the display cursor the cursor shape will change. The user
+points at objects and types 's' for a star (or other astronomical
+object) and 'c' for a cosmic ray. Note that this input is used
+to search for the matching object in the cosmic ray candidate list
+and so it is possible, though unlikely, that the selected object is not
+in the list.  The selection will be quietly ignored in that case.
+To exit the interactive selection of training objects type 'q'.
+
+If 'g' is typed a graph of all the candidates is drawn showing
+"flux" vs. "flux ratio" (see below for more). Training objects will
+be shown with a box and the currently set flux ratio threshold will
+also be shown. Exiting the plot will return to entering more training
+objects. The plot will remain and additional objects will immediately
+be shown with a new box. Thus, if one wants to see the training
+objects identified in the plot as one selects them from the image
+display first type a 'g' to draw the initial plot. Also by switching
+to the plot with 'g' allows you to draw surface plots (with 's') or
+get the pixel coordinates of a candidate (the space key) to be
+found in the display using the coordinate readout of the display.
+Note that the display interaction is simpler than might be desired
+because this task does not directly connect to the display.
+
+The most likely use for training is with the interactive image display.
+However, one may prepare an input list by other means, for example
+with \fBrimcursor\fR, and then specify the file name.  The savefile
+may also be used as cursor input to repeat the cosmic ray operation
+(but be careful not to have the cursor input and save file be the
+same file!).
+
+The flux ratio threshold is determined from the training objects by
+finding the point with the minimum number of misclassifications
+(stars as cosmic rays or cosmic rays as stars). The threshold is
+set at the lowest value so that it will always go through one of
+the cosmic ray objects. There should be at least one of each type
+of object defined for this to work. The following option of
+examining the cosmic ray candidates and parameters may still be
+used to modify the derived flux ratio threshold. One last point
+about the training objects is that even if some of the points
+lie on the wrong side of the threshold they will remain classified
+as cosmic ray or non-cosmic ray. In other words, any object
+classified by the user will remain in that classification regardless
+of the final flux ratio threshold.
+
+After the training step the user will be queried to examine the candidates
+in the flux vs flux ratio plane if the \fIinteractive\fR flag is set.
+Responses may be made for specific images or for all images by using
+lower or upper case answers respectively. When the parameters are
+examined interactively the user may change the flux ratio threshold
+('t' key). Changes made are stored in the parameter file and, thus,
+learned for further images. Pixels to be deleted are marked by crosses
+and pixels which are peaks of objects are marked by pluses. The user
+may explicitly delete or undelete any point if desired but this is only
+for special cases near the threshold. In the future keys for
+interactive display of the specific detections will be added.
+Currently a surface plot of any candidate may be displayed graphically
+in four 90 degree rotated views using the 's' key. Note that the
+initial graph does not show all the points, some of which are clearly
+cosmic rays because they have negative mean flux or flux ratio. To
+view all data one must rewindow the graph with the 'w' key or ":/"
+commands (see \fBgtools\fR).
+.ih
+EXAMPLES
+1. To replace cosmic rays in a set of images ccd* without training:
+
+.nf
+ cl> cosmicrays ccd* new//ccd*
+ ccd001: Examine parameters interactively? (yes):
+ [A scatter plot graph is made. One can adjust the threshold.]
+ [Looking at a few points using the 's' key can be instructive.]
+ [When done type 'q'.]
+ ccd002: Examine parameters interactively? (yes): NO
+ [No further interactive examination is done.]
+.fi
+
+After cleaning one typically displays the images and possibly blinks them.
+A difference image or mask image may also be created.
+
+2. To use the interactive training method for setting the flux ratio threshold:
+
+.nf
+ # First display the image.
+ cl> display ccd001 1
+ z1 = 123.45 z2= 543.21
+ cl> cosmicrays ccd001 ccd001cr train+
+ [After the cosmic ray candidates are found the image display
+ [cursor will be activated. Mark a cosmic ray with 'c' and
+ [a star with 's'. Type 'g' to get a plot showing the two
+ [points with boxes. Type 'q' to go back to the image display.
+ [As each new object is marked a box will appear in the plot and
+ [the threshold may change. To find the location of an object
+ [seen in the plot use 'g' to go to the graph, space key to find
+ [the pixel coordinates, 'q' to go back to the image display,
+ [and the image display coordinate box to find the object.
+ [When done with the training type 'q'.
+ ccd001: Examine parameters interactively? (yes): no
+.fi
+
+3. To create a mask image a bad pixel file must be specified. In the
+following we replace the cosmic rays in place and create a bad pixel
+file and mask image:
+
+.nf
+ cl> cosmicrays ccd001 ccd001 badpix=ccd001.bp
+ cl> badpiximage ccd001.bp ccd001 ccd001bp
+.fi
+.ih
+SEE ALSO
+badpiximage gtools imedit rimcursor
+.endhelp
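
The training step described in the help text picks the flux ratio threshold with the fewest misclassifications, with the cut passing through one of the cosmic-ray points.  The Python sketch below is illustrative only; it is not the task's code, the labels follow the 's'/'c' cursor keys, and it treats a point exactly at the threshold as a cosmic ray, whereas the task's interactive code uses a strict "below threshold" test:

    def train_threshold(ratios, labels):
        # Candidate thresholds are the flux ratios of the labelled cosmic
        # rays, so the chosen cut always passes through one of them.
        candidates = sorted(r for r, l in zip(ratios, labels) if l == 'c')
        best_t, best_err = candidates[0], len(ratios) + 1
        for t in candidates:
            err = sum(1 for r, l in zip(ratios, labels)
                      if (l == 'c' and r > t) or (l == 's' and r <= t))
            if err < best_err:
                best_t, best_err = t, err
        return best_t

    ratios = [2.0, 3.5, 4.0, 9.0, 12.0, 15.0]
    labels = ['c', 'c', 'c', 's', 's', 's']
    print(train_threshold(ratios, labels))    # 4.0
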
diff --git a/noao/imred/ccdred/src/cosmic/crexamine.x b/noao/imred/ccdred/src/cosmic/crexamine.x
new file mode 100644
index 00000000..d84961bc
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/crexamine.x
@@ -0,0 +1,486 @@
+include <error.h>
+include <syserr.h>
+include <imhdr.h>
+include <gset.h>
+include <mach.h>
+include <pkg/gtools.h>
+include "crlist.h"
+
+# CR_EXAMINE -- Examine cosmic ray candidates interactively.
+# CR_GRAPH -- Make a graph
+# CR_NEAREST -- Find the nearest cosmic ray to the cursor.
+# CR_DELETE -- Set replace flag for cosmic ray candidate nearest cursor.
+# CR_UNDELETE -- Set no replace flag for cosmic ray candidate nearest cursor.
+# CR_UPDATE -- Change replacement flags, thresholds, and graphs.
+# CR_PLOT -- Make log plot
+
+define HELP "noao$lib/scr/cosmicrays.key"
+define PROMPT "cosmic ray options"
+
+# CR_EXAMINE -- Examine cosmic ray candidates interactively.
+
+procedure cr_examine (cr, gp, gt, im, fluxratio, first)
+
+pointer cr # Cosmic ray list
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer im # Image pointer
+real fluxratio # Flux ratio threshold
+int first # Initial key
+
+char cmd[SZ_LINE]
+int i, newgraph, wcs, key, nc, nl, c1, c2, l1, l2, show
+real wx, wy
+pointer data
+
+int clgcur()
+pointer imgs2r()
+
+begin
+ # Set up the graphics.
+ call gt_sets (gt, GTPARAMS, IM_TITLE(im))
+
+ # Set image limits
+ nc = IM_LEN(im, 1)
+ nl = IM_LEN(im, 2)
+
+ # Enter cursor loop.
+ key = first
+ repeat {
+ switch (key) {
+ case '?': # Print help text.
+ call gpagefile (gp, HELP, PROMPT)
+ case ':': # Colon commands.
+ switch (cmd[1]) {
+ case '/':
+ call gt_colon (cmd, gp, gt, newgraph)
+ default:
+ call printf ("\007")
+ }
+ case 'a': # Toggle show all
+ if (show == 0)
+ show = 1
+ else
+ show = 0
+ newgraph = YES
+ case 'd': # Delete candidate
+ call cr_delete (gp, wx, wy, cr, i, show)
+ case 'q': # Quit
+ break
+ case 'r': # Redraw the graph.
+ newgraph = YES
+ case 's': # Make surface plots
+ call cr_nearest (gp, wx, wy, cr, i, show)
+ c1 = max (1, int (Memr[CR_COL(cr)+i-1]) - 5)
+ c2 = min (nc, int (Memr[CR_COL(cr)+i-1]) + 5)
+ l1 = max (1, int (Memr[CR_LINE(cr)+i-1]) - 5)
+ l2 = min (nl, int (Memr[CR_LINE(cr)+i-1]) + 5)
+ data = imgs2r (im, c1, c2, l1, l2)
+ call gclear (gp)
+ call gsview (gp, 0.03, 0.48, 0.53, 0.98)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, -33., 25.)
+ call gsview (gp, 0.53, 0.98, 0.53, 0.98)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, -123., 25.)
+ call gsview (gp, 0.03, 0.48, 0.03, 0.48)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, 57., 25.)
+ call gsview (gp, 0.53, 0.98, 0.03, 0.48)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, 147., 25.)
+ call fprintf (STDERR, "[Type any key to continue]")
+ i = clgcur ("cursor", wx, wy, wcs, key, cmd, SZ_LINE)
+ newgraph = YES
+ case 't': # Set threshold
+ call cr_update (gp, wy, cr, fluxratio, show)
+ call clputr ("fluxratio", fluxratio)
+ case 'u': # Undelete candidate
+ call cr_undelete (gp, wx, wy, cr, i, show)
+ case 'w':# Window the graph.
+ call gt_window (gt, gp, "cursor", newgraph)
+ case ' ': # Print info
+ call cr_nearest (gp, wx, wy, cr, i, show)
+ call printf ("%d %d\n")
+ call pargr (Memr[CR_COL(cr)+i-1])
+ call pargr (Memr[CR_LINE(cr)+i-1])
+ case 'z': # NOP
+ newgraph = NO
+ default: # Ring bell for unrecognized commands.
+ call printf ("\007")
+ }
+
+ # Update the graph if needed.
+ if (newgraph == YES) {
+ call cr_graph (gp, gt, cr, fluxratio, show)
+ newgraph = NO
+ }
+ } until (clgcur ("cursor", wx, wy, wcs, key, cmd, SZ_LINE) == EOF)
+end
+
+
+# CR_GRAPH -- Make a graph
+
+procedure cr_graph (gp, gt, cr, fluxratio, show)
+
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointers
+pointer cr # Cosmic ray list
+real fluxratio # Flux ratio threshold
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x1, x2, y1, y2
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ call gclear (gp)
+ call gt_ascale (gp, gt, Memr[x+1], Memr[y+1], ncr)
+ call gt_swind (gp, gt)
+ call gt_labax (gp, gt)
+
+ do i = 1, ncr {
+ if ((Memi[flag+i] == NO) || (Memi[flag+i] == ALWAYSNO))
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_PLUS, 2., 2.)
+ else
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_CROSS, 2., 2.)
+ if (Memr[w+i] != 0.)
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_BOX, 2., 2.)
+ }
+
+ call ggwind (gp, x1, x2, y1, y2)
+ call gseti (gp, G_PLTYPE, 2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+
+ call sfree (sp)
+end
+
+
+# CR_NEAREST -- Find the nearest cosmic ray to the cursor.
+
+procedure cr_nearest (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+
+ # Move the cursor to the selected point.
+ call gscur (gp, x2, y2)
+
+ call sfree (sp)
+end
+
+
+# CR_DELETE -- Set replace flag for cosmic ray candidate nearest cursor.
+
+procedure cr_delete (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ nearest = 0
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == YES) || (Memi[flag+i] == ALWAYSYES))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+
+ # Move the cursor to the selected point and mark the deleted point.
+ if (nearest > 0) {
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+ Memi[CR_FLAG(cr)+nearest-1] = ALWAYSYES
+ Memi[CR_WT(cr)+nearest-1] = -1
+ call gscur (gp, x2, y2)
+ call gseti (gp, G_PMLTYPE, 0)
+ y2 = Memr[CR_RATIO(cr)+nearest-1]
+ call gmark (gp, x2, y2, GM_PLUS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x2, y2, GM_CROSS, 2., 2.)
+ }
+
+ call sfree (sp)
+end
+
+
+# CR_UNDELETE -- Set no replace flag for cosmic ray candidate nearest cursor.
+
+procedure cr_undelete (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ nearest = 0
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == NO) || (Memi[flag+i] == ALWAYSNO))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+
+ # Move the cursor to the selected point and mark the delete point.
+ if (nearest > 0) {
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+ Memi[CR_FLAG(cr)+nearest-1] = ALWAYSNO
+ Memi[CR_WT(cr)+nearest-1] = 1
+ call gscur (gp, x2, y2)
+
+ call gseti (gp, G_PMLTYPE, 0)
+ y2 = Memr[CR_RATIO(cr)+nearest-1]
+ call gmark (gp, x2, y2, GM_CROSS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x2, y2, GM_PLUS, 2., 2.)
+ }
+
+ call sfree (sp)
+end
+
+
+# CR_UPDATE -- Change replacement flags, thresholds, and graphs.
+
+procedure cr_update (gp, wy, cr, fluxratio, show)
+
+pointer gp # GIO pointer
+real wy # Y cursor position
+pointer cr # Cosmic ray list
+real fluxratio # Flux ratio threshold
+int show # Show (0=all, 1=train)
+
+int i, ncr, flag
+real x1, x2, y1, y2
+pointer x, y, f
+
+begin
+ call gseti (gp, G_PLTYPE, 0)
+ call ggwind (gp, x1, x2, y1, y2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+ fluxratio = wy
+ call gseti (gp, G_PLTYPE, 2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+
+ if (show == 1)
+ return
+
+ ncr = CR_NCR(cr)
+ x = CR_FLUX(cr) - 1
+ y = CR_RATIO(cr) - 1
+ f = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ flag = Memi[f+i]
+ if ((flag == ALWAYSYES) || (flag == ALWAYSNO))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ if (flag == NO) {
+ if (y1 < fluxratio) {
+ Memi[f+i] = YES
+ call gseti (gp, G_PMLTYPE, 0)
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ }
+ } else {
+ if (y1 >= fluxratio) {
+ Memi[f+i] = NO
+ call gseti (gp, G_PMLTYPE, 0)
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ }
+ }
+ }
+end
+
+
+# CR_PLOT -- Make log plot
+
+procedure cr_plot (cr, im, fluxratio)
+
+pointer cr # Cosmic ray list
+pointer im # Image pointer
+real fluxratio # Flux ratio threshold
+
+int fd, open(), errcode()
+pointer sp, fname, gp, gt, gopen(), gt_init()
+errchk gopen
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+
+ # Open the plotfile.
+ call clgstr ("plotfile", Memc[fname], SZ_FNAME)
+ iferr (fd = open (Memc[fname], APPEND, BINARY_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ # Set up the graphics.
+ gp = gopen ("stdplot", NEW_FILE, fd)
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "mark")
+ call gt_sets (gt, GTXTRAN, "log")
+ call gt_setr (gt, GTXMIN, 10.)
+ call gt_setr (gt, GTYMIN, 0.)
+ call gt_sets (gt, GTTITLE, "Parameters of cosmic ray candidates")
+ call gt_sets (gt, GTPARAMS, IM_TITLE(im))
+ call gt_sets (gt, GTXLABEL, "Flux")
+ call gt_sets (gt, GTYLABEL, "Flux Ratio")
+
+ call cr_graph (gp, gt, cr, fluxratio, 'r')
+
+ call gt_free (gt)
+ call gclose (gp)
+ call close (fd)
+ call sfree (sp)
+end
+
+
+# CR_SHOW -- Select data to show.
+# This returns pointers to the data. Note the pointers are salloc from
+# the last smark which is done by the calling program.
+
+procedure cr_show (show, cr, x, y, w, flag, index, ncr)
+
+int show #I Data to show (0=all, 1=train)
+pointer cr #I CR data
+pointer x #O Fluxes
+pointer y #O Ratios
+pointer w #O Weights
+pointer flag #O Flags
+pointer index #O Index into CR data (if not null)
+int ncr #O Number of selected data points
+
+int i
+
+begin
+ switch (show) {
+ case 0:
+ ncr = CR_NCR(cr)
+ x = CR_FLUX(cr) - 1
+ y = CR_RATIO(cr) - 1
+ w = CR_WT(cr) - 1
+ flag = CR_FLAG(cr) - 1
+ index = NULL
+ case 1:
+ ncr = CR_NCR(cr)
+ call salloc (x, ncr, TY_REAL)
+ call salloc (y, ncr, TY_REAL)
+ call salloc (w, ncr, TY_REAL)
+ call salloc (flag, ncr, TY_INT)
+ call salloc (index, ncr, TY_INT)
+
+ ncr = 0
+ x = x - 1
+ y = y - 1
+ w = w - 1
+ flag = flag - 1
+ index = index - 1
+
+ do i = 1, CR_NCR(cr) {
+ if (Memr[CR_WT(cr)+i-1] == 0.)
+ next
+ ncr = ncr + 1
+ Memr[x+ncr] = Memr[CR_FLUX(cr)+i-1]
+ Memr[y+ncr] = Memr[CR_RATIO(cr)+i-1]
+ Memr[w+ncr] = Memr[CR_WT(cr)+i-1]
+ Memi[flag+ncr] = Memi[CR_FLAG(cr)+i-1]
+ Memi[index+ncr] = i
+ }
+ }
+end
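
The cursor routines above pick the candidate nearest the cursor after transforming both to normalised device coordinates, so the choice follows the apparent separation on the (possibly log-scaled) plot rather than the separation in data units.  A small illustrative Python sketch, with a made-up to_ndc mapping standing in for gctran:

    import math

    def nearest(points, cursor, to_ndc):
        cx, cy = to_ndc(*cursor)
        best, best_r2 = None, float("inf")
        for i, (x, y) in enumerate(points):
            px, py = to_ndc(x, y)
            r2 = (px - cx) ** 2 + (py - cy) ** 2   # squared NDC distance
            if r2 < best_r2:
                best, best_r2 = i, r2
        return best

    # Hypothetical mapping for a log-x plot, for the example only.
    to_ndc = lambda x, y: (math.log10(max(x, 1e-3)) / 4.0, y / 100.0)
    pts = [(150.0, 12.0), (3000.0, 40.0), (90.0, 5.0)]
    print(nearest(pts, (200.0, 10.0), to_ndc))    # 0
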
diff --git a/noao/imred/ccdred/src/cosmic/crfind.x b/noao/imred/ccdred/src/cosmic/crfind.x
new file mode 100644
index 00000000..58850940
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/crfind.x
@@ -0,0 +1,305 @@
+include <math/gsurfit.h>
+
+# CR_FIND -- Find cosmic ray candidates.
+# This procedure is an interface to special procedures specific to a given
+# window size.
+
+procedure cr_find (cr, threshold, data, nc, nl, col, line,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+pointer data[ARB] # Data lines
+int nc # Number of columns
+int nl # Number of lines
+int col # First column
+int line # Center line
+pointer sf1, sf2 # Surface fitting
+real x[ARB], y[ARB], z[ARB], w[ARB] # Surface arrays
+
+pointer a, b, c, d, e, f, g
+
+begin
+ switch (nl) {
+ case 5:
+ a = data[1]
+ b = data[2]
+ c = data[3]
+ d = data[4]
+ e = data[5]
+ call cr_find5 (cr, threshold, col, line, Memr[a], Memr[b],
+ Memr[c], Memr[d], Memr[e], nc, sf1, sf2, x, y, z, w)
+ case 7:
+ a = data[1]
+ b = data[2]
+ c = data[3]
+ d = data[4]
+ e = data[5]
+ f = data[6]
+ g = data[7]
+ call cr_find7 (cr, threshold, col, line, Memr[a], Memr[b],
+ Memr[c], Memr[d], Memr[e], Memr[f], Memr[g], nc,
+ sf1, sf2, x, y, z, w)
+ }
+end
+
+
+# CR_FIND7 -- Find cosmic ray candidates in a 7x7 window.
+# This routine finds cosmic ray candidates with the following algorithm.
+# 1. If the pixel is not a local maximum relative to its 48 neighbors
+# go on to the next pixel.
+# 2. Identify the next strongest pixel in the 7x7 region.
+# This suspect pixel is excluded in the following.
+# 3. Compute the flux of the 7x7 region excluding the cosmic ray
+# candidate and the suspect pixel.
+# 4. The candidate must exceed the average flux per pixel by a specified
+# threshold. If not go on to the next pixel.
+# 5. Fit a plane to the border pixels (excluding the suspect pixel).
+# 6. Subtract the background defined by the plane.
+# 7. Determine a replacement value as the average of the four adjacent
+# pixels (excluding the suspect pixel if it is one of them).
+# 8. Add the pixel to the cosmic ray candidate list.
+
+procedure cr_find7 (cr, threshold, col, line, a, b, c, d, e, f, g, n,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+int col # First column
+int line # Line
+real a[ARB], b[ARB], c[ARB], d[ARB] # Image lines
+real e[ARB], f[ARB], g[ARB] # Image lines
+int n # Number of columns
+pointer sf1, sf2 # Surface fitting
+real x[49], y[49], z[49], w[49] # Surface arrays
+
+real bkgd[49]
+int i1, i2, i3, i4, i5, i6, i7, j, j1, j2
+real p, flux, replace, asumr()
+pointer sf
+
+begin
+ for (i4=4; i4<=n-3; i4=i4+1) {
+ # Must be local maxima.
+ p = d[i4]
+ if (p<a[i4]||p<b[i4]||p<c[i4]||p<e[i4]||p<f[i4]||p<g[i4])
+ next
+ i1 = i4 - 3
+ if (p<a[i1]||p<b[i1]||p<c[i1]||p<d[i1]||p<e[i1]||p<f[i1]||p<g[i1])
+ next
+ i2 = i4 - 2
+ if (p<a[i2]||p<b[i2]||p<c[i2]||p<d[i2]||p<e[i2]||p<f[i2]||p<g[i2])
+ next
+ i3 = i4 - 1
+ if (p<a[i3]||p<b[i3]||p<c[i3]||p<d[i3]||p<e[i3]||p<f[i3]||p<g[i3])
+ next
+ i5 = i4 + 1
+ if (p<a[i5]||p<b[i5]||p<c[i5]||p<d[i5]||p<e[i5]||p<f[i5]||p<g[i5])
+ next
+ i6 = i4 + 2
+ if (p<a[i6]||p<b[i6]||p<c[i6]||p<d[i6]||p<e[i6]||p<f[i6]||p<g[i6])
+ next
+ i7 = i4 + 3
+ if (p<a[i7]||p<b[i7]||p<c[i7]||p<d[i7]||p<e[i7]||p<f[i7]||p<g[i7])
+ next
+
+ # Convert to a single array in surface fitting order.
+ call amovr (a[i1], z[1], 7)
+ z[8] = b[i7]; z[9] = c[i7]; z[10] = d[i7]; z[11] = e[i7]
+ z[12] = f[i7]; z[13] = g[i7]; z[14] = g[i6]; z[15] = g[i5]
+ z[16] = f[i4]; z[17] = g[i3]; z[18] = g[i2]; z[19] = g[i1]
+ z[20] = f[i1]; z[21] = e[i1]; z[22] = d[i1]; z[23] = c[i1]
+ z[24] = b[i1]
+ call amovr (b[i2], z[25], 5)
+ call amovr (c[i2], z[30], 5)
+ call amovr (d[i2], z[35], 5)
+ call amovr (e[i2], z[40], 5)
+ call amovr (f[i2], z[45], 5)
+
+ # Find the highest point excluding the center.
+ j1 = 37; j2 = 1
+ do j = 2, 49 {
+ if (j == j1)
+ next
+ if (z[j] > z[j2])
+ j2 = j
+ }
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 49) - z[j1] - z[j2]) / 47
+
+ # Pixel must exceed the specified threshold.
+ if (p < flux + threshold)
+ next
+
+ # Fit and subtract the background.
+ if (j2 < 25) {
+ w[j2] = 0
+ sf = sf2
+ call gsfit (sf, x, y, z, w, 24, WTS_USER, j)
+ w[j2] = 1
+ } else {
+ sf = sf1
+ call gsrefit (sf, x, y, z, w, j)
+ }
+
+ call gsvector (sf, x, y, bkgd, 49)
+ call asubr (z, bkgd, z, 49)
+ p = z[j1]
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 49) - z[j1] - z[j2]) / 47
+
+ # Determine the replacement value from the four nearest neighbors,
+ # again excluding the most deviant pixel.
+ replace = 0
+ j = 0
+ if (j2 != 32) {
+ replace = replace + c[i4]
+ j = j + 1
+ }
+ if (j2 != 36) {
+ replace = replace + d[i3]
+ j = j + 1
+ }
+ if (j2 != 38) {
+ replace = replace + d[i5]
+ j = j + 1
+ }
+ if (j2 != 42) {
+ replace = replace + e[i4]
+ j = j + 1
+ }
+ replace = replace / j
+
+ # Add pixel to cosmic ray list.
+ flux = 100. * flux
+ call cr_add (cr, col+i4-1, line, flux, flux/p, 0., replace, 0)
+ i4 = i7
+ }
+end
+
+
+# CR_FIND5 -- Find cosmic ray candidates in a 5x5 window.
+# This routine finds cosmic ray candidates with the following algorithm.
+# 1. If the pixel is not a local maximum relative to its 24 neighbors
+# go on to the next pixel.
+# 2. Identify the next strongest pixel in the 5x5 region.
+# This suspect pixel is excluded in the following.
+# 3. Compute the flux of the 5x5 region excluding the cosmic ray
+# candidate and the suspect pixel.
+# 4. The candidate must exceed the average flux per pixel by a specified
+# threshold. If not go on to the next pixel.
+# 5. Fit a plane to the border pixels (excluding the suspect pixel).
+# 6. Subtract the background defined by the plane.
+# 7. Determine a replacement value as the average of the four adjacent
+# pixels (excluding the suspect pixel if it is one of them).
+# 8. Add the pixel to the cosmic ray candidate list.
+
+procedure cr_find5 (cr, threshold, col, line, a, b, c, d, e, n,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+int col # First column
+int line # Line
+real a[ARB], b[ARB], c[ARB], d[ARB], e[ARB] # Image lines
+int n # Number of columns
+pointer sf1, sf2 # Surface fitting
+real x[25], y[25], z[25], w[25] # Surface arrays
+
+real bkgd[25]
+int i1, i2, i3, i4, i5, j, j1, j2
+real p, flux, replace, asumr()
+pointer sf
+
+begin
+ for (i3=3; i3<=n-2; i3=i3+1) {
+ # Must be a local maximum.
+ p = c[i3]
+ if (p<a[i3]||p<b[i3]||p<d[i3]||p<e[i3])
+ next
+ i1 = i3 - 2
+ if (p<a[i1]||p<b[i1]||p<c[i1]||p<d[i1]||p<e[i1])
+ next
+ i2 = i3 - 1
+ if (p<a[i2]||p<b[i2]||p<c[i2]||p<d[i2]||p<e[i2])
+ next
+ i4 = i3 + 1
+ if (p<a[i4]||p<b[i4]||p<c[i4]||p<d[i4]||p<e[i4])
+ next
+ i5 = i3 + 2
+ if (p<a[i5]||p<b[i5]||p<c[i5]||p<d[i5]||p<e[i5])
+ next
+
+ # Convert to a single array in surface fitting order.
+ call amovr (a[i1], z[1], 5)
+ z[6] = b[i5]; z[7] = c[i5]; z[8] = d[i5]; z[9] = e[i5]
+ z[10] = e[i4]; z[11] = e[i3]; z[12] = e[i2]; z[13] = e[i1]
+ z[14] = d[i1]; z[15] = c[i1]; z[16] = b[i1]
+ call amovr (b[i2], z[17], 3)
+ call amovr (c[i2], z[20], 3)
+ call amovr (d[i2], z[23], 3)
+
+ # Find the highest point excluding the center.
+ j1 = 21; j2 = 1
+ do j = 2, 25 {
+ if (j == j1)
+ next
+ if (z[j] > z[j2])
+ j2 = j
+ }
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 25) - z[j1] - z[j2]) / 23
+
+ # Pixel must exceed the specified threshold.
+ if (p < flux + threshold)
+ next
+
+ # Fit and subtract the background.
+ if (j2 < 17) {
+ w[j2] = 0
+ sf = sf2
+ call gsfit (sf, x, y, z, w, 16, WTS_USER, j)
+ w[j2] = 1
+ } else {
+ sf = sf1
+ call gsrefit (sf, x, y, z, w, j)
+ }
+
+ call gsvector (sf, x, y, bkgd, 25)
+ call asubr (z, bkgd, z, 25)
+ p = z[j1]
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 25) - z[j1] - z[j2]) / 23
+
+ # Determine the replacement value from the four nearest neighbors,
+ # again excluding the most deviant pixel.
+ replace = 0
+ j = 0
+ if (j2 != 18) {
+ replace = replace + b[i3]
+ j = j + 1
+ }
+ if (j2 != 20) {
+ replace = replace + c[i2]
+ j = j + 1
+ }
+ if (j2 != 22) {
+ replace = replace + c[i4]
+ j = j + 1
+ }
+ if (j2 != 24) {
+ replace = replace + d[i3]
+ j = j + 1
+ }
+ replace = replace / j
+
+ # Add pixel to cosmic ray list.
+ flux = 100. * flux
+ call cr_add (cr, col+i3-1, line, flux, flux/p, 0., replace, 0)
+ i3 = i5
+ }
+end
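
The candidate test coded above can be summarised without the background plane fit and the x100 scaling used in the task: the pixel must be the maximum of its window and must exceed the mean of the remaining pixels (second-brightest excluded) by the detection threshold, and the quantity later cut on is the ratio of that mean to the peak.  A minimal NumPy sketch of the 5x5 case, illustrative only and not the task's code:

    import numpy as np

    def cr_candidate(img, row, col, threshold):
        win = img[row - 2:row + 3, col - 2:col + 3].astype(float)
        p = win[2, 2]
        if p < win.max():
            return None                       # not the local maximum
        flat = np.delete(win.ravel(), 12)     # drop the centre pixel
        second = flat.max()                   # possible second cosmic ray event
        mean = (flat.sum() - second) / (flat.size - 1)
        if p < mean + threshold:
            return None                       # not strong enough
        return mean / p                       # small ratio => sharp spike

    img = np.full((7, 7), 100.0)
    img[3, 3] = 600.0                         # single hot pixel
    print(cr_candidate(img, 3, 3, threshold=25.0))    # ~0.167, cosmic-ray-like
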
diff --git a/noao/imred/ccdred/src/cosmic/crlist.h b/noao/imred/ccdred/src/cosmic/crlist.h
new file mode 100644
index 00000000..1ed498a7
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/crlist.h
@@ -0,0 +1,17 @@
+define CR_ALLOC 100 # Allocation block size
+define CR_LENSTRUCT 9 # Length of structure
+
+define CR_NCR Memi[$1] # Number of cosmic rays
+define CR_NALLOC Memi[$1+1] # Length of cosmic ray list
+define CR_COL Memi[$1+2] # Pointer to columns
+define CR_LINE Memi[$1+3] # Pointer to lines
+define CR_FLUX Memi[$1+4] # Pointer to fluxes
+define CR_RATIO Memi[$1+5] # Pointer to flux ratios
+define CR_WT Memi[$1+6] # Pointer to training weights
+define CR_REPLACE Memi[$1+7] # Pointer to replacement values
+define CR_FLAG Memi[$1+8] # Pointer to rejection flag
+
+define ALWAYSNO 3
+define ALWAYSYES 4
+
+define CR_RMAX 3. # Maximum radius for matching
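
The header above lays out the cosmic ray list as a set of parallel arrays addressed through Mem offsets and grown in CR_ALLOC-sized blocks (see cr_open/cr_add in crlist.x below).  As a rough illustration only, the same record layout in Python, where list growth hides the explicit reallocation:

    class CRList:
        # Parallel-array cosmic ray list: one slot per candidate.
        def __init__(self):
            self.col, self.line = [], []            # positions
            self.flux, self.ratio = [], []          # flux and flux ratio
            self.wt, self.replace, self.flag = [], [], []

        def add(self, col, line, flux, ratio, wt, replace, flag):
            self.col.append(col);   self.line.append(line)
            self.flux.append(flux); self.ratio.append(ratio)
            self.wt.append(wt);     self.replace.append(replace)
            self.flag.append(flag)

        def __len__(self):
            return len(self.col)

    crs = CRList()
    crs.add(101, 55, 1234.0, 8.2, 0.0, 110.5, 0)
    print(len(crs), crs.ratio[0])    # 1 8.2
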
diff --git a/noao/imred/ccdred/src/cosmic/crlist.x b/noao/imred/ccdred/src/cosmic/crlist.x
new file mode 100644
index 00000000..e0a8fd5c
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/crlist.x
@@ -0,0 +1,366 @@
+include <error.h>
+include <syserr.h>
+include <gset.h>
+include "crlist.h"
+
+define HELP "noao$lib/scr/cosmicrays.key"
+define PROMPT "cosmic ray options"
+
+# CR_OPEN -- Open cosmic ray list
+# CR_CLOSE -- Close cosmic ray list
+# CR_ADD -- Add a cosmic ray candidate to cosmic ray list.
+# CR_TRAIN -- Set flux ratio threshold from a training set.
+# CR_FINDTHRESH -- Find flux ratio.
+# CR_WEIGHT -- Compute the training weight at a particular flux ratio.
+# CR_FLAGS -- Set cosmic ray reject flags.
+# CR_BADPIX -- Store cosmic rays in bad pixel list.
+# CR_REPLACE -- Replace cosmic rays in image with replacement values.
+
+# CR_OPEN -- Open cosmic ray list
+
+procedure cr_open (cr)
+
+pointer cr # Cosmic ray list pointer
+errchk malloc
+
+begin
+ call malloc (cr, CR_LENSTRUCT, TY_STRUCT)
+ call malloc (CR_COL(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_LINE(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_FLUX(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_RATIO(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_WT(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_REPLACE(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_FLAG(cr), CR_ALLOC, TY_INT)
+ CR_NCR(cr) = 0
+ CR_NALLOC(cr) = CR_ALLOC
+end
+
+
+# CR_CLOSE -- Close cosmic ray list
+
+procedure cr_close (cr)
+
+pointer cr # Cosmic ray list pointer
+
+begin
+ call mfree (CR_COL(cr), TY_REAL)
+ call mfree (CR_LINE(cr), TY_REAL)
+ call mfree (CR_FLUX(cr), TY_REAL)
+ call mfree (CR_RATIO(cr), TY_REAL)
+ call mfree (CR_WT(cr), TY_REAL)
+ call mfree (CR_REPLACE(cr), TY_REAL)
+ call mfree (CR_FLAG(cr), TY_INT)
+ call mfree (cr, TY_STRUCT)
+end
+
+# CR_ADD -- Add a cosmic ray candidate to cosmic ray list.
+
+procedure cr_add (cr, col, line, flux, ratio, wt, replace, flag)
+
+pointer cr # Cosmic ray list pointer
+int col # Column
+int line # Line
+real flux # Flux
+real ratio # Ratio
+real wt # Weight
+real replace # Replacement value
+int flag # Flag value
+
+int ncr
+errchk realloc
+
+begin
+ if (CR_NCR(cr) == CR_NALLOC(cr)) {
+ CR_NALLOC(cr) = CR_NALLOC(cr) + CR_ALLOC
+ call realloc (CR_COL(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_LINE(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_FLUX(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_RATIO(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_WT(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_REPLACE(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_FLAG(cr), CR_NALLOC(cr), TY_INT)
+ }
+
+ ncr = CR_NCR(cr)
+ CR_NCR(cr) = ncr + 1
+ Memr[CR_COL(cr)+ncr] = col
+ Memr[CR_LINE(cr)+ncr] = line
+ Memr[CR_FLUX(cr)+ncr] = flux
+ Memr[CR_RATIO(cr)+ncr] = ratio
+ Memr[CR_WT(cr)+ncr] = wt
+ Memr[CR_REPLACE(cr)+ncr] = replace
+ Memi[CR_FLAG(cr)+ncr] = flag
+end
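
CR_ADD appends a candidate and, when the list is full, first grows all of the parallel arrays by another CR_ALLOC entries. A minimal C sketch of the same block-growth pattern, reusing the hypothetical crlist_t above (only one realloc is spelled out):

    #include <stdlib.h>

    #define CR_ALLOC 100                /* allocation block size */

    /* Append one candidate, growing the parallel arrays in CR_ALLOC
     * blocks.  Sketch only; returns -1 if the allocation fails. */
    static int cr_add_c (crlist_t *cr, float col, float line, float flux,
                         float ratio, float wt, float replace, int flag)
    {
        if (cr->ncr == cr->nalloc) {
            int nalloc = cr->nalloc + CR_ALLOC;
            float *p = realloc (cr->col, nalloc * sizeof (*p));
            if (p == NULL)
                return -1;
            cr->col = p;
            /* ... repeat for line, flux, ratio, wt, replace, flag ... */
            cr->nalloc = nalloc;
        }
        cr->col[cr->ncr]     = col;
        cr->line[cr->ncr]    = line;
        cr->flux[cr->ncr]    = flux;
        cr->ratio[cr->ncr]   = ratio;
        cr->wt[cr->ncr]      = wt;
        cr->replace[cr->ncr] = replace;
        cr->flag[cr->ncr]    = flag;
        cr->ncr++;
        return 0;
    }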
+
+
+# CR_TRAIN -- Set flux ratio threshold from a training set.
+
+procedure cr_train (cr, gp, gt, im, fluxratio, fname)
+
+pointer cr #I Cosmic ray list
+pointer gp #I GIO pointer
+pointer gt #I GTOOLS pointer
+pointer im #I IMIO pointer
+real fluxratio #O Flux ratio threshold
+char fname[ARB] #I Save file name
+
+char cmd[10]
+bool gflag
+real x, y, y1, y2, w, r, rmin
+int i, j, n, f, ncr, wcs, key, fd, clgcur(), open(), errcode()
+pointer col, line, ratio, flux, wt, flag
+
+begin
+ # Open save file
+ iferr (fd = open (fname, APPEND, TEXT_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ fd = 0
+ }
+
+ ncr = CR_NCR(cr)
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ flux = CR_FLUX(cr) - 1
+ ratio = CR_RATIO(cr) - 1
+ wt = CR_WT(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ gflag = false
+ n = 0
+ while (clgcur ("objects", x, y, wcs, key, cmd, 10) != EOF) {
+ switch (key) {
+ case '?':
+ call gpagefile (gp, HELP, PROMPT)
+ next
+ case 'q':
+ break
+ case 's':
+ w = 1
+ f = ALWAYSNO
+ case 'c':
+ w = -1
+ f = ALWAYSYES
+ case 'g':
+ if (gflag)
+ call cr_examine (cr, gp, gt, im, fluxratio, 'z')
+ else {
+ if (n > 1)
+ call cr_findthresh (cr, fluxratio)
+ call cr_flags (cr, fluxratio)
+ call cr_examine (cr, gp, gt, im, fluxratio, 'r')
+ gflag = true
+ }
+ next
+ default:
+ next
+ }
+
+ y1 = y - CR_RMAX
+ y2 = y + CR_RMAX
+ for (i=10; i<ncr && y1>Memr[line+i]; i=i+10)
+ ;
+ j = i - 9
+ rmin = (Memr[col+j] - x) ** 2 + (Memr[line+j] - y) ** 2
+ for (i=j+1; i<ncr && y2>Memr[line+i]; i=i+1) {
+ r = (Memr[col+i] - x) ** 2 + (Memr[line+i] - y) ** 2
+ if (r < rmin) {
+ rmin = r
+ j = i
+ }
+ }
+ if (sqrt (rmin) > CR_RMAX)
+ next
+
+ Memr[wt+j] = w
+ Memi[flag+j] = f
+ n = n + 1
+
+ if (gflag) {
+ if (n > 1) {
+ call cr_findthresh (cr, r)
+ call cr_update (gp, r, cr, fluxratio, 0)
+ }
+ call gmark (gp, Memr[flux+j], Memr[ratio+j], GM_BOX, 2., 2.)
+ }
+ if (fd > 0) {
+ call fprintf (fd, "%g %g %d %c\n")
+ call pargr (x)
+ call pargr (y)
+ call pargi (wcs)
+ call pargi (key)
+ }
+ }
+
+ if (fd > 0)
+ call close (fd)
+end
+
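To match a training cursor position to a candidate, CR_TRAIN scans only the entries whose line lies within CR_RMAX of the cursor (candidates are accumulated line by line, so the list is ordered by line), keeps the smallest squared distance, and ignores the mark if that minimum exceeds CR_RMAX. The same radius-limited nearest match, sketched in C with hypothetical names:

    #define CR_RMAX 3.0f                /* maximum matching radius */

    /* Return the index of the candidate nearest (x, y), or -1 if none
     * lies within CR_RMAX.  col/line are assumed sorted by line. */
    static int cr_match (const float *col, const float *line, int ncr,
                         float x, float y)
    {
        float rmin = CR_RMAX * CR_RMAX;
        int i, best = -1;

        for (i = 0; i < ncr; i++) {
            float dy = line[i] - y;
            if (dy < -CR_RMAX)          /* still below the search band */
                continue;
            if (dy > CR_RMAX)           /* past the band; stop early */
                break;
            float dx = col[i] - x;
            float r2 = dx * dx + dy * dy;
            if (r2 < rmin) {
                rmin = r2;
                best = i;
            }
        }
        return best;
    }
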
+
+# CR_FINDTHRESH -- Find the flux ratio threshold.
+
+procedure cr_findthresh (cr, fluxratio)
+
+pointer cr #I Cosmic ray list
+real fluxratio #O Flux ratio threshold
+
+real w, r, rmin, cr_weight()
+int i, ncr
+pointer ratio, wt
+
+begin
+ ncr = CR_NCR(cr)
+ ratio = CR_RATIO(cr) - 1
+ wt = CR_WT(cr) - 1
+
+ fluxratio = Memr[ratio+1]
+ rmin = cr_weight (fluxratio, Memr[ratio+1], Memr[wt+1], ncr)
+ do i = 2, ncr {
+ if (Memr[wt+i] == 0.)
+ next
+ r = Memr[ratio+i]
+ w = cr_weight (r, Memr[ratio+1], Memr[wt+1], ncr)
+ if (w <= rmin) {
+ if (w == rmin)
+ fluxratio = min (fluxratio, r)
+ else {
+ rmin = w
+ fluxratio = r
+ }
+ }
+ }
+end
+
+
+# CR_WEIGHT -- Compute the training weight at a particular flux ratio.
+
+real procedure cr_weight (fluxratio, ratio, wts, ncr)
+
+real fluxratio #I Flux ratio
+real ratio[ARB] #I Ratio Values
+real wts[ARB] #I Weights
+int ncr #I Number of ratio values
+real wt #O Sum of weights
+
+int i
+
+begin
+ wt = 0.
+ do i = 1, ncr {
+ if (ratio[i] > fluxratio) {
+ if (wts[i] < 0.)
+ wt = wt - wts[i]
+ } else {
+ if (wts[i] > 0.)
+ wt = wt + wts[i]
+ }
+ }
+ return (wt)
+end
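
Together, CR_WEIGHT and CR_FINDTHRESH pick the threshold from the training marks: a mark made with 's' (a star, weight +1) is penalized when its ratio falls at or below the trial threshold (it would be rejected), and a mark made with 'c' (a cosmic ray, weight -1) is penalized when its ratio lies above it (it would be kept); the candidate ratio with the smallest total penalty wins, ties going to the smaller ratio. A hedged C sketch of both steps, with illustrative names and 0-based arrays:

    /* Penalty of a trial threshold: marked stars (wt > 0) that would be
     * rejected plus marked cosmic rays (wt < 0) that would be kept,
     * mirroring CR_WEIGHT. */
    static float cr_penalty (float thresh, const float *ratio,
                             const float *wt, int ncr)
    {
        float sum = 0.0f;
        int i;

        for (i = 0; i < ncr; i++) {
            if (ratio[i] > thresh) {
                if (wt[i] < 0.0f)
                    sum -= wt[i];       /* cosmic ray kept */
            } else if (wt[i] > 0.0f)
                sum += wt[i];           /* star rejected   */
        }
        return sum;
    }

    /* Candidate ratio with the minimum penalty, smaller ratio on ties,
     * in the spirit of CR_FINDTHRESH. */
    static float cr_threshold (const float *ratio, const float *wt, int ncr)
    {
        float best = ratio[0];
        float pmin = cr_penalty (best, ratio, wt, ncr);
        int i;

        for (i = 1; i < ncr; i++) {
            float p;
            if (wt[i] == 0.0f)          /* unmarked candidate */
                continue;
            p = cr_penalty (ratio[i], ratio, wt, ncr);
            if (p < pmin || (p == pmin && ratio[i] < best)) {
                pmin = p;
                best = ratio[i];
            }
        }
        return best;
    }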
+
+
+# CR_FLAGS -- Set cosmic ray reject flags.
+
+procedure cr_flags (cr, fluxratio)
+
+pointer cr # Cosmic ray candidate list
+real fluxratio # Rejection limits
+
+int i, ncr
+pointer ratio, flag
+
+begin
+ ncr = CR_NCR(cr)
+ ratio = CR_RATIO(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ if ((Memi[flag+i] == ALWAYSYES) || (Memi[flag+i] == ALWAYSNO))
+ next
+ if (Memr[ratio+i] > fluxratio)
+ Memi[flag+i] = NO
+ else
+ Memi[flag+i] = YES
+ }
+end
+
+
+# CR_BADPIX -- Store cosmic rays in bad pixel list.
+# This is a temporary measure until a real bad pixel list is
+# implemented.
+
+procedure cr_badpix (cr, fname)
+
+pointer cr # Cosmic ray list
+char fname[ARB] # Bad pixel file name
+
+int i, ncr, c, l, f, fd, open(), errcode()
+pointer col, line, ratio, flux, flag
+errchk open
+
+begin
+ # Open bad pixel file
+ iferr (fd = open (fname, APPEND, TEXT_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ ncr = CR_NCR(cr)
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ flux = CR_FLUX(cr) - 1
+ ratio = CR_RATIO(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ f = Memi[flag+i]
+ if ((f == NO) || (f == ALWAYSNO))
+ next
+
+ c = Memr[col+i]
+ l = Memr[line+i]
+ call fprintf (fd, "%d %d\n")
+ call pargi (c)
+ call pargi (l)
+ }
+ call close (fd)
+end
+
+
+# CR_REPLACE -- Replace cosmic rays in image with replacement values.
+
+procedure cr_replace (cr, offset, im, nreplaced)
+
+pointer cr # Cosmic ray list
+int offset # Offset in list
+pointer im # IMIO pointer of output image
+int nreplaced # Number replaced (for log)
+
+int i, ncr, c, l, f
+real r
+pointer col, line, replace, flag, imps2r()
+
+begin
+ ncr = CR_NCR(cr)
+ if (ncr <= offset)
+ return
+
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ replace = CR_REPLACE(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = offset+1, ncr {
+ f = Memi[flag+i]
+ if ((f == NO) || (f == ALWAYSNO))
+ next
+
+ c = Memr[col+i]
+ l = Memr[line+i]
+ r = Memr[replace+i]
+ Memr[imps2r (im, c, c, l, l)] = r
+ nreplaced = nreplaced + 1
+ }
+end
diff --git a/noao/imred/ccdred/src/cosmic/crsurface.x b/noao/imred/ccdred/src/cosmic/crsurface.x
new file mode 100644
index 00000000..32645ff4
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/crsurface.x
@@ -0,0 +1,46 @@
+define DUMMY 6
+
+# CR_SURFACE -- Draw a perspective view of a surface. The altitude
+# and azimuth of the viewing angle are variable.
+
+procedure cr_surface(gp, data, ncols, nlines, angh, angv)
+
+pointer gp # GIO pointer
+real data[ncols,nlines] # Surface data to be plotted
+int ncols, nlines # Dimensions of surface
+real angh, angv # Orientation of surface (degrees)
+
+int wkid
+pointer sp, work
+
+int first
+real vpx1, vpx2, vpy1, vpy2
+common /frstfg/ first
+common /noaovp/ vpx1, vpx2, vpy1, vpy2
+
+begin
+ call smark (sp)
+ call salloc (work, 2 * (2 * ncols * nlines + ncols + nlines), TY_REAL)
+
+ # Initialize surface common blocks
+ first = 1
+ call srfabd()
+
+ # Define viewport.
+ call ggview (gp, vpx1, vpx2, vpy1, vpy2)
+
+ # Link GKS to GIO
+ wkid = 1
+ call gopks (STDERR)
+ call gopwk (wkid, DUMMY, gp)
+ call gacwk (wkid)
+
+ call ezsrfc (data, ncols, nlines, angh, angv, Memr[work])
+
+ call gdawk (wkid)
+ # We don't want to close the GIO pointer.
+ #call gclwk (wkid)
+ call gclks ()
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/cosmic/mkpkg b/noao/imred/ccdred/src/cosmic/mkpkg
new file mode 100644
index 00000000..d63d9c2c
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/mkpkg
@@ -0,0 +1,16 @@
+# COSMIC RAY CLEANING
+
+$checkout libpkg.a ../..
+$update libpkg.a
+$checkin libpkg.a ../..
+$exit
+
+libpkg.a:
+ crexamine.x crlist.h <error.h> <gset.h> <mach.h> <pkg/gtools.h>\
+ <imhdr.h> <syserr.h>
+ crfind.x <math/gsurfit.h>
+ crlist.x crlist.h <error.h> <gset.h> <syserr.h>
+ crsurface.x
+ t_cosmicrays.x crlist.h <error.h> <gset.h> <math/gsurfit.h>\
+ <pkg/gtools.h> <imhdr.h> <imset.h>
+ ;
diff --git a/noao/imred/ccdred/src/cosmic/t_cosmicrays.x b/noao/imred/ccdred/src/cosmic/t_cosmicrays.x
new file mode 100644
index 00000000..8640b639
--- /dev/null
+++ b/noao/imred/ccdred/src/cosmic/t_cosmicrays.x
@@ -0,0 +1,348 @@
+include <error.h>
+include <imhdr.h>
+include <imset.h>
+include <math/gsurfit.h>
+include <gset.h>
+include <pkg/gtools.h>
+include "crlist.h"
+
+# T_COSMICRAYS -- Detect and remove cosmic rays in images.
+# A list of images is examined for cosmic rays which are then replaced
+# by values from neighboring pixels. The output image may be the same
+# as the input image. This is the top level procedure which manages
+# the input and output image data. The actual algorithm for detecting
+# cosmic rays is in CR_FIND.
+
+procedure t_cosmicrays ()
+
+int list1 # List of input images to be cleaned
+int list2 # List of output images
+int list3 # List of output bad pixel files
+real threshold # Detection threshold
+real fluxratio # Luminosity boundary for stars
+int npasses # Number of cleaning passes
+int szwin # Size of detection window
+bool train # Use training objects?
+pointer savefile # Save file for training objects
+bool interactive # Examine cosmic ray parameters?
+char ans # Answer to interactive query
+
+int nc, nl, c, c1, c2, l, l1, l2, szhwin, szwin2
+int i, j, k, m, ncr, ncrlast, nreplaced, flag
+pointer sp, input, output, badpix, str, gp, gt, im, in, out
+pointer x, y, z, w, sf1, sf2, cr, data, ptr
+
+bool clgetb(), ccdflag(), streq(), strne()
+char clgetc()
+int imtopenp(), imtlen(), imtgetim(), clpopnu(), clgfil(), clgeti()
+real clgetr()
+pointer immap(), impl2r(), imgs2r(), gopen(), gt_init()
+errchk immap, impl2r, imgs2r
+errchk cr_find, cr_examine, cr_replace, cr_plot, cr_badpix
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (badpix, SZ_FNAME, TY_CHAR)
+ call salloc (savefile, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the task parameters. Check that the number of output images
+ # is either zero, in which case the cosmic rays will be removed
+ # in place, or equal to the number of input images.
+
+ list1 = imtopenp ("input")
+ list2 = imtopenp ("output")
+ i = imtlen (list1)
+ j = imtlen (list2)
+ if (j > 0 && j != i)
+ call error (0, "Input and output image lists do not match")
+
+ list3 = clpopnu ("badpix")
+ threshold = clgetr ("threshold")
+ fluxratio = clgetr ("fluxratio")
+ npasses = clgeti ("npasses")
+ szwin = clgeti ("window")
+ train = clgetb ("train")
+ call clgstr ("savefile", Memc[savefile], SZ_FNAME)
+ interactive = clgetb ("interactive")
+ call clpstr ("answer", "yes")
+ ans = 'y'
+
+ # Set up the graphics.
+ call clgstr ("graphics", Memc[str], SZ_LINE)
+ if (interactive) {
+ gp = gopen (Memc[str], NEW_FILE+AW_DEFER, STDGRAPH)
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "mark")
+ call gt_sets (gt, GTXTRAN, "log")
+ call gt_setr (gt, GTXMIN, 10.)
+ call gt_setr (gt, GTYMIN, 0.)
+ call gt_sets (gt, GTTITLE, "Parameters of cosmic ray candidates")
+ call gt_sets (gt, GTXLABEL, "Flux")
+ call gt_sets (gt, GTYLABEL, "Flux Ratio")
+ }
+
+ # Use image header translation file.
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ call hdmopen (Memc[input])
+
+ # Set up surface fitting. The background points are placed together
+ # at the beginning of the arrays. There are two surface pointers,
+ # one for using the fast refit if there are no points excluded and
+ # one for doing a full fit with points excluded.
+
+ szhwin = szwin / 2
+ szwin2 = szwin * szwin
+ call salloc (data, szwin, TY_INT)
+ call salloc (x, szwin2, TY_REAL)
+ call salloc (y, szwin2, TY_REAL)
+ call salloc (z, szwin2, TY_REAL)
+ call salloc (w, szwin2, TY_REAL)
+
+ k = 0
+ do i = 1, szwin {
+ Memr[x+k] = i
+ Memr[y+k] = 1
+ k = k + 1
+ }
+ do i = 2, szwin {
+ Memr[x+k] = szwin
+ Memr[y+k] = i
+ k = k + 1
+ }
+ do i = szwin-1, 1, -1 {
+ Memr[x+k] = i
+ Memr[y+k] = szwin
+ k = k + 1
+ }
+ do i = szwin-1, 2, -1 {
+ Memr[x+k] = 1
+ Memr[y+k] = i
+ k = k + 1
+ }
+ do i = 2, szwin-1 {
+ do j = 2, szwin-1 {
+ Memr[x+k] = j
+ Memr[y+k] = i
+ k = k + 1
+ }
+ }
+ call aclrr (Memr[z], szwin2)
+ call amovkr (1., Memr[w], 4*szwin-4)
+ call gsinit (sf1, GS_POLYNOMIAL, 2, 2, NO, 1., real(szwin),
+ 1., real(szwin))
+ call gsinit (sf2, GS_POLYNOMIAL, 2, 2, NO, 1., real(szwin),
+ 1., real(szwin))
+ call gsfit (sf1, Memr[x], Memr[y], Memr[z], Memr[w], 4*szwin-4,
+ WTS_USER, j)
+
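The loops above order the szwin x szwin window so that its 4*szwin-4 border pixels come first (bottom row, right column, top row, left column) and the interior follows; only those border points are handed to the surface fit, so the background is fitted to the window edge and then evaluated everywhere. The same ordering in a small C sketch:

    /* Fill (x, y) with the coordinates of an n x n window, border first
     * (bottom row, right column, top row, left column), interior last.
     * The arrays must hold n*n entries; the first 4*n-4 are the border. */
    static void window_coords (float *x, float *y, int n)
    {
        int i, j, k = 0;

        for (i = 1; i <= n; i++)     { x[k] = i; y[k] = 1; k++; }
        for (i = 2; i <= n; i++)     { x[k] = n; y[k] = i; k++; }
        for (i = n - 1; i >= 1; i--) { x[k] = i; y[k] = n; k++; }
        for (i = n - 1; i >= 2; i--) { x[k] = 1; y[k] = i; k++; }
        for (i = 2; i <= n - 1; i++)            /* interior pixels */
            for (j = 2; j <= n - 1; j++) { x[k] = j; y[k] = i; k++; }
    }
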
+ # Process each input image. Either work in place or create a
+ # new output image. If an error mapping the images occurs
+ # issue a warning and go on to the next input image.
+
+ while (imtgetim (list1, Memc[input], SZ_FNAME) != EOF) {
+ if (imtgetim (list2, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (clgfil (list3, Memc[badpix], SZ_FNAME) == EOF)
+ Memc[badpix] = EOS
+
+ iferr {
+ in = NULL
+ out = NULL
+ cr = NULL
+
+ # Map the input image and check for image type and
+ # previous correction flag. If the output image is
+ # the same as the input image work in place.
+ # Initialize IMIO to use a scrolling buffer of lines.
+
+ call set_input (Memc[input], im, i)
+ if (im == NULL)
+ call error (1, "Skipping input image")
+
+ if (ccdflag (im, "crcor")) {
+ call eprintf ("WARNING: %s previously corrected\n")
+ call pargstr (Memc[input])
+ #call imunmap (im)
+ #next
+ }
+
+ if (streq (Memc[input], Memc[output])) {
+ call imunmap (im)
+ im = immap (Memc[input], READ_WRITE, 0)
+ }
+ in = im
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ if ((nl < szwin) || (nc < szwin))
+ call error (0, "Image size is too small")
+ call imseti (in, IM_NBUFS, szwin)
+ call imseti (in, IM_TYBNDRY, BT_NEAREST)
+ call imseti (in, IM_NBNDRYPIX, szhwin)
+
+ # Open the output image if needed.
+ if (strne (Memc[input], Memc[output]))
+ im = immap (Memc[output], NEW_COPY, in)
+ out = im
+
+ # Open a cosmic ray list structure.
+ call cr_open (cr)
+ ncrlast = 0
+ nreplaced = 0
+
+ # Now proceed through the image line by line, scrolling
+ # the line buffers at each step. If creating a new image
+ # also write out each line as it is read. A procedure is
+ # called to find the cosmic ray candidates in the line
+ # and add them to the list maintained by CRLIST.
+ # Note that cosmic rays are not replaced at this point
+ # in order to allow the user to modify the criteria for
+ # a cosmic ray and review the results.
+
+ c1 = 1-szhwin
+ c2 = nc+szhwin
+ do i = 1, szwin-1
+ Memi[data+i] =
+ imgs2r (in, c1, c2, i-szhwin, i-szhwin)
+
+ do l = 1, nl {
+ do i = 1, szwin-1
+ Memi[data+i-1] = Memi[data+i]
+ Memi[data+szwin-1] =
+ imgs2r (in, c1, c2, l+szhwin, l+szhwin)
+ if (out != in)
+ call amovr (Memr[Memi[data+szhwin]+szhwin],
+ Memr[impl2r(out,l)], nc)
+
+ call cr_find (cr, threshold, Memi[data],
+ c2-c1+1, szwin, c1, l,
+ sf1, sf2, Memr[x], Memr[y], Memr[z], Memr[w])
+ }
+ if (interactive && train) {
+ call cr_train (cr, gp, gt, in, fluxratio, Memc[savefile])
+ train = false
+ }
+ call cr_flags (cr, fluxratio)
+
+ # If desired examine the cosmic ray list interactively.
+ if (interactive && ans != 'N') {
+ if (ans != 'Y') {
+ call eprintf ("%s - ")
+ call pargstr (Memc[input])
+ call flush (STDERR)
+ ans = clgetc ("answer")
+ }
+ if ((ans == 'Y') || (ans == 'y'))
+ call cr_examine (cr, gp, gt, in, fluxratio, 'r')
+ }
+
+ # Now replace the selected cosmic rays in the output image.
+
+ call imflush (out)
+ call imseti (out, IM_ADVICE, RANDOM)
+ call cr_replace (cr, ncrlast, out, nreplaced)
+
+ # Do additional passes through the data. We work in place
+ # in the output image. Note that we only have to look in
+ # the vicinity of replaced cosmic rays for secondary
+ # events since we've already looked at every pixel once.
+ # Instead of scrolling through the image we will extract
+ # subrasters around each replaced cosmic ray. However,
+ # we use pointers into the subraster to maintain the same
+ # format expected by CR_FIND.
+
+ if (npasses > 1) {
+ if (out != in)
+ call imunmap (out)
+ call imunmap (in)
+ im = immap (Memc[output], READ_WRITE, 0)
+ in = im
+ out = im
+ call imseti (in, IM_TYBNDRY, BT_NEAREST)
+ call imseti (in, IM_NBNDRYPIX, szhwin)
+
+ for (i=2; i<=npasses; i=i+1) {
+ # Loop through each cosmic ray in the previous pass.
+ ncr = CR_NCR(cr)
+ do j = ncrlast+1, ncr {
+ flag = Memi[CR_FLAG(cr)+j-1]
+ if (flag==NO || flag==ALWAYSNO)
+ next
+ c = Memr[CR_COL(cr)+j-1]
+ l = Memr[CR_LINE(cr)+j-1]
+ c1 = max (1-szhwin, c - (szwin-1))
+ c2 = min (nc+szhwin, c + (szwin-1))
+ k = c2 - c1 + 1
+ l1 = max (1-szhwin, l - (szwin-1))
+ l2 = min (nl+szhwin, l + (szwin-1))
+
+ # Set the line pointers off an image section
+ # centered on a previously replaced cosmic ray.
+
+ ptr = imgs2r (in, c1, c2, l1, l2) - k
+
+ l1 = max (1, l - szhwin)
+ l2 = min (nl, l + szhwin)
+ do l = l1, l2 {
+ do m = 1, szwin
+ Memi[data+m-1] = ptr + m * k
+ ptr = ptr + k
+
+ call cr_find ( cr, threshold, Memi[data],
+ k, szwin, c1, l, sf1, sf2,
+ Memr[x], Memr[y], Memr[z], Memr[w])
+ }
+ }
+ call cr_flags (cr, fluxratio)
+
+ # Replace any new cosmic rays found.
+ call cr_replace (cr, ncr, in, nreplaced)
+ ncrlast = ncr
+ }
+ }
+
+ # Output header log, log, plot, and bad pixels.
+ call sprintf (Memc[str], SZ_LINE,
+ "Threshold=%5.1f, fluxratio=%6.2f, removed=%d")
+ call pargr (threshold)
+ call pargr (fluxratio)
+ call pargi (nreplaced)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out, Memc[str])
+ call hdmpstr (out, "crcor", Memc[str])
+
+ call cr_plot (cr, in, fluxratio)
+ call cr_badpix (cr, Memc[badpix])
+
+ call cr_close (cr)
+ if (out != in)
+ call imunmap (out)
+ call imunmap (in)
+ } then {
+ # In case of error clean up and go on to the next image.
+ if (in != NULL) {
+ if (out != NULL && out != in)
+ call imunmap (out)
+ call imunmap (in)
+ }
+ if (cr != NULL)
+ call cr_close (cr)
+ call erract (EA_WARN)
+ }
+ }
+
+ if (interactive) {
+ call gt_free (gt)
+ call gclose (gp)
+ }
+ call imtclose (list1)
+ call imtclose (list2)
+ call clpcls (list3)
+ call hdmclose ()
+ call gsfree (sf1)
+ call gsfree (sf2)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/doproc.x b/noao/imred/ccdred/src/doproc.x
new file mode 100644
index 00000000..909c6f12
--- /dev/null
+++ b/noao/imred/ccdred/src/doproc.x
@@ -0,0 +1,29 @@
+include "ccdred.h"
+
+# DOPROC -- Call the appropriate processing procedure.
+#
+# There are four data type paths depending on the readout axis and
+# the calculation data type.
+
+procedure doproc (ccd)
+
+pointer ccd # CCD processing structure
+
+begin
+ switch (READAXIS (ccd)) {
+ case 1:
+ switch (CALCTYPE (ccd)) {
+ case TY_SHORT:
+ call proc1s (ccd)
+ default:
+ call proc1r (ccd)
+ }
+ case 2:
+ switch (CALCTYPE (ccd)) {
+ case TY_SHORT:
+ call proc2s (ccd)
+ default:
+ call proc2r (ccd)
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/generic/ccdred.h b/noao/imred/ccdred/src/generic/ccdred.h
new file mode 100644
index 00000000..2d370d86
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/ccdred.h
@@ -0,0 +1,150 @@
+# CCDRED Data Structures and Definitions
+
+# The CCD structure: This structure is used to communicate processing
+# parameters between the package procedures. It contains pointers to
+# data, calibration image IMIO pointers, scaling parameters, and the
+# correction flags. The correction flags indicate which processing
+# operations are to be performed. The subsection parameters do not
+# include a step size; a step size of one is assumed. If arbitrary
+# subsampling is desired, this would be the next generalization.
+
+define LEN_CCD 131 # Length of CCD structure
+
+# CCD data coordinates
+define CCD_C1 Memi[$1] # CCD starting column
+define CCD_C2 Memi[$1+1] # CCD ending column
+define CCD_L1 Memi[$1+2] # CCD starting line
+define CCD_L2 Memi[$1+3] # CCD ending line
+
+# Input data
+define IN_IM Memi[$1+10] # Input image pointer
+define IN_C1 Memi[$1+11] # Input data starting column
+define IN_C2 Memi[$1+12] # Input data ending column
+define IN_L1 Memi[$1+13] # Input data starting line
+define IN_L2 Memi[$1+14] # Input data ending line
+
+# Output data
+define OUT_IM Memi[$1+20] # Output image pointer
+define OUT_C1 Memi[$1+21] # Output data starting column
+define OUT_C2 Memi[$1+22] # Output data ending column
+define OUT_L1 Memi[$1+23] # Output data starting line
+define OUT_L2 Memi[$1+24] # Output data ending line
+
+# Mask data
+define MASK_IM Memi[$1+30] # Mask image pointer
+define MASK_C1 Memi[$1+31] # Mask data starting column
+define MASK_C2 Memi[$1+32] # Mask data ending column
+define MASK_L1 Memi[$1+33] # Mask data starting line
+define MASK_L2 Memi[$1+34] # Mask data ending line
+define MASK_PM Memi[$1+35] # Mask pointer
+define MASK_FP Memi[$1+36] # Mask fixpix data
+
+# Zero level data
+define ZERO_IM Memi[$1+40] # Zero level image pointer
+define ZERO_C1 Memi[$1+41] # Zero level data starting column
+define ZERO_C2 Memi[$1+42] # Zero level data ending column
+define ZERO_L1 Memi[$1+43] # Zero level data starting line
+define ZERO_L2 Memi[$1+44] # Zero level data ending line
+
+# Dark count data
+define DARK_IM Memi[$1+50] # Dark count image pointer
+define DARK_C1 Memi[$1+51] # Dark count data starting column
+define DARK_C2 Memi[$1+52] # Dark count data ending column
+define DARK_L1 Memi[$1+53] # Dark count data starting line
+define DARK_L2 Memi[$1+54] # Dark count data ending line
+
+# Flat field data
+define FLAT_IM Memi[$1+60] # Flat field image pointer
+define FLAT_C1 Memi[$1+61] # Flat field data starting column
+define FLAT_C2 Memi[$1+62] # Flat field data ending column
+define FLAT_L1 Memi[$1+63] # Flat field data starting line
+define FLAT_L2 Memi[$1+64] # Flat field data ending line
+
+# Illumination data
+define ILLUM_IM Memi[$1+70] # Illumination image pointer
+define ILLUM_C1 Memi[$1+71] # Illumination data starting column
+define ILLUM_C2 Memi[$1+72] # Illumination data ending column
+define ILLUM_L1 Memi[$1+73] # Illumination data starting line
+define ILLUM_L2 Memi[$1+74] # Illumination data ending line
+
+# Fringe data
+define FRINGE_IM Memi[$1+80] # Fringe image pointer
+define FRINGE_C1 Memi[$1+81] # Fringe data starting column
+define FRINGE_C2 Memi[$1+82] # Fringe data ending column
+define FRINGE_L1 Memi[$1+83] # Fringe data starting line
+define FRINGE_L2 Memi[$1+84] # Fringe data ending line
+
+# Trim section
+define TRIM_C1 Memi[$1+90] # Trim starting column
+define TRIM_C2 Memi[$1+91] # Trim ending column
+define TRIM_L1 Memi[$1+92] # Trim starting line
+define TRIM_L2 Memi[$1+93] # Trim ending line
+
+# Bias section
+define BIAS_C1 Memi[$1+100] # Bias starting column
+define BIAS_C2 Memi[$1+101] # Bias ending column
+define BIAS_L1 Memi[$1+102] # Bias starting line
+define BIAS_L2 Memi[$1+103] # Bias ending line
+
+define READAXIS Memi[$1+110] # Read out axis (1=cols, 2=lines)
+define CALCTYPE Memi[$1+111] # Calculation data type
+define OVERSCAN_TYPE Memi[$1+112] # Overscan type
+define OVERSCAN_VEC Memi[$1+113] # Pointer to overscan vector
+define DARKSCALE Memr[P2R($1+114)] # Dark count scale factor
+define FRINGESCALE Memr[P2R($1+115)] # Fringe scale factor
+define FLATSCALE Memr[P2R($1+116)] # Flat field scale factor
+define ILLUMSCALE Memr[P2R($1+117)] # Illumination scale factor
+define MINREPLACE Memr[P2R($1+118)] # Minimum replacement value
+define MEAN Memr[P2R($1+119)] # Mean of output image
+define COR Memi[$1+120] # Overall correction flag
+define CORS Memi[$1+121+($2-1)] # Individual correction flags
+
+# The correction array contains the following elements with array indices
+# given by the macro definitions.
+
+define NCORS 10 # Number of corrections
+
+define FIXPIX 1 # Fix bad pixels
+define TRIM 2 # Trim image
+define OVERSCAN 3 # Apply overscan correction
+define ZEROCOR 4 # Apply zero level correction
+define DARKCOR 5 # Apply dark count correction
+define FLATCOR 6 # Apply flat field correction
+define ILLUMCOR 7 # Apply illumination correction
+define FRINGECOR 8 # Apply fringe correction
+define FINDMEAN 9 # Find the mean of the output image
+define MINREP 10 # Check and replace minimum value
+
+# The following definitions identify the correction values in the correction
+# array. They are defined in terms of bit fields so that it is possible to
+# add corrections to form unique combination corrections. Some of
+# these combinations are implemented as compound operations for efficiency.
+
+define O 001B # overscan
+define Z 002B # zero level
+define D 004B # dark count
+define F 010B # flat field
+define I 020B # Illumination
+define Q 040B # Fringe
+
+# The following correction combinations are recognized.
+
+define ZO 003B # zero level + overscan
+define DO 005B # dark count + overscan
+define DZ 006B # dark count + zero level
+define DZO 007B # dark count + zero level + overscan
+define FO 011B # flat field + overscan
+define FZ 012B # flat field + zero level
+define FZO 013B # flat field + zero level + overscan
+define FD 014B # flat field + dark count
+define FDO 015B # flat field + dark count + overscan
+define FDZ 016B # flat field + dark count + zero level
+define FDZO 017B # flat field + dark count + zero level + overscan
+define QI 060B # fringe + illumination
+
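Since each basic correction occupies its own bit, the sum of any subset of flags is a unique code, which is what lets the processing procedures switch directly on a compound operation. A small C check of the encoding (flag names mirror the octal defines above):

    #include <assert.h>

    /* One bit per basic correction, as in the octal defines above. */
    enum { O = 001, Z = 002, D = 004, F = 010, I = 020, Q = 040 };

    /* Distinct bits make every subset sum unique; a few of the compound
     * codes defined above, verified the obvious way. */
    static void check_codes (void)
    {
        assert (Z + O == 003);           /* ZO   */
        assert (D + Z + O == 007);       /* DZO  */
        assert (F + D + Z + O == 017);   /* FDZO */
        assert (Q + I == 060);           /* QI   */
    }
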
+# The following overscan functions are recognized.
+define OVERSCAN_TYPES "|mean|median|minmax|chebyshev|legendre|spline3|spline1|"
+define OVERSCAN_MEAN 1 # Mean of overscan
+define OVERSCAN_MEDIAN 2 # Median of overscan
+define OVERSCAN_MINMAX 3 # Minmax of overscan
+define OVERSCAN_FIT 4 # Following codes are function fits
diff --git a/noao/imred/ccdred/src/generic/cor.x b/noao/imred/ccdred/src/generic/cor.x
new file mode 100644
index 00000000..fd2a8d6b
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/cor.x
@@ -0,0 +1,694 @@
+include "ccdred.h"
+
+
+.help cor Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+cor -- Process CCD image lines
+
+These procedures are the heart of the CCD processing. They do the desired
+set of processing operations on the image line data as efficiently as
+possible. They are called by the PROC procedures. There are four procedures
+one for each readout axis and one for short and real image data.
+Some sets of operations are coded as single compound operations for efficiency.
+To keep the number of combinations managable only the most common
+combinations are coded as compound operations. The combinations
+consist of any set of line overscan, column overscan, zero level, dark
+count, and flat field and any set of illumination and fringe
+correction. The corrections are applied in place to the output vector.
+
+The column readout procedure is more complicated in order to handle
+zero level and flat field corrections specified as one dimensional
+readout corrections instead of two dimensional calibration images.
+Column readout format is probably extremely rare and the 1D readout
+corrections are used only for special types of data.
+.ih
+SEE ALSO
+proc, ccdred.h
+.endhelp -----------------------------------------------------------------------
+
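Each compound case below folds several corrections into one pass over the line, so every pixel is read and written once no matter how many corrections apply. As an illustration only, the FZO combination (flat field + zero level + overscan, line readout, scalar overscan) reduces to a single fused loop like this C sketch:

    /* Fused flat field + zero level + overscan correction for one image
     * line, in the spirit of the FZO case below.  Illustrative names. */
    static void cor_fzo (float *out, int n, float overscan,
                         const float *zero, const float *flat,
                         float flatscale)
    {
        int i;

        for (i = 0; i < n; i++)
            out[i] = (out[i] - overscan - zero[i]) * flatscale / flat[i];
    }

Applying the three corrections in separate passes would roughly triple the memory traffic over the line, which is the efficiency the help text refers to.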
+
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1s (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+short out[n] # Output data
+real overscan # Overscan value
+short zero[n] # Zero level correction
+short dark[n] # Dark count correction
+short flat[n] # Flat field correction
+short illum[n] # Illumination correction
+short fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2s (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+short out[n] # Output data
+real overscan[n] # Overscan value
+short zero[n] # Zero level correction
+short dark[n] # Dark count correction
+short flat[n] # Flat field correction
+short illum[n] # Illumination correction
+short fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+short zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1r (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+real out[n] # Output data
+real overscan # Overscan value
+real zero[n] # Zero level correction
+real dark[n] # Dark count correction
+real flat[n] # Flat field correction
+real illum[n] # Illumination correction
+real fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2r (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+real out[n] # Output data
+real overscan[n] # Overscan value
+real zero[n] # Zero level correction
+real dark[n] # Dark count correction
+real flat[n] # Flat field correction
+real illum[n] # Illumination correction
+real fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+real zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/generic/icaclip.x b/noao/imred/ccdred/src/generic/icaclip.x
new file mode 100644
index 00000000..1530145c
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icaclip.x
@@ -0,0 +1,1102 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for this algorithm
+
+
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclips (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Since the unweighted average is computed here possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
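In outline, IC_AAVSIGCLIP above estimates one sigma for unit signal from the whole line (each residual is divided by the expected Poisson variance), then at each output pixel iteratively rejects stack values whose residual, in units of s * sqrt(max(1, mean)), falls outside -lsigma..hsigma, recomputing the mean until nothing more is rejected. A deliberately simplified C sketch of that per-pixel loop, without the scale corrections, nkeep restoration, and image-id bookkeeping of the real routine:

    #include <math.h>

    /* Average-sigma clipping of one pixel stack d[0..n-1]: s is the
     * globally estimated sigma for unit signal, lsig/hsig the rejection
     * limits in sigma.  Returns the surviving count; d is compacted in
     * place and *mean receives the clipped average.  Sketch only. */
    static int avsigclip1 (float *d, int n, float s,
                           float lsig, float hsig, float *mean)
    {
        float sum = 0.0f, a;
        int i, nlast;

        for (i = 0; i < n; i++)
            sum += d[i];
        a = (n > 0) ? sum / n : 0.0f;

        if (n < 3 || s <= 0.0f) {        /* too few pixels, or no sigma */
            *mean = a;
            return n;
        }

        do {
            float s1 = s * sqrtf (a > 1.0f ? a : 1.0f);
            nlast = n;
            for (i = 0; i < n; i++) {
                float r = (d[i] - a) / s1;
                if (r < -lsig || r > hsig) {
                    sum -= d[i];
                    d[i--] = d[--n];     /* compact: pull last value in */
                }
            }
            if (n > 1)
                a = sum / n;
        } while (n != nlast && n > 2);

        *mean = a;
        return n;
    }
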
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_mavsigclips (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+real med, low, high, r, s, s1, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Mems[d[1]+k]
+ else {
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Mems[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+ else
+ return
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Mems[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Mems[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mems[d[n3-1]+k]
+ high = Mems[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mems[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclipr (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Since the unweighted average is computed here possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
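+	# As coded below, the accumulation is s = sum ((d - a)**2 / v) where
+	# v = max (1, (a + zero) / scale) is a Poisson estimate of the
+	# variance of the scaled data (or simply max (1, a) when no scaling
+	# is in effect); the final sigma is sqrt (s / (N - 1)) taken over
+	# the whole line.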
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+	# Compute the final average sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
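+	# For each point the clipping sigma is s * sqrt (max (1, v)), with v
+	# the same Poisson variance estimate as above. Rejected values are
+	# swapped to the end of the column and their |residual| is saved in
+	# resid so that pixels can be restored if fewer than maxkeep survive.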
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+	# If too many pixels were rejected, restore some.
+	# Pixels whose residuals agree within TOL are restored together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median.
+# The average sigma is normalized by the expected Poisson sigma.
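+# The scheme is the same as IC_AAVSIGCLIP except that the clipping is done
+# about the median of each (sorted) column, walking inward from the low
+# (nl) and high (nh) ends and recomputing the median as pixels are dropped.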
+
+procedure ic_mavsigclipr (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+real med, low, high, r, s, s1, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Memr[d[1]+k]
+ else {
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Memr[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+	# Compute the final average sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+	else {
+	    call sfree (sp)
+	    return
+	}
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Memr[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Memr[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+	# If too many pixels were rejected, restore some.
+	# Pixels whose residuals agree within TOL are restored together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Memr[d[n3-1]+k]
+ high = Memr[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Memr[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/generic/icaverage.x b/noao/imred/ccdred/src/generic/icaverage.x
new file mode 100644
index 00000000..3646b725
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icaverage.x
@@ -0,0 +1,163 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weighted average.
+
+procedure ic_averages (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average (returned)
+
+int i, j, k
+real sumwt, wt
+real sum
+
+include "../icombine.com"
+
+begin
+	# If no data have been excluded, compute the average without checking
+	# the number of points, using the fact that the weights are normalized.
+	# If all the data have been excluded, set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mems[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mems[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Mems[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mems[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mems[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Mems[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
+
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weighted average.
+
+procedure ic_averager (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average (returned)
+
+int i, j, k
+real sumwt, wt
+real sum
+
+include "../icombine.com"
+
+begin
+	# If no data have been excluded, compute the average without checking
+	# the number of points, using the fact that the weights are normalized.
+	# If all the data have been excluded, set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Memr[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Memr[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Memr[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Memr[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Memr[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Memr[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/generic/iccclip.x b/noao/imred/ccdred/src/generic/iccclip.x
new file mode 100644
index 00000000..57709064
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/iccclip.x
@@ -0,0 +1,898 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define	MINCLIP	2		# Minimum number of images for algorithm
+
+
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
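+#
+# As coded below, the sigma for a pixel from image l is, in the scaled
+# units of the data,
+#     sqrt (nm[1,l] + D/nm[2,l] + (D*nm[3,l])**2) / scales[l]
+# where D = max (0, scales[l] * (level + zeros[l])) estimates the raw
+# counts; the three nm terms are apparently the readout noise variance,
+# the gain, and a fractional sensitivity noise coefficient.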
+
+procedure ic_accdclips (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are no pixels go on to the combining. Since the unweighted
+ # average is computed here possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Mems[d[1]+k]
+ sum = sum + Mems[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MCCDCLIP -- Reject pixels using CCD noise parameters about the median
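+# The same noise model as IC_ACCDCLIP is evaluated about the running median
+# of each (sorted) column; pixels are trimmed from the low and high ends
+# and restored from the saved residuals if fewer than the nkeep limit
+# survive.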
+
+procedure ic_mccdclips (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+real med, zero
+data zero /0.0/
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Mems[d[n3-1]+k]
+ med = (med + Mems[d[n3]+k]) / 2.
+ } else
+ med = Mems[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
+
+procedure ic_accdclipr (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are no pixels go on to the combining. Since the unweighted
+ # average is computed here possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Memr[d[1]+k]
+ sum = sum + Memr[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MCCDCLIP -- Reject pixels using CCD noise parameters about the median
+
+procedure ic_mccdclipr (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+real med, zero
+data zero /0.0/
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Memr[d[n3-1]+k]
+ med = (med + Memr[d[n3]+k]) / 2.
+ } else
+ med = Memr[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/generic/icgdata.x b/noao/imred/ccdred/src/generic/icgdata.x
new file mode 100644
index 00000000..5c6ac18c
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icgdata.x
@@ -0,0 +1,459 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers. If required, the connection to the original
+# image index is kept in the returned id pointers.
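+# The steps, as implemented below, are: read (or buffer and shift) each
+# input line, flag pixels outside the [lthresh, hthresh] window, apply the
+# scale and zero corrections, drop images whose line is entirely masked,
+# compact the good pixels to the front of each column (recording the source
+# image index when keepids is set), and sort the columns when a median-based
+# (mclip) rejection will follow.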
+
+procedure ic_gdatas (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int	npts			# Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnls()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnls (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnls (in[i], buf, v2)
+ call amovs (Mems[buf], Mems[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Mems[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Mems[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
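+	# The correction applied is d = d / scale - zero; for D_MIX lines
+	# only unmasked pixels are modified.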
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Mems[dp] = Mems[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Mems[d[k]+j-1] = Mems[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Mems[d[k]+j-1] = Mems[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_SHORT)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sorts (d, Mems[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sorts (d, Mems[dp], n, npts)
+ call mfree (dp, TY_SHORT)
+ }
+end
+
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers. If required, the connection to the original
+# image index is kept in the returned id pointers.
+
+procedure ic_gdatar (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int	npts			# Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnlr()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnlr (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnlr (in[i], buf, v2)
+ call amovr (Memr[buf], Memr[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Memr[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Memr[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Memr[dp] = Memr[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Memr[d[k]+j-1] = Memr[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Memr[d[k]+j-1] = Memr[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_REAL)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sortr (d, Memr[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sortr (d, Memr[dp], n, npts)
+ call mfree (dp, TY_REAL)
+ }
+end
+
diff --git a/noao/imred/ccdred/src/generic/icgrow.x b/noao/imred/ccdred/src/generic/icgrow.x
new file mode 100644
index 00000000..b94e1cbc
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icgrow.x
@@ -0,0 +1,148 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids to
+# avoid further growing from the pixels rejected here. The unweighted
+# average can be updated, but any rejected pixel requires the median to
+# be recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals, so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
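+#
+# When a pixel from the same image (id l) is found within grow columns of a
+# rejected pixel it is removed by swapping it with the last good pixel in
+# its column and zeroing its id; for average combining the running mean is
+# updated in place as (n*avg - d) / (n - 1), while median combining simply
+# forces the median to be recomputed (docombine).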
+
+procedure ic_grows (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Mems[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Mems[d[j2]+k2] = Mems[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
+
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids to
+# avoid further growing from the pixels rejected here. The unweighted
+# average can be updated, but any rejected pixel requires the median to
+# be recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals, so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
+
+procedure ic_growr (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Memr[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Memr[d[j2]+k2] = Memr[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/generic/icmedian.x b/noao/imred/ccdred/src/generic/icmedian.x
new file mode 100644
index 00000000..ec0166ba
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icmedian.x
@@ -0,0 +1,343 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_MEDIAN -- Median of lines
+
+procedure ic_medians (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+real val1, val2, val3
+short temp, wtemp
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Mems[d[j1]+k]
+ val2 = Mems[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mems[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Mems[d[j1]+k]
+ val2 = Mems[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mems[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+	    # If there are more than 3 points use the Wirth algorithm. This
+	    # is the same as vops$amed.gx except that for an even number of
+	    # points it selects the middle two and averages them.
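+	    # The selection below repeatedly partitions the column about a
+	    # pivot, narrowing [lo,up] until the middle element is in place;
+	    # for an even number of points a second pass selects the next
+	    # element and the two central values are averaged.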
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mems[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Mems[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Mems[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mems[d[lo1]+k]
+ Mems[d[lo1]+k] = Mems[d[up1]+k]
+ Mems[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Mems[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mems[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Mems[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Mems[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mems[d[lo1]+k]
+ Mems[d[lo1]+k] = Mems[d[up1]+k]
+ Mems[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Mems[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ val1 = Mems[d[1]+k]
+ val2 = Mems[d[2]+k]
+ val3 = Mems[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Mems[d[1]+k]
+ val2 = Mems[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Mems[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
+
+# IC_MEDIAN -- Median of lines
+
+procedure ic_medianr (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+real val1, val2, val3
+real temp, wtemp
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Memr[d[j1]+k]
+ val2 = Memr[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Memr[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Memr[d[j1]+k]
+ val2 = Memr[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Memr[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+	    # If there are more than 3 points use the Wirth algorithm. This
+	    # is the same as vops$amed.gx except that for an even number of
+	    # points it selects the middle two and averages them.
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Memr[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Memr[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Memr[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Memr[d[lo1]+k]
+ Memr[d[lo1]+k] = Memr[d[up1]+k]
+ Memr[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Memr[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Memr[d[j]+k]; lo1 = lo; up1 = up
+
+ repeat {
+ while (Memr[d[lo1]+k] < temp)
+ lo1 = lo1 + 1
+ while (temp < Memr[d[up1]+k])
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Memr[d[lo1]+k]
+ Memr[d[lo1]+k] = Memr[d[up1]+k]
+ Memr[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Memr[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ val1 = Memr[d[1]+k]
+ val2 = Memr[d[2]+k]
+ val3 = Memr[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Memr[d[1]+k]
+ val2 = Memr[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Memr[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
+
diff --git a/noao/imred/ccdred/src/generic/icmm.x b/noao/imred/ccdred/src/generic/icmm.x
new file mode 100644
index 00000000..259759bd
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icmm.x
@@ -0,0 +1,300 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+
+# IC_MM -- Reject a specified number of high and low pixels
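+#
+# The flow and fhigh fractions of good pixels are converted to counts
+# (the 0.001 apparently guards against roundoff), matched low/high pairs
+# are removed in passes that locate both the current minimum and maximum,
+# and any excess low-only or high-only rejections are done in separate
+# passes. A rejected slot is filled from the trailing values so the first
+# n[i] entries of each column remain the surviving pixels.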
+
+procedure ic_mms (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+short d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Mems[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Mems[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Mems[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Mems[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Mems[kmax] = d2
+ else
+ Mems[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Mems[kmin] = d1
+ else
+ Mems[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Mems[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Mems[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ d1 = Mems[k]
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Mems[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Mems[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Mems[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
+
+# IC_MM -- Reject a specified number of high and low pixels
+
+procedure ic_mmr (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+real d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Memr[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Memr[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Memr[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Memr[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Memr[kmax] = d2
+ else
+ Memr[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Memr[kmin] = d1
+ else
+ Memr[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Memr[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Memr[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ d1 = Memr[k]
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ d1 = Memr[k]
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Memr[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Memr[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
diff --git a/noao/imred/ccdred/src/generic/icombine.x b/noao/imred/ccdred/src/generic/icombine.x
new file mode 100644
index 00000000..b4ff60be
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icombine.x
@@ -0,0 +1,607 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include <syserr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# ICOMBINE -- Combine images
+#
+# The memory and open file descriptor limits are checked and an attempt
+# to recover is made either by setting the image pixel files to be
+# closed after I/O or by notifying the calling program that memory
+# ran out and the IMIO buffer size should be reduced. After the checks
+# a procedure for the selected combine option is called.
+# Because there may be several failure modes when reaching the file
+# limits we first assume an error is due to the file limit, except for
+# out of memory, and close some pixel files. If the error then repeats
+# on accessing the pixels the error is passed back.
+
+
+procedure icombines (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1s(), impl1i()
+errchk stropen, imgl1s, impl1i
+pointer impl1r()
+errchk impl1r
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned, use the IMIO buffer; otherwise we need vectors of
+ # output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_SHORT)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1s (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1s (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combines (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combines (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+pointer impnlr()
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdatas (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclips (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclips (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mms (d, id, n, npts)
+ case PCLIP:
+ call ic_pclips (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclips (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclips (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclips (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclips (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grows (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_averages (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_medians (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigmas (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+
+ call sfree (sp)
+end
+
+procedure icombiner (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1r(), impl1i()
+errchk stropen, imgl1r, impl1i
+pointer impl1r()
+errchk impl1r
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned, use the IMIO buffer; otherwise we need vectors of
+ # output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_REAL)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1r (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1r (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combiner (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combiner (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+pointer impnlr()
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdatar (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclipr (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclipr (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mmr (d, id, n, npts)
+ case PCLIP:
+ call ic_pclipr (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclipr (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclipr (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclipr (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclipr (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_growr (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_averager (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_medianr (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigmar (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+
+ call sfree (sp)
+end
+
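For the CCDCLIP and CRREJECT options, ic_combines/ic_combiner pack three noise-model values per input image into the nm work array: the squared read noise converted to data units through the gain (floored to a small positive value), the gain, and the sensitivity noise term, each taken either from a numeric task parameter or from an image header keyword (the ctor/imgetr branches). Below is a short C sketch of that packing, assuming the per-image values have already been resolved; the names are illustrative and it is not the IRAF code.

    #include <math.h>
    #include <float.h>

    /* Sketch of the per-image noise-model packing for CCDCLIP/CRREJECT.
     * nm holds one triple per image:
     *   nm[3*i+0] = (rdnoise / gain)^2, floored to a small positive value
     *   nm[3*i+1] = gain
     *   nm[3*i+2] = snoise
     * The rdnoise/gain/snoise arrays stand in for values already read from
     * task parameters or image header keywords. */
    static void pack_noise_model(float *nm, int nimages,
                                 const float *rdnoise, const float *gain,
                                 const float *snoise)
    {
        for (int i = 0; i < nimages; i++) {
            float r = rdnoise[i] / gain[i];
            nm[3*i + 0] = fmaxf(r * r, 1e4f / FLT_MAX);
            nm[3*i + 1] = gain[i];
            nm[3*i + 2] = snoise[i];
        }
    }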
diff --git a/noao/imred/ccdred/src/generic/icpclip.x b/noao/imred/ccdred/src/generic/icpclip.x
new file mode 100644
index 00000000..da09bb75
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icpclip.x
@@ -0,0 +1,442 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number for clipping
+
+
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
+
+procedure ic_pclips (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+real med
+
+include "../icombine.com"
+
+begin
+ # There must be at least MINCLIP and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Mems[d[n2-1]+j]
+ med = (med + Mems[d[n2]+j]) / 2.
+ } else
+ med = Mems[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Mems[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Mems[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Mems[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Mems[d[n5-1]+j]
+ med = (med + Mems[d[n5]+j]) / 2.
+ } else
+ med = Mems[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+ # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+j] = Mems[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+j] = Mems[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
+
+procedure ic_pclipr (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+real med
+
+include "../icombine.com"
+
+begin
+ # There must be at least MINCLIP and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Memr[d[n2-1]+j]
+ med = (med + Memr[d[n2]+j]) / 2.
+ } else
+ med = Memr[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Memr[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Memr[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Memr[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Memr[d[n5-1]+j]
+ med = (med + Memr[d[n5]+j]) / 2.
+ } else
+ med = Memr[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+ # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+j] = Memr[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+j] = Memr[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
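Both percentile-clip procedures assume the pixel stack at each point has already been sorted across images, so the median and the value at the percentile offset are simple index lookups, and the difference between them serves as the clipping sigma. The following rough C sketch follows steps 1 through 3 of the header comment for one sorted stack; it omits the nkeep add-back and reordering logic, and the names are illustrative only.

    #include <math.h>

    /* Sketch of percentile clipping on one already-sorted pixel stack.
     * Returns the number of surviving values; survivors are sorted[lo..hi]. */
    static int pclip_reject(const float *sorted, int n, float pclip,
                            float lsigma, float hsigma, int *lo, int *hi)
    {
        int n2 = n / 2;                        /* 0-based index of the median */
        float med = (n % 2 == 0) ? 0.5f * (sorted[n2 - 1] + sorted[n2])
                                 : sorted[n2];

        /* "Percentile sigma": distance from the median to the value that is
         * |pclip| elements away in the sorted stack. */
        int n3 = (pclip < 0) ? (int)fmaxf(0.0f, n2 + pclip)
                             : (int)fminf((float)(n - 1), n2 + pclip);
        float sigma = fabsf(sorted[n3] - med);
        if (sigma == 0.0f) {                   /* no spread: keep everything */
            *lo = 0; *hi = n - 1;
            return n;
        }

        int l = 0, h = n - 1;
        while (l <= h && (med - sorted[l]) / sigma >= lsigma)
            l++;                               /* clip the low tail */
        while (h >= l && (sorted[h] - med) / sigma >= hsigma)
            h--;                               /* clip the high tail */
        *lo = l; *hi = h;
        return h - l + 1;
    }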
diff --git a/noao/imred/ccdred/src/generic/icsclip.x b/noao/imred/ccdred/src/generic/icsclip.x
new file mode 100644
index 00000000..d7ccfd84
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icsclip.x
@@ -0,0 +1,964 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for algorithm
+
+
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average rejects the high and low pixels. A correction for
+# different scalings of the images may be made. Weights are not used.
+
+procedure ic_asigclips (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mems[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mems[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Mems[d[1]+k]
+ high = Mems[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mems[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mems[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mems[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Mems[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mems[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mems[dp1]
+ Mems[dp1] = Mems[dp2]
+ Mems[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mems[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
+
+procedure ic_msigclips (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+real med, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Mems[d[n3-1]+k] + Mems[d[n3]+k]) / 2.
+ else
+ med = Mems[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Mems[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Mems[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mems[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mems[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mems[d[l]+k] = Mems[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average rejects the high and low pixels. A correction for
+# different scalings of the images may be made. Weights are not used.
+
+procedure ic_asigclipr (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real average[npts] # Average
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Memr[d[1]+k]
+ do j = 2, n1
+ sum = sum + Memr[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Memr[d[1]+k]
+ high = Memr[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Memr[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Memr[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Memr[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Memr[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Memr[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Memr[dp1]
+ Memr[dp1] = Memr[dp2]
+ Memr[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Memr[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
+
+procedure ic_msigclipr (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+real median[npts] # Median
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+real med, one
+data one /1.0/
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Memr[d[n3-1]+k] + Memr[d[n3]+k]) / 2.
+ else
+ med = Memr[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Memr[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Memr[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Memr[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Memr[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Memr[d[l]+k] = Memr[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
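The sigma-clipping procedures iterate: estimate a central value (the median here, or an average with the two extremes excluded in the asigclip variants), compute the sigma of the surviving values about it, reject values beyond lsigma and hsigma, and stop when nothing changes or too few values remain; rejected values may then be added back to honor nkeep. A compact C sketch of the median-based iteration for one sorted stack, ignoring the scale correction, image IDs, and nkeep handling (illustrative names only):

    #include <math.h>

    /* Iterative sigma clipping about the median of one sorted pixel stack,
     * in the spirit of ic_msigclipr.  Survivors are sorted[lo..hi]. */
    static int sigclip_median(const float *sorted, int n,
                              float lsigma, float hsigma, int *lo, int *hi)
    {
        int l = 0, h = n - 1, nprev;

        if (n < 3) {                           /* too few values to clip */
            *lo = 0; *hi = n - 1;
            return n;
        }

        do {
            nprev = h - l + 1;
            int m = l + nprev / 2;
            float med = (nprev % 2 == 0) ? 0.5f * (sorted[m - 1] + sorted[m])
                                         : sorted[m];

            /* Standard deviation of the surviving values about the median. */
            float s = 0.0f;
            for (int j = l; j <= h; j++)
                s += (sorted[j] - med) * (sorted[j] - med);
            s = sqrtf(s / (float)(nprev - 1));
            if (s <= 0.0f)
                break;

            while (l <= h && (med - sorted[l]) / s > lsigma)
                l++;                           /* clip the low tail */
            while (h >= l && (sorted[h] - med) / s > hsigma)
                h--;                           /* clip the high tail */
        } while (h - l + 1 != nprev && h - l + 1 >= 3);

        *lo = l; *hi = h;
        return h - l + 1;
    }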
diff --git a/noao/imred/ccdred/src/generic/icsigma.x b/noao/imred/ccdred/src/generic/icsigma.x
new file mode 100644
index 00000000..bc0d9788
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icsigma.x
@@ -0,0 +1,205 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
+
+procedure ic_sigmas (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+real a, sum
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mems[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mems[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Mems[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mems[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 -1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mems[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mems[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Mems[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mems[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
+
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
+
+procedure ic_sigmar (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+real a, sum
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Memr[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Memr[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Memr[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Memr[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 -1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Memr[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Memr[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Memr[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Memr[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
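Both sigma procedures apply a finite-population correction, n/(n-1) in the weighted case and 1/(n-1) in the unweighted case, to the scatter about the combined value, and substitute the blank value when no pixels survive. A small C sketch of the weighted estimate for one output pixel is given below; it divides by the weight sum as the D_MIX branch does, which reduces to the D_ALL form when the weights of a full stack sum to one (an assumption about how the weights are normalized elsewhere).

    #include <math.h>

    /* Weighted sigma about the combined value `a` for one output pixel,
     * in the spirit of ic_sigmar; names are illustrative. */
    static float weighted_sigma(const float *x, const float *wt, int n, float a)
    {
        float sum = 0.0f, sumwt = 0.0f;

        if (n < 2)
            return 0.0f;
        for (int j = 0; j < n; j++) {
            sum   += wt[j] * (x[j] - a) * (x[j] - a);
            sumwt += wt[j];
        }
        if (sumwt <= 0.0f)
            return 0.0f;
        /* Finite-population correction n/(n-1) applied to the weighted
         * scatter; dividing by sumwt covers the case where only part of
         * the stack contributes at this pixel (the D_MIX branch). */
        return sqrtf(sum / sumwt * (float)n / (float)(n - 1));
    }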
diff --git a/noao/imred/ccdred/src/generic/icsort.x b/noao/imred/ccdred/src/generic/icsort.x
new file mode 100644
index 00000000..a39b68e2
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icsort.x
@@ -0,0 +1,550 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+define LOGPTR 32 # log2(maxpts) (4e9)
+
+
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
+
+procedure ic_sorts (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+short b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+short pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Mems[a[i]+l]
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ do i = 1, npix
+ b[i] = Mems[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Mems[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second set of integer
+# vectors is sorted in parallel.
+
+procedure ic_2sorts (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+short b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+short pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Mems[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Mems[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
+
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
+
+procedure ic_sortr (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+real b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+real pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Memr[a[i]+l]
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ do i = 1, npix
+ b[i] = Memr[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Memr[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second set of integer
+# vectors is sorted in parallel.
+
+procedure ic_2sortr (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+real b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+real pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Memr[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ for (i=i+1; b[i] < pivot; i=i+1)
+ ;
+ for (j=j-1; j > i; j=j-1)
+ if (b[j] <= pivot)
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Memr[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
diff --git a/noao/imred/ccdred/src/generic/icstat.x b/noao/imred/ccdred/src/generic/icstat.x
new file mode 100644
index 00000000..41512ccb
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/icstat.x
@@ -0,0 +1,444 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+define NMAX 10000 # Maximum number of pixels to sample
+
+
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
+
+procedure ic_stats (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnls()
+short ic_modes()
+real asums()
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_SHORT)
+ dp = data
+ while (imgnls (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Mems[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mems[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Mems[dp] = Mems[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Mems[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mems[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Mems[dp] = Mems[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrts (Mems[data], Mems[data], n)
+ mode = ic_modes (Mems[data], n)
+ median = Mems[data+n/2-1]
+ }
+ if (domean)
+ mean = asums (Mems[data], n) / n
+
+ call sfree (sp)
+end
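
A note on the sampling loop above: the same step is applied along every axis and is increased from 1 until the total number of sampled pixels drops below NMAX or the step reaches 10, with the section end snapped onto the sampling grid each time.  The C sketch below shows just that step selection; the 512x512 section limits are invented for illustration.

    #include <stdio.h>

    #define NMAX 10000                             /* mirrors the define above */

    int main(void)
    {
        long lo[2] = {1, 1}, hi[2] = {512, 512};   /* hypothetical 512x512 section */
        long v1[2], v2[2], dv[2], n = 0;

        for (int step = 1; step <= 10; step++) {
            n = 1;
            for (int i = 0; i < 2; i++) {
                v1[i] = lo[i];
                v2[i] = hi[i];
                dv[i] = step;
                long nv = (v2[i] - v1[i]) / step + 1;   /* samples along axis i   */
                if (nv < 1)
                    nv = 1;
                v2[i] = v1[i] + (nv - 1) * step;        /* snap end onto the grid */
                n *= nv;
            }
            if (n < NMAX)
                break;
        }
        printf("step = %ld, samples = %ld\n", dv[0], n);
        return 0;
    }

For a large enough section the loop can end at a step of 10 with the count still above NMAX, which is why the sample buffer is allocated with the final n rather than with NMAX.
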
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+short procedure ic_modes (a, n)
+
+short a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+short mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+ zstep = max (1., zstep)
+ zbin = max (1., zbin)
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
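
The mode estimate above is compact but a little dense, so here is a C sketch of the same sliding-bin search, illustrative only: the constants mirror the defines above, the fuzzy fp_equalr comparison is replaced by a plain equality test, and the clamping of the step and bin to at least 1 (needed for the short integer case) is omitted.

    #include <stdio.h>

    #define ZRANGE 0.8
    #define ZSTEP  0.01
    #define ZBIN   0.1

    static double mode_of_sorted(const double *a, int n)
    {
        if (n < 10)                                /* too few points: median */
            return a[n / 2];

        int i = (int)(n * (1.0 - ZRANGE) / 2.0);   /* central ZRANGE window  */
        int j = (int)(n * (1.0 + ZRANGE) / 2.0);
        double z1 = a[i], z2 = a[j];
        if (z1 == z2)                              /* IRAF uses a fuzzy test */
            return z1;

        double step = ZSTEP * (z2 - z1), bin = ZBIN * (z2 - z1);
        double mode = a[(i + j) / 2];
        int k = i, nmax = 0;

        for (double lo = z1; k < j; lo += step) {
            double hi = lo + bin;
            while (i < j && a[i] < lo)             /* advance lower bin edge */
                i++;
            while (k < j && a[k] < hi)             /* advance upper bin edge */
                k++;
            if (k - i > nmax) {                    /* most populated bin yet */
                nmax = k - i;
                mode = a[(i + k) / 2];
            }
        }
        return mode;
    }

    int main(void)
    {
        double a[] = {1.0, 2.0, 2.1, 2.2, 2.3, 2.35, 2.4, 2.5, 3.0, 4.0, 5.0, 9.0};
        printf("mode estimate = %g\n", mode_of_sorted(a, 12));
        return 0;
    }

Because the bin step is much smaller than the bin width, successive bins overlap, which makes the peak location insensitive to exactly where the bin boundaries fall.
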
+
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
+
+procedure ic_statr (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnlr()
+real ic_moder()
+real asumr()
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_REAL)
+ dp = data
+ while (imgnlr (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Memr[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Memr[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Memr[dp] = Memr[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Memr[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Memr[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Memr[dp] = Memr[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrtr (Memr[data], Memr[data], n)
+ mode = ic_moder (Memr[data], n)
+ median = Memr[data+n/2-1]
+ }
+ if (domean)
+ mean = asumr (Memr[data], n) / n
+
+ call sfree (sp)
+end
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+real procedure ic_moder (a, n)
+
+real a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+real mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
+
diff --git a/noao/imred/ccdred/src/generic/mkpkg b/noao/imred/ccdred/src/generic/mkpkg
new file mode 100644
index 00000000..3d841680
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/mkpkg
@@ -0,0 +1,11 @@
+# Make CCDRED Package.
+
+$checkout libpkg.a ../..
+$update libpkg.a
+$checkin libpkg.a ../..
+$exit
+
+libpkg.a:
+ cor.x ccdred.h
+ proc.x ccdred.h <imhdr.h>
+ ;
diff --git a/noao/imred/ccdred/src/generic/proc.x b/noao/imred/ccdred/src/generic/proc.x
new file mode 100644
index 00000000..242da9c9
--- /dev/null
+++ b/noao/imred/ccdred/src/generic/proc.x
@@ -0,0 +1,735 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+.help proc Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+proc -- Process CCD images
+
+These are the main CCD reduction procedures. There is one for each
+readout axis (lines or columns) and one for short and real image data.
+They apply corrections for bad pixels, overscan levels, zero levels,
+dark counts, flat field response, illumination response, and fringe
+effects. The image is also trimmed if it was mapped with an image
+section. The mean value for the output image is computed when the flat
+field or illumination image is processed to form the scale factor for
+these calibrations in order to avoid reading through these images a
+second time.
+
+The processing information and parameters are specified in the CCD
+structure. The processing operations to be performed are specified by
+the correction array CORS in the ccd structure. There is one array
+element for each operation with indices defined symbolically by macro
+definitions (see ccdred.h); i.e. FLATCOR. The value of the array
+element is an integer bit field in which the bit set is the same as the
+array index; i.e element 3 will have the third bit set for an operation
+with array value 2**(3-1)=4. If an operation is not to be performed
+the bit is not set and the array element has the numeric value zero.
+Note that the addition of several correction elements gives a unique
+bit field describing a combination of operations. For efficiency the
+most common combinations are implemented as separate units.
+
+The CCD structure also contains the correction or calibration data
+consisting of pointers to data, IMIO pointers for the calibration
+images, and scale factors.
+
+The processing is performed line-by-line.  The procedure XT_FPS is
+called to get an input line in which bad pixels have been replaced by
+interpolation; the trimmed region is then copied to the output.  The
+output line and lines from the various calibration
+images are read. The image vectors as well as the overscan vector and
+the scale factors are passed to the procedure COR (which also
+dereferences the pointer data into simple arrays and variables). That
+procedure does the actual corrections apart from bad pixel
+corrections.
+
+The final optional step is to add each corrected output line to form a
+mean. This adds efficiency since the operation is done only if desired
+and the output image data is already in memory so there is no I/O
+penalty.
+
+SEE ALSO
+ ccdred.h, cor, fixpix, setfixpix, setoverscan, settrim,
+ setzero, setdark, setflat, setillum, setfringe
+.endhelp ----------------------------------------------------------------------
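
To make the CORS encoding described above concrete: each enabled operation i stores its own bit value 2**(i-1) in its array element, so the sum (equivalently the bitwise OR) of all elements is a unique code for the combination of enabled operations.  A minimal C sketch follows; the operation indices used are hypothetical stand-ins, not the actual values defined in ccdred.h.

    #include <stdio.h>

    /* Hypothetical operation indices; the real values are defined in ccdred.h. */
    enum { OVERSCAN = 1, ZEROCOR = 2, DARKCOR = 3, FLATCOR = 4, NOPS = 8 };

    int main(void)
    {
        int cors[NOPS + 1];                         /* one element per operation */
        for (int i = 0; i <= NOPS; i++)
            cors[i] = 0;

        /* Enable an operation by storing its own bit: element i holds 2**(i-1). */
        cors[OVERSCAN] = 1 << (OVERSCAN - 1);
        cors[FLATCOR]  = 1 << (FLATCOR - 1);

        /* The sum of the enabled elements is a unique code for the combination. */
        int combo = 0;
        for (int i = 1; i <= NOPS; i++)
            combo += cors[i];

        printf("combination code = %d\n", combo);   /* 1 + 8 = 9 */
        return 0;
    }

A dispatcher can then switch on the combined code to select one of the specialized correction units mentioned above.
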
+
+
+
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1s (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+int overscan_type, overscan_c1, noverscan
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+short minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asums()
+real find_overscans()
+pointer imgl2s(), impl2s(), ccd_gls(), xt_fpss()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ if (CORS(ccd, OVERSCAN) == 0)
+ overscan_type = 0
+ else {
+ overscan_type = OVERSCAN_TYPE(ccd)
+ overscan_vec = OVERSCAN_VEC(ccd)
+ overscan_c1 = BIAS_C1(ccd) - 1
+ noverscan = BIAS_C2(ccd) - overscan_c1
+ }
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_gls (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_gls (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+	    darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_gls (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure XT_FPS replaces
+ # bad pixels by interpolation. The trimmed region is copied to the
+ # output. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images. Call COR1
+ # to do the actual pixel corrections. Finally, add the output pixels
+ # to a sum for computing the mean. We must copy data outside of the
+ # output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2s (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fpss (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amovs (Mems[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Mems[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_type != 0) {
+ if (overscan_type < OVERSCAN_FIT)
+ overscan = find_overscans (Mems[inbuf+overscan_c1],
+ noverscan, overscan_type)
+ else
+ overscan = Memr[overscan_vec+line-1]
+ }
+ if (zeroim != NULL)
+ zerobuf = ccd_gls (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gls (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gls (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gls (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gls (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor1s (CORS(ccd,1), Mems[outbuf],
+ overscan, Mems[zerobuf], Mems[darkbuf],
+ Mems[flatbuf], Mems[illumbuf], Mems[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxks (Mems[outbuf], minrep, Mems[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asums (Mems[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2s (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+short minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asums()
+pointer imgl2s(), impl2s(), imgs2s(), ccd_gls(), xt_fpss()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2s (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2s (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2s (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+	# For each line read lines from the input.  Procedure XT_FPS replaces
+	# bad pixels by interpolation.  The trimmed region is copied to the
+	# output.  Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2s (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fpss (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amovs (Mems[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Mems[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_gls (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gls (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gls (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gls (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gls (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor2s (line, CORS(ccd,1), Mems[outbuf],
+ Memr[overscan_vec], Mems[zerobuf], Mems[darkbuf],
+ Mems[flatbuf], Mems[illumbuf], Mems[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxks (Mems[outbuf], minrep, Mems[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asums (Mems[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# FIND_OVERSCAN -- Find the overscan value for a line.
+# No check is made on the number of pixels.
+# The median is the (npix+1)/2 element.
+
+real procedure find_overscans (data, npix, type)
+
+short data[npix] #I Overscan data
+int npix #I Number of overscan points
+int type #I Type of overscan calculation
+
+int i
+real overscan, d, dmin, dmax
+short asoks()
+
+begin
+ if (type == OVERSCAN_MINMAX) {
+ overscan = data[1]
+ dmin = data[1]
+ dmax = data[1]
+ do i = 2, npix {
+ d = data[i]
+ overscan = overscan + d
+ if (d < dmin)
+ dmin = d
+ else if (d > dmax)
+ dmax = d
+ }
+ overscan = (overscan - dmin - dmax) / (npix - 2)
+ } else if (type == OVERSCAN_MEDIAN)
+ overscan = asoks (data, npix, (npix + 1) / 2)
+ else {
+ overscan = data[1]
+ do i = 2, npix
+ overscan = overscan + data[i]
+ overscan = overscan / npix
+ }
+
+ return (overscan)
+end
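
For reference, the three overscan estimates computed above are a straight mean, a mean with the single lowest and highest values rejected, and the (npix+1)/2 order statistic used as the median (obtained with the asok selection routine rather than a full sort).  A small C sketch of the same calculations, illustrative only and with invented data:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp(const void *a, const void *b)
    {
        double x = *(const double *)a, y = *(const double *)b;
        return (x > y) - (x < y);
    }

    static double overscan_minmax(const double *d, int n)
    {
        double sum = d[0], lo = d[0], hi = d[0];
        for (int i = 1; i < n; i++) {
            sum += d[i];
            if (d[i] < lo) lo = d[i];
            if (d[i] > hi) hi = d[i];
        }
        return (sum - lo - hi) / (n - 2);      /* reject one low, one high */
    }

    static double overscan_median(double *d, int n)
    {
        qsort(d, n, sizeof *d, cmp);           /* IRAF uses a selection routine */
        return d[(n + 1) / 2 - 1];             /* (npix+1)/2 element, 1-indexed */
    }

    int main(void)
    {
        double d[] = {101, 99, 100, 250, 98, 100, 97};
        printf("minmax mean = %g\n", overscan_minmax(d, 7));
        printf("median      = %g\n", overscan_median(d, 7));
        return 0;
    }
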
+
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1r (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+int overscan_type, overscan_c1, noverscan
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+real minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asumr()
+real find_overscanr()
+pointer imgl2r(), impl2r(), ccd_glr(), xt_fpsr()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ if (CORS(ccd, OVERSCAN) == 0)
+ overscan_type = 0
+ else {
+ overscan_type = OVERSCAN_TYPE(ccd)
+ overscan_vec = OVERSCAN_VEC(ccd)
+ overscan_c1 = BIAS_C1(ccd) - 1
+ noverscan = BIAS_C2(ccd) - overscan_c1
+ }
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_glr (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_glr (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+	    darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_glr (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure XT_FPS replaces
+ # bad pixels by interpolation. The trimmed region is copied to the
+ # output. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images. Call COR1
+ # to do the actual pixel corrections. Finally, add the output pixels
+ # to a sum for computing the mean. We must copy data outside of the
+ # output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2r (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fpsr (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amovr (Memr[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Memr[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_type != 0) {
+ if (overscan_type < OVERSCAN_FIT)
+ overscan = find_overscanr (Memr[inbuf+overscan_c1],
+ noverscan, overscan_type)
+ else
+ overscan = Memr[overscan_vec+line-1]
+ }
+ if (zeroim != NULL)
+ zerobuf = ccd_glr (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_glr (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_glr (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_glr (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_glr (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor1r (CORS(ccd,1), Memr[outbuf],
+ overscan, Memr[zerobuf], Memr[darkbuf],
+ Memr[flatbuf], Memr[illumbuf], Memr[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxkr (Memr[outbuf], minrep, Memr[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asumr (Memr[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2r (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+real minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asumr()
+pointer imgl2r(), impl2r(), imgs2r(), ccd_glr(), xt_fpsr()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2r (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2r (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2r (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+	# For each line read lines from the input.  Procedure XT_FPS replaces
+	# bad pixels by interpolation.  The trimmed region is copied to the
+	# output.  Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2r (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fpsr (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amovr (Memr[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Memr[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_glr (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_glr (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_glr (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_glr (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_glr (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor2r (line, CORS(ccd,1), Memr[outbuf],
+ Memr[overscan_vec], Memr[zerobuf], Memr[darkbuf],
+ Memr[flatbuf], Memr[illumbuf], Memr[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxkr (Memr[outbuf], minrep, Memr[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asumr (Memr[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# FIND_OVERSCAN -- Find the overscan value for a line.
+# No check is made on the number of pixels.
+# The median is the (npix+1)/2 element.
+
+real procedure find_overscanr (data, npix, type)
+
+real data[npix] #I Overscan data
+int npix #I Number of overscan points
+int type #I Type of overscan calculation
+
+int i
+real overscan, d, dmin, dmax
+real asokr()
+
+begin
+ if (type == OVERSCAN_MINMAX) {
+ overscan = data[1]
+ dmin = data[1]
+ dmax = data[1]
+ do i = 2, npix {
+ d = data[i]
+ overscan = overscan + d
+ if (d < dmin)
+ dmin = d
+ else if (d > dmax)
+ dmax = d
+ }
+ overscan = (overscan - dmin - dmax) / (npix - 2)
+ } else if (type == OVERSCAN_MEDIAN)
+ overscan = asokr (data, npix, (npix + 1) / 2)
+ else {
+ overscan = data[1]
+ do i = 2, npix
+ overscan = overscan + data[i]
+ overscan = overscan / npix
+ }
+
+ return (overscan)
+end
diff --git a/noao/imred/ccdred/src/hdrmap.com b/noao/imred/ccdred/src/hdrmap.com
new file mode 100644
index 00000000..5aa74185
--- /dev/null
+++ b/noao/imred/ccdred/src/hdrmap.com
@@ -0,0 +1,4 @@
+# Common for HDRMAP package.
+
+pointer stp # Symbol table pointer
+common /hdmcom/ stp
diff --git a/noao/imred/ccdred/src/hdrmap.x b/noao/imred/ccdred/src/hdrmap.x
new file mode 100644
index 00000000..ebcb253e
--- /dev/null
+++ b/noao/imred/ccdred/src/hdrmap.x
@@ -0,0 +1,544 @@
+include <error.h>
+include <syserr.h>
+
+.help hdrmap
+.nf-----------------------------------------------------------------------------
+HDRMAP -- Map translation between task parameters and image header parameters.
+
+In order for tasks to be partially independent of the image header
+parameter names used by different instruments and observatories a
+translation is made between task parameters and image header
+parameters. This translation is given in a file consisting of the task
+parameter name, the image header parameter name, and an optional
+default value. This file is turned into a symbol table. If the
+translation file is not found the symbol table is left empty and the
+package then uses the task parameter names directly.  Also if there is no
+translation given in the file for a particular parameter it is passed
+on directly. If a parameter is not in the image header then the symbol
+table default value, if given, is returned. This package is layered on
+the IMIO header package.
+
+ hdmopen (fname)
+ hdmclose ()
+ hdmwrite (fname, mode)
+ hdmname (parameter, str, max_char)
+ hdmgdef (parameter, str, max_char)
+ hdmpdef (parameter, str, max_char)
+ y/n = hdmaccf (im, parameter)
+ hdmgstr (im, parameter, str, max_char)
+ ival = hdmgeti (im, parameter)
+ rval = hdmgetr (im, parameter)
+ hdmpstr (im, parameter, str)
+ hdmputi (im, parameter, value)
+ hdmputr (im, parameter, value)
+ hdmgstp (stp)
+ hdmpstp (stp)
+ hdmdelf (im, parameter)
+	 	       hdmparam (name, parameter, max_char)
+
+hdmopen -- Open the translation file and map it into a symbol table pointer.
+hdmclose -- Close the symbol table pointer.
+hdmwrite -- Write out translation file.
+hdmname -- Return the image header parameter name.
+hdmpname -- Put the image header parameter name.
+hdmgdef -- Get the default value as a string (null if none).
+hdmpdef -- Put the default value as a string.
+hdmaccf -- Return whether the image header parameter exists (regardless of
+ whether there is a default value).
+hdmgstr -- Get a string valued parameter. Return default value if not in the
+ image header. Return null string if no default or image value.
+hdmgeti -- Get an integer valued parameter. Return default value if not in
+ the image header and error condition if no default or image value.
+hdmgetr -- Get a real valued parameter. Return default value if not in
+ the image header or error condition if no default or image value.
+hdmpstr -- Put a string valued parameter in the image header.
+hdmputi -- Put an integer valued parameter in the image header.
+hdmputr -- Put a real valued parameter in the image header.
+hdmgstp -- Get the symbol table pointer to save it while another map is used.
+hdmpstp -- Put the symbol table pointer to restore a map.
+hdmdelf -- Delete a field.
+hdmparam -- Return the parameter name corresponding to an image header name.
+.endhelp -----------------------------------------------------------------------
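
To illustrate the translation described above (this is not the IRAF implementation, which builds a symtab symbol table): each line of the translation file maps a task parameter name to an image header keyword plus an optional default, and a parameter with no entry is passed through unchanged.  The table entries in this C sketch are invented examples.

    #include <stdio.h>
    #include <string.h>

    struct entry { const char *param, *keyword, *dflt; };

    /* Invented translation entries: task parameter, header keyword, default. */
    static const struct entry table[] = {
        { "exptime",  "ITIME",   ""   },
        { "imagetyp", "DATATYP", ""   },
        { "subset",   "FILTERS", ""   },
        { "rdnoise",  "RDNOISE", "0." },
    };

    /* Return the header keyword for a task parameter; a parameter with no
     * translation is passed through unchanged, as in hdmname above. */
    static const char *hdr_name(const char *param)
    {
        for (size_t i = 0; i < sizeof table / sizeof table[0]; i++)
            if (strcmp(table[i].param, param) == 0)
                return table[i].keyword;
        return param;
    }

    int main(void)
    {
        printf("exptime -> %s\n", hdr_name("exptime"));   /* ITIME          */
        printf("gain    -> %s\n", hdr_name("gain"));      /* passed through */
        return 0;
    }

The dflt column plays the same role as DEFAULT(sym) above: it is consulted only when the keyword turns out to be missing from the image header.
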
+
+# Symbol table definitions.
+define LEN_INDEX 32 # Length of symtab index
+define LEN_STAB 1024 # Length of symtab string buffer
+define SZ_SBUF 128 # Size of symtab string buffer
+
+define SZ_NAME 79 # Size of translation symbol name
+define SZ_DEFAULT 79 # Size of default string
+define SYMLEN 80 # Length of symbol structure
+
+# Symbol table structure
+define NAME Memc[P2C($1)] # Translation name for symbol
+define DEFAULT Memc[P2C($1+40)] # Default value of parameter
+
+
+# HDMOPEN -- Open the translation file and map it into a symbol table pointer.
+
+procedure hdmopen (fname)
+
+char fname[ARB] # Image header map file
+
+int fd, open(), fscan(), nscan(), errcode()
+pointer sp, parameter, sym, stopen(), stenter()
+include "hdrmap.com"
+
+begin
+ # Create an empty symbol table.
+ stp = stopen (fname, LEN_INDEX, LEN_STAB, SZ_SBUF)
+
+ # Return if file not found.
+ iferr (fd = open (fname, READ_ONLY, TEXT_FILE)) {
+ if (errcode () != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ call smark (sp)
+ call salloc (parameter, SZ_NAME, TY_CHAR)
+
+	# Read the file and enter the translations into the symbol table.
+ while (fscan(fd) != EOF) {
+ call gargwrd (Memc[parameter], SZ_NAME)
+ if ((nscan() == 0) || (Memc[parameter] == '#'))
+ next
+ sym = stenter (stp, Memc[parameter], SYMLEN)
+ call gargwrd (NAME(sym), SZ_NAME)
+ call gargwrd (DEFAULT(sym), SZ_DEFAULT)
+ }
+
+ call close (fd)
+ call sfree (sp)
+end
+
+
+# HDMCLOSE -- Close the symbol table pointer.
+
+procedure hdmclose ()
+
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ call stclose (stp)
+end
+
+
+# HDMWRITE -- Write out translation file.
+
+procedure hdmwrite (fname, mode)
+
+char fname[ARB] # Image header map file
+int mode # Access mode (APPEND, NEW_FILE)
+
+int fd, open(), stridxs()
+pointer sym, sthead(), stnext(), stname()
+errchk open
+include "hdrmap.com"
+
+begin
+ # If there is no symbol table do nothing.
+ if (stp == NULL)
+ return
+
+ fd = open (fname, mode, TEXT_FILE)
+
+ sym = sthead (stp)
+ for (sym = sthead (stp); sym != NULL; sym = stnext (stp, sym)) {
+ if (stridxs (" ", Memc[stname (stp, sym)]) > 0)
+ call fprintf (fd, "'%s'%30t")
+ else
+ call fprintf (fd, "%s%30t")
+ call pargstr (Memc[stname (stp, sym)])
+ if (stridxs (" ", NAME(sym)) > 0)
+ call fprintf (fd, " '%s'%10t")
+ else
+ call fprintf (fd, " %s%10t")
+ call pargstr (NAME(sym))
+ if (DEFAULT(sym) != EOS) {
+ if (stridxs (" ", DEFAULT(sym)) > 0)
+ call fprintf (fd, " '%s'")
+ else
+ call fprintf (fd, " %s")
+ call pargstr (DEFAULT(sym))
+ }
+ call fprintf (fd, "\n")
+ }
+
+ call close (fd)
+end
+
+
+# HDMNAME -- Return the image header parameter name
+
+procedure hdmname (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing mapped parameter name
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (NAME(sym), str, max_char)
+ else
+ call strcpy (parameter, str, max_char)
+end
+
+
+# HDMPNAME -- Put the image header parameter name
+
+procedure hdmpname (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing mapped parameter name
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ DEFAULT(sym) = EOS
+ }
+
+ call strcpy (str, NAME(sym), SZ_NAME)
+end
+
+
+# HDMGDEF -- Get the default value as a string (null string if none).
+
+procedure hdmgdef (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing default value
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (DEFAULT(sym), str, max_char)
+ else
+ str[1] = EOS
+end
+
+
+# HDMPDEF -- Put the default value as a string.
+
+procedure hdmpdef (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing default value
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ call strcpy (parameter, NAME(sym), SZ_NAME)
+ }
+
+ call strcpy (str, DEFAULT(sym), SZ_DEFAULT)
+end
+
+
+# HDMACCF -- Return whether the image header parameter exists (regardless of
+# whether there is a default value).
+
+int procedure hdmaccf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int imaccf()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ return (imaccf (im, NAME(sym)))
+ else
+ return (imaccf (im, parameter))
+end
+
+
+# HDMGSTR -- Get a string valued parameter. Return default value if not in
+# the image header. Return null string if no default or image value.
+
+procedure hdmgstr (im, parameter, str, max_char)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[max_char] # String value to return
+int max_char # Maximum characters in returned string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (call imgstr (im, NAME(sym), str, max_char))
+ call strcpy (DEFAULT(sym), str, max_char)
+ } else {
+ iferr (call imgstr (im, parameter, str, max_char))
+ str[1] = EOS
+ }
+end
+
+
+# HDMGETR -- Get a real valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+real procedure hdmgetr (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctor()
+real value, imgetr()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgetr (im, NAME(sym))) {
+ ip = 1
+ if (ctor (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETR: No value found")
+ }
+ } else
+ value = imgetr (im, parameter)
+
+ return (value)
+end
+
+
+# HDMGETI -- Get an integer valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+int procedure hdmgeti (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctoi()
+int value, imgeti()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgeti (im, NAME(sym))) {
+ ip = 1
+ if (ctoi (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETI: No value found")
+ }
+ } else
+ value = imgeti (im, parameter)
+
+ return (value)
+end
+
+
+# HDMPSTR -- Put a string valued parameter in the image header.
+
+procedure hdmpstr (im, parameter, str)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[ARB] # String value
+
+int imaccf(), imgftype()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ if (imaccf (im, NAME(sym)) == YES)
+ if (imgftype (im, NAME(sym)) != TY_CHAR)
+ call imdelf (im, NAME(sym))
+ call imastr (im, NAME(sym), str)
+ } else {
+ if (imaccf (im, parameter) == YES)
+ if (imgftype (im, parameter) != TY_CHAR)
+ call imdelf (im, parameter)
+ call imastr (im, parameter, str)
+ }
+end
+
+
+# HDMPUTI -- Put an integer valued parameter in the image header.
+
+procedure hdmputi (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+int value # Integer value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddi (im, NAME(sym), value)
+ else
+ call imaddi (im, parameter, value)
+end
+
+
+# HDMPUTR -- Put a real valued parameter in the image header.
+
+procedure hdmputr (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+real value # Real value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddr (im, NAME(sym), value)
+ else
+ call imaddr (im, parameter, value)
+end
+
+
+# HDMGSTP -- Get the symbol table pointer to save a translation map.
+# The symbol table is restored with HDMPSTP.
+
+procedure hdmgstp (ptr)
+
+pointer ptr # Symbol table pointer to return
+
+include "hdrmap.com"
+
+begin
+ ptr = stp
+end
+
+
+# HDMPSTP -- Put a symbol table pointer to restore a header map.
+# The symbol table is obtained with HDMGSTP.
+
+procedure hdmpstp (ptr)
+
+pointer ptr # Symbol table pointer to restore
+
+include "hdrmap.com"
+
+begin
+ stp = ptr
+end
+
+
+# HDMDELF -- Delete a field. It is an error if the field does not exist.
+
+procedure hdmdelf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imdelf (im, NAME(sym))
+ else
+ call imdelf (im, parameter)
+end
+
+
+# HDMPARAM -- Get parameter given the image header name.
+
+procedure hdmparam (name, parameter, max_char)
+
+char name[ARB] # Image header name
+char parameter[max_char] # Parameter
+int max_char # Maximum size of parameter string
+
+bool streq()
+pointer sym, sthead(), stname(), stnext()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = sthead (stp)
+ else
+ sym = NULL
+
+ while (sym != NULL) {
+ if (streq (NAME(sym), name)) {
+ call strcpy (Memc[stname(stp, sym)], parameter, max_char)
+ return
+ }
+ sym = stnext (stp, sym)
+ }
+ call strcpy (name, parameter, max_char)
+end
diff --git a/noao/imred/ccdred/src/icaclip.gx b/noao/imred/ccdred/src/icaclip.gx
new file mode 100644
index 00000000..bb592542
--- /dev/null
+++ b/noao/imred/ccdred/src/icaclip.gx
@@ -0,0 +1,573 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number of images for this algorithm
+
+$for (sr)
+# IC_AAVSIGCLIP -- Reject pixels using an average sigma about the average
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_aavsigclip$t (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, s1, r, one
+data one /1.0/
+$else
+PIXEL d1, low, high, sum, a, s, s1, r, one
+data one /1$f/
+$endif
+pointer sp, sums, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (sums, npts, TY_REAL)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Since the unweighted average is computed here possibly skip combining
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Compute the unweighted average with the high and low rejected and
+ # the poisson scaled average sigma. There must be at least three
+ # pixels at each point to define the average and contributions to
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ nin = n[1]
+ s = 0.
+ n2 = 0
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3)
+ next
+
+ # Unweighted average with the high and low rejected
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ s1 = max (one, (a + zeros[l]) / scales[l])
+ s = s + (d1 - a) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, a)
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - a) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the average and sum for later.
+ average[i] = a
+ Memr[sums+k] = sum
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+
+ # Reject pixels and compute the final average (if needed).
+ # There must be at least three pixels at each point for rejection.
+ # Iteratively scale the mean sigma and reject pixels
+ # Compact the data and keep track of the image IDs if needed.
+
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (2, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ a = average[i]
+ sum = Memr[sums+k]
+
+ repeat {
+ n2 = n1
+ if (s > 0.) {
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ s1 = s * sqrt (max (one, (a+zeros[l]) / scales[l]))
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ s1 = s * sqrt (max (one, a))
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s1
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
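
Stripped of the per-image scale factors, the ID bookkeeping, and the nkeep/maxkeep logic, the rejection above boils down to: estimate a single sigma for the whole line, normalized by the square root of the signal so that it scales like a Poisson error, then iteratively reject pixels whose residual from the (recomputed) average falls outside [-lsigma, +hsigma].  A minimal C sketch with illustrative clipping limits:

    #include <math.h>
    #include <stdio.h>

    /* Reject deviant pixels from d[0..n-1] given a Poisson-normalized sigma s;
     * returns the number of surviving pixels (the survivors occupy d[0..n1-1]). */
    static int avsigclip(double *d, int n, double s, double lsigma, double hsigma)
    {
        int n1 = n, changed = 1;
        double sum = 0.0, a;

        for (int j = 0; j < n1; j++)
            sum += d[j];
        a = sum / n1;

        while (changed && n1 > 2) {
            changed = 0;
            double s1 = s * sqrt(a > 1.0 ? a : 1.0);  /* expected sigma at level a */
            for (int j = 0; j < n1; j++) {
                double r = (d[j] - a) / s1;
                if (r < -lsigma || r > hsigma) {      /* reject: swap to the end   */
                    sum -= d[j];
                    double t = d[j]; d[j] = d[n1-1]; d[n1-1] = t;
                    n1--;
                    j--;
                    changed = 1;
                }
            }
            if (n1 > 1)
                a = sum / n1;                         /* recompute the average     */
        }
        return n1;
    }

    int main(void)
    {
        double d[] = {100, 101, 99, 102, 98, 100, 300, 101};  /* 300: cosmic ray */
        int n1 = avsigclip(d, 8, 1.0, 3.0, 3.0);
        printf("kept %d of 8 pixels\n", n1);
        return 0;
    }

The real routine also adds pixels back when too many have been rejected, so that at least maxkeep values survive at each point.
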
+
+
+# IC_MAVSIGCLIP -- Reject pixels using an average sigma about the median
+# The average sigma is normalized by the expected poisson sigma.
+
+procedure ic_mavsigclip$t (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med, low, high, r, s, s1, one
+data one /1.0/
+$else
+PIXEL med, low, high, r, s, s1, one
+data one /1$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute the poisson scaled average sigma about the median.
+ # There must be at least three pixels at each point to define
+ # the mean sigma. Corrections for differences in the image
+ # scale factors are selected by the doscale1 flag.
+
+ s = 0.
+ n2 = 0
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 < 3) {
+ if (n1 == 0)
+ median[i] = blank
+ else if (n1 == 1)
+ median[i] = Mem$t[d[1]+k]
+ else {
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ median[i] = (low + high) / 2.
+ }
+ next
+ }
+
+ # Median
+ n3 = 1 + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+
+ # Poisson scaled sigma accumulation
+ if (doscale1) {
+ do j = 1, n1 {
+ l = Memi[m[j]+k]
+ s1 = max (one, (med + zeros[l]) / scales[l])
+ s = s + (Mem$t[d[j]+k] - med) ** 2 / s1
+ }
+ } else {
+ s1 = max (one, med)
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - med) ** 2 / s1
+ }
+ n2 = n2 + n1
+
+ # Save the median for later.
+ median[i] = med
+ }
+
+ # Here is the final sigma.
+ if (n2 > 1)
+ s = sqrt (s / (n2 - 1))
+ else
+ return
+
+ # Compute individual sigmas and iteratively clip.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 < max (3, maxkeep+1))
+ next
+ nl = 1
+ nh = n1
+ med = median[i]
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 >= max (MINCLIP, maxkeep+1) && s > 0.) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (med - Mem$t[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s1 = s * sqrt (max (one, (med+zeros[l])/scales[l]))
+ r = (Mem$t[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ s1 = s * sqrt (max (one, med))
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / s1
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / s1
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many are rejected add some back in.
+ # Pixels with equal residuals are added together.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+
+ # Recompute median
+ if (n1 < n2) {
+ if (n1 > 0) {
+ n3 = nl + n1 / 2
+ if (mod (n1, 2) == 0) {
+ low = Mem$t[d[n3-1]+k]
+ high = Mem$t[d[n3]+k]
+ med = (low + high) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+ } else
+ med = blank
+ }
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
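
The ic_mavsigclip generic above clips each column against a single average sigma computed over the whole line, with every deviation from the column median scaled by the expected Poisson sigma, sqrt(max(1, median)). The sketch below is a minimal Python illustration of that clipping loop for one column, ignoring image scaling and the nkeep re-inclusion logic; the names avsigclip_column, s, lsigma and hsigma are illustrative, not part of the IRAF sources.

    from statistics import median

    def avsigclip_column(values, s, lsigma, hsigma):
        # 's' plays the role of the line-wide, Poisson-normalized sigma
        # accumulated above; deviations are compared to s*sqrt(max(1, med)).
        vals = sorted(values)
        while len(vals) >= 3:
            med = median(vals)
            sig = s * max(1.0, med) ** 0.5
            kept = [v for v in vals if -lsigma <= (v - med) / sig <= hsigma]
            if len(kept) == len(vals):
                break
            vals = kept
        return vals

    # One strong outlier is rejected; the remaining pixels survive.
    print(avsigclip_column([100, 102, 98, 101, 400], s=1.0, lsigma=3.0, hsigma=3.0))
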
diff --git a/noao/imred/ccdred/src/icaverage.gx b/noao/imred/ccdred/src/icaverage.gx
new file mode 100644
index 00000000..c145bb33
--- /dev/null
+++ b/noao/imred/ccdred/src/icaverage.gx
@@ -0,0 +1,93 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_AVERAGE -- Compute the average image line.
+# Options include a weighted average.
+
+procedure ic_average$t (d, m, n, wts, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average (returned)
+$else
+PIXEL average[npts] # Average (returned)
+$endif
+
+int i, j, k
+real sumwt, wt
+$if (datatype == sil)
+real sum
+$else
+PIXEL sum
+$endif
+
+include "../icombine.com"
+
+begin
+	# If no data has been excluded, do the average without checking the
+	# number of points, using the fact that the weights are normalized.
+	# If all the data has been excluded, set the average to the blank value.
+
+ if (dflag == D_ALL) {
+ if (dowts) {
+ do i = 1, npts {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mem$t[d[1]+k] * wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mem$t[d[j]+k] * wt
+ }
+ average[i] = sum
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ sum = Mem$t[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n[i]
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ average[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ wt = wts[Memi[m[1]+k]]
+ sum = Mem$t[d[1]+k] * wt
+ sumwt = wt
+ do j = 2, n[i] {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + Mem$t[d[j]+k] * wt
+ sumwt = sumwt + wt
+ }
+ average[i] = sum / sumwt
+ } else
+ average[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ if (n[i] > 0) {
+ k = i - 1
+ sum = Mem$t[d[1]+k]
+ do j = 2, n[i]
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n[i]
+ } else
+ average[i] = blank
+ }
+ }
+ }
+end
+$endfor
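
ic_average above separates the fully populated case, where the weights are already normalized and no per-pixel division is needed, from the mixed case, where the sum is divided by the accumulated weight or replaced by the blank value when every pixel was rejected. Here is a minimal Python sketch of the mixed case; the function name and blank default are chosen only for illustration.

    def combine_average(values, weights, blank=0.0):
        # Weighted average of the surviving pixels in one output column;
        # returns 'blank' when all pixels have been rejected (n == 0).
        if not values:
            return blank
        return sum(v * w for v, w in zip(values, weights)) / sum(weights)

    print(combine_average([10.0, 12.0, 11.0], [1.0, 2.0, 1.0]))   # 11.25
    print(combine_average([], []))                                # 0.0 (blank)
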
diff --git a/noao/imred/ccdred/src/iccclip.gx b/noao/imred/ccdred/src/iccclip.gx
new file mode 100644
index 00000000..69df984c
--- /dev/null
+++ b/noao/imred/ccdred/src/iccclip.gx
@@ -0,0 +1,471 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define	MINCLIP		2		# Minimum number of images for algorithm
+
+$for (sr)
+# IC_ACCDCLIP -- Reject pixels using CCD noise parameters about the average
+
+procedure ic_accdclip$t (d, m, n, scales, zeros, nm, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model parameters
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, r, zero
+data zero /0.0/
+$else
+PIXEL d1, low, high, sum, a, s, r, zero
+data zero /0$f/
+$endif
+pointer sp, resid, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are no pixels go on to the combining. Since the unweighted
+ # average is computed here possibly skip the combining later.
+
+ # There must be at least max (1, nkeep) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ } else if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # There must be at least two pixels for rejection. The initial
+ # average is the low/high rejected average except in the case of
+ # just two pixels. The rejections are iterated and the average
+ # is recomputed. Corrections for scaling may be performed.
+ # Depending on other flags the image IDs may also need to be adjusted.
+
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 <= max (MINCLIP-1, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ repeat {
+ if (n1 == 2) {
+ sum = Mem$t[d[1]+k]
+ sum = sum + Mem$t[d[2]+k]
+ a = sum / 2
+ } else {
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+ }
+ n2 = n1
+ if (doscale1) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+
+ l = Memi[mp1]
+ s = scales[l]
+ d1 = max (zero, s * (a + zeros[l]))
+ s = sqrt (nm[1,l] + d1/nm[2,l] + (d1*nm[3,l])**2) / s
+
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[n1] + k
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, a)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (j=1; j<=n1; j=j+1) {
+ if (keepids) {
+ l = Memi[m[j]+k]
+ s = max (zero, a)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs(r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+ }
+
+ n[i] = n1
+ if (!docombine)
+ if (n1 > 0)
+ average[i] = sum / n1
+ else
+ average[i] = blank
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MCCDCLIP -- Reject pixels using CCD noise parameters about the median
+
+procedure ic_mccdclip$t (d, m, n, scales, zeros, nm, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+real nm[3,nimages] # Noise model
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med, zero
+data zero /0.0/
+$else
+PIXEL med, zero
+data zero /0$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # There must be at least max (MINCLIP, nkeep+1) pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0) {
+ med = Mem$t[d[n3-1]+k]
+ med = (med + Mem$t[d[n3]+k]) / 2.
+ } else
+ med = Mem$t[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ for (; nl <= n2; nl = nl + 1) {
+ l = Memi[m[nl]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ l = Memi[m[nh]+k]
+ s = scales[l]
+ r = max (zero, s * (med + zeros[l]))
+ s = sqrt (nm[1,l] + r/nm[2,l] + (r*nm[3,l])**2) / s
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ } else {
+ if (!keepids) {
+ s = max (zero, med)
+ s = sqrt (nm[1,1] + s/nm[2,1] + (s*nm[3,1])**2)
+ }
+ for (; nl <= n2; nl = nl + 1) {
+ if (keepids) {
+ l = Memi[m[nl]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ if (keepids) {
+ l = Memi[m[nh]+k]
+ s = max (zero, med)
+ s = sqrt (nm[1,l] + s/nm[2,l] + (s*nm[3,l])**2)
+ }
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median is computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
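
Both ccdclip generics above evaluate the same CCD noise model: with nm holding (rdnoise/gain)**2, gain and snoise for each image (the setup appears in icombine.gx below), the predicted sigma at signal level d is sqrt(nm1 + d/nm2 + (d*nm3)**2). Here is a hedged Python rendering of that formula, with image scaling ignored and the parameter names chosen for readability only.

    def ccd_sigma(data, rdnoise, gain, snoise):
        # Predicted pixel sigma in data units (ADU) for the ccdclip/crreject
        # noise model: read noise, Poisson noise and sensitivity noise.
        d = max(0.0, data)
        return ((rdnoise / gain) ** 2    # read noise term
                + d / gain               # Poisson (shot) noise term
                + (snoise * d) ** 2      # sensitivity (flat-field) term
                ) ** 0.5

    # 1000 ADU, 5 e- read noise, gain 2 e-/ADU, 1% sensitivity noise -> ~24.6 ADU.
    print(round(ccd_sigma(1000.0, rdnoise=5.0, gain=2.0, snoise=0.01), 2))
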
diff --git a/noao/imred/ccdred/src/icgdata.gx b/noao/imred/ccdred/src/icgdata.gx
new file mode 100644
index 00000000..41cf5810
--- /dev/null
+++ b/noao/imred/ccdred/src/icgdata.gx
@@ -0,0 +1,233 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <mach.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_GDATA -- Get line of image and mask data and apply threshold and scaling.
+# Entirely empty lines are excluded. The data are compacted within the
+# input data buffers.  If required, the connection to the original
+# image index is kept in the returned m data pointers.
+
+procedure ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets, scales,
+ zeros, nimages, npts, v1, v2)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Empty mask flags
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+int nimages # Number of input images
+int	npts			# Number of output points per line
+long v1[ARB], v2[ARB] # Line vectors
+
+int i, j, k, l, ndim, nused
+real a, b
+pointer buf, dp, ip, mp, imgnl$t()
+
+include "../icombine.com"
+
+begin
+ # Get masks and return if there is no data
+ call ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+ if (dflag == D_NONE)
+ return
+
+ # Get data and fill data buffers. Correct for offsets if needed.
+ ndim = IM_NDIM(out[1])
+ do i = 1, nimages {
+ if (lflag[i] == D_NONE)
+ next
+ if (aligned) {
+ call amovl (v1, v2, IM_MAXDIM)
+ if (project)
+ v2[ndim+1] = i
+ j = imgnl$t (in[i], d[i], v2)
+ } else {
+ v2[1] = v1[1]
+ do j = 2, ndim
+ v2[j] = v1[j] - offsets[i,j]
+ if (project)
+ v2[ndim+1] = i
+ j = imgnl$t (in[i], buf, v2)
+ call amov$t (Mem$t[buf], Mem$t[dbuf[i]+offsets[i,1]],
+ IM_LEN(in[i],1))
+ d[i] = dbuf[i]
+ }
+ }
+
+ # Apply threshold if needed
+ if (dothresh) {
+ do i = 1, nimages {
+ dp = d[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ a = Mem$t[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ lflag[i] = D_MIX
+ dflag = D_MIX
+ }
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ a = Mem$t[dp]
+ if (a < lthresh || a > hthresh) {
+ Memi[m[i]+j-1] = 1
+ dflag = D_MIX
+ }
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+
+ # Check for completely empty lines
+ if (lflag[i] == D_MIX) {
+ lflag[i] = D_NONE
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Apply scaling (avoiding masked pixels which might overflow?)
+ if (doscale) {
+ if (dflag == D_ALL) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ do j = 1, npts {
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ }
+ }
+ } else if (dflag == D_MIX) {
+ do i = 1, nimages {
+ dp = d[i]
+ a = scales[i]
+ b = -zeros[i]
+ if (lflag[i] == D_ALL) {
+ do j = 1, npts {
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ }
+ } else if (lflag[i] == D_MIX) {
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0)
+ Mem$t[dp] = Mem$t[dp] / a + b
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+ }
+
+ # Sort pointers to exclude unused images.
+ # Use the lflag array to keep track of the image index.
+
+ if (dflag == D_ALL)
+ nused = nimages
+ else {
+ nused = 0
+ do i = 1, nimages
+ if (lflag[i] != D_NONE) {
+ nused = nused + 1
+ d[nused] = d[i]
+ m[nused] = m[i]
+ lflag[nused] = i
+ }
+ if (nused == 0)
+ dflag = D_NONE
+ }
+
+ # Compact data to remove bad pixels
+ # Keep track of the image indices if needed
+ # If growing mark the end of the included image indices with zero
+
+ if (dflag == D_ALL) {
+ call amovki (nused, n, npts)
+ if (keepids)
+ do i = 1, nimages
+ call amovki (i, Memi[id[i]], npts)
+ } else if (dflag == D_NONE)
+ call aclri (n, npts)
+ else {
+ call aclri (n, npts)
+ if (keepids) {
+ do i = 1, nused {
+ l = lflag[i]
+ dp = d[i]
+ ip = id[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i) {
+ Mem$t[d[k]+j-1] = Mem$t[dp]
+ Memi[id[k]+j-1] = l
+ } else
+ Memi[ip] = l
+ }
+ dp = dp + 1
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (grow > 0) {
+ do j = 1, npts {
+ do i = n[j]+1, nimages
+ Memi[id[i]+j-1] = 0
+ }
+ }
+ } else {
+ do i = 1, nused {
+ dp = d[i]
+ mp = m[i]
+ do j = 1, npts {
+ if (Memi[mp] == 0) {
+ n[j] = n[j] + 1
+ k = n[j]
+ if (k < i)
+ Mem$t[d[k]+j-1] = Mem$t[dp]
+ }
+ dp = dp + 1
+ mp = mp + 1
+ }
+ }
+ }
+ }
+
+ # Sort the pixels and IDs if needed
+ if (mclip) {
+ call malloc (dp, nimages, TY_PIXEL)
+ if (keepids) {
+ call malloc (ip, nimages, TY_INT)
+ call ic_2sort$t (d, Mem$t[dp], id, Memi[ip], n, npts)
+ call mfree (ip, TY_INT)
+ } else
+ call ic_sort$t (d, Mem$t[dp], n, npts)
+ call mfree (dp, TY_PIXEL)
+ }
+end
+$endfor
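
The last stage of ic_gdata compacts each output column so that the surviving pixels occupy the first n[j] slots of the data pointers, carrying the originating image index along when keepids is set. The Python sketch below shows the same compaction for a single column, assuming the convention used above that a mask value of 0 means good; the helper name is illustrative.

    def compact_column(values, masks):
        # Return the good pixel values of one column plus the 1-based
        # indices of the images they came from (the 'id' array) and n[j].
        good, ids = [], []
        for img, (v, m) in enumerate(zip(values, masks), start=1):
            if m == 0:               # 0 = good, nonzero = masked/rejected
                good.append(v)
                ids.append(img)
        return good, ids, len(good)

    vals, ids, n = compact_column([5.0, 7.0, 6.0, 9.0], [0, 1, 0, 0])
    print(vals, ids, n)              # [5.0, 6.0, 9.0] [1, 3, 4] 3
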
diff --git a/noao/imred/ccdred/src/icgrow.gx b/noao/imred/ccdred/src/icgrow.gx
new file mode 100644
index 00000000..e3cf6228
--- /dev/null
+++ b/noao/imred/ccdred/src/icgrow.gx
@@ -0,0 +1,81 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_GROW -- Reject neighbors of rejected pixels.
+# The rejected pixels are marked by having nonzero ids beyond the number
+# of included pixels. The pixels rejected here are given zero ids
+# to avoid growing of the pixels rejected here. The unweighted average
+# can be updated but any rejected pixel requires the median to be
+# recomputed. When the number of pixels at a grow point reaches nkeep
+# no further pixels are rejected. Note that the rejection order is not
+# based on the magnitude of the residuals and so a grow from a weakly
+# rejected image pixel may take precedence over a grow from a strongly
+# rejected image pixel.
+
+procedure ic_grow$t (d, m, n, nimages, npts, average)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i1, i2, j1, j2, k1, k2, l, is, ie, n2, maxkeep
+pointer mp1, mp2
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ do i1 = 1, npts {
+ k1 = i1 - 1
+ is = max (1, i1 - grow)
+ ie = min (npts, i1 + grow)
+ do j1 = n[i1]+1, nimages {
+ l = Memi[m[j1]+k1]
+ if (l == 0)
+ next
+ if (combine == MEDIAN)
+ docombine = true
+
+ do i2 = is, ie {
+ if (i2 == i1)
+ next
+ k2 = i2 - 1
+ n2 = n[i2]
+ if (nkeep < 0)
+ maxkeep = max (0, n2 + nkeep)
+ else
+ maxkeep = min (n2, nkeep)
+ if (n2 <= maxkeep)
+ next
+ do j2 = 1, n2 {
+ mp1 = m[j2] + k2
+ if (Memi[mp1] == l) {
+ if (!docombine && n2 > 1)
+ average[i2] =
+ (n2*average[i2] - Mem$t[d[j2]+k2]) / (n2-1)
+ mp2 = m[n2] + k2
+ if (j2 < n2) {
+ Mem$t[d[j2]+k2] = Mem$t[d[n2]+k2]
+ Memi[mp1] = Memi[mp2]
+ }
+ Memi[mp2] = 0
+ n[i2] = n2 - 1
+ break
+ }
+ }
+ }
+ }
+ }
+end
+$endfor
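
ic_grow removes, for every already rejected pixel, the pixels of the same image within +/- grow output columns, provided a column is not pushed below its keep limit, and it updates the unweighted running average in place as (n*avg - x)/(n - 1). A small Python sketch of just that incremental update, under the assumption of an unweighted average; the function name is hypothetical.

    def drop_from_average(avg, n, x):
        # Remove value x from an unweighted average of n pixels and return
        # the new (average, count); mirrors average = (n*avg - x)/(n-1).
        if n <= 1:
            raise ValueError("cannot remove the last contributing pixel")
        return (n * avg - x) / (n - 1), n - 1

    print(drop_from_average(avg=10.0, n=4, x=16.0))   # (8.0, 3)
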
diff --git a/noao/imred/ccdred/src/icimstack.x b/noao/imred/ccdred/src/icimstack.x
new file mode 100644
index 00000000..2a19751d
--- /dev/null
+++ b/noao/imred/ccdred/src/icimstack.x
@@ -0,0 +1,125 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <error.h>
+include <imhdr.h>
+
+
+# IC_IMSTACK -- Stack images into a single image of higher dimension.
+
+procedure ic_imstack (images, nimages, output)
+
+char images[SZ_FNAME-1, nimages] #I Input images
+int nimages #I Number of images
+char output #I Name of output image
+
+int i, j, npix
+long line_in[IM_MAXDIM], line_out[IM_MAXDIM]
+pointer sp, key, in, out, buf_in, buf_out, ptr
+
+int imgnls(), imgnli(), imgnll(), imgnlr(), imgnld(), imgnlx()
+int impnls(), impnli(), impnll(), impnlr(), impnld(), impnlx()
+pointer immap()
+errchk immap
+
+begin
+ call smark (sp)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+
+ iferr {
+ # Add each input image to the output image.
+ out = NULL
+ do i = 1, nimages {
+ in = NULL
+ ptr = immap (images[1,i], READ_ONLY, 0)
+ in = ptr
+
+ # For the first input image map the output image as a copy
+ # and increment the dimension. Set the output line counter.
+
+ if (i == 1) {
+ ptr = immap (output, NEW_COPY, in)
+ out = ptr
+ IM_NDIM(out) = IM_NDIM(out) + 1
+ IM_LEN(out, IM_NDIM(out)) = nimages
+ npix = IM_LEN(out, 1)
+ call amovkl (long(1), line_out, IM_MAXDIM)
+ }
+
+ # Check next input image for consistency with the output image.
+ if (IM_NDIM(in) != IM_NDIM(out) - 1)
+ call error (0, "Input images not consistent")
+ do j = 1, IM_NDIM(in) {
+ if (IM_LEN(in, j) != IM_LEN(out, j))
+ call error (0, "Input images not consistent")
+ }
+
+ call sprintf (Memc[key], SZ_FNAME, "stck%04d")
+ call pargi (i)
+ call imastr (out, Memc[key], images[1,i])
+
+ # Copy the input lines from the image to the next lines of
+ # the output image. Switch on the output data type to optimize
+ # IMIO.
+
+ call amovkl (long(1), line_in, IM_MAXDIM)
+ switch (IM_PIXTYPE (out)) {
+ case TY_SHORT:
+ while (imgnls (in, buf_in, line_in) != EOF) {
+ if (impnls (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovs (Mems[buf_in], Mems[buf_out], npix)
+ }
+ case TY_INT:
+ while (imgnli (in, buf_in, line_in) != EOF) {
+ if (impnli (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovi (Memi[buf_in], Memi[buf_out], npix)
+ }
+ case TY_USHORT, TY_LONG:
+ while (imgnll (in, buf_in, line_in) != EOF) {
+ if (impnll (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovl (Meml[buf_in], Meml[buf_out], npix)
+ }
+ case TY_REAL:
+ while (imgnlr (in, buf_in, line_in) != EOF) {
+ if (impnlr (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovr (Memr[buf_in], Memr[buf_out], npix)
+ }
+ case TY_DOUBLE:
+ while (imgnld (in, buf_in, line_in) != EOF) {
+ if (impnld (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovd (Memd[buf_in], Memd[buf_out], npix)
+ }
+ case TY_COMPLEX:
+ while (imgnlx (in, buf_in, line_in) != EOF) {
+ if (impnlx (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovx (Memx[buf_in], Memx[buf_out], npix)
+ }
+ default:
+ while (imgnlr (in, buf_in, line_in) != EOF) {
+ if (impnlr (out, buf_out, line_out) == EOF)
+ call error (0, "Error writing output image")
+ call amovr (Memr[buf_in], Memr[buf_out], npix)
+ }
+ }
+ call imunmap (in)
+ }
+ } then {
+ if (out != NULL) {
+ call imunmap (out)
+ call imdelete (out)
+ }
+ if (in != NULL)
+ call imunmap (in)
+ call sfree (sp)
+ call erract (EA_ERROR)
+ }
+
+ # Finish up.
+ call imunmap (out)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/iclog.x b/noao/imred/ccdred/src/iclog.x
new file mode 100644
index 00000000..82135866
--- /dev/null
+++ b/noao/imred/ccdred/src/iclog.x
@@ -0,0 +1,378 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <mach.h>
+include "icombine.h"
+include "icmask.h"
+
+# IC_LOG -- Output log information if a log file has been specified.
+
+procedure ic_log (in, out, ncombine, exptime, sname, zname, wname,
+ mode, median, mean, scales, zeros, wts, offsets, nimages,
+ dozero, nout, expname, exposure)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int ncombine[nimages] # Number of previous combined images
+real exptime[nimages] # Exposure times
+char sname[ARB] # Scale name
+char zname[ARB] # Zero name
+char wname[ARB] # Weight name
+real mode[nimages] # Modes
+real median[nimages] # Medians
+real mean[nimages] # Means
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero or sky levels
+real wts[nimages] # Weights
+int offsets[nimages,ARB] # Image offsets
+int nimages # Number of images
+bool dozero # Zero flag
+int nout # Number of images combined in output
+char expname[ARB] # Exposure name
+real exposure # Output exposure
+
+int i, j, stack, ctor()
+real rval, imgetr()
+long clktime()
+bool prncombine, prexptime, prmode, prmedian, prmean, prmask
+bool prrdn, prgain, prsn
+pointer sp, fname, key
+errchk imgetr
+
+include "icombine.com"
+
+begin
+ if (logfd == NULL)
+ return
+
+ call smark (sp)
+ call salloc (fname, SZ_LINE, TY_CHAR)
+
+ stack = NO
+ if (project) {
+ ifnoerr (call imgstr (in[1], "stck0001", Memc[fname], SZ_LINE))
+ stack = YES
+ }
+ if (stack == YES)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+
+ # Time stamp the log and print parameter information.
+
+ call cnvdate (clktime(0), Memc[fname], SZ_LINE)
+ call fprintf (logfd, "\n%s: IMCOMBINE\n")
+ call pargstr (Memc[fname])
+ switch (combine) {
+ case AVERAGE:
+ call fprintf (logfd, " combine = average, ")
+ case MEDIAN:
+ call fprintf (logfd, " combine = median, ")
+ }
+ call fprintf (logfd, "scale = %s, zero = %s, weight = %s\n")
+ call pargstr (sname)
+ call pargstr (zname)
+ call pargstr (wname)
+
+ switch (reject) {
+ case MINMAX:
+ call fprintf (logfd, " reject = minmax, nlow = %d, nhigh = %d\n")
+ call pargi (nint (flow * nimages))
+ call pargi (nint (fhigh * nimages))
+ case CCDCLIP:
+ call fprintf (logfd, " reject = ccdclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd,
+ " rdnoise = %s, gain = %s, snoise = %s, sigma = %g, hsigma = %g\n")
+ call pargstr (Memc[rdnoise])
+ call pargstr (Memc[gain])
+ call pargstr (Memc[snoise])
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case CRREJECT:
+ call fprintf (logfd,
+ " reject = crreject, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd,
+ " rdnoise = %s, gain = %s, snoise = %s, hsigma = %g\n")
+ call pargstr (Memc[rdnoise])
+ call pargstr (Memc[gain])
+ call pargstr (Memc[snoise])
+ call pargr (hsigma)
+ case PCLIP:
+ call fprintf (logfd, " reject = pclip, nkeep = %d\n")
+ call pargi (nkeep)
+ call fprintf (logfd, " pclip = %g, lsigma = %g, hsigma = %g\n")
+ call pargr (pclip)
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case SIGCLIP:
+ call fprintf (logfd, " reject = sigclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd, " lsigma = %g, hsigma = %g\n")
+ call pargr (lsigma)
+ call pargr (hsigma)
+ case AVSIGCLIP:
+ call fprintf (logfd,
+ " reject = avsigclip, mclip = %b, nkeep = %d\n")
+ call pargb (mclip)
+ call pargi (nkeep)
+ call fprintf (logfd, " lsigma = %g, hsigma = %g\n")
+ call pargr (lsigma)
+ call pargr (hsigma)
+ }
+ if (reject != NONE && grow > 0) {
+ call fprintf (logfd, " grow = %d\n")
+ call pargi (grow)
+ }
+ if (dothresh) {
+ if (lthresh > -MAX_REAL && hthresh < MAX_REAL) {
+ call fprintf (logfd, " lthreshold = %g, hthreshold = %g\n")
+ call pargr (lthresh)
+ call pargr (hthresh)
+ } else if (lthresh > -MAX_REAL) {
+ call fprintf (logfd, " lthreshold = %g\n")
+ call pargr (lthresh)
+ } else {
+ call fprintf (logfd, " hthreshold = %g\n")
+ call pargr (hthresh)
+ }
+ }
+ call fprintf (logfd, " blank = %g\n")
+ call pargr (blank)
+ call clgstr ("statsec", Memc[fname], SZ_LINE)
+ if (Memc[fname] != EOS) {
+ call fprintf (logfd, " statsec = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ if (ICM_TYPE(icm) != M_NONE) {
+ switch (ICM_TYPE(icm)) {
+ case M_BOOLEAN, M_GOODVAL:
+ call fprintf (logfd, " masktype = goodval, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_BADVAL:
+ call fprintf (logfd, " masktype = badval, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_GOODBITS:
+ call fprintf (logfd, " masktype = goodbits, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ case M_BADBITS:
+ call fprintf (logfd, " masktype = badbits, maskval = %d\n")
+ call pargi (ICM_VALUE(icm))
+ }
+ }
+
+ # Print information pertaining to individual images as a set of
+ # columns with the image name being the first column. Determine
+ # what information is relevant and print the appropriate header.
+
+ prncombine = false
+ prexptime = false
+ prmode = false
+ prmedian = false
+ prmean = false
+ prmask = false
+ prrdn = false
+ prgain = false
+ prsn = false
+ do i = 1, nimages {
+ if (ncombine[i] != ncombine[1])
+ prncombine = true
+ if (exptime[i] != exptime[1])
+ prexptime = true
+ if (mode[i] != mode[1])
+ prmode = true
+ if (median[i] != median[1])
+ prmedian = true
+ if (mean[i] != mean[1])
+ prmean = true
+ if (ICM_TYPE(icm) != M_NONE && Memi[ICM_PMS(icm)+i-1] != NULL)
+ prmask = true
+ if (reject == CCDCLIP || reject == CRREJECT) {
+ j = 1
+ if (ctor (Memc[rdnoise], j, rval) == 0)
+ prrdn = true
+ j = 1
+ if (ctor (Memc[gain], j, rval) == 0)
+ prgain = true
+ j = 1
+ if (ctor (Memc[snoise], j, rval) == 0)
+ prsn = true
+ }
+ }
+
+ call fprintf (logfd, " %20s ")
+ call pargstr ("Images")
+ if (prncombine) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("N")
+ }
+ if (prexptime) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Exp")
+ }
+ if (prmode) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Mode")
+ }
+ if (prmedian) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Median")
+ }
+ if (prmean) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Mean")
+ }
+ if (prrdn) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Rdnoise")
+ }
+ if (prgain) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Gain")
+ }
+ if (prsn) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Snoise")
+ }
+ if (doscale) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Scale")
+ }
+ if (dozero) {
+ call fprintf (logfd, " %7s")
+ call pargstr ("Zero")
+ }
+ if (dowts) {
+ call fprintf (logfd, " %6s")
+ call pargstr ("Weight")
+ }
+ if (!aligned) {
+ call fprintf (logfd, " %9s")
+ call pargstr ("Offsets")
+ }
+ if (prmask) {
+ call fprintf (logfd, " %s")
+ call pargstr ("Maskfile")
+ }
+ call fprintf (logfd, "\n")
+
+ do i = 1, nimages {
+ if (stack == YES) {
+ call sprintf (Memc[key], SZ_FNAME, "stck%04d")
+ call pargi (i)
+ ifnoerr (call imgstr (in[i], Memc[key], Memc[fname], SZ_LINE)) {
+ call fprintf (logfd, " %21s")
+ call pargstr (Memc[fname])
+ } else {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %16s[%3d]")
+ call pargstr (Memc[fname])
+ call pargi (i)
+ }
+ } else if (project) {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %16s[%3d]")
+ call pargstr (Memc[fname])
+ call pargi (i)
+ } else {
+ call imstats (in[i], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %21s")
+ call pargstr (Memc[fname])
+ }
+ if (prncombine) {
+ call fprintf (logfd, " %6d")
+ call pargi (ncombine[i])
+ }
+ if (prexptime) {
+ call fprintf (logfd, " %6.1f")
+ call pargr (exptime[i])
+ }
+ if (prmode) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (mode[i])
+ }
+ if (prmedian) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (median[i])
+ }
+ if (prmean) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (mean[i])
+ }
+ if (prrdn) {
+ rval = imgetr (in[i], Memc[rdnoise])
+ call fprintf (logfd, " %7g")
+ call pargr (rval)
+ }
+ if (prgain) {
+ rval = imgetr (in[i], Memc[gain])
+ call fprintf (logfd, " %6g")
+ call pargr (rval)
+ }
+ if (prsn) {
+ rval = imgetr (in[i], Memc[snoise])
+ call fprintf (logfd, " %6g")
+ call pargr (rval)
+ }
+ if (doscale) {
+ call fprintf (logfd, " %6.3f")
+ call pargr (1./scales[i])
+ }
+ if (dozero) {
+ call fprintf (logfd, " %7.5g")
+ call pargr (-zeros[i])
+ }
+ if (dowts) {
+ call fprintf (logfd, " %6.3f")
+ call pargr (wts[i])
+ }
+ if (!aligned) {
+ if (IM_NDIM(out[1]) == 1) {
+ call fprintf (logfd, " %9d")
+ call pargi (offsets[i,1])
+ } else {
+ do j = 1, IM_NDIM(out[1]) {
+ call fprintf (logfd, " %4d")
+ call pargi (offsets[i,j])
+ }
+ }
+ }
+ if (prmask && Memi[ICM_PMS(icm)+i-1] != NULL) {
+ call imgstr (in[i], "BPM", Memc[fname], SZ_LINE)
+ call fprintf (logfd, " %s")
+ call pargstr (Memc[fname])
+ }
+ call fprintf (logfd, "\n")
+ }
+
+ # Log information about the output images.
+ call imstats (out[1], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, "\n Output image = %s, ncombine = %d")
+ call pargstr (Memc[fname])
+ call pargi (nout)
+ if (expname[1] != EOS) {
+ call fprintf (logfd, ", %s = %g")
+ call pargstr (expname)
+ call pargr (exposure)
+ }
+ call fprintf (logfd, "\n")
+
+ if (out[2] != NULL) {
+ call imstats (out[2], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " Pixel list image = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ if (out[3] != NULL) {
+ call imstats (out[3], IM_IMAGENAME, Memc[fname], SZ_LINE)
+ call fprintf (logfd, " Sigma image = %s\n")
+ call pargstr (Memc[fname])
+ }
+
+ call flush (logfd)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/icmask.com b/noao/imred/ccdred/src/icmask.com
new file mode 100644
index 00000000..baba6f6a
--- /dev/null
+++ b/noao/imred/ccdred/src/icmask.com
@@ -0,0 +1,8 @@
+# IMCMASK -- Common for IMCOMBINE mask interface.
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+common /imcmask/ mtype, mvalue, bufs, pms
diff --git a/noao/imred/ccdred/src/icmask.h b/noao/imred/ccdred/src/icmask.h
new file mode 100644
index 00000000..b2d30530
--- /dev/null
+++ b/noao/imred/ccdred/src/icmask.h
@@ -0,0 +1,7 @@
+# ICMASK -- Data structure for IMCOMBINE mask interface.
+
+define ICM_LEN 4 # Structure length
+define ICM_TYPE Memi[$1] # Mask type
+define ICM_VALUE Memi[$1+1] # Mask value
+define ICM_BUFS Memi[$1+2] # Pointer to data line buffers
+define ICM_PMS Memi[$1+3] # Pointer to array of PMIO pointers
diff --git a/noao/imred/ccdred/src/icmask.x b/noao/imred/ccdred/src/icmask.x
new file mode 100644
index 00000000..ba448b68
--- /dev/null
+++ b/noao/imred/ccdred/src/icmask.x
@@ -0,0 +1,354 @@
+include <imhdr.h>
+include <pmset.h>
+include "icombine.h"
+include "icmask.h"
+
+# IC_MASK -- ICOMBINE mask interface
+#
+# IC_MOPEN -- Open masks
+# IC_MCLOSE -- Close the mask interface
+# IC_MGET -- Get lines of mask pixels for all the images
+# IC_MGET1 -- Get a line of mask pixels for the specified image
+
+
+# IC_MOPEN -- Open masks.
+# Parse and interpret the mask selection parameters.
+
+procedure ic_mopen (in, out, nimages)
+
+pointer in[nimages] #I Input images
+pointer out[ARB] #I Output images
+int nimages #I Number of images
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, npix, npms, clgwrd()
+real clgetr()
+pointer sp, fname, title, pm, pm_open()
+bool invert, pm_empty()
+errchk calloc, pm_open, pm_loadf
+
+include "icombine.com"
+
+begin
+ icm = NULL
+ if (IM_NDIM(out[1]) == 0)
+ return
+
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (title, SZ_FNAME, TY_CHAR)
+
+ # Determine the mask parameters and allocate memory.
+	# The mask buffers are initialized to all excluded so that
+ # output points outside the input data are always excluded
+ # and don't need to be set on a line-by-line basis.
+
+ mtype = clgwrd ("masktype", Memc[title], SZ_FNAME, MASKTYPES)
+ mvalue = clgetr ("maskvalue")
+ npix = IM_LEN(out[1],1)
+ call calloc (pms, nimages, TY_POINTER)
+ call calloc (bufs, nimages, TY_POINTER)
+ do i = 1, nimages {
+ call malloc (Memi[bufs+i-1], npix, TY_INT)
+ call amovki (1, Memi[Memi[bufs+i-1]], npix)
+ }
+
+ # Check for special cases. The BOOLEAN type is used when only
+ # zero and nonzero are significant; i.e. the actual mask values are
+ # not important. The invert flag is used to indicate that
+	# empty masks are all bad rather than all good.
+
+ if (mtype == 0)
+ mtype = M_NONE
+ if (mtype == M_BADBITS && mvalue == 0)
+ mtype = M_NONE
+ if (mvalue == 0 && (mtype == M_GOODVAL || mtype == M_GOODBITS))
+ mtype = M_BOOLEAN
+ if ((mtype == M_BADVAL && mvalue == 0) ||
+ (mtype == M_GOODVAL && mvalue != 0) ||
+ (mtype == M_GOODBITS && mvalue == 0))
+ invert = true
+ else
+ invert = false
+
+ # If mask images are to be used, get the mask name from the image
+ # header and open it saving the descriptor in the pms array.
+ # Empty masks (all good) are treated as if there was no mask image.
+
+ npms = 0
+ do i = 1, nimages {
+ if (mtype != M_NONE) {
+ ifnoerr (call imgstr (in[i], "BPM", Memc[fname], SZ_FNAME)) {
+ pm = pm_open (NULL)
+ call pm_loadf (pm, Memc[fname], Memc[title], SZ_FNAME)
+ call pm_seti (pm, P_REFIM, in[i])
+ if (pm_empty (pm) && !invert)
+ call pm_close (pm)
+ else {
+ if (project) {
+ npms = nimages
+ call amovki (pm, Memi[pms], nimages)
+ } else {
+ npms = npms + 1
+ Memi[pms+i-1] = pm
+ }
+ }
+ if (project)
+ break
+ }
+ }
+ }
+
+ # If no mask images are found and the mask parameters imply that
+ # good values are 0 then use the special case of no masks.
+
+ if (npms == 0) {
+ if (!invert)
+ mtype = M_NONE
+ }
+
+ # Set up mask structure.
+ call calloc (icm, ICM_LEN, TY_STRUCT)
+ ICM_TYPE(icm) = mtype
+ ICM_VALUE(icm) = mvalue
+ ICM_BUFS(icm) = bufs
+ ICM_PMS(icm) = pms
+
+ call sfree (sp)
+end
+
+
+# IC_MCLOSE -- Close the mask interface.
+
+procedure ic_mclose (nimages)
+
+int nimages # Number of images
+
+int i
+include "icombine.com"
+
+begin
+ if (icm == NULL)
+ return
+
+ do i = 1, nimages
+ call mfree (Memi[ICM_BUFS(icm)+i-1], TY_INT)
+ do i = 1, nimages {
+ if (Memi[ICM_PMS(icm)+i-1] != NULL)
+ call pm_close (Memi[ICM_PMS(icm)+i-1])
+ if (project)
+ break
+ }
+ call mfree (ICM_BUFS(icm), TY_POINTER)
+ call mfree (ICM_PMS(icm), TY_POINTER)
+ call mfree (icm, TY_STRUCT)
+end
+
+
+# IC_MGET -- Get lines of mask pixels in the output coordinate system.
+# This converts the mask format to an array where zero is good and nonzero
+# is bad. This has special cases for optimization.
+
+procedure ic_mget (in, out, offsets, v1, v2, m, lflag, nimages)
+
+pointer in[nimages] # Input image pointers
+pointer out[ARB] # Output image pointer
+int offsets[nimages,ARB] # Offsets to output image
+long v1[IM_MAXDIM] # Data vector desired in output image
+long v2[IM_MAXDIM] # Data vector in input image
+pointer m[nimages] # Pointer to mask pointers
+int lflag[nimages] # Line flags
+int nimages # Number of images
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, j, ndim, nout, npix
+pointer buf, pm
+bool pm_linenotempty()
+errchk pm_glpi
+
+include "icombine.com"
+
+begin
+ # Determine if masks are needed at all. Note that the threshold
+ # is applied by simulating mask values so the mask pointers have to
+ # be set.
+
+ dflag = D_ALL
+ if (icm == NULL)
+ return
+ if (ICM_TYPE(icm) == M_NONE && aligned && !dothresh)
+ return
+
+ mtype = ICM_TYPE(icm)
+ mvalue = ICM_VALUE(icm)
+ bufs = ICM_BUFS(icm)
+ pms = ICM_PMS(icm)
+
+ # Set the mask pointers and line flags and apply offsets if needed.
+
+ ndim = IM_NDIM(out[1])
+ nout = IM_LEN(out[1],1)
+ do i = 1, nimages {
+ npix = IM_LEN(in[i],1)
+ j = offsets[i,1]
+ m[i] = Memi[bufs+i-1]
+ buf = Memi[bufs+i-1] + j
+ pm = Memi[pms+i-1]
+ if (npix == nout)
+ lflag[i] = D_ALL
+ else
+ lflag[i] = D_MIX
+
+ v2[1] = v1[1]
+ do j = 2, ndim {
+ v2[j] = v1[j] - offsets[i,j]
+ if (v2[j] < 1 || v2[j] > IM_LEN(in[i],j)) {
+ lflag[i] = D_NONE
+ break
+ }
+ }
+ if (project)
+ v2[ndim+1] = i
+
+ if (lflag[i] == D_NONE)
+ next
+
+ if (pm == NULL) {
+ call aclri (Memi[buf], npix)
+ next
+ }
+
+ # Do mask I/O and convert to appropriate values in order of
+ # expected usage.
+
+ if (pm_linenotempty (pm, v2)) {
+ call pm_glpi (pm, v2, Memi[buf], 32, npix, 0)
+
+ if (mtype == M_BOOLEAN)
+ ;
+ else if (mtype == M_BADBITS)
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_BADVAL)
+ call abeqki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_GOODBITS) {
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ call abeqki (Memi[buf], 0, Memi[buf], npix)
+ } else if (mtype == M_GOODVAL)
+ call abneki (Memi[buf], mvalue, Memi[buf], npix)
+
+ lflag[i] = D_NONE
+ do j = 1, npix
+ if (Memi[buf+j-1] == 0) {
+ lflag[i] = D_MIX
+ break
+ }
+ } else {
+ if (mtype == M_BOOLEAN || mtype == M_BADBITS) {
+ call aclri (Memi[buf], npix)
+ } else if ((mtype == M_BADVAL && mvalue != 0) ||
+ (mtype == M_GOODVAL && mvalue == 0)) {
+ call aclri (Memi[buf], npix)
+ } else {
+ call amovki (1, Memi[buf], npix)
+ lflag[i] = D_NONE
+ }
+ }
+ }
+
+ # Set overall data flag
+ dflag = lflag[1]
+ do i = 2, nimages {
+ if (lflag[i] != dflag) {
+ dflag = D_MIX
+ break
+ }
+ }
+end
+
+
+# IC_MGET1 -- Get line of mask pixels from a specified image.
+# This is used by the IC_STAT procedure. This procedure converts the
+# stored mask format to an array where zero is good and nonzero is bad.
+# The data vector and returned mask array are in the input image pixel system.
+
+procedure ic_mget1 (in, image, offset, v, m)
+
+pointer in # Input image pointer
+int image # Image index
+int offset # Column offset
+long v[IM_MAXDIM] # Data vector desired
+pointer m # Pointer to mask
+
+int mtype # Mask type
+int mvalue # Mask value
+pointer bufs # Pointer to data line buffers
+pointer pms # Pointer to array of PMIO pointers
+
+int i, npix
+pointer buf, pm
+bool pm_linenotempty()
+errchk pm_glpi
+
+include "icombine.com"
+
+begin
+ dflag = D_ALL
+ if (icm == NULL)
+ return
+ if (ICM_TYPE(icm) == M_NONE)
+ return
+
+ mtype = ICM_TYPE(icm)
+ mvalue = ICM_VALUE(icm)
+ bufs = ICM_BUFS(icm)
+ pms = ICM_PMS(icm)
+
+ npix = IM_LEN(in,1)
+ m = Memi[bufs+image-1] + offset
+ pm = Memi[pms+image-1]
+ if (pm == NULL)
+ return
+
+ # Do mask I/O and convert to appropriate values in order of
+ # expected usage.
+
+ buf = m
+ if (pm_linenotempty (pm, v)) {
+ call pm_glpi (pm, v, Memi[buf], 32, npix, 0)
+
+ if (mtype == M_BOOLEAN)
+ ;
+ else if (mtype == M_BADBITS)
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_BADVAL)
+ call abeqki (Memi[buf], mvalue, Memi[buf], npix)
+ else if (mtype == M_GOODBITS) {
+ call aandki (Memi[buf], mvalue, Memi[buf], npix)
+ call abeqki (Memi[buf], 0, Memi[buf], npix)
+ } else if (mtype == M_GOODVAL)
+ call abneki (Memi[buf], mvalue, Memi[buf], npix)
+
+ dflag = D_NONE
+ do i = 1, npix
+ if (Memi[buf+i-1] == 0) {
+ dflag = D_MIX
+ break
+ }
+ } else {
+ if (mtype == M_BOOLEAN || mtype == M_BADBITS) {
+ ;
+ } else if ((mtype == M_BADVAL && mvalue != 0) ||
+ (mtype == M_GOODVAL && mvalue == 0)) {
+ ;
+ } else
+ dflag = D_NONE
+ }
+end
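
ic_mget and ic_mget1 reduce the four mask conventions to a single internal form, 0 for good and nonzero for bad: badbits ANDs with the mask value, badval tests equality, goodbits ANDs and then tests for zero, and goodval tests inequality. A Python sketch of that mapping; the masktype strings below are illustrative stand-ins for the M_* codes in icombine.h.

    def to_bad_flags(pixels, masktype, maskvalue):
        # Convert raw mask pixels to 0 (good) / nonzero (bad) flags,
        # following the conversions applied after pm_glpi above.
        if masktype == "badbits":
            return [p & maskvalue for p in pixels]
        if masktype == "badval":
            return [int(p == maskvalue) for p in pixels]
        if masktype == "goodbits":
            return [int((p & maskvalue) == 0) for p in pixels]
        if masktype == "goodval":
            return [int(p != maskvalue) for p in pixels]
        return list(pixels)          # "boolean": nonzero already means bad

    print(to_bad_flags([0, 1, 2, 3], "goodval", 0))   # [0, 1, 1, 1]
    print(to_bad_flags([0, 1, 2, 3], "badbits", 2))   # [0, 0, 2, 2]
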
diff --git a/noao/imred/ccdred/src/icmedian.gx b/noao/imred/ccdred/src/icmedian.gx
new file mode 100644
index 00000000..dc8488d9
--- /dev/null
+++ b/noao/imred/ccdred/src/icmedian.gx
@@ -0,0 +1,228 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_MEDIAN -- Median of lines
+
+procedure ic_median$t (d, n, npts, median)
+
+pointer d[ARB] # Input data line pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, j1, j2, n1, lo, up, lo1, up1
+bool even
+$if (datatype == silx)
+real val1, val2, val3
+$else
+PIXEL val1, val2, val3
+$endif
+PIXEL temp, wtemp
+$if (datatype == x)
+real abs_temp
+$endif
+
+include "../icombine.com"
+
+begin
+ # If no data return after possibly setting blank values.
+ if (dflag == D_NONE) {
+ do i = 1, npts
+ median[i]= blank
+ return
+ }
+
+ # If the data were previously sorted then directly compute the median.
+ if (mclip) {
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ even = (mod (n1, 2) == 0)
+ j1 = n1 / 2 + 1
+ j2 = n1 / 2
+ do i = 1, npts {
+ k = i - 1
+ if (even) {
+ val1 = Mem$t[d[j1]+k]
+ val2 = Mem$t[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mem$t[d[j1]+k]
+ }
+ } else {
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (n1 > 0) {
+ j1 = n1 / 2 + 1
+ if (mod (n1, 2) == 0) {
+ j2 = n1 / 2
+ val1 = Mem$t[d[j1]+k]
+ val2 = Mem$t[d[j2]+k]
+ median[i] = (val1 + val2) / 2.
+ } else
+ median[i] = Mem$t[d[j1]+k]
+ } else
+ median[i] = blank
+ }
+ }
+ return
+ }
+
+ # Compute the median.
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+
+	    # If there are more than 3 points use the Wirth algorithm.  This
+	    # is the same as vops$amed.gx except that for an even number of
+	    # points it selects the middle two and averages them.
+ if (n1 > 3) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2))
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mem$t[d[j]+k]; lo1 = lo; up1 = up
+ $if (datatype == x)
+ abs_temp = abs (temp)
+ $endif
+
+ repeat {
+ $if (datatype == x)
+ while (abs (Mem$t[d[lo1]+k]) < abs_temp)
+ $else
+ while (Mem$t[d[lo1]+k] < temp)
+ $endif
+ lo1 = lo1 + 1
+ $if (datatype == x)
+ while (abs_temp < abs (Mem$t[d[up1]+k]))
+ $else
+ while (temp < Mem$t[d[up1]+k])
+ $endif
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mem$t[d[lo1]+k]
+ Mem$t[d[lo1]+k] = Mem$t[d[up1]+k]
+ Mem$t[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+
+ median[i] = Mem$t[d[j]+k]
+
+ if (mod (n1,2) == 0) {
+ lo = 1
+ up = n1
+ j = max (lo, min (up, (up+1)/2)+1)
+
+ while (lo < up) {
+ if (! (lo < up))
+ break
+
+ temp = Mem$t[d[j]+k]; lo1 = lo; up1 = up
+ $if (datatype == x)
+ abs_temp = abs (temp)
+ $endif
+
+ repeat {
+ $if (datatype == x)
+ while (abs (Mem$t[d[lo1]+k]) < abs_temp)
+ $else
+ while (Mem$t[d[lo1]+k] < temp)
+ $endif
+ lo1 = lo1 + 1
+ $if (datatype == x)
+ while (abs_temp < abs (Mem$t[d[up1]+k]))
+ $else
+ while (temp < Mem$t[d[up1]+k])
+ $endif
+ up1 = up1 - 1
+ if (lo1 <= up1) {
+ wtemp = Mem$t[d[lo1]+k]
+ Mem$t[d[lo1]+k] = Mem$t[d[up1]+k]
+ Mem$t[d[up1]+k] = wtemp
+ lo1 = lo1 + 1; up1 = up1 - 1
+ }
+ } until (lo1 > up1)
+
+ if (up1 < j)
+ lo = lo1
+ if (j < lo1)
+ up = up1
+ }
+ median[i] = (median[i] + Mem$t[d[j]+k]) / 2
+ }
+
+ # If 3 points find the median directly.
+ } else if (n1 == 3) {
+ $if (datatype == x)
+ val1 = abs (Mem$t[d[1]+k])
+ val2 = abs (Mem$t[d[2]+k])
+ val3 = abs (Mem$t[d[3]+k])
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = Mem$t[d[2]+k]
+ else if (val1 < val3) # acb
+ median[i] = Mem$t[d[3]+k]
+ else # cab
+ median[i] = Mem$t[d[1]+k]
+ } else {
+ if (val2 > val3) # cba
+ median[i] = Mem$t[d[2]+k]
+ else if (val1 > val3) # bca
+ median[i] = Mem$t[d[3]+k]
+ else # bac
+ median[i] = Mem$t[d[1]+k]
+ }
+ $else
+ val1 = Mem$t[d[1]+k]
+ val2 = Mem$t[d[2]+k]
+ val3 = Mem$t[d[3]+k]
+ if (val1 < val2) {
+ if (val2 < val3) # abc
+ median[i] = val2
+ else if (val1 < val3) # acb
+ median[i] = val3
+ else # cab
+ median[i] = val1
+ } else {
+ if (val2 > val3) # cba
+ median[i] = val2
+ else if (val1 > val3) # bca
+ median[i] = val3
+ else # bac
+ median[i] = val1
+ }
+ $endif
+
+ # If 2 points average.
+ } else if (n1 == 2) {
+ val1 = Mem$t[d[1]+k]
+ val2 = Mem$t[d[2]+k]
+ median[i] = (val1 + val2) / 2
+
+ # If 1 point return the value.
+ } else if (n1 == 1)
+ median[i] = Mem$t[d[1]+k]
+
+ # If no points return with a possibly blank value.
+ else
+ median[i] = blank
+ }
+end
+$endfor
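
The median generic uses Wirth's partition-based selection to find the middle element, and for an even count the next one, without fully sorting the column. The following is a compact Python version of the same selection, for illustration only; wirth_select and column_median are not names from the IRAF sources.

    def wirth_select(a, k):
        # k-th smallest element (0-based) by Wirth's selection, as in ic_median.
        a = list(a)
        lo, up = 0, len(a) - 1
        while lo < up:
            pivot = a[k]
            i, j = lo, up
            while i <= j:
                while a[i] < pivot:
                    i += 1
                while pivot < a[j]:
                    j -= 1
                if i <= j:
                    a[i], a[j] = a[j], a[i]
                    i += 1
                    j -= 1
            if j < k:
                lo = i
            if k < i:
                up = j
        return a[k]

    def column_median(values):
        n = len(values)
        m = wirth_select(values, (n - 1) // 2)
        if n % 2:
            return m
        return (m + wirth_select(values, n // 2)) / 2.0

    print(column_median([7, 1, 5, 3, 9]))   # 5
    print(column_median([7, 1, 5, 3]))      # 4.0
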
diff --git a/noao/imred/ccdred/src/icmm.gx b/noao/imred/ccdred/src/icmm.gx
new file mode 100644
index 00000000..90837ae5
--- /dev/null
+++ b/noao/imred/ccdred/src/icmm.gx
@@ -0,0 +1,177 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+$for (sr)
+# IC_MM -- Reject a specified number of high and low pixels
+
+procedure ic_mm$t (d, m, n, npts)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of good pixels
+int npts # Number of output points per line
+
+int n1, ncombine, npairs, nlow, nhigh, np
+int i, i1, j, jmax, jmin
+pointer k, kmax, kmin
+PIXEL d1, d2, dmin, dmax
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_NONE)
+ return
+
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = n1 - nlow - nhigh
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ do i = 1, npts {
+ i1 = i - 1
+ n1 = n[i]
+ if (dflag == D_MIX) {
+ nlow = flow * n1 + 0.001
+ nhigh = fhigh * n1 + 0.001
+ ncombine = max (ncombine, n1 - nlow - nhigh)
+ npairs = min (nlow, nhigh)
+ nlow = nlow - npairs
+ nhigh = nhigh - npairs
+ }
+
+ # Reject the npairs low and high points.
+ do np = 1, npairs {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmax = d1; dmin = d1; jmax = 1; jmin = 1; kmax = k; kmin = k
+ do j = 2, n1 {
+ d2 = d1
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ } else if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ j = n1 - 1
+ if (keepids) {
+ if (jmax < j) {
+ if (jmin != j) {
+ Mem$t[kmax] = d2
+ Memi[m[jmax]+i1] = Memi[m[j]+i1]
+ } else {
+ Mem$t[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ }
+ if (jmin < j) {
+ if (jmax != n1) {
+ Mem$t[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ } else {
+ Mem$t[kmin] = d2
+ Memi[m[jmin]+i1] = Memi[m[j]+i1]
+ }
+ }
+ } else {
+ if (jmax < j) {
+ if (jmin != j)
+ Mem$t[kmax] = d2
+ else
+ Mem$t[kmax] = d1
+ }
+ if (jmin < j) {
+ if (jmax != n1)
+ Mem$t[kmin] = d1
+ else
+ Mem$t[kmin] = d2
+ }
+ }
+ n1 = n1 - 2
+ }
+
+ # Reject the excess low points.
+ do np = 1, nlow {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmin = d1; jmin = 1; kmin = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 < dmin) {
+ dmin = d1; jmin = j; kmin = k
+ }
+ }
+ if (keepids) {
+ if (jmin < n1) {
+ Mem$t[kmin] = d1
+ Memi[m[jmin]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmin < n1)
+ Mem$t[kmin] = d1
+ }
+ n1 = n1 - 1
+ }
+
+ # Reject the excess high points.
+ do np = 1, nhigh {
+ k = d[1] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ dmax = d1; jmax = 1; kmax = k
+ do j = 2, n1 {
+ k = d[j] + i1
+ $if (datatype == x)
+ d1 = abs (Mem$t[k])
+ $else
+ d1 = Mem$t[k]
+ $endif
+ if (d1 > dmax) {
+ dmax = d1; jmax = j; kmax = k
+ }
+ }
+ if (keepids) {
+ if (jmax < n1) {
+ Mem$t[kmax] = d1
+ Memi[m[jmax]+i1] = Memi[m[n1]+i1]
+ }
+ } else {
+ if (jmax < n1)
+ Mem$t[kmax] = d1
+ }
+ n1 = n1 - 1
+ }
+ n[i] = n1
+ }
+
+ if (dflag == D_ALL && npairs + nlow + nhigh > 0)
+ dflag = D_MIX
+end
+$endfor
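
ic_mm converts the flow and fhigh fractions into integer rejection counts with a small tolerance, nlow = int(flow*n + 0.001), rejecting the common low/high pairs first so that the counts adapt to columns that have already lost pixels. The sketch below reproduces the counting and a sort-based rejection that yields the same surviving values as the in-place min/max search above; the helper name is illustrative.

    def minmax_reject(values, flow, fhigh):
        # Reject the lowest 'flow' and highest 'fhigh' fractions of the
        # pixels in one output column and return the survivors.
        n = len(values)
        nlow = int(flow * n + 0.001)
        nhigh = int(fhigh * n + 0.001)
        ordered = sorted(values)
        return ordered[nlow:n - nhigh] if n - nhigh > nlow else []

    print(minmax_reject([9, 2, 7, 4, 6, 8], flow=1/6, fhigh=2/6))
    # [4, 6, 7]  (one low and two high pixels rejected)
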
diff --git a/noao/imred/ccdred/src/icombine.com b/noao/imred/ccdred/src/icombine.com
new file mode 100644
index 00000000..cb826d58
--- /dev/null
+++ b/noao/imred/ccdred/src/icombine.com
@@ -0,0 +1,40 @@
+# ICOMBINE Common
+
+int combine # Combine algorithm
+int reject # Rejection algorithm
+bool project # Combine across the highest dimension?
+real blank # Blank value
+pointer rdnoise # CCD read noise
+pointer gain # CCD gain
+pointer snoise # CCD sensitivity noise
+real lthresh # Low threshold
+real hthresh # High threshold
+int nkeep # Minimum to keep
+real lsigma # Low sigma cutoff
+real hsigma # High sigma cutoff
+real pclip # Number or fraction of pixels from median
+real flow # Fraction of low pixels to reject
+real fhigh # Fraction of high pixels to reject
+int grow # Grow radius
+bool mclip # Use median in sigma clipping?
+real sigscale # Sigma scaling tolerance
+int logfd # Log file descriptor
+
+# These flags allow special conditions to be optimized.
+
+int dflag # Data flag (D_ALL, D_NONE, D_MIX)
+bool aligned # Are the images aligned?
+bool doscale # Do the images have to be scaled?
+bool doscale1 # Do the sigma calculations have to be scaled?
+bool dothresh # Check pixels outside specified thresholds?
+bool dowts # Does the final average have to be weighted?
+bool keepids # Keep track of the image indices?
+bool docombine # Call the combine procedure?
+bool sort # Sort data?
+
+pointer icm # Mask data structure
+
+common /imccom/ combine, reject, blank, rdnoise, gain, snoise, lsigma, hsigma,
+ lthresh, hthresh, nkeep, pclip, flow, fhigh, grow, logfd,
+ dflag, sigscale, project, mclip, aligned, doscale, doscale1,
+ dothresh, dowts, keepids, docombine, sort, icm
diff --git a/noao/imred/ccdred/src/icombine.gx b/noao/imred/ccdred/src/icombine.gx
new file mode 100644
index 00000000..d6e93ef0
--- /dev/null
+++ b/noao/imred/ccdred/src/icombine.gx
@@ -0,0 +1,395 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include <syserr.h>
+include <mach.h>
+include "../icombine.h"
+
+
+# ICOMBINE -- Combine images
+#
+# The memory and open file descriptor limits are checked and an attempt
+# to recover is made either by setting the image pixel files to be
+# closed after I/O or by notifying the calling program that memory
+# ran out and the IMIO buffer size should be reduced. After the checks
+# a procedure for the selected combine option is called.
+# Because there may be several failure modes when reaching the file
+# limits we first assume an error is due to the file limit, except for
+# out of memory, and close some pixel files. If the error then repeats
+# on accessing the pixels the error is passed back.
+
+$for (sr)
+procedure icombine$t (in, out, offsets, nimages, bufsize)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Input image offsets
+int nimages # Number of input images
+int bufsize # IMIO buffer size
+
+char str[1]
+int i, j, npts, fd, stropen(), errcode(), imstati()
+pointer sp, d, id, n, m, lflag, scales, zeros, wts, dbuf
+pointer buf, imgl1$t(), impl1i()
+errchk stropen, imgl1$t, impl1i
+$if (datatype == sil)
+pointer impl1r()
+errchk impl1r
+$else
+pointer impl1$t()
+errchk impl1$t
+$endif
+
+include "../icombine.com"
+
+begin
+ npts = IM_LEN(out[1],1)
+
+ # Allocate memory.
+ call smark (sp)
+ call salloc (d, nimages, TY_POINTER)
+ call salloc (id, nimages, TY_POINTER)
+ call salloc (n, npts, TY_INT)
+ call salloc (m, nimages, TY_POINTER)
+ call salloc (lflag, nimages, TY_INT)
+ call salloc (scales, nimages, TY_REAL)
+ call salloc (zeros, nimages, TY_REAL)
+ call salloc (wts, nimages, TY_REAL)
+ call amovki (D_ALL, Memi[lflag], nimages)
+
+ # If aligned use the IMIO buffer otherwise we need vectors of
+ # output length.
+
+ if (!aligned) {
+ call salloc (dbuf, nimages, TY_POINTER)
+ do i = 1, nimages
+ call salloc (Memi[dbuf+i-1], npts, TY_PIXEL)
+ }
+
+ if (project) {
+ call imseti (in[1], IM_NBUFS, nimages)
+ call imseti (in[1], IM_BUFSIZE, bufsize)
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ } else {
+ # Reserve FD for string operations.
+ fd = stropen (str, 1, NEW_FILE)
+
+ # Do I/O to the images.
+ do i = 1, 3 {
+ if (out[i] != NULL)
+ call imseti (out[i], IM_BUFSIZE, bufsize)
+ }
+ $if (datatype == sil)
+ buf = impl1r (out[1])
+ call aclrr (Memr[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1r (out[3])
+ call aclrr (Memr[buf], npts)
+ }
+ $else
+ buf = impl1$t (out[1])
+ call aclr$t (Mem$t[buf], npts)
+ if (out[3] != NULL) {
+ buf = impl1$t (out[3])
+ call aclr$t (Mem$t[buf], npts)
+ }
+ $endif
+ if (out[2] != NULL) {
+ buf = impl1i (out[2])
+ call aclri (Memi[buf], npts)
+ }
+
+ do i = 1, nimages {
+ call imseti (in[i], IM_BUFSIZE, bufsize)
+ iferr (buf = imgl1$t (in[i])) {
+ switch (errcode()) {
+ case SYS_MFULL:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (imstati (in[i], IM_CLOSEFD) == YES) {
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ do j = i-2, nimages
+ call imseti (in[j], IM_CLOSEFD, YES)
+ buf = imgl1$t (in[i])
+ default:
+ call sfree (sp)
+ call strclose (fd)
+ call erract (EA_ERROR)
+ }
+ }
+ }
+
+ call strclose (fd)
+ }
+
+ call ic_combine$t (in, out, Memi[dbuf], Memi[d], Memi[id], Memi[n],
+ Memi[m], Memi[lflag], offsets, Memr[scales], Memr[zeros],
+ Memr[wts], nimages, npts)
+end
+
+
+# IC_COMBINE -- Combine images.
+
+procedure ic_combine$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, wts, nimages, npts)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output image
+pointer dbuf[nimages] # Data buffers for nonaligned images
+pointer d[nimages] # Data pointers
+pointer id[nimages] # Image index ID pointers
+int n[npts] # Number of good pixels
+pointer m[nimages] # Mask pointers
+int lflag[nimages] # Line flags
+int offsets[nimages,ARB] # Input image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero offset factors
+real wts[nimages] # Combining weights
+int nimages # Number of input images
+int npts # Number of points per output line
+
+int i, ctor()
+real r, imgetr()
+pointer sp, v1, v2, v3, outdata, buf, nm, impnli()
+$if (datatype == sil)
+pointer impnlr()
+$else
+pointer impnl$t()
+$endif
+errchk ic_scale, imgetr
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (v3, IM_MAXDIM, TY_LONG)
+ call amovkl (long(1), Meml[v1], IM_MAXDIM)
+ call amovkl (long(1), Meml[v2], IM_MAXDIM)
+ call amovkl (long(1), Meml[v3], IM_MAXDIM)
+
+ call ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+ # Set combine parameters
+ switch (combine) {
+ case AVERAGE:
+ if (dowts)
+ keepids = true
+ else
+ keepids = false
+ case MEDIAN:
+ dowts = false
+ keepids = false
+ }
+ docombine = true
+
+ # Set rejection algorithm specific parameters
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ call salloc (nm, 3*nimages, TY_REAL)
+ i = 1
+ if (ctor (Memc[rdnoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = r
+ } else {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)] = imgetr (in[i], Memc[rdnoise])
+ }
+ i = 1
+ if (ctor (Memc[gain], i, r) > 0) {
+ do i = 1, nimages {
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[gain])
+ Memr[nm+3*(i-1)+1] = r
+ Memr[nm+3*(i-1)] =
+ max ((Memr[nm+3*(i-1)] / r) ** 2, 1e4 / MAX_REAL)
+ }
+ }
+ i = 1
+ if (ctor (Memc[snoise], i, r) > 0) {
+ do i = 1, nimages
+ Memr[nm+3*(i-1)+2] = r
+ } else {
+ do i = 1, nimages {
+ r = imgetr (in[i], Memc[snoise])
+ Memr[nm+3*(i-1)+2] = r
+ }
+ }
+ if (!keepids) {
+ if (doscale1 || grow > 0)
+ keepids = true
+ else {
+ do i = 2, nimages {
+ if (Memr[nm+3*(i-1)] != Memr[nm] ||
+ Memr[nm+3*(i-1)+1] != Memr[nm+1] ||
+ Memr[nm+3*(i-1)+2] != Memr[nm+2]) {
+ keepids = true
+ break
+ }
+ }
+ }
+ }
+ if (reject == CRREJECT)
+ lsigma = MAX_REAL
+ case MINMAX:
+ mclip = false
+ if (grow > 0)
+ keepids = true
+ case PCLIP:
+ mclip = true
+ if (grow > 0)
+ keepids = true
+ case AVSIGCLIP, SIGCLIP:
+ if (doscale1 || grow > 0)
+ keepids = true
+ case NONE:
+ mclip = false
+ grow = 0
+ }
+
+ if (keepids) {
+ do i = 1, nimages
+ call salloc (id[i], npts, TY_INT)
+ }
+
+ $if (datatype == sil)
+ while (impnlr (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ else
+ call ic_accdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Memr[outdata])
+ case MINMAX:
+ call ic_mm$t (d, id, n, npts)
+ case PCLIP:
+ call ic_pclip$t (d, id, n, nimages, npts, Memr[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ else
+ call ic_asigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Memr[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ else
+ call ic_aavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Memr[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grow$t (d, id, n, nimages, npts, Memr[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_average$t (d, id, n, wts, npts, Memr[outdata])
+ case MEDIAN:
+ call ic_median$t (d, n, npts, Memr[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnlr (out[3], buf, Meml[v1])
+ call ic_sigma$t (d, id, n, wts, npts, Memr[outdata],
+ Memr[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+ $else
+ while (impnl$t (out[1], outdata, Meml[v1]) != EOF) {
+ call ic_gdata$t (in, out, dbuf, d, id, n, m, lflag, offsets,
+ scales, zeros, nimages, npts, Meml[v2], Meml[v3])
+
+ switch (reject) {
+ case CCDCLIP, CRREJECT:
+ if (mclip)
+ call ic_mccdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Mem$t[outdata])
+ else
+ call ic_accdclip$t (d, id, n, scales, zeros, Memr[nm],
+ nimages, npts, Mem$t[outdata])
+ case MINMAX:
+ call ic_mm$t (d, id, n, npts)
+ case PCLIP:
+ call ic_pclip$t (d, id, n, nimages, npts, Mem$t[outdata])
+ case SIGCLIP:
+ if (mclip)
+ call ic_msigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Mem$t[outdata])
+ else
+ call ic_asigclip$t (d, id, n, scales, zeros, nimages, npts,
+ Mem$t[outdata])
+ case AVSIGCLIP:
+ if (mclip)
+ call ic_mavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Mem$t[outdata])
+ else
+ call ic_aavsigclip$t (d, id, n, scales, zeros, nimages,
+ npts, Mem$t[outdata])
+ }
+
+ if (grow > 0)
+ call ic_grow$t (d, id, n, nimages, npts, Mem$t[outdata])
+
+ if (docombine) {
+ switch (combine) {
+ case AVERAGE:
+ call ic_average$t (d, id, n, wts, npts, Mem$t[outdata])
+ case MEDIAN:
+ call ic_median$t (d, n, npts, Mem$t[outdata])
+ }
+ }
+
+ if (out[2] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnli (out[2], buf, Meml[v1])
+ call amovki (nimages, Memi[buf], npts)
+ call asubi (Memi[buf], n, Memi[buf], npts)
+ }
+
+ if (out[3] != NULL) {
+ call amovl (Meml[v2], Meml[v1], IM_MAXDIM)
+ i = impnl$t (out[3], buf, Meml[v1])
+ call ic_sigma$t (d, id, n, wts, npts, Mem$t[outdata],
+ Mem$t[buf])
+ }
+ call amovl (Meml[v1], Meml[v2], IM_MAXDIM)
+ }
+ $endif
+
+ call sfree (sp)
+end
+$endfor
diff --git a/noao/imred/ccdred/src/icombine.h b/noao/imred/ccdred/src/icombine.h
new file mode 100644
index 00000000..13b77117
--- /dev/null
+++ b/noao/imred/ccdred/src/icombine.h
@@ -0,0 +1,52 @@
+# ICOMBINE Definitions
+
+# Memory management parameters:
+define DEFBUFSIZE 65536 # default IMIO buffer size
+define FUDGE 0.8 # fudge factor
+
+# Rejection options:
+define REJECT "|none|ccdclip|crreject|minmax|pclip|sigclip|avsigclip|"
+define NONE 1 # No rejection algorithm
+define CCDCLIP 2 # CCD noise function clipping
+define	CRREJECT	3	# CCD noise function clipping of only high pixels
+define MINMAX 4 # Minmax rejection
+define PCLIP 5 # Percentile clip
+define SIGCLIP 6 # Sigma clip
+define AVSIGCLIP 7 # Sigma clip with average poisson sigma
+
+# Combine options:
+define COMBINE "|average|median|"
+define AVERAGE 1
+define MEDIAN 2
+
+# Scaling options:
+define STYPES "|none|mode|median|mean|exposure|"
+define ZTYPES "|none|mode|median|mean|"
+define WTYPES "|none|mode|median|mean|exposure|"
+define S_NONE 1
+define S_MODE 2
+define S_MEDIAN 3
+define S_MEAN 4
+define S_EXPOSURE 5
+define S_FILE 6
+define S_KEYWORD 7
+define S_SECTION "|input|output|overlap|"
+define S_INPUT 1
+define S_OUTPUT 2
+define S_OVERLAP 3
+
+# Mask options
+define MASKTYPES "|none|goodvalue|badvalue|goodbits|badbits|"
+define M_NONE 1 # Don't use mask images
+define M_GOODVAL 2 # Value selecting good pixels
+define M_BADVAL 3 # Value selecting bad pixels
+define M_GOODBITS 4 # Bits selecting good pixels
+define M_BADBITS 5 # Bits selecting bad pixels
+define M_BOOLEAN -1 # Ignore mask values
+
+# Data flag
+define D_ALL 0 # All pixels are good
+define D_NONE 1 # All pixels are bad or rejected
+define D_MIX 2 # Mixture of good and bad pixels
+
+define TOL 0.001 # Tolerance for equal residuals
diff --git a/noao/imred/ccdred/src/icpclip.gx b/noao/imred/ccdred/src/icpclip.gx
new file mode 100644
index 00000000..223396c3
--- /dev/null
+++ b/noao/imred/ccdred/src/icpclip.gx
@@ -0,0 +1,233 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define MINCLIP 3 # Minimum number for clipping
+
+$for (sr)
+# IC_PCLIP -- Percentile clip
+#
+# 1) Find the median
+# 2) Find the pixel which is the specified order index away
+# 3) Use the data value difference as a sigma and apply clipping
+# 4) Since the median is known return it so it does not have to be recomputed
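+#
+# Illustrative example (the pclip value used here is a pixel offset from
+# the median; the numbers are assumed): with 10 good, sorted pixels n2 = 6
+# and the median is (d[5]+d[6])/2; pclip = +2 selects n3 = 8 so that
+# sigma = d[8] - median, while pclip = -2 selects n3 = 3 so that
+# sigma = median - d[3].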
+
+procedure ic_pclip$t (d, m, n, nimages, npts, median)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image id pointers
+int n[npts] # Number of good pixels
+int nimages # Number of input images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, n4, n5, nl, nh, nin, maxkeep
+bool even, fp_equalr()
+real sigma, r, s, t
+pointer sp, resid, mp1, mp2
+$if (datatype == sil)
+real med
+$else
+PIXEL med
+$endif
+
+include "../icombine.com"
+
+begin
+ # There must be at least MINCLIP and more than nkeep pixels.
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+
+ # Set sign of pclip parameter
+ if (pclip < 0)
+ t = -1.
+ else
+ t = 1.
+
+ # If there are no rejected pixels compute certain parameters once.
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0.) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ nin = n1
+ }
+
+ # Now apply clipping.
+ do i = 1, npts {
+ # Compute median.
+ if (dflag == D_MIX) {
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ if (n1 == 0) {
+ if (combine == MEDIAN)
+ median[i] = blank
+ next
+ }
+ n2 = 1 + n1 / 2
+ even = (mod (n1, 2) == 0)
+ if (pclip < 0) {
+ if (even)
+ n3 = max (1, nint (n2 - 1 + pclip))
+ else
+ n3 = max (1, nint (n2 + pclip))
+ } else
+ n3 = min (n1, nint (n2 + pclip))
+ }
+
+ j = i - 1
+ if (even) {
+ med = Mem$t[d[n2-1]+j]
+ med = (med + Mem$t[d[n2]+j]) / 2.
+ } else
+ med = Mem$t[d[n2]+j]
+
+ if (n1 < max (MINCLIP, maxkeep+1)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Define sigma for clipping
+ sigma = t * (Mem$t[d[n3]+j] - med)
+ if (fp_equalr (sigma, 0.)) {
+ if (combine == MEDIAN)
+ median[i] = med
+ next
+ }
+
+ # Reject pixels and save residuals.
+ # Check if any pixels are clipped.
+ # If so recompute the median and reset the number of good pixels.
+ # Only reorder if needed.
+
+ for (nl=1; nl<=n1; nl=nl+1) {
+ r = (med - Mem$t[d[nl]+j]) / sigma
+ if (r < lsigma)
+ break
+ Memr[resid+nl] = r
+ }
+ for (nh=n1; nh>=1; nh=nh-1) {
+ r = (Mem$t[d[nh]+j] - med) / sigma
+ if (r < hsigma)
+ break
+ Memr[resid+nh] = r
+ }
+ n4 = nh - nl + 1
+
+ # If too many pixels are rejected add some back in.
+ # All pixels with the same residual are added.
+ while (n4 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n4 = nh - nl + 1
+ }
+
+ # If any pixels are rejected recompute the median.
+ if (nl > 1 || nh < n1) {
+ n5 = nl + n4 / 2
+ if (mod (n4, 2) == 0) {
+ med = Mem$t[d[n5-1]+j]
+ med = (med + Mem$t[d[n5]+j]) / 2.
+ } else
+ med = Mem$t[d[n5]+j]
+ n[i] = n4
+ }
+ if (combine == MEDIAN)
+ median[i] = med
+
+	    # Reorder pixels only if necessary.
+ if (nl > 1 && (combine != MEDIAN || grow > 0)) {
+ k = max (nl, n4 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+j] = Mem$t[d[k]+j]
+ if (grow > 0) {
+ mp1 = m[l] + j
+ mp2 = m[k] + j
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+j] = Memi[m[k]+j]
+ k = k + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+j] = Mem$t[d[k]+j]
+ k = k + 1
+ }
+ }
+ }
+ }
+
+ # Check if data flag needs to be reset for rejected pixels.
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag whether the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
diff --git a/noao/imred/ccdred/src/icscale.x b/noao/imred/ccdred/src/icscale.x
new file mode 100644
index 00000000..fc4efb2f
--- /dev/null
+++ b/noao/imred/ccdred/src/icscale.x
@@ -0,0 +1,376 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include <imset.h>
+include <error.h>
+include "icombine.h"
+
+# IC_SCALE -- Get the scale factors for the images.
+# 1. This procedure does CLIO to determine the type of scaling desired.
+# 2. The output header parameters for exposure time and NCOMBINE are set.
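+#
+# For illustration only (assumed values): with scale = "exposure" and
+# exposure times of 10, 20 and 40 the raw scales are divided by their
+# mean (23.3) giving relative scales of about 0.43, 0.86 and 1.71; the
+# zero levels are divided by these scales and have their mean subtracted,
+# and the weights are normalized to sum to one.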
+
+procedure ic_scale (in, out, offsets, scales, zeros, wts, nimages)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Image offsets
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero or sky levels
+real wts[nimages] # Weights
+int nimages # Number of images
+
+int stype, ztype, wtype
+int i, j, k, l, nout
+real mode, median, mean, exposure, zmean, darktime, dark
+pointer sp, ncombine, exptime, modes, medians, means
+pointer section, str, sname, zname, wname, imref
+bool domode, domedian, domean, dozero, snorm, znorm, wflag
+
+bool clgetb()
+int hdmgeti(), strdic(), ic_gscale()
+real hdmgetr(), asumr(), asumi()
+errchk ic_gscale, ic_statr
+
+include "icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (ncombine, nimages, TY_INT)
+ call salloc (exptime, nimages, TY_REAL)
+ call salloc (modes, nimages, TY_REAL)
+ call salloc (medians, nimages, TY_REAL)
+ call salloc (means, nimages, TY_REAL)
+ call salloc (section, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (sname, SZ_FNAME, TY_CHAR)
+ call salloc (zname, SZ_FNAME, TY_CHAR)
+ call salloc (wname, SZ_FNAME, TY_CHAR)
+
+ # Set the defaults.
+ call amovki (1, Memi[ncombine], nimages)
+ call amovkr (0., Memr[exptime], nimages)
+ call amovkr (INDEF, Memr[modes], nimages)
+ call amovkr (INDEF, Memr[medians], nimages)
+ call amovkr (INDEF, Memr[means], nimages)
+ call amovkr (1., scales, nimages)
+ call amovkr (0., zeros, nimages)
+ call amovkr (1., wts, nimages)
+
+ # Get the number of images previously combined and the exposure times.
+ # The default combine number is 1 and the default exposure is 0.
+
+ do i = 1, nimages {
+ iferr (Memi[ncombine+i-1] = hdmgeti (in[i], "ncombine"))
+ Memi[ncombine+i-1] = 1
+ iferr (Memr[exptime+i-1] = hdmgetr (in[i], "exptime"))
+ Memr[exptime+i-1] = 0.
+ if (project) {
+ call amovki (Memi[ncombine], Memi[ncombine], nimages)
+ call amovkr (Memr[exptime], Memr[exptime], nimages)
+ break
+ }
+ }
+
+ # Set scaling factors.
+
+ stype = ic_gscale ("scale", Memc[sname], STYPES, in, Memr[exptime],
+ scales, nimages)
+ ztype = ic_gscale ("zero", Memc[zname], ZTYPES, in, Memr[exptime],
+ zeros, nimages)
+ wtype = ic_gscale ("weight", Memc[wname], WTYPES, in, Memr[exptime],
+ wts, nimages)
+
+ # Get image statistics only if needed.
+ domode = ((stype==S_MODE)||(ztype==S_MODE)||(wtype==S_MODE))
+ domedian = ((stype==S_MEDIAN)||(ztype==S_MEDIAN)||(wtype==S_MEDIAN))
+ domean = ((stype==S_MEAN)||(ztype==S_MEAN)||(wtype==S_MEAN))
+ if (domode || domedian || domean) {
+ Memc[section] = EOS
+ Memc[str] = EOS
+ call clgstr ("statsec", Memc[section], SZ_FNAME)
+ call sscan (Memc[section])
+ call gargwrd (Memc[section], SZ_FNAME)
+ call gargwrd (Memc[str], SZ_LINE)
+
+ i = strdic (Memc[section], Memc[section], SZ_FNAME, S_SECTION)
+ switch (i) {
+ case S_INPUT:
+ call strcpy (Memc[str], Memc[section], SZ_FNAME)
+ imref = NULL
+ case S_OUTPUT:
+ call strcpy (Memc[str], Memc[section], SZ_FNAME)
+ imref = out[1]
+ case S_OVERLAP:
+ call strcpy ("[", Memc[section], SZ_FNAME)
+ do i = 1, IM_NDIM(out[1]) {
+ k = offsets[1,i] + 1
+ l = offsets[1,i] + IM_LEN(in[1],i)
+ do j = 2, nimages {
+ k = max (k, offsets[j,i]+1)
+ l = min (l, offsets[j,i]+IM_LEN(in[j],i))
+ }
+ if (i < IM_NDIM(out[1]))
+ call sprintf (Memc[str], SZ_LINE, "%d:%d,")
+ else
+ call sprintf (Memc[str], SZ_LINE, "%d:%d]")
+ call pargi (k)
+ call pargi (l)
+ call strcat (Memc[str], Memc[section], SZ_FNAME)
+ }
+ imref = out[1]
+ default:
+ imref = NULL
+ }
+
+ do i = 1, nimages {
+ if (imref != out[1])
+ imref = in[i]
+ call ic_statr (in[i], imref, Memc[section], offsets,
+ i, nimages, domode, domedian, domean, mode, median, mean)
+ if (domode) {
+ Memr[modes+i-1] = mode
+ if (stype == S_MODE)
+ scales[i] = mode
+ if (ztype == S_MODE)
+ zeros[i] = mode
+ if (wtype == S_MODE)
+ wts[i] = mode
+ }
+ if (domedian) {
+ Memr[medians+i-1] = median
+ if (stype == S_MEDIAN)
+ scales[i] = median
+ if (ztype == S_MEDIAN)
+ zeros[i] = median
+ if (wtype == S_MEDIAN)
+ wts[i] = median
+ }
+ if (domean) {
+ Memr[means+i-1] = mean
+ if (stype == S_MEAN)
+ scales[i] = mean
+ if (ztype == S_MEAN)
+ zeros[i] = mean
+ if (wtype == S_MEAN)
+ wts[i] = mean
+ }
+ }
+ }
+
+ do i = 1, nimages
+ if (scales[i] <= 0.) {
+ call eprintf ("WARNING: Negative scale factors")
+ call eprintf (" -- ignoring scaling\n")
+ call amovkr (1., scales, nimages)
+ break
+ }
+
+ # Convert to relative factors if needed.
+ snorm = (stype == S_FILE || stype == S_KEYWORD)
+ znorm = (ztype == S_FILE || ztype == S_KEYWORD)
+ wflag = (wtype == S_FILE || wtype == S_KEYWORD)
+ if (snorm)
+ call arcpr (1., scales, scales, nimages)
+ else {
+ mean = asumr (scales, nimages) / nimages
+ call adivkr (scales, mean, scales, nimages)
+ }
+ call adivr (zeros, scales, zeros, nimages)
+ zmean = asumr (zeros, nimages) / nimages
+
+ if (wtype != S_NONE) {
+ do i = 1, nimages {
+ if (wts[i] <= 0.) {
+ call eprintf ("WARNING: Negative weights")
+ call eprintf (" -- using only NCOMBINE weights\n")
+ do j = 1, nimages
+ wts[j] = Memi[ncombine+j-1]
+ break
+ }
+ if (ztype == S_NONE || znorm || wflag)
+ wts[i] = Memi[ncombine+i-1] * wts[i]
+ else {
+ if (zeros[i] <= 0.) {
+ call eprintf ("WARNING: Negative zero offsets")
+ call eprintf (" -- ignoring zero weight adjustments\n")
+ do j = 1, nimages
+ wts[j] = Memi[ncombine+j-1] * wts[j]
+ break
+ }
+ wts[i] = Memi[ncombine+i-1] * wts[i] * zmean / zeros[i]
+ }
+ }
+ }
+
+ if (znorm)
+ call anegr (zeros, zeros, nimages)
+ else {
+	    # Because of finite arithmetic it is possible for the zero offsets
+	    # to be nonzero even when they are all equal. Just for the sake of
+	    # a nice log, set the zero offsets exactly to zero in this case.
+
+ call asubkr (zeros, zmean, zeros, nimages)
+ for (i=2; (i<=nimages)&&(zeros[i]==zeros[1]); i=i+1)
+ ;
+ if (i > nimages)
+ call aclrr (zeros, nimages)
+ }
+ mean = asumr (wts, nimages)
+ call adivkr (wts, mean, wts, nimages)
+
+ # Set flags for scaling, zero offsets, sigma scaling, weights.
+	# The sigma scaling correction is only applied if the scales or
+	# zeros differ by more than the specified tolerance.
+
+ doscale = false
+ dozero = false
+ doscale1 = false
+ dowts = false
+ do i = 2, nimages {
+ if (snorm || scales[i] != scales[1])
+ doscale = true
+ if (znorm || zeros[i] != zeros[1])
+ dozero = true
+ if (wts[i] != wts[1])
+ dowts = true
+ }
+ if (doscale && sigscale != 0.) {
+ do i = 1, nimages {
+ if (abs (scales[i] - 1) > sigscale) {
+ doscale1 = true
+ break
+ }
+ }
+ if (!doscale1 && zmean > 0.) {
+ do i = 1, nimages {
+ if (abs (zeros[i] / zmean) > sigscale) {
+ doscale1 = true
+ break
+ }
+ }
+ }
+ }
+
+ # Set the output header parameters.
+ nout = asumi (Memi[ncombine], nimages)
+ call hdmputi (out[1], "ncombine", nout)
+ exposure = 0.
+ darktime = 0.
+ mean = 0.
+ do i = 1, nimages {
+ exposure = exposure + wts[i] * Memr[exptime+i-1] / scales[i]
+ ifnoerr (dark = hdmgetr (in[i], "darktime"))
+ darktime = darktime + wts[i] * dark / scales[i]
+ else
+ darktime = darktime + wts[i] * Memr[exptime+i-1] / scales[i]
+ ifnoerr (mode = hdmgetr (in[i], "ccdmean"))
+ mean = mean + wts[i] * mode / scales[i]
+ }
+ call hdmputr (out[1], "exptime", exposure)
+ call hdmputr (out[1], "darktime", darktime)
+ ifnoerr (mode = hdmgetr (out[1], "ccdmean")) {
+ call hdmputr (out[1], "ccdmean", mean)
+ iferr (call imdelf (out[1], "ccdmeant"))
+ ;
+ }
+ if (out[2] != NULL) {
+ call imstats (out[2], IM_IMAGENAME, Memc[str], SZ_FNAME)
+ call imastr (out[1], "BPM", Memc[str])
+ }
+
+ # Start the log here since much of the info is only available here.
+ if (clgetb ("verbose")) {
+ i = logfd
+ logfd = STDOUT
+ call ic_log (in, out, Memi[ncombine], Memr[exptime], Memc[sname],
+ Memc[zname], Memc[wname], Memr[modes], Memr[medians],
+ Memr[means], scales, zeros, wts, offsets, nimages, dozero,
+ nout, "", exposure)
+
+ logfd = i
+ }
+ call ic_log (in, out, Memi[ncombine], Memr[exptime], Memc[sname],
+ Memc[zname], Memc[wname], Memr[modes], Memr[medians], Memr[means],
+ scales, zeros, wts, offsets, nimages, dozero, nout,
+ "", exposure)
+
+ doscale = (doscale || dozero)
+
+ call sfree (sp)
+end
+
+
+# IC_GSCALE -- Get scale values as directed by CL parameter
+# The values can be one of those in the dictionary, from a file specified
+# with a @ prefix, or from an image header keyword specified by a ! prefix.
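+# For example (the names are only illustrative): scale = "mode" selects a
+# dictionary entry, scale = "@scales.dat" reads one value per image from
+# the file scales.dat, and scale = "!EXPTIME" reads the EXPTIME keyword
+# from each image header.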
+
+int procedure ic_gscale (param, name, dic, in, exptime, values, nimages)
+
+char param[ARB] #I CL parameter name
+char name[SZ_FNAME] #O Parameter value
+char dic[ARB] #I Dictionary string
+pointer in[nimages] #I IMIO pointers
+real exptime[nimages] #I Exposure times
+real values[nimages] #O Values
+int nimages #I Number of images
+
+int type #O Type of value
+
+int fd, i, nowhite(), open(), fscan(), nscan(), strdic()
+real rval, hdmgetr()
+pointer errstr
+errchk open, hdmgetr()
+
+include "icombine.com"
+
+begin
+ call clgstr (param, name, SZ_FNAME)
+ if (nowhite (name, name, SZ_FNAME) == 0)
+ type = S_NONE
+ else if (name[1] == '@') {
+ type = S_FILE
+ fd = open (name[2], READ_ONLY, TEXT_FILE)
+ i = 0
+ while (fscan (fd) != EOF) {
+ call gargr (rval)
+ if (nscan() != 1)
+ next
+ if (i == nimages) {
+ call eprintf (
+ "Warning: Ignoring additional %s values in %s\n")
+ call pargstr (param)
+ call pargstr (name[2])
+ break
+ }
+ i = i + 1
+ values[i] = rval
+ }
+ call close (fd)
+ if (i < nimages) {
+ call salloc (errstr, SZ_LINE, TY_CHAR)
+ call sprintf (Memc[errstr], SZ_FNAME,
+ "Insufficient %s values in %s")
+ call pargstr (param)
+ call pargstr (name[2])
+ call error (1, Memc[errstr])
+ }
+ } else if (name[1] == '!') {
+ type = S_KEYWORD
+ do i = 1, nimages {
+ values[i] = hdmgetr (in[i], name[2])
+ if (project) {
+ call amovkr (values, values, nimages)
+ break
+ }
+ }
+ } else {
+ type = strdic (name, name, SZ_FNAME, dic)
+ if (type == 0)
+ call error (1, "Unknown scale, zero, or weight type")
+ if (type==S_EXPOSURE)
+ do i = 1, nimages
+ values[i] = max (0.001, exptime[i])
+ }
+
+ return (type)
+end
diff --git a/noao/imred/ccdred/src/icsclip.gx b/noao/imred/ccdred/src/icsclip.gx
new file mode 100644
index 00000000..f70611aa
--- /dev/null
+++ b/noao/imred/ccdred/src/icsclip.gx
@@ -0,0 +1,504 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include "../icombine.h"
+
+define	MINCLIP	3	# Minimum number of images for algorithm
+
+$for (sr)
+# IC_ASIGCLIP -- Reject pixels using sigma clipping about the average
+# The initial average excludes the single highest and lowest pixels. A
+# correction for the different scalings of the images may be made.
+# Weights are not used.
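+#
+# A negative nkeep (from the common block) means at most abs(nkeep) pixels
+# may be rejected at each output point, while a non-negative nkeep means
+# at least nkeep pixels are kept.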
+
+procedure ic_asigclip$t (d, m, n, scales, zeros, nimages, npts, average)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+$else
+PIXEL average[npts] # Average
+$endif
+
+int i, j, k, l, jj, n1, n2, nin, nk, maxkeep
+$if (datatype == sil)
+real d1, low, high, sum, a, s, r, one
+data one /1.0/
+$else
+PIXEL d1, low, high, sum, a, s, r, one
+data one /1$f/
+$endif
+pointer sp, resid, w, wp, dp1, dp2, mp1, mp2
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Flag whether returned average needs to be recomputed.
+ if (dowts || combine != AVERAGE)
+ docombine = true
+ else
+ docombine = false
+
+ # Save the residuals and the sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Do sigma clipping.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+
+ # If there are not enough pixels simply compute the average.
+ if (n1 < max (3, maxkeep)) {
+ if (!docombine) {
+ if (n1 == 0)
+ average[i] = blank
+ else {
+ sum = Mem$t[d[1]+k]
+ do j = 2, n1
+ sum = sum + Mem$t[d[j]+k]
+ average[i] = sum / n1
+ }
+ }
+ next
+ }
+
+ # Compute average with the high and low rejected.
+ low = Mem$t[d[1]+k]
+ high = Mem$t[d[2]+k]
+ if (low > high) {
+ d1 = low
+ low = high
+ high = d1
+ }
+ sum = 0.
+ do j = 3, n1 {
+ d1 = Mem$t[d[j]+k]
+ if (d1 < low) {
+ sum = sum + low
+ low = d1
+ } else if (d1 > high) {
+ sum = sum + high
+ high = d1
+ } else
+ sum = sum + d1
+ }
+ a = sum / (n1 - 2)
+ sum = sum + low + high
+
+ # Iteratively reject pixels and compute the final average if needed.
+ # Compact the data and keep track of the image IDs if needed.
+
+ repeat {
+ n2 = n1
+ if (doscale1) {
+ # Compute sigma corrected for scaling.
+ s = 0.
+ wp = w - 1
+ do j = 1, n1 {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mem$t[dp1]
+ l = Memi[mp1]
+ r = sqrt (max (one, (a + zeros[l]) / scales[l]))
+ s = s + ((d1 - a) / r) ** 2
+ Memr[wp] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ wp = w - 1
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ wp = wp + 1
+
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / (s * Memr[wp])
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ Memr[wp] = Memr[w+n1-1]
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ } else {
+ # Compute the sigma without scale correction.
+ s = 0.
+ do j = 1, n1
+ s = s + (Mem$t[d[j]+k] - a) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels. Save the residuals and data values.
+ if (s > 0.) {
+ for (j=1; j<=n1; j=j+1) {
+ dp1 = d[j] + k
+ d1 = Mem$t[dp1]
+ r = (d1 - a) / s
+ if (r < -lsigma || r > hsigma) {
+ Memr[resid+n1] = abs (r)
+ if (j < n1) {
+ dp2 = d[n1] + k
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[n1] + k
+ l = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = l
+ }
+ j = j - 1
+ }
+ sum = sum - d1
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ } until (n1 == n2 || n1 <= max (2, maxkeep))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ if (n1 < maxkeep) {
+ nk = maxkeep
+ if (doscale1) {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ mp1 = m[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ } else {
+ for (j=n1+1; j<=nk; j=j+1) {
+ dp1 = d[j] + k
+ r = Memr[resid+j]
+ jj = 0
+ do l = j+1, n2 {
+ s = Memr[resid+l]
+ if (s < r + TOL) {
+ if (s > r - TOL)
+ jj = jj + 1
+ else {
+ jj = 0
+ Memr[resid+l] = r
+ r = s
+ dp2 = d[l] + k
+ d1 = Mem$t[dp1]
+ Mem$t[dp1] = Mem$t[dp2]
+ Mem$t[dp2] = d1
+ if (keepids) {
+ mp1 = m[j] + k
+ mp2 = m[l] + k
+ s = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = s
+ }
+ }
+ }
+ }
+ sum = sum + Mem$t[dp1]
+ n1 = n1 + 1
+ nk = max (nk, j+jj)
+ }
+ }
+
+ # Recompute the average.
+ if (n1 > 1)
+ a = sum / n1
+ }
+
+ # Save the average if needed.
+ n[i] = n1
+ if (!docombine) {
+ if (n1 > 0)
+ average[i] = a
+ else
+ average[i] = blank
+ }
+ }
+
+ # Check if the data flag has to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# IC_MSIGCLIP -- Reject pixels using sigma clipping about the median
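+# The data at each point are assumed to be sorted, so rejection works
+# inward from both ends of the good data: the low index advances while the
+# low-side residual exceeds lsigma, the high index retreats while the
+# high-side residual exceeds hsigma, and the median is then recomputed
+# from the surviving range.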
+
+procedure ic_msigclip$t (d, m, n, scales, zeros, nimages, npts, median)
+
+pointer d[nimages] # Data pointers
+pointer m[nimages] # Image id pointers
+int n[npts] # Number of good pixels
+real scales[nimages] # Scales
+real zeros[nimages] # Zeros
+int nimages # Number of images
+int npts # Number of output points per line
+$if (datatype == sil)
+real median[npts] # Median
+$else
+PIXEL median[npts] # Median
+$endif
+
+int i, j, k, l, id, n1, n2, n3, nl, nh, nin, maxkeep
+real r, s
+pointer sp, resid, w, mp1, mp2
+$if (datatype == sil)
+real med, one
+data one /1.0/
+$else
+PIXEL med, one
+data one /1$f/
+$endif
+
+include "../icombine.com"
+
+begin
+ # If there are insufficient pixels go on to the combining
+ if (nkeep < 0)
+ maxkeep = max (0, nimages + nkeep)
+ else
+ maxkeep = min (nimages, nkeep)
+ if (nimages < max (MINCLIP, maxkeep+1) || dflag == D_NONE) {
+ docombine = true
+ return
+ }
+
+ # Save the residuals and sigma scaling corrections if needed.
+ call smark (sp)
+ call salloc (resid, nimages+1, TY_REAL)
+ if (doscale1)
+ call salloc (w, nimages, TY_REAL)
+
+ # Compute median and sigma and iteratively clip.
+ nin = n[1]
+ do i = 1, npts {
+ k = i - 1
+ n1 = n[i]
+ if (nkeep < 0)
+ maxkeep = max (0, n1 + nkeep)
+ else
+ maxkeep = min (n1, nkeep)
+ nl = 1
+ nh = n1
+
+ repeat {
+ n2 = n1
+ n3 = nl + n1 / 2
+
+ if (n1 == 0)
+ med = blank
+ else if (mod (n1, 2) == 0)
+ med = (Mem$t[d[n3-1]+k] + Mem$t[d[n3]+k]) / 2.
+ else
+ med = Mem$t[d[n3]+k]
+
+ if (n1 >= max (MINCLIP, maxkeep+1)) {
+ if (doscale1) {
+ # Compute the sigma with scaling correction.
+ s = 0.
+ do j = nl, nh {
+ l = Memi[m[j]+k]
+ r = sqrt (max (one, (med + zeros[l]) / scales[l]))
+ s = s + ((Mem$t[d[j]+k] - med) / r) ** 2
+ Memr[w+j-1] = r
+ }
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / (s * Memr[w+nl-1])
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / (s * Memr[w+nh-1])
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ } else {
+ # Compute the sigma without scaling correction.
+ s = 0.
+ do j = nl, nh
+ s = s + (Mem$t[d[j]+k] - med) ** 2
+ s = sqrt (s / (n1 - 1))
+
+ # Reject pixels and save the residuals.
+ if (s > 0.) {
+ for (; nl <= n2; nl = nl + 1) {
+ r = (med - Mem$t[d[nl]+k]) / s
+ if (r <= lsigma)
+ break
+ Memr[resid+nl] = r
+ n1 = n1 - 1
+ }
+ for (; nh >= nl; nh = nh - 1) {
+ r = (Mem$t[d[nh]+k] - med) / s
+ if (r <= hsigma)
+ break
+ Memr[resid+nh] = r
+ n1 = n1 - 1
+ }
+ }
+ }
+ }
+ } until (n1 == n2 || n1 < max (MINCLIP, maxkeep+1))
+
+ # If too many pixels are rejected add some back.
+ # All pixels with equal residuals are added back.
+ while (n1 < maxkeep) {
+ if (nl == 1)
+ nh = nh + 1
+ else if (nh == n[i])
+ nl = nl - 1
+ else {
+ r = Memr[resid+nl-1]
+ s = Memr[resid+nh+1]
+ if (r < s) {
+ nl = nl - 1
+ r = r + TOL
+ if (s <= r)
+ nh = nh + 1
+ if (nl > 1) {
+ if (Memr[resid+nl-1] <= r)
+ nl = nl - 1
+ }
+ } else {
+ nh = nh + 1
+ s = s + TOL
+ if (r <= s)
+ nl = nl - 1
+ if (nh < n2) {
+ if (Memr[resid+nh+1] <= s)
+ nh = nh + 1
+ }
+ }
+ }
+ n1 = nh - nl + 1
+ }
+
+ # Only set median and reorder if needed
+ n[i] = n1
+ if (n1 > 0 && nl > 1 && (combine != MEDIAN || grow > 0)) {
+ j = max (nl, n1 + 1)
+ if (keepids) {
+ do l = 1, min (n1, nl-1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ if (grow > 0) {
+ mp1 = m[l] + k
+ mp2 = m[j] + k
+ id = Memi[mp1]
+ Memi[mp1] = Memi[mp2]
+ Memi[mp2] = id
+ } else
+ Memi[m[l]+k] = Memi[m[j]+k]
+ j = j + 1
+ }
+ } else {
+ do l = 1, min (n1, nl - 1) {
+ Mem$t[d[l]+k] = Mem$t[d[j]+k]
+ j = j + 1
+ }
+ }
+ }
+
+ if (combine == MEDIAN)
+ median[i] = med
+ }
+
+ # Check if data flag needs to be reset for rejected pixels
+ if (dflag == D_ALL) {
+ do i = 1, npts {
+ if (n[i] != nin) {
+ dflag = D_MIX
+ break
+ }
+ }
+ }
+
+ # Flag that the median has been computed.
+ if (combine == MEDIAN)
+ docombine = false
+ else
+ docombine = true
+
+ call sfree (sp)
+end
+$endfor
diff --git a/noao/imred/ccdred/src/icsection.x b/noao/imred/ccdred/src/icsection.x
new file mode 100644
index 00000000..746c1f51
--- /dev/null
+++ b/noao/imred/ccdred/src/icsection.x
@@ -0,0 +1,94 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <ctype.h>
+
+# IC_SECTION -- Parse an image section into its elements.
+# 1. The default values must be set by the caller.
+# 2. A null image section is OK.
+# 3. The first nonwhitespace character must be '['.
+# 4. The last interpreted character must be ']'.
+#
+# This procedure should be replaced with an IMIO procedure at some
+# point.
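+#
+# For example, "[1:100:2,50:60]" gives x1 = 1, x2 = 100, xs = 2 in the
+# first dimension and x1 = 50, x2 = 60 (default step) in the second;
+# "*" leaves a dimension at its defaults and "-*" swaps the default
+# limits and negates the step.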
+
+procedure ic_section (section, x1, x2, xs, ndim)
+
+char section[ARB] # Image section
+int x1[ndim] # Starting pixel
+int x2[ndim] # Ending pixel
+int xs[ndim] # Step
+int ndim # Number of dimensions
+
+int i, ip, a, b, c, temp, ctoi()
+define error_ 99
+
+begin
+ # Decode the section string.
+ ip = 1
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == '[')
+ ip = ip + 1
+ else if (section[ip] == EOS)
+ return
+ else
+ goto error_
+
+ do i = 1, ndim {
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ']')
+ break
+
+ # Default values
+ a = x1[i]
+ b = x2[i]
+ c = xs[i]
+
+ # Get a:b:c. Allow notation such as "-*:c"
+ # (or even "-:c") where the step is obviously negative.
+
+ if (ctoi (section, ip, temp) > 0) { # a
+ a = temp
+ if (section[ip] == ':') {
+ ip = ip + 1
+ if (ctoi (section, ip, b) == 0) # a:b
+ goto error_
+ } else
+ b = a
+ } else if (section[ip] == '-') { # -*
+ temp = a
+ a = b
+ b = temp
+ ip = ip + 1
+ if (section[ip] == '*')
+ ip = ip + 1
+ } else if (section[ip] == '*') # *
+ ip = ip + 1
+ if (section[ip] == ':') { # ..:step
+ ip = ip + 1
+ if (ctoi (section, ip, c) == 0)
+ goto error_
+ else if (c == 0)
+ goto error_
+ }
+ if (a > b && c > 0)
+ c = -c
+
+ x1[i] = a
+ x2[i] = b
+ xs[i] = c
+
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ',')
+ ip = ip + 1
+ }
+
+ if (section[ip] != ']')
+ goto error_
+
+ return
+error_
+ call error (0, "Error in image section specification")
+end
diff --git a/noao/imred/ccdred/src/icsetout.x b/noao/imred/ccdred/src/icsetout.x
new file mode 100644
index 00000000..bd1d75ec
--- /dev/null
+++ b/noao/imred/ccdred/src/icsetout.x
@@ -0,0 +1,193 @@
+include <imhdr.h>
+include <mwset.h>
+
+# IC_SETOUT -- Set output image size and offsets of input images.
+
+procedure ic_setout (in, out, offsets, nimages)
+
+pointer in[nimages] # Input images
+pointer out[ARB] # Output images
+int offsets[nimages,ARB] # Offsets
+int nimages # Number of images
+
+int i, j, indim, outdim, mwdim, a, b, amin, bmax, fd
+real val
+bool reloff, streq()
+pointer sp, fname, lref, wref, cd, coord, shift, axno, axval
+pointer mw, ct, mw_openim(), mw_sctran()
+int open(), fscan(), nscan(), mw_stati()
+errchk mw_openim, mw_gwtermd, mw_gltermd, mw_gaxmap
+errchk mw_sctran, mw_ctrand, open
+
+include "icombine.com"
+define newscan_ 10
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (lref, IM_MAXDIM, TY_DOUBLE)
+ call salloc (wref, IM_MAXDIM, TY_DOUBLE)
+ call salloc (cd, IM_MAXDIM*IM_MAXDIM, TY_DOUBLE)
+ call salloc (coord, IM_MAXDIM, TY_DOUBLE)
+ call salloc (shift, IM_MAXDIM, TY_REAL)
+ call salloc (axno, IM_MAXDIM, TY_INT)
+ call salloc (axval, IM_MAXDIM, TY_INT)
+
+ # Check and set the image dimensionality.
+ indim = IM_NDIM(in[1])
+ outdim = IM_NDIM(out[1])
+ if (project) {
+ outdim = indim - 1
+ IM_NDIM(out[1]) = outdim
+ } else {
+ do i = 1, nimages
+ if (IM_NDIM(in[i]) != outdim) {
+ call sfree (sp)
+ call error (1, "Image dimensions are not the same")
+ }
+ }
+
+ # Set the reference point to that of the first image.
+ mw = mw_openim (in[1])
+ mwdim = mw_stati (mw, MW_NPHYSDIM)
+ call mw_gwtermd (mw, Memd[lref], Memd[wref], Memd[cd], mwdim)
+ ct = mw_sctran (mw, "world", "logical", 0)
+ call mw_ctrand (ct, Memd[wref], Memd[lref], mwdim)
+ call mw_ctfree (ct)
+ if (project)
+ Memd[lref+outdim] = 1
+
+ # Parse the user offset string. If "none" then there are no offsets.
+ # If "wcs" then set the offsets based on the image WCS.
+ # If "grid" then set the offsets based on the input grid parameters.
+	# Otherwise the string is a file name and the offsets are read from it.
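+	# For example (illustrative values), offsets = "grid 5 100 5 100" for
+	# a 2D output steps the first dimension by 100 pixels for successive
+	# images and the second dimension by 100 pixels every 5 images, i.e.
+	# a 5x5 grid of positions.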
+
+ call clgstr ("offsets", Memc[fname], SZ_FNAME)
+ call sscan (Memc[fname])
+ call gargwrd (Memc[fname], SZ_FNAME)
+ if (nscan() == 0 || streq (Memc[fname], "none")) {
+ call aclri (offsets, outdim*nimages)
+ reloff = true
+ } else if (streq (Memc[fname], "wcs")) {
+ do j = 1, outdim
+ offsets[1,j] = 0
+ if (project) {
+ ct = mw_sctran (mw, "world", "logical", 0)
+ do i = 2, nimages {
+ Memd[wref+outdim] = i
+ call mw_ctrand (ct, Memd[wref], Memd[coord], indim)
+ do j = 1, outdim
+ offsets[i,j] = nint (Memd[lref+j-1] - Memd[coord+j-1])
+ }
+ call mw_ctfree (ct)
+ call mw_close (mw)
+ } else {
+ do i = 2, nimages {
+ call mw_close (mw)
+ mw = mw_openim (in[i])
+ ct = mw_sctran (mw, "world", "logical", 0)
+ call mw_ctrand (ct, Memd[wref], Memd[coord], indim)
+ do j = 1, outdim
+ offsets[i,j] = nint (Memd[lref+j-1] - Memd[coord+j-1])
+ call mw_ctfree (ct)
+ }
+ }
+ reloff = true
+ } else if (streq (Memc[fname], "grid")) {
+ amin = 1
+ do j = 1, outdim {
+ call gargi (a)
+ call gargi (b)
+ if (nscan() < 1+2*j)
+ break
+ do i = 1, nimages
+ offsets[i,j] = mod ((i-1)/amin, a) * b
+ amin = amin * a
+ }
+ reloff = true
+ } else {
+ reloff = true
+ fd = open (Memc[fname], READ_ONLY, TEXT_FILE)
+ do i = 1, nimages {
+newscan_ if (fscan (fd) == EOF)
+ call error (1, "IMCOMBINE: Offset list too short")
+ call gargwrd (Memc[fname], SZ_FNAME)
+ if (Memc[fname] == '#') {
+ call gargwrd (Memc[fname], SZ_FNAME)
+ call strlwr (Memc[fname])
+ if (streq (Memc[fname], "absolute"))
+ reloff = false
+ else if (streq (Memc[fname], "relative"))
+ reloff = true
+ goto newscan_
+ }
+ call reset_scan ()
+ do j = 1, outdim {
+ call gargr (val)
+ offsets[i,j] = nint (val)
+ }
+ if (nscan() < outdim)
+ call error (1, "IMCOMBINE: Error in offset list")
+ }
+ call close (fd)
+ }
+
+ # Set the output image size and the aligned flag
+ aligned = true
+ do j = 1, outdim {
+ a = offsets[1,j]
+ b = IM_LEN(in[1],j) + a
+ amin = a
+ bmax = b
+ do i = 2, nimages {
+ a = offsets[i,j]
+ b = IM_LEN(in[i],j) + a
+ if (a != amin || b != bmax || !reloff)
+ aligned = false
+ amin = min (a, amin)
+ bmax = max (b, bmax)
+ }
+ IM_LEN(out[1],j) = bmax
+ if (reloff || amin < 0) {
+ do i = 1, nimages
+ offsets[i,j] = offsets[i,j] - amin
+ IM_LEN(out[1],j) = IM_LEN(out[1],j) - amin
+ }
+ }
+
+ # Update the WCS.
+ if (project || !aligned || !reloff) {
+ call mw_close (mw)
+ mw = mw_openim (out[1])
+ mwdim = mw_stati (mw, MW_NPHYSDIM)
+ call mw_gaxmap (mw, Memi[axno], Memi[axval], mwdim)
+ if (!aligned || !reloff) {
+ call mw_gltermd (mw, Memd[cd], Memd[lref], mwdim)
+ do i = 1, mwdim {
+ j = Memi[axno+i-1]
+ if (j > 0 && j <= indim)
+ Memd[lref+i-1] = Memd[lref+i-1] + offsets[1,j]
+ }
+ call mw_sltermd (mw, Memd[cd], Memd[lref], mwdim)
+ }
+ if (project) {
+ # Apply dimensional reduction.
+ do i = 1, mwdim {
+ j = Memi[axno+i-1]
+ if (j <= outdim)
+ next
+ else if (j > outdim+1)
+ Memi[axno+i-1] = j - 1
+ else {
+ Memi[axno+i-1] = 0
+ Memi[axval+i-1] = 0
+ }
+ }
+ call mw_saxmap (mw, Memi[axno], Memi[axval], mwdim)
+ }
+ call mw_saveim (mw, out)
+ }
+ call mw_close (mw)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/icsigma.gx b/noao/imred/ccdred/src/icsigma.gx
new file mode 100644
index 00000000..d0ae28d4
--- /dev/null
+++ b/noao/imred/ccdred/src/icsigma.gx
@@ -0,0 +1,115 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+$for (sr)
+# IC_SIGMA -- Compute the sigma image line.
+# The estimated sigma includes a correction for the finite population.
+# Weights are used if desired.
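+#
+# In the unweighted case the line sigma is sqrt (sum ((d-a)**2) / (n-1));
+# in the weighted case, with the weights normalized to unit sum, it is
+# sqrt (sum (w * (d-a)**2) * n / (n-1)), with an explicit division by the
+# weight sum when some pixels have been rejected.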
+
+procedure ic_sigma$t (d, m, n, wts, npts, average, sigma)
+
+pointer d[ARB] # Data pointers
+pointer m[ARB] # Image ID pointers
+int n[npts] # Number of points
+real wts[ARB] # Weights
+int npts # Number of output points per line
+$if (datatype == sil)
+real average[npts] # Average
+real sigma[npts] # Sigma line (returned)
+$else
+PIXEL average[npts] # Average
+PIXEL sigma[npts] # Sigma line (returned)
+$endif
+
+int i, j, k, n1
+real wt, sigcor, sumwt
+$if (datatype == sil)
+real a, sum
+$else
+PIXEL a, sum
+$endif
+
+include "../icombine.com"
+
+begin
+ if (dflag == D_ALL) {
+ n1 = n[1]
+ if (dowts) {
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mem$t[d[1]+k] - a) ** 2 * wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2 * wt
+ }
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ } else {
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ do i = 1, npts {
+ k = i - 1
+ a = average[i]
+ sum = (Mem$t[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ }
+ }
+ } else if (dflag == D_NONE) {
+ do i = 1, npts
+ sigma[i] = blank
+ } else {
+ if (dowts) {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = real (n1) / real (n1 -1)
+ else
+ sigcor = 1
+ a = average[i]
+ wt = wts[Memi[m[1]+k]]
+ sum = (Mem$t[d[1]+k] - a) ** 2 * wt
+ sumwt = wt
+ do j = 2, n1 {
+ wt = wts[Memi[m[j]+k]]
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2 * wt
+ sumwt = sumwt + wt
+ }
+ sigma[i] = sqrt (sum / sumwt * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ } else {
+ do i = 1, npts {
+ n1 = n[i]
+ if (n1 > 0) {
+ k = i - 1
+ if (n1 > 1)
+ sigcor = 1. / real (n1 - 1)
+ else
+ sigcor = 1.
+ a = average[i]
+ sum = (Mem$t[d[1]+k] - a) ** 2
+ do j = 2, n1
+ sum = sum + (Mem$t[d[j]+k] - a) ** 2
+ sigma[i] = sqrt (sum * sigcor)
+ } else
+ sigma[i] = blank
+ }
+ }
+ }
+end
+$endfor
diff --git a/noao/imred/ccdred/src/icsort.gx b/noao/imred/ccdred/src/icsort.gx
new file mode 100644
index 00000000..2235dbd0
--- /dev/null
+++ b/noao/imred/ccdred/src/icsort.gx
@@ -0,0 +1,386 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+define LOGPTR 32 # log2(maxpts) (4e9)
+
+$for (sr)
+# IC_SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially.
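+# For the complex data type the comparisons are made on the absolute
+# values, so the ordering is by modulus.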
+
+procedure ic_sort$t (a, b, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+PIXEL b[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+PIXEL pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR]
+define swap {temp=$1;$1=$2;$2=temp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix
+ b[i] = Mem$t[a[i]+l]
+
+ # Special cases
+ $if (datatype == x)
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (abs (temp) < abs (pivot)) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (abs (temp) < abs (pivot)) { # bac|bca|cba
+ if (abs (temp) < abs (temp3)) { # bac|bca
+ b[1] = temp
+ if (abs (pivot) < abs (temp3)) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (abs (temp3) < abs (temp)) { # acb|cab
+ b[3] = temp
+ if (abs (pivot) < abs (temp3)) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $else
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) # bac
+ b[2] = pivot
+ else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) # acb
+ b[2] = temp3
+ else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $endif
+
+ # General case
+ do i = 1, npix
+ b[i] = Mem$t[a[i]+l]
+
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ $if (datatype == x)
+ for (i=i+1; abs(b[i]) < abs(pivot); i=i+1)
+ $else
+ for (i=i+1; b[i] < pivot; i=i+1)
+ $endif
+ ;
+ for (j=j-1; j > i; j=j-1)
+ $if (datatype == x)
+ if (abs(b[j]) <= abs(pivot))
+ $else
+ if (b[j] <= pivot)
+ $endif
+ break
+ if (i < j) # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix
+ Mem$t[a[i]+l] = b[i]
+ }
+end
+
+
+# IC_2SORT -- Quicksort. This is based on the VOPS asrt except that
+# the input is an array of pointers to image lines and the sort is done
+# across the image lines at each point along the lines. The number of
+# valid pixels at each point is allowed to vary. The cases of 1, 2, and 3
+# pixels per point are treated specially. A second, associated set of
+# integer vectors is sorted in parallel.
+
+procedure ic_2sort$t (a, b, c, d, nvecs, npts)
+
+pointer a[ARB] # pointer to input vectors
+PIXEL b[ARB] # work array
+pointer c[ARB] # pointer to associated integer vectors
+int d[ARB] # work array
+int nvecs[npts] # number of vectors
+int npts # number of points in vectors
+
+PIXEL pivot, temp, temp3
+int i, j, k, l, p, npix, lv[LOGPTR], uv[LOGPTR], itemp
+define swap {temp=$1;$1=$2;$2=temp}
+define iswap {itemp=$1;$1=$2;$2=itemp}
+define copy_ 10
+
+begin
+ do l = 0, npts-1 {
+ npix = nvecs[l+1]
+ if (npix <= 1)
+ next
+
+ do i = 1, npix {
+ b[i] = Mem$t[a[i]+l]
+ d[i] = Memi[c[i]+l]
+ }
+
+ # Special cases
+ $if (datatype == x)
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (abs (temp) < abs (pivot)) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (abs (temp) < abs (pivot)) { # bac|bca|cba
+ if (abs (temp) < abs (temp3)) { # bac|bca
+ b[1] = temp
+ if (abs (pivot) < abs (temp3)) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (abs (temp3) < abs (temp)) { # acb|cab
+ b[3] = temp
+ if (abs (pivot) < abs (temp3)) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $else
+ if (npix <= 3) {
+ pivot = b[1]
+ temp = b[2]
+ if (npix == 2) {
+ if (temp < pivot) {
+ b[1] = temp
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else
+ next
+ } else {
+ temp3 = b[3]
+ if (temp < pivot) { # bac|bca|cba
+ if (temp < temp3) { # bac|bca
+ b[1] = temp
+ if (pivot < temp3) { # bac
+ b[2] = pivot
+ iswap (d[1], d[2])
+ } else { # bca
+ b[2] = temp3
+ b[3] = pivot
+ itemp = d[2]
+ d[2] = d[3]
+ d[3] = d[1]
+ d[1] = itemp
+ }
+ } else { # cba
+ b[1] = temp3
+ b[3] = pivot
+ iswap (d[1], d[3])
+ }
+ } else if (temp3 < temp) { # acb|cab
+ b[3] = temp
+ if (pivot < temp3) { # acb
+ b[2] = temp3
+ iswap (d[2], d[3])
+ } else { # cab
+ b[1] = temp3
+ b[2] = pivot
+ itemp = d[2]
+ d[2] = d[1]
+ d[1] = d[3]
+ d[3] = itemp
+ }
+ } else
+ next
+ }
+ goto copy_
+ }
+ $endif
+
+ # General case
+ lv[1] = 1
+ uv[1] = npix
+ p = 1
+
+ while (p > 0) {
+ if (lv[p] >= uv[p]) # only one elem in this subset
+ p = p - 1 # pop stack
+ else {
+ # Dummy do loop to trigger the Fortran optimizer.
+ do p = p, ARB {
+ i = lv[p] - 1
+ j = uv[p]
+
+ # Select as the pivot the element at the center of the
+ # array, to avoid quadratic behavior on an already
+ # sorted array.
+
+ k = (lv[p] + uv[p]) / 2
+ swap (b[j], b[k]); swap (d[j], d[k])
+ pivot = b[j] # pivot line
+
+ while (i < j) {
+ $if (datatype == x)
+ for (i=i+1; abs(b[i]) < abs(pivot); i=i+1)
+ $else
+ for (i=i+1; b[i] < pivot; i=i+1)
+ $endif
+ ;
+ for (j=j-1; j > i; j=j-1)
+ $if (datatype == x)
+ if (abs(b[j]) <= abs(pivot))
+ $else
+ if (b[j] <= pivot)
+ $endif
+ break
+ if (i < j) { # out of order pair
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+ }
+ }
+
+ j = uv[p] # move pivot to position i
+ swap (b[i], b[j]) # interchange elements
+ swap (d[i], d[j])
+
+ if (i-lv[p] < uv[p] - i) { # stack so shorter done first
+ lv[p+1] = lv[p]
+ uv[p+1] = i - 1
+ lv[p] = i + 1
+ } else {
+ lv[p+1] = i + 1
+ uv[p+1] = uv[p]
+ uv[p] = i - 1
+ }
+
+ break
+ }
+ p = p + 1 # push onto stack
+ }
+ }
+
+copy_
+ do i = 1, npix {
+ Mem$t[a[i]+l] = b[i]
+ Memi[c[i]+l] = d[i]
+ }
+ }
+end
+$endfor
diff --git a/noao/imred/ccdred/src/icstat.gx b/noao/imred/ccdred/src/icstat.gx
new file mode 100644
index 00000000..099ddf5e
--- /dev/null
+++ b/noao/imred/ccdred/src/icstat.gx
@@ -0,0 +1,237 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "../icombine.h"
+
+define NMAX 10000 # Maximum number of pixels to sample
+
+$for (sr)
+# IC_STAT -- Compute image statistics within specified section.
+# The image section is relative to a reference image which may be
+# different than the input image and may have an offset. Only a
+# subsample of pixels is used. Masked and thresholded pixels are
+# ignored. Only the desired statistics are computed to increase
+# efficiency.
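+#
+# At most NMAX pixels are sampled: the sampling step in each dimension is
+# increased (up to a factor of 10) until the number of sampled pixels
+# falls below NMAX.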
+
+procedure ic_stat$t (im, imref, section, offsets, image, nimages,
+ domode, domedian, domean, mode, median, mean)
+
+pointer im # Data image
+pointer imref # Reference image for image section
+char section[ARB] # Image section
+int offsets[nimages,ARB] # Image section offset from data to reference
+int image # Image index (for mask I/O)
+int nimages # Number of images in offsets.
+bool domode, domedian, domean # Statistics to compute
+real mode, median, mean # Statistics
+
+int i, j, ndim, n, nv
+real a
+pointer sp, v1, v2, dv, va, vb
+pointer data, mask, dp, lp, mp, imgnl$t()
+PIXEL ic_mode$t()
+$if (datatype == irs)
+real asum$t()
+$endif
+$if (datatype == dl)
+double asum$t()
+$endif
+$if (datatype == x)
+complex asum$t()
+$endif
+
+
+include "../icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (v1, IM_MAXDIM, TY_LONG)
+ call salloc (v2, IM_MAXDIM, TY_LONG)
+ call salloc (dv, IM_MAXDIM, TY_LONG)
+ call salloc (va, IM_MAXDIM, TY_LONG)
+ call salloc (vb, IM_MAXDIM, TY_LONG)
+
+ # Determine the image section parameters. This must be in terms of
+ # the data image pixel coordinates though the section may be specified
+ # in terms of the reference image coordinates. Limit the number of
+ # pixels in each dimension to a maximum.
+
+ ndim = IM_NDIM(im)
+ if (project)
+ ndim = ndim - 1
+ call amovki (1, Memi[v1], IM_MAXDIM)
+ call amovki (1, Memi[va], IM_MAXDIM)
+ call amovki (1, Memi[dv], IM_MAXDIM)
+ call amovi (IM_LEN(imref,1), Memi[vb], ndim)
+ call ic_section (section, Memi[va], Memi[vb], Memi[dv], ndim)
+ if (im != imref)
+ do i = 1, ndim {
+ Memi[va+i-1] = Memi[va+i-1] - offsets[image,i]
+ Memi[vb+i-1] = Memi[vb+i-1] - offsets[image,i]
+ }
+
+ do j = 1, 10 {
+ n = 1
+ do i = 0, ndim-1 {
+ Memi[v1+i] = max (1, min (Memi[va+i], Memi[vb+i]))
+ Memi[v2+i] = min (IM_LEN(im,i+1), max (Memi[va+i], Memi[vb+i]))
+ Memi[dv+i] = j
+ nv = max (1, (Memi[v2+i] - Memi[v1+i]) / Memi[dv+i] + 1)
+ Memi[v2+i] = Memi[v1+i] + (nv - 1) * Memi[dv+i]
+ n = n * nv
+ }
+ if (n < NMAX)
+ break
+ }
+
+ call amovl (Memi[v1], Memi[va], IM_MAXDIM)
+ Memi[va] = 1
+ if (project)
+ Memi[va+ndim] = image
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+
+ # Accumulate the pixel values within the section. Masked pixels and
+ # thresholded pixels are ignored.
+
+ call salloc (data, n, TY_PIXEL)
+ dp = data
+ while (imgnl$t (im, lp, Memi[vb]) != EOF) {
+ call ic_mget1 (im, image, offsets[image,1], Memi[va], mask)
+ lp = lp + Memi[v1] - 1
+ if (dflag == D_ALL) {
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ a = Mem$t[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mem$t[dp] = a
+ dp = dp + 1
+ }
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ Mem$t[dp] = Mem$t[lp]
+ dp = dp + 1
+ lp = lp + Memi[dv]
+ }
+ }
+ } else if (dflag == D_MIX) {
+ mp = mask + Memi[v1] - 1
+ if (dothresh) {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ a = Mem$t[lp]
+ if (a >= lthresh && a <= hthresh) {
+ Mem$t[dp] = a
+ dp = dp + 1
+ }
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ } else {
+ do i = Memi[v1], Memi[v2], Memi[dv] {
+ if (Memi[mp] == 0) {
+ Mem$t[dp] = Mem$t[lp]
+ dp = dp + 1
+ }
+ mp = mp + Memi[dv]
+ lp = lp + Memi[dv]
+ }
+ }
+ }
+ for (i=2; i<=ndim; i=i+1) {
+ Memi[va+i-1] = Memi[va+i-1] + Memi[dv+i-1]
+ if (Memi[va+i-1] <= Memi[v2+i-1])
+ break
+ Memi[va+i-1] = Memi[v1+i-1]
+ }
+ if (i > ndim)
+ break
+ call amovl (Memi[va], Memi[vb], IM_MAXDIM)
+ }
+
+ n = dp - data
+ if (n < 1) {
+ call sfree (sp)
+ call error (1, "Image section contains no pixels")
+ }
+
+ # Compute only statistics needed.
+ if (domode || domedian) {
+ call asrt$t (Mem$t[data], Mem$t[data], n)
+ mode = ic_mode$t (Mem$t[data], n)
+ median = Mem$t[data+n/2-1]
+ }
+ if (domean)
+ mean = asum$t (Mem$t[data], n) / n
+
+ call sfree (sp)
+end
+
+
+define NMIN 10 # Minimum number of pixels for mode calculation
+define ZRANGE 0.8 # Fraction of pixels about median to use
+define ZSTEP 0.01 # Step size for search for mode
+define ZBIN 0.1 # Bin size for mode.
+
+# IC_MODE -- Compute mode of an array. The mode is found by binning
+# with a bin size based on the data range over a fraction of the
+# pixels about the median and a bin step which may be smaller than the
+# bin size. If there are too few points the median is returned.
+# The input array must be sorted.
+
+PIXEL procedure ic_mode$t (a, n)
+
+PIXEL a[n] # Data array
+int n # Number of points
+
+int i, j, k, nmax
+real z1, z2, zstep, zbin
+PIXEL mode
+bool fp_equalr()
+
+begin
+ if (n < NMIN)
+ return (a[n/2])
+
+ # Compute the mode. The array must be sorted. Consider a
+ # range of values about the median point. Use a bin size which
+ # is ZBIN of the range. Step the bin limits in ZSTEP fraction of
+ # the bin size.
+
+ i = 1 + n * (1. - ZRANGE) / 2.
+ j = 1 + n * (1. + ZRANGE) / 2.
+ z1 = a[i]
+ z2 = a[j]
+ if (fp_equalr (z1, z2)) {
+ mode = z1
+ return (mode)
+ }
+
+ zstep = ZSTEP * (z2 - z1)
+ zbin = ZBIN * (z2 - z1)
+ $if (datatype == sil)
+ zstep = max (1., zstep)
+ zbin = max (1., zbin)
+ $endif
+
+ z1 = z1 - zstep
+ k = i
+ nmax = 0
+ repeat {
+ z1 = z1 + zstep
+ z2 = z1 + zbin
+ for (; i < j && a[i] < z1; i=i+1)
+ ;
+ for (; k < j && a[k] < z2; k=k+1)
+ ;
+ if (k - i > nmax) {
+ nmax = k - i
+ mode = a[(i+k)/2]
+ }
+ } until (k >= j)
+
+ return (mode)
+end
+$endfor
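
For readers following the generic template, the binned-mode search can be restated in plain Python: take the central ZRANGE fraction of the sorted data, slide a window whose width is ZBIN of that range forward in ZSTEP-sized steps, and return the midpoint of the most populated window. This is only a sketch of the algorithm described above, not a translation of the SPP.

    # Illustrative Python restatement of the binned mode search above.
    ZRANGE, ZSTEP, ZBIN, NMIN = 0.8, 0.01, 0.1, 10

    def binned_mode(a):
        """a: sorted list of numbers."""
        n = len(a)
        if n < NMIN:
            return a[n // 2]                   # too few points: use the median
        i = int(n * (1.0 - ZRANGE) / 2.0)      # central range of the sorted data
        j = int(n * (1.0 + ZRANGE) / 2.0) - 1
        z1, z2 = a[i], a[j]
        if z1 == z2:
            return z1
        zstep, zbin = ZSTEP * (z2 - z1), ZBIN * (z2 - z1)
        k, nmax, mode = i, 0, a[(i + j) // 2]
        lo = z1 - zstep
        while k < j:
            lo += zstep                        # advance the window start
            hi = lo + zbin
            while i < j and a[i] < lo:         # drop points below the window
                i += 1
            while k < j and a[k] < hi:         # take in points under its top
                k += 1
            if k - i > nmax:                   # densest window so far
                nmax, mode = k - i, a[(i + k) // 2]
        return mode

    print(binned_mode(sorted([1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10])))  # -> 2
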
diff --git a/noao/imred/ccdred/src/mkpkg b/noao/imred/ccdred/src/mkpkg
new file mode 100644
index 00000000..d2d46598
--- /dev/null
+++ b/noao/imred/ccdred/src/mkpkg
@@ -0,0 +1,75 @@
+# Make CCDRED Package.
+
+$checkout libpkg.a ..
+$update libpkg.a
+$checkin libpkg.a ..
+$exit
+
+generic:
+ $set GEN = "$$generic -k"
+
+ $ifolder (generic/ccdred.h, ccdred.h)
+ $copy ccdred.h generic/ccdred.h $endif
+ $ifolder (generic/proc.x, proc.gx)
+ $(GEN) proc.gx -o generic/proc.x $endif
+ $ifolder (generic/cor.x, cor.gx)
+ $(GEN) cor.gx -o generic/cor.x $endif
+ ;
+
+libpkg.a:
+ $ifeq (USE_GENERIC, yes) $call generic $endif
+ @generic
+
+ @combine
+
+ calimage.x ccdtypes.h <error.h> <imset.h>
+ ccdcache.x ccdcache.com ccdcache.h <imhdr.h>\
+ <imset.h> <mach.h>
+ ccdcheck.x ccdtypes.h <imhdr.h>
+ ccdcmp.x
+ ccdcopy.x <imhdr.h>
+ ccddelete.x
+ ccdflag.x
+ ccdlog.x <imhdr.h> <imset.h>
+ ccdmean.x <imhdr.h>
+ ccdnscan.x ccdtypes.h
+ ccdproc.x ccdred.h ccdtypes.h <error.h>
+ ccdsection.x <ctype.h>
+ ccdsubsets.x <ctype.h>
+ ccdtypes.x ccdtypes.h
+ doproc.x ccdred.h
+ hdrmap.x hdrmap.com <error.h> <syserr.h>
+ readcor.x <imhdr.h>
+ scancor.x <imhdr.h> <imset.h>
+ setdark.x ccdred.h ccdtypes.h <imhdr.h>
+ setfixpix.x ccdred.h <imhdr.h> <imset.h> <pmset.h>
+ setflat.x ccdred.h ccdtypes.h <imhdr.h>
+ setfringe.x ccdred.h ccdtypes.h <imhdr.h>
+ setheader.x ccdred.h <imhdr.h>
+ setillum.x ccdred.h ccdtypes.h <imhdr.h>
+ setinput.x ccdtypes.h <error.h>
+ setinteract.x <pkg/xtanswer.h>
+ setoutput.x <imhdr.h> <imset.h>
+ setoverscan.x ccdred.h <imhdr.h> <imset.h> <pkg/xtanswer.h>\
+ <pkg/gtools.h>
+ setproc.x ccdred.h <imhdr.h>
+ setsections.x ccdred.h <imhdr.h> <mwset.h>
+ settrim.x ccdred.h <imhdr.h> <imset.h>
+ setzero.x ccdred.h ccdtypes.h <imhdr.h>
+ t_badpixim.x <imhdr.h>
+ t_ccdgroups.x <error.h> <math.h>
+ t_ccdhedit.x <error.h>
+ t_ccdinst.x ccdtypes.h <error.h> <imhdr.h> <imio.h>
+ t_ccdlist.x ccdtypes.h <error.h> <imhdr.h>
+ t_ccdmask.x <imhdr.h>
+ t_ccdproc.x ccdred.h ccdtypes.h <error.h> <imhdr.h>
+ t_combine.x ccdred.h combine/icombine.com combine/icombine.h\
+ <error.h> <imhdr.h> <mach.h> <syserr.h>
+ t_mkfringe.x ccdred.h <imhdr.h>
+ t_mkillumcor.x ccdred.h
+ t_mkillumft.x ccdred.h <imhdr.h>
+ t_mkskycor.x ccdred.h <mach.h> <imhdr.h> <imset.h>
+ t_mkskyflat.x ccdred.h ccdtypes.h <imhdr.h>
+ t_skyreplace.x <imhdr.h>
+ timelog.x <time.h>
+ ;
diff --git a/noao/imred/ccdred/src/proc.gx b/noao/imred/ccdred/src/proc.gx
new file mode 100644
index 00000000..3161d2e6
--- /dev/null
+++ b/noao/imred/ccdred/src/proc.gx
@@ -0,0 +1,408 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+.help proc Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+proc -- Process CCD images
+
+These are the main CCD reduction procedures. There is one for each
+readout axis (lines or columns), each generated for short and real image data.
+They apply corrections for bad pixels, overscan levels, zero levels,
+dark counts, flat field response, illumination response, and fringe
+effects. The image is also trimmed if it was mapped with an image
+section. The mean value for the output image is computed when the flat
+field or illumination image is processed to form the scale factor for
+these calibrations in order to avoid reading through these images a
+second time.
+
+The processing information and parameters are specified in the CCD
+structure. The processing operations to be performed are specified by
+the correction array CORS in the ccd structure. There is one array
+element for each operation with indices defined symbolically by macro
+definitions (see ccdred.h); i.e. FLATCOR. The value of the array
+element is an integer bit field in which the bit set is the same as the
+array index; i.e. element 3 will have the third bit set for an operation
+with array value 2**(3-1)=4. If an operation is not to be performed
+the bit is not set and the array element has the numeric value zero.
+Note that the addition of several correction elements gives a unique
+bit field describing a combination of operations. For efficiency the
+most common combinations are implemented as separate units.
+
+The CCD structure also contains the correction or calibration data
+consisting of pointers to the data, IMIO pointers for the calibration
+images, and scale factors.
+
+The processing is performed line-by-line. The procedure CORINPUT is
+called to get an input line. This procedure trims and fixes bad pixels by
+interpolation. The output line and lines from the various calibration
+images are read. The image vectors as well as the overscan vector and
+the scale factors are passed to the procedure COR (which also
+dereferences the pointer data into simple arrays and variables). That
+procedure does the actual corrections apart from bad pixel
+corrections.
+
+The final optional step is to add each corrected output line to form a
+mean. This adds efficiency since the operation is done only if desired
+and the output image data is already in memory so there is no I/O
+penalty.
+
+SEE ALSO
+ ccdred.h, cor, fixpix, setfixpix, setoverscan, settrim,
+ setzero, setdark, setflat, setillum, setfringe
+.endhelp ----------------------------------------------------------------------
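
The CORS bit-field convention described above is worth a concrete illustration. In the Python sketch below, each enabled operation contributes 2**(i-1) for its 1-based array index, so the sum of the array elements is a unique code for the combination; the operation names and their order are hypothetical stand-ins, not the definitions in ccdred.h.

    # Hypothetical stand-in for the CORS encoding described above.
    OPS = ["fixpix", "overscan", "trim", "zerocor", "darkcor", "flatcor"]

    def cors_array(enabled):
        """Element i (counted from 1) is 0 to skip or 2**(i-1) to apply."""
        return [2 ** k if name in enabled else 0   # k = i-1 (0-based)
                for k, name in enumerate(OPS)]

    cors = cors_array({"overscan", "flatcor"})
    combo = sum(cors)                 # unique bit field for this combination
    print(cors, combo, bin(combo))    # [0, 2, 0, 0, 0, 32] 34 0b100010
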
+
+
+$for (sr)
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1$t (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+int overscan_type, overscan_c1, noverscan
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+PIXEL minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+$if (datatype == csir)
+real asum$t()
+$else $if (datatype == ld)
+double asum$t()
+$else
+PIXEL asum$t()
+$endif $endif
+real find_overscan$t()
+pointer imgl2$t(), impl2$t(), ccd_gl$t(), xt_fps$t()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ if (CORS(ccd, OVERSCAN) == 0)
+ overscan_type = 0
+ else {
+ overscan_type = OVERSCAN_TYPE(ccd)
+ overscan_vec = OVERSCAN_VEC(ccd)
+ overscan_c1 = BIAS_C1(ccd) - 1
+ noverscan = BIAS_C2(ccd) - overscan_c1
+ }
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_gl$t (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_gl$t (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_gl$t (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure XT_FPS replaces
+ # bad pixels by interpolation. The trimmed region is copied to the
+ # output. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images. Call COR1
+ # to do the actual pixel corrections. Finally, add the output pixels
+ # to a sum for computing the mean. We must copy data outside of the
+ # output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2$t (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fps$t (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amov$t (Mem$t[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Mem$t[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_type != 0) {
+ if (overscan_type < OVERSCAN_FIT)
+ overscan = find_overscan$t (Mem$t[inbuf+overscan_c1],
+ noverscan, overscan_type)
+ else
+ overscan = Memr[overscan_vec+line-1]
+ }
+ if (zeroim != NULL)
+ zerobuf = ccd_gl$t (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gl$t (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gl$t (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gl$t (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gl$t (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor1$t (CORS(ccd,1), Mem$t[outbuf],
+ overscan, Mem$t[zerobuf], Mem$t[darkbuf],
+ Mem$t[flatbuf], Mem$t[illumbuf], Mem$t[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxk$t (Mem$t[outbuf], minrep, Mem$t[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asum$t (Mem$t[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2$t (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+PIXEL minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim, overscan_vec
+pointer inbuf, outbuf, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+$if (datatype == csir)
+real asum$t()
+$else $if (datatype == ld)
+double asum$t()
+$else
+PIXEL asum$t()
+$endif $endif
+pointer imgl2$t(), impl2$t(), imgs2$t(), ccd_gl$t(), xt_fps$t()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2$t (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2$t (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2$t (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure XT_FPS replaces
+ # bad pixels by interpolation and the trimmed region is copied to the
+ # output. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2$t (out, OUT_L1(ccd)+line-1)
+
+ inbuf = xt_fps$t (MASK_FP(ccd), in, IN_L1(ccd)+line-1, IN_C1(ccd),
+ IN_C2(ccd), IN_L1(ccd), IN_L2(ccd), NULL)
+ call amov$t (Mem$t[inbuf+IN_C1(ccd)-OUT_C1(ccd)], Mem$t[outbuf],
+ IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_gl$t (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gl$t (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gl$t (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gl$t (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gl$t (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor2$t (line, CORS(ccd,1), Mem$t[outbuf],
+ Memr[overscan_vec], Mem$t[zerobuf], Mem$t[darkbuf],
+ Mem$t[flatbuf], Mem$t[illumbuf], Mem$t[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxk$t (Mem$t[outbuf], minrep, Mem$t[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asum$t (Mem$t[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+end
+
+
+# FIND_OVERSCAN -- Find the overscan value for a line.
+# No check is made on the number of pixels.
+# The median is the (npix+1)/2 element.
+
+real procedure find_overscan$t (data, npix, type)
+
+PIXEL data[npix] #I Overscan data
+int npix #I Number of overscan points
+int type #I Type of overscan calculation
+
+int i
+real overscan, d, dmin, dmax
+PIXEL asok$t()
+
+begin
+ if (type == OVERSCAN_MINMAX) {
+ overscan = data[1]
+ dmin = data[1]
+ dmax = data[1]
+ do i = 2, npix {
+ d = data[i]
+ overscan = overscan + d
+ if (d < dmin)
+ dmin = d
+ else if (d > dmax)
+ dmax = d
+ }
+ overscan = (overscan - dmin - dmax) / (npix - 2)
+ } else if (type == OVERSCAN_MEDIAN)
+ overscan = asok$t (data, npix, (npix + 1) / 2)
+ else {
+ overscan = data[1]
+ do i = 2, npix
+ overscan = overscan + data[i]
+ overscan = overscan / npix
+ }
+
+ return (overscan)
+end
+$endfor
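
FIND_OVERSCAN above offers three per-line estimators; the short Python restatement below shows the same arithmetic (mean with the single minimum and maximum rejected, median as the (npix+1)/2-th smallest value, plain mean). It is an illustration, not IRAF code, and like the original it makes no check on the number of pixels.

    # Illustrative restatement of the three overscan estimators above.
    def overscan_value(data, kind):
        """data: overscan pixels of one line; kind: 'minmax', 'median' or 'mean'."""
        n = len(data)
        if kind == "minmax":
            # Mean with the single lowest and highest pixel rejected.
            return (sum(data) - min(data) - max(data)) / (n - 2)
        if kind == "median":
            # The (n+1)//2-th smallest element, as selected by asok above.
            return sorted(data)[(n + 1) // 2 - 1]
        return sum(data) / n          # plain mean

    line = [102.0, 98.0, 101.0, 250.0, 99.0, 100.0]
    for kind in ("minmax", "median", "mean"):
        print(kind, overscan_value(line, kind))   # 100.5, 100.0, 125.0
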
diff --git a/noao/imred/ccdred/src/readcor.x b/noao/imred/ccdred/src/readcor.x
new file mode 100644
index 00000000..61fbd836
--- /dev/null
+++ b/noao/imred/ccdred/src/readcor.x
@@ -0,0 +1,138 @@
+include <imhdr.h>
+
+# READCOR -- Create a readout image.
+# Assume it is appropriate to perform this operation on the input image.
+# There is no CCD type checking.
+
+procedure readcor (input)
+
+char input[ARB] # Input image
+int readaxis # Readout axis
+
+int i, nc, nl, c1, c2, cs, l1, l2, ls
+int in_c1, in_c2, in_l1, in_l2, ccd_c1, ccd_c2, ccd_l1, ccd_l2
+pointer sp, output, str, in, out, data
+
+real asumr()
+int clgwrd()
+bool clgetb(), ccdflag()
+pointer immap(), imgl2r(), impl2r(), imps2r()
+errchk immap, ccddelete
+
+begin
+ # Check if this operation is desired.
+ if (!clgetb ("readcor"))
+ return
+
+ # Check if this operation has been done. Unfortunately this requires
+ # mapping the image.
+
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "readcor")) {
+ call imunmap (in)
+ return
+ }
+
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Convert %s to readout correction\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ call smark (sp)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ in_c1 = c1
+ in_c2 = c2
+ in_l1 = l1
+ in_l2 = l2
+
+ # The default ccd section is the data section.
+ call hdmgstr (in, "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ ccd_c1 = c1
+ ccd_c2 = c2
+ ccd_l1 = l1
+ ccd_l2 = l2
+ if ((in_c2-in_c1 != ccd_c2-ccd_c1) || (in_l2-in_l1 != ccd_l2-ccd_l1))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ # Determine the readout axis.
+ readaxis = clgwrd ("readaxis", Memc[str], SZ_LINE, "|lines|columns|")
+
+ # Create output.
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ call set_output (in, out, Memc[output])
+
+ # Average across the readout axis.
+ switch (readaxis) {
+ case 1:
+ IM_LEN(out,2) = 1
+ data = impl2r (out, 1)
+ call aclrr (Memr[data], nc)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ data = data + in_c1 - 1
+ do i = in_l1, in_l2
+ call aaddr (Memr[imgl2r(in,i)+in_c1-1], Memr[data],
+ Memr[data], nc)
+ call adivkr (Memr[data], real (nl), Memr[data], nc)
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,1:1]")
+ call pargi (in_c1)
+ call pargi (in_c2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,*]")
+ call pargi (ccd_c1)
+ call pargi (ccd_c2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ case 2:
+ IM_LEN(out,1) = 1
+ data = imps2r (out, 1, 1, 1, nl)
+ call aclrr (Memr[data], nl)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ do i = in_l1, in_l2
+ Memr[data+i-1] = asumr (Memr[imgl2r(in,i)+in_c1-1], nc) / nc
+ call sprintf (Memc[str], SZ_LINE, "[1:1,%d:%d]")
+ call pargi (in_l1)
+ call pargi (in_l2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[*,%d:%d]")
+ call pargi (ccd_l1)
+ call pargi (ccd_l2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ }
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Converted to readout format")
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (in, Memc[str])
+ call hdmpstr (out, "readcor", Memc[str])
+
+ # Replace the input image by the output image.
+ call imunmap (in)
+ call imunmap (out)
+ call ccddelete (input)
+ call imrename (Memc[output], input)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/scancor.x b/noao/imred/ccdred/src/scancor.x
new file mode 100644
index 00000000..6a5eb84c
--- /dev/null
+++ b/noao/imred/ccdred/src/scancor.x
@@ -0,0 +1,340 @@
+include <imhdr.h>
+include <imset.h>
+
+define SCANTYPES "|shortscan|longscan|"
+define SHORTSCAN 1 # Short scan accumulation, normal readout
+define LONGSCAN 2 # Long scan continuous readout
+
+# SCANCOR -- Create a scanned image from an unscanned image.
+
+procedure scancor (input, output, nscan, minreplace)
+
+char input[ARB] # Input image
+char output[ARB] # Output image (must be new image)
+int nscan # Number of scan lines
+real minreplace # Minimum value of output
+
+int scantype # Type of scan format
+int readaxis # Readout axis
+
+int clgwrd()
+pointer sp, str, in, out, immap()
+errchk immap
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Determine readout axis and create the temporary output image.
+ scantype = clgwrd ("scantype", Memc[str], SZ_LINE, SCANTYPES)
+ readaxis = clgwrd ("readaxis", Memc[str], SZ_LINE, "|lines|columns|")
+
+ # Make the output scanned image.
+ in = immap (input, READ_ONLY, 0)
+ call set_output (in, out, output)
+
+ switch (scantype) {
+ case SHORTSCAN:
+ call shortscan (in, out, nscan, minreplace, readaxis)
+ case LONGSCAN:
+ call longscan (in, out, readaxis)
+ }
+
+ # Log the operation.
+ switch (scantype) {
+ case SHORTSCAN:
+ call sprintf (Memc[str], SZ_LINE,
+ "Converted to shortscan from %s with nscan=%d")
+ call pargstr (input)
+ call pargi (nscan)
+ call hdmputi (out, "nscanrow", nscan)
+ case LONGSCAN:
+ call sprintf (Memc[str], SZ_LINE, "Converted to longscan from %s")
+ call pargstr (input)
+ }
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out, Memc[str])
+ call hdmpstr (out, "scancor", Memc[str])
+
+ call imunmap (in)
+ call imunmap (out)
+
+ call sfree (sp)
+end
+
+
+# SHORTSCAN -- Make a shortscan mode image by using a moving average.
+#
+# NOTE!! The value of nscan used here is increased by 1 because the
+# current information in the image header is actually the number of
+# scan steps and NOT the number of rows.
+
+procedure shortscan (in, out, nscan, minreplace, readaxis)
+
+pointer in # Input image
+pointer out # Output image
+int nscan # Number of lines scanned before readout
+real minreplace # Minimum output value
+int readaxis # Readout axis
+
+bool replace
+real nscanr, sum, mean, asumr()
+int i, j, k, l, len1, len2, nc, nl, nscani, c1, c2, cs, l1, l2, ls
+pointer sp, str, bufs, datain, dataout, data, imgl2r(), impl2r()
+long clktime()
+errchk malloc, calloc
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ len1 = IM_LEN(in,1)
+ len2 = IM_LEN(in,2)
+ c1 = 1
+ c2 = len1
+ cs = 1
+ l1 = 1
+ l2 = len2
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>len1)||(l1<1)||(l2>len2)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ nc = c2 - c1 + 1
+ nl = l2 - l1 + 1
+
+ # Copy initial lines.
+ do i = 1, l1 - 1
+ call amovr (Memr[imgl2r(in,i)], Memr[impl2r(out,i)], len1)
+
+ replace = !IS_INDEF(minreplace)
+ mean = 0.
+ switch (readaxis) {
+ case 1:
+ nscani = max (1, min (nscan, nl) + 1)
+ nscanr = nscani
+ call imseti (in, IM_NBUFS, nscani)
+ call malloc (bufs, nscani, TY_INT)
+ call calloc (data, nc, TY_REAL)
+ j = 1
+ k = 1
+ l = 1
+
+ # Ramp up
+ while (j <= nscani) {
+ i = j + l1 - 1
+ datain = imgl2r (in, i)
+ if (nc < len1)
+ call amovr (Memr[datain], Memr[impl2r(out,i)], len1)
+ datain = datain + c1 - 1
+ Memi[bufs+mod(j,nscani)] = datain
+ call aaddr (Memr[data], Memr[datain], Memr[data], nc)
+ j = j + 1
+ }
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+ l = l + 1
+
+ # Moving average
+ while (j <= nl) {
+ datain = Memi[bufs+mod(k,nscani)]
+ call asubr (Memr[data], Memr[datain], Memr[data], nc)
+ i = j + l1 - 1
+ datain = imgl2r (in, i)
+ if (nc < len1)
+ call amovr (Memr[datain], Memr[impl2r(out,i)], len1)
+ datain = datain + c1 - 1
+ Memi[bufs+mod(j,nscani)] = datain
+ call aaddr (Memr[data], Memr[datain], Memr[data], nc)
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+
+ j = j + 1
+ k = k + 1
+ l = l + 1
+ }
+
+ # Ramp down.
+ while (l <= nl) {
+ datain = Memi[bufs+mod(k,nscani)]
+ call asubr (Memr[data], Memr[datain], Memr[data], nc)
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+
+ k = k + 1
+ l = l + 1
+ }
+
+ call mfree (bufs, TY_INT)
+ call mfree (data, TY_REAL)
+
+ case 2:
+ nscani = max (1, min (nscan, nc) + 1)
+ nscanr = nscani
+ do i = 1, nl {
+ datain = imgl2r (in, i + l1 - 1)
+ data = impl2r (out, i + l1 - 1)
+ call amovr (Memr[datain], Memr[data], len1)
+ datain = datain + c1 - 1
+ data = data + c1 - 1
+ sum = 0
+ j = 0
+ k = 0
+ l = 0
+
+ # Ramp up
+ while (j < nscani) {
+ sum = sum + Memr[datain+j]
+ j = j + 1
+ }
+ if (replace)
+ Memr[data] = max (minreplace, sum / nscani)
+ else
+ Memr[data] = sum / nscani
+ mean = mean + Memr[data]
+ l = l + 1
+
+ # Moving average
+ while (j < nl) {
+ sum = sum + Memr[datain+j] - Memr[datain+k]
+ if (replace)
+ Memr[data+l] = max (minreplace, sum / nscani)
+ else
+ Memr[data+l] = sum / nscani
+ mean = mean + Memr[data+l]
+ j = j + 1
+ k = k + 1
+ l = l + 1
+ }
+
+ # Ramp down
+ while (l < nl) {
+ sum = sum - Memr[datain+k]
+ if (replace)
+ Memr[data+l] = max (minreplace, sum / nscani)
+ else
+ Memr[data+l] = sum / nscani
+ mean = mean + Memr[data+l]
+ k = k + 1
+ l = l + 1
+ }
+ }
+ }
+
+ # Copy final lines.
+ do i = l2+1, len2
+ call amovr (Memr[imgl2r(in,i)], Memr[impl2r(out,i)], len1)
+
+ mean = mean / nc / nl
+ call hdmputr (out, "ccdmean", mean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ call sfree (sp)
+end
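
Stripped of the IRAF line buffering, the ramp-up / moving-average / ramp-down structure above reduces to a simple rule: output line l is the sum of input lines l through l+nscan divided by nscan+1 (the nscan+1 convention noted before the procedure), with the window truncated at the end of the data but the divisor held fixed. The Python sketch below shows that scalar form; the minreplace clipping and the data-section bookkeeping are omitted.

    # Scalar sketch of the shortscan moving average (illustration only).
    def shortscan_profile(lines, nscan):
        """lines: per-line values; returns the smoothed profile."""
        nrows = min(nscan, len(lines)) + 1        # nscan steps -> nscan+1 rows
        return [sum(lines[l:l + nrows]) / nrows   # window truncates at the end
                for l in range(len(lines))]

    print(shortscan_profile([10.0, 12.0, 11.0, 13.0, 9.0, 10.0], nscan=2))
    # -> [11.0, 12.0, 11.0, 10.67, 6.33, 3.33] (tail ramps down)
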
+
+
+# LONGSCAN -- Make a longscan mode readout flat field correction by averaging
+# across the readout axis.
+
+procedure longscan (in, out, readaxis)
+
+pointer in # Input image
+pointer out # Output image
+int readaxis # Readout axis
+
+int i, nc, nl, c1, c2, cs, l1, l2, ls
+int in_c1, in_c2, in_l1, in_l2, ccd_c1, ccd_c2, ccd_l1, ccd_l2
+real mean, asumr()
+long clktime()
+pointer sp, str, data, imgl2r(), impl2r(), imps2r()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ in_c1 = c1
+ in_c2 = c2
+ in_l1 = l1
+ in_l2 = l2
+
+ # The default ccd section is the data section.
+ call hdmgstr (in, "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ ccd_c1 = c1
+ ccd_c2 = c2
+ ccd_l1 = l1
+ ccd_l2 = l2
+ if ((in_c2-in_c1 != ccd_c2-ccd_c1) || (in_l2-in_l1 != ccd_l2-ccd_l1))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ switch (readaxis) {
+ case 1:
+ IM_LEN(out,2) = 1
+ data = impl2r (out, 1)
+ call aclrr (Memr[data], nc)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ data = data + in_c1 - 1
+ do i = in_l1, in_l2
+ call aaddr (Memr[imgl2r(in,i)+in_c1-1], Memr[data],
+ Memr[data], nc)
+ call adivkr (Memr[data], real (nl), Memr[data], nc)
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,1:1]")
+ call pargi (in_c1)
+ call pargi (in_c2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,*]")
+ call pargi (ccd_c1)
+ call pargi (ccd_c2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ mean = asumr (Memr[data], nc) / nl
+ case 2:
+ IM_LEN(out,1) = 1
+ data = imps2r (out, 1, 1, 1, nl)
+ call aclrr (Memr[data], nl)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ do i = in_l1, in_l2
+ Memr[data+i-1] = asumr (Memr[imgl2r(in,i)+in_c1-1], nc) / nc
+ call sprintf (Memc[str], SZ_LINE, "[1:1,%d:%d]")
+ call pargi (in_l1)
+ call pargi (in_l2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[*,%d:%d]")
+ call pargi (ccd_l1)
+ call pargi (ccd_l2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ mean = asumr (Memr[data], nl) / nc
+ }
+
+ call hdmputr (out, "ccdmean", mean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setdark.x b/noao/imred/ccdred/src/setdark.x
new file mode 100644
index 00000000..c872aba4
--- /dev/null
+++ b/noao/imred/ccdred/src/setdark.x
@@ -0,0 +1,160 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+
+# SET_DARK -- Set parameters for dark count correction.
+#
+# 1. Return immediately if the dark count correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the dark count correction image and return an error if not found.
+# 3. If the dark count image has not been processed call PROC.
+# 4. Compute the dark count integration time scale factor.
+# 5. Set the processing flags.
+# 6. Log the operation (to user, logfile, and output image header).
+
+procedure set_dark (ccd)
+
+pointer ccd # CCD structure
+
+int nscan, nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+real darktime1, darktime2
+pointer sp, image, str, im
+
+bool clgetb(), ccdflag(), ccdcheck()
+int ccdnscan(), ccdtypei()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or it has already been done.
+ if (!clgetb ("darkcor") || ccdflag (IN_IM(ccd), "darkcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the dark count correction image name.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), DARK, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print dark count image and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Dark count correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the dark count image if necessary.
+ # If nscan > 1 then the dark may not yet exist so create it
+ # from the unscanned dark.
+
+ iferr (im = ccd_cache (Memc[image], DARK)) {
+ call cal_image (IN_IM(ccd), DARK, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], DARK)
+ if (ccdcheck (im, DARK)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], DARK)
+ }
+ call scancor (Memc[str], Memc[image], nscan, INDEF)
+ im = ccd_cache (Memc[image], DARK)
+ }
+
+ if (ccdcheck (im, DARK)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], DARK)
+ im = ccd_cache (Memc[image], DARK)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ DARK_IM(ccd) = im
+ DARK_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ DARK_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ DARK_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ DARK_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # Get the dark count integration times. Return an error if not found.
+ iferr (darktime1 = hdmgetr (IN_IM(ccd), "darktime"))
+ darktime1 = hdmgetr (IN_IM(ccd), "exptime")
+ iferr (darktime2 = hdmgetr (im, "darktime"))
+ darktime2 = hdmgetr (im, "exptime")
+ if (darktime2 <= 0.) {
+ call sprintf (Memc[str], SZ_LINE, "Dark time is zero for `%s'")
+ call pargstr (Memc[image])
+ call error (1, Memc[str])
+ }
+
+ DARKSCALE(ccd) = darktime1 / darktime2
+ CORS(ccd, DARKCOR) = D
+ COR(ccd) = YES
+
+ # Record the operation in the output image and write a log record.
+ call sprintf (Memc[str], SZ_LINE,
+ "Dark count correction image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (DARKSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "darkcor", Memc[str])
+
+ call sfree (sp)
+end
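
The same DATASEC/CCDSEC bookkeeping recurs in set_dark, set_flat, set_fringe, and set_illum: the window to be read from the calibration image is the input's window in CCD coordinates shifted by the difference between the calibration image's CCDSEC and DATASEC origins. A quick numeric illustration follows; all section values below are made up for the example.

    # Made-up numbers illustrating the CCDSEC -> DATASEC shift used above.
    ccd_c1_in, ccd_c2_in = 101, 356   # input window, CCD coordinates
    cal_ccd_c1 = 101                  # calibration image CCDSEC start column
    cal_data_c1 = 5                   # calibration image DATASEC start column

    dark_c1 = ccd_c1_in - cal_ccd_c1 + cal_data_c1   # 5
    dark_c2 = ccd_c2_in - cal_ccd_c1 + cal_data_c1   # 260
    print(dark_c1, dark_c2)           # pixel columns to read from the dark
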
diff --git a/noao/imred/ccdred/src/setfixpix.x b/noao/imred/ccdred/src/setfixpix.x
new file mode 100644
index 00000000..e6b96298
--- /dev/null
+++ b/noao/imred/ccdred/src/setfixpix.x
@@ -0,0 +1,74 @@
+include <imhdr.h>
+include <imset.h>
+include <pmset.h>
+include "ccdred.h"
+
+
+# SET_FIXPIX -- Set parameters for bad pixel correction.
+# 1. Return immediately if the bad pixel correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the bad pixel mask. Return an error if not found.
+# 3. If the bad pixel mask has not been processed call PROC.
+# 4. Set the processing flag.
+# 5. Log the operation (to user, logfile, and output image header).
+#
+# This routine relies on the physical coordinate system and assumes
+# XT_PMMAP has taken care of matching the pixel mask to the input image.
+
+procedure set_fixpix (ccd)
+
+pointer ccd # CCD structure
+
+pointer sp, image, str, im
+
+int imstati()
+bool clgetb(), streq(), ccdflag()
+pointer xt_pmmap(), xt_fpinit()
+errchk xt_pmmap(), xt_fpinit()
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("fixpix") || ccdflag (IN_IM(ccd), "fixpix"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the bad pixel file. If the name is "image" then get the file
+ # name from the image header or symbol table.
+
+ call clgstr ("fixfile", Memc[image], SZ_FNAME)
+ if (streq (Memc[image], "image"))
+ call hdmgstr (IN_IM(ccd), "fixfile", Memc[image], SZ_FNAME)
+
+ # If no processing is desired print message and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Bad pixel file is %s\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the bad pixel image and return on an error.
+ im = xt_pmmap (Memc[image], IN_IM(ccd), Memc[image], SZ_FNAME)
+ if (Memc[image] == EOS)
+ call error (1, "No bad pixel mask found")
+ if (im != NULL) {
+ MASK_IM(ccd) = im
+ MASK_PM(ccd) = imstati (im, IM_PMDES)
+ MASK_FP(ccd) = xt_fpinit (MASK_PM(ccd), 2, 3)
+
+ CORS(ccd, FIXPIX) = YES
+ COR(ccd) = YES
+ }
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE, "Bad pixel file is %s")
+ call pargstr (Memc[image])
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "fixpix", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setflat.x b/noao/imred/ccdred/src/setflat.x
new file mode 100644
index 00000000..87713404
--- /dev/null
+++ b/noao/imred/ccdred/src/setflat.x
@@ -0,0 +1,146 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_FLAT -- Set parameters for flat field correction.
+#
+# 1. Return immediately if the flat field correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the flat field image and return on an error.
+# 3. If the flat field image has not been processed call PROC.
+# 4. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_flat (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+pointer sp, str, image, im, ccd_cache()
+bool clgetb(), ccdflag(), ccdcheck()
+int nscan, ccdnscan(), ccdtypei()
+real hdmgetr()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("flatcor") || ccdflag (IN_IM(ccd), "flatcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the flat field correction image.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), FLAT, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print flat field image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Flat correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the flat field image if necessary.
+ # If nscan > 1 then the flat field may not yet exist so create it
+ # from the unscanned flat field.
+
+ iferr (im = ccd_cache (Memc[image], FLAT)) {
+ call cal_image (IN_IM(ccd), FLAT, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], FLAT)
+ if (ccdcheck (im, FLAT)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], FLAT)
+ }
+ call scancor (Memc[str], Memc[image], nscan, MINREPLACE(ccd))
+ im = ccd_cache (Memc[image], FLAT)
+ }
+
+ if (ccdcheck (im, FLAT)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], FLAT)
+ im = ccd_cache (Memc[image], FLAT)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ FLAT_IM(ccd) = im
+ FLAT_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ FLAT_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ FLAT_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ FLAT_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # If no mean value use 1 as the scale factor.
+ iferr (FLATSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ FLATSCALE(ccd) = 1.
+ CORS(ccd, FLATCOR) = F
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Flat field image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (FLATSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "flatcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setfringe.x b/noao/imred/ccdred/src/setfringe.x
new file mode 100644
index 00000000..7055f35f
--- /dev/null
+++ b/noao/imred/ccdred/src/setfringe.x
@@ -0,0 +1,123 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_FRINGE -- Set parameters for fringe correction.
+#
+# 1. Return immediately if the fringe correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the fringe image and return an error if the mkfringe flag is missing.
+# 3. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_fringe (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+real exptime1, exptime2, fringescale
+pointer sp, str, image, im
+
+bool clgetb(), ccdflag()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("fringecor") || ccdflag (IN_IM(ccd), "fringcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the fringe correction image.
+ call cal_image (IN_IM(ccd), FRINGE, 1, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print fringe image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Fringe correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Return an error if the fringe flag is missing.
+ im = ccd_cache (Memc[image], FRINGE)
+ if (!ccdflag (im, "mkfringe"))
+ call error (0, "MKFRINGE flag missing from fringe image.")
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ FRINGE_IM(ccd) = im
+ FRINGE_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ FRINGE_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ FRINGE_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ FRINGE_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # Get the scaling factors. If no fringe scale factor assume 1.
+ exptime1 = hdmgetr (IN_IM(ccd), "exptime")
+ exptime2 = hdmgetr (im, "exptime")
+ iferr (fringescale = hdmgetr (im, "fringscl"))
+ fringescale = 1.
+
+ FRINGESCALE(ccd) = exptime1 / exptime2 * fringescale
+ CORS(ccd, FRINGECOR) = Q
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Fringe image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (FRINGESCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "fringcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setheader.x b/noao/imred/ccdred/src/setheader.x
new file mode 100644
index 00000000..aa13730a
--- /dev/null
+++ b/noao/imred/ccdred/src/setheader.x
@@ -0,0 +1,83 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_HEADER -- Set the output image header.
+
+procedure set_header (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl
+real shift[2]
+pointer sp, str, out, mw, mw_openim()
+long clktime()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ out = OUT_IM(ccd)
+ nc = IM_LEN(out,1)
+ nl = IM_LEN(out,2)
+
+ # Set the data section if it is not the whole image.
+ if ((OUT_C1(ccd) != 1) || (OUT_C2(ccd) != nc) ||
+ (OUT_L1(ccd) != 1) || (OUT_L2(ccd) != nl)) {
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (OUT_C1(ccd))
+ call pargi (OUT_C2(ccd))
+ call pargi (OUT_L1(ccd))
+ call pargi (OUT_L2(ccd))
+ call hdmpstr (out, "datasec", Memc[str])
+ } else {
+ iferr (call hdmdelf (out, "datasec"))
+ ;
+ }
+
+ # Set the CCD section.
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call hdmpstr (out, "ccdsec", Memc[str])
+
+ # If trimming update the trim and bias section parameters.
+ if (CORS(ccd, TRIM) == YES) {
+ iferr (call hdmdelf (out, "trimsec"))
+ ;
+ iferr (call hdmdelf (out, "biassec"))
+ ;
+ BIAS_C1(ccd) = max (1, BIAS_C1(ccd) - TRIM_C1(ccd) + 1)
+ BIAS_C2(ccd) = min (nc, BIAS_C2(ccd) - TRIM_C1(ccd) + 1)
+ BIAS_L1(ccd) = max (1, BIAS_L1(ccd) - TRIM_L1(ccd) + 1)
+ BIAS_L2(ccd) = min (nl, BIAS_L2(ccd) - TRIM_L1(ccd) + 1)
+ if ((BIAS_C1(ccd)<=BIAS_C2(ccd)) && (BIAS_L1(ccd)<=BIAS_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (BIAS_C1(ccd))
+ call pargi (BIAS_C2(ccd))
+ call pargi (BIAS_L1(ccd))
+ call pargi (BIAS_L2(ccd))
+ call hdmpstr (out, "biassec", Memc[str])
+ }
+
+ mw = mw_openim (out)
+ shift[1] = 1 - IN_C1(ccd)
+ shift[2] = 1 - IN_L1(ccd)
+ call mw_shift (mw, shift, 3)
+ call mw_saveim (mw, out)
+ }
+
+ # Set mean value if desired.
+ if (CORS(ccd, FINDMEAN) == YES) {
+ call hdmputr (out, "ccdmean", MEAN(ccd))
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+ }
+
+ # Mark image as processed.
+ call sprintf (Memc[str], SZ_LINE, "CCD processing done")
+ call timelog (Memc[str], SZ_LINE)
+ call hdmpstr (out, "ccdproc", Memc[str])
+
+ call sfree (sp)
+end
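
The max/min arithmetic above shifts the recorded bias section into trimmed pixel coordinates and clips it to the output size; when the overscan strip has been trimmed away entirely the clipped section becomes empty and the keyword is simply not rewritten. A made-up numeric check:

    # Made-up example of the bias-section shift applied after trimming.
    trim_c1, nc_out = 21, 300          # first trimmed column, output width
    bias_c1, bias_c2 = 321, 340        # bias section, untrimmed coordinates

    new_c1 = max(1, bias_c1 - trim_c1 + 1)        # 301
    new_c2 = min(nc_out, bias_c2 - trim_c1 + 1)   # 300 (clipped to the image)
    print(new_c1, new_c2, new_c1 <= new_c2)       # empty section -> False
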
diff --git a/noao/imred/ccdred/src/setillum.x b/noao/imred/ccdred/src/setillum.x
new file mode 100644
index 00000000..d1677301
--- /dev/null
+++ b/noao/imred/ccdred/src/setillum.x
@@ -0,0 +1,132 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_ILLUM -- Set parameters for illumination correction.
+#
+# 1. Return immediately if the illumination correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the illumination image and return an error if the mkillum flag is missing.
+# 3. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_illum (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+long time
+pointer sp, str, image, im
+
+bool clgetb(), ccdflag()
+long hdmgeti()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr, hdmgeti
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("illumcor") || ccdflag (IN_IM(ccd), "illumcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the illumcor correction image.
+ call cal_image (IN_IM(ccd), ILLUM, 1, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print illumination image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Illumination correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Return an error if the illumination flag is missing.
+ im = ccd_cache (Memc[image], ILLUM)
+ if (!ccdflag (im, "mkillum")) {
+ call ccd_flush (im)
+ call error (0, "MKILLUM flag missing from illumination image")
+ }
+
+ # If there is no mean value for the scale factor, or it is out of date, compute it.
+ iferr (ILLUMSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ ILLUMSCALE(ccd) = INDEF
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (IS_INDEF(ILLUMSCALE(ccd)) || time < IM_MTIME(im)) {
+ call ccd_flush (im)
+ call ccdmean (Memc[image])
+ im = ccd_cache (Memc[image], ILLUM)
+ }
+ iferr (ILLUMSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ ILLUMSCALE(ccd) = 1.
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ ILLUM_IM(ccd) = im
+ ILLUM_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ ILLUM_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ ILLUM_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ ILLUM_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ CORS(ccd, ILLUMCOR) = I
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Illumination image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (ILLUMSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "illumcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setinput.x b/noao/imred/ccdred/src/setinput.x
new file mode 100644
index 00000000..3d3170db
--- /dev/null
+++ b/noao/imred/ccdred/src/setinput.x
@@ -0,0 +1,48 @@
+include <error.h>
+include "ccdtypes.h"
+
+# SET_INPUT -- Set the input image and image type.
+#
+# 1. Open the input image. Return warning and NULL pointer for an error.
+# 2. Get the requested CCD image type.
+# a. If no type is requested then accept the image.
+# b. If a type is requested then match against the image type.
+# Unmap the image if no match.
+# 3. If the image is acceptable then get the CCD type code.
+
+procedure set_input (image, im, ccdtype)
+
+char image[ARB] # Input image name
+pointer im # IMIO pointer (returned)
+int ccdtype # CCD image type
+
+bool strne()
+int ccdtypei()
+pointer sp, str1, str2, immap()
+
+begin
+ # Open the image. Return a warning and NULL pointer for an error.
+ iferr (im = immap (image, READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ im = NULL
+ return
+ }
+
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the requested CCD type.
+ call clgstr ("ccdtype", Memc[str1], SZ_LINE)
+ call xt_stripwhite (Memc[str1])
+ if (Memc[str1] != EOS) {
+ call ccdtypes (im, Memc[str2], SZ_LINE)
+ if (strne (Memc[str1], Memc[str2]))
+ call imunmap (im)
+ }
+
+ if (im != NULL)
+ ccdtype = ccdtypei (im)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setinteract.x b/noao/imred/ccdred/src/setinteract.x
new file mode 100644
index 00000000..05bc0f71
--- /dev/null
+++ b/noao/imred/ccdred/src/setinteract.x
@@ -0,0 +1,31 @@
+include <pkg/xtanswer.h>
+
+# SET_INTERACTIVE -- Set the interactive flag. Query the user if necessary.
+#
+# This procedure initializes the interactive flag if there is no query.
+# If there is a query it is issued by XT_ANSWER. The four valued
+# interactive flag is returned.
+
+procedure set_interactive (query, interactive)
+
+char query[ARB] # Query prompt
+int interactive # Fit overscan interactively? (returned)
+
+int interact # Saves last value of interactive flag
+bool clgetb()
+
+begin
+ # If the query is null then initialize from the CL otherwise
+ # query the user. This response is four valued to allow the user
+ # to turn off the query when processing multiple images.
+
+ if (query[1] == EOS) {
+ if (clgetb ("interactive"))
+ interact = YES
+ else
+ interact = ALWAYSNO
+ } else
+ call xt_answer (query, interact)
+
+ interactive = interact
+end
diff --git a/noao/imred/ccdred/src/setoutput.x b/noao/imred/ccdred/src/setoutput.x
new file mode 100644
index 00000000..b401b5aa
--- /dev/null
+++ b/noao/imred/ccdred/src/setoutput.x
@@ -0,0 +1,52 @@
+include <imhdr.h>
+include <imset.h>
+
+# SET_OUTPUT -- Setup the output image.
+# The output image is a NEW_COPY of the input image.
+# The user may select a pixel datatype with higher precision though not
+# lower.
+
+procedure set_output (in, out, output)
+
+pointer in # Input IMIO pointer to copy
+pointer out # Output IMIO pointer
+char output[SZ_FNAME] # Output image name
+
+int i, clscan(), nscan()
+char type[1]
+pointer immap()
+errchk immap
+
+begin
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+ if (clscan ("pixeltype") != EOF) {
+ call gargwrd (type, 1)
+ if (nscan() == 1) {
+ i = IM_PIXTYPE(in)
+ IM_PIXTYPE(out) = i
+ switch (type[1]) {
+ case 's':
+ if (i == TY_USHORT)
+ IM_PIXTYPE(out) = TY_SHORT
+ case 'u':
+ if (i == TY_SHORT)
+ IM_PIXTYPE(out) = TY_USHORT
+ case 'i':
+ if (i == TY_SHORT || i == TY_USHORT)
+ IM_PIXTYPE(out) = TY_INT
+ case 'l':
+ if (i == TY_SHORT || i == TY_USHORT || i == TY_INT)
+ IM_PIXTYPE(out) = TY_LONG
+ case 'r':
+ if (i != TY_DOUBLE)
+ IM_PIXTYPE(out) = TY_REAL
+ case 'd':
+ IM_PIXTYPE(out) = TY_DOUBLE
+ default:
+ call imunmap (out)
+ call error (0, "Unknown pixel type")
+ }
+ }
+ }
+end
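
The switch above only ever promotes the pixel type (apart from the short/ushort swap); requesting a lower-precision type silently keeps the input type. A table-driven Python restatement, with strings standing in for the IMIO TY_ constants, may make the rules easier to scan; it is an illustration of the selection logic only.

    # Illustrative restatement of the pixel-type selection rules above.
    def output_pixtype(in_type, requested):
        allowed = {
            "s": {"ushort": "short"},              # only ushort -> short
            "u": {"short": "ushort"},              # only short -> ushort
            "i": {"short": "int", "ushort": "int"},
            "l": {"short": "long", "ushort": "long", "int": "long"},
            "r": {t: "real" for t in ("short", "ushort", "int", "long", "real")},
            "d": {t: "double" for t in
                  ("short", "ushort", "int", "long", "real", "double")},
        }
        if requested not in allowed:
            raise ValueError("Unknown pixel type")
        # Keep the input type when the request would demote it.
        return allowed[requested].get(in_type, in_type)

    print(output_pixtype("short", "r"))    # -> real
    print(output_pixtype("double", "r"))   # -> double (never demoted)
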
diff --git a/noao/imred/ccdred/src/setoverscan.x b/noao/imred/ccdred/src/setoverscan.x
new file mode 100644
index 00000000..e344aa92
--- /dev/null
+++ b/noao/imred/ccdred/src/setoverscan.x
@@ -0,0 +1,310 @@
+include <imhdr.h>
+include <imset.h>
+include <pkg/gtools.h>
+include <pkg/xtanswer.h>
+include "ccdred.h"
+
+
+# SET_OVERSCAN -- Set the overscan vector.
+#
+# 1. Return immediately if the overscan correction is not requested or
+# if the image has been previously corrected.
+# 2. Determine the overscan columns or lines. This may be specified
+# directly or indirectly through the image header or symbol table.
+# 3. Determine the type of overscan.
+# 4. If fitting the overscan average the overscan columns or lines and
+# fit a function with the ICFIT routines to smooth the overscan vector.
+# 5. Set the processing flag.
+# 6. Log the operation (to user, logfile, and output image header).
+
+procedure set_overscan (ccd)
+
+pointer ccd # CCD structure pointer
+
+int i, first, last, navg, npts, type
+int nc, nl, c1, c2, l1, l2
+pointer sp, str, errstr, func, buf, x, overscan
+
+int clgwrd()
+real asumr()
+bool clgetb(), ccdflag()
+pointer imgl2r(), imgs2r()
+errchk imgl2r, imgs2r, fit_overscan
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("overscan") || ccdflag (IN_IM(ccd), "overscan"))
+ return
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (errstr, SZ_LINE, TY_CHAR)
+ call salloc (func, SZ_LINE, TY_CHAR)
+ call imstats (IN_IM(ccd), IM_IMAGENAME, Memc[str], SZ_LINE)
+
+ # Check bias section.
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+ c1 = BIAS_C1(ccd)
+ c2 = BIAS_C2(ccd)
+ l1 = BIAS_L1(ccd)
+ l2 = BIAS_L2(ccd)
+ if ((c1 < 1) || (c2 > nc) || (l1 < 1) || (l2 > nl)) {
+ call sprintf (Memc[errstr], SZ_LINE,
+ "Error in bias section: image=%s[%d,%d], biassec=[%d:%d,%d:%d]")
+ call pargstr (Memc[str])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[errstr])
+ }
+ if ((c1 == 1) && (c2 == nc) && (l1 == 1) && (l2 == nl)) {
+ call error (0, "Bias section not specified or given as full image")
+ }
+
+ # If no processing is desired then print overscan strip and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Overscan section is [%d:%d,%d:%d].\n")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call sfree (sp)
+ return
+ }
+
+ # Determine the overscan section parameters. The readout axis
+ # determines the type of overscan. The step sizes are ignored.
+ # The limits in the long dimension are replaced by the trim limits.
+
+ type = clgwrd ("function", Memc[func], SZ_LINE, OVERSCAN_TYPES)
+ if (type < OVERSCAN_FIT) {
+ overscan = NULL
+ if (READAXIS(ccd) == 2)
+ call error (1,
+ "Overscan function type not allowed with readaxis of 2")
+ } else {
+ if (READAXIS(ccd) == 1) {
+ first = c1
+ last = c2
+ navg = last - first + 1
+ npts = nl
+ call salloc (buf, npts, TY_REAL)
+ do i = 1, npts
+ Memr[buf+i-1] = asumr (Memr[imgs2r (IN_IM(ccd), first, last,
+ i, i)], navg)
+ if (navg > 1)
+ call adivkr (Memr[buf], real (navg), Memr[buf], npts)
+
+ # Trim the overscan vector and set the pixel coordinate.
+ npts = CCD_L2(ccd) - CCD_L1(ccd) + 1
+ call malloc (overscan, npts, TY_REAL)
+ call salloc (x, npts, TY_REAL)
+ call trim_overscan (Memr[buf], npts, IN_L1(ccd), Memr[x],
+ Memr[overscan])
+
+ call fit_overscan (Memc[str], c1, c2, l1, l2, Memr[x],
+ Memr[overscan], npts)
+
+ } else {
+ first = l1
+ last = l2
+ navg = last - first + 1
+ npts = nc
+ call salloc (buf, npts, TY_REAL)
+ call aclrr (Memr[buf], npts)
+ do i = first, last
+ call aaddr (Memr[imgl2r(IN_IM(ccd),i)], Memr[buf],
+ Memr[buf], npts)
+ if (navg > 1)
+ call adivkr (Memr[buf], real (navg), Memr[buf], npts)
+
+ # Trim the overscan vector and set the pixel coordinate.
+ npts = CCD_C2(ccd) - CCD_C1(ccd) + 1
+ call malloc (overscan, npts, TY_REAL)
+ call salloc (x, npts, TY_REAL)
+ call trim_overscan (Memr[buf], npts, IN_C1(ccd), Memr[x],
+ Memr[overscan])
+
+ call fit_overscan (Memc[str], c1, c2, l1, l2, Memr[x],
+ Memr[overscan], npts)
+ }
+ }
+
+ # Set the CCD structure overscan parameters.
+ CORS(ccd, OVERSCAN) = O
+ COR(ccd) = YES
+ OVERSCAN_TYPE(ccd) = type
+ OVERSCAN_VEC(ccd) = overscan
+
+ # Log the operation.
+ if (type < OVERSCAN_FIT) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan section is [%d:%d,%d:%d] with function=%s")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call pargstr (Memc[func])
+ } else {
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan section is [%d:%d,%d:%d] with mean=%g")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call pargr (asumr (Memr[overscan], npts) / npts)
+ }
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "overscan", Memc[str])
+
+ call sfree (sp)
+end
+
+
+# FIT_OVERSCAN -- Fit a function to smooth the overscan vector.
+# The fitting uses the ICFIT procedures which may be interactive.
+# Changes to these parameters are "learned". The user is queried with a four
+# valued logical query (XT_ANSWER routine) which may be turned off when
+# multiple images are processed.
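+# After an interactive fit the possibly modified fitting parameters are
+# written back to the CL so that later images see the same values, and the
+# smoothed fit replaces the raw overscan vector.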
+
+procedure fit_overscan (image, c1, c2, l1, l2, x, overscan, npts)
+
+char image[ARB] # Image name for query and title
+int c1, c2, l1, l2 # Overscan strip
+real x[npts] # Pixel coordinates of overscan
+real overscan[npts] # Input overscan and output fitted overscan
+int npts # Number of data points
+
+int interactive, fd
+pointer sp, str, w, ic, cv, gp, gt
+
+int clgeti(), ic_geti(), open()
+real clgetr(), ic_getr()
+pointer gopen(), gt_init()
+errchk gopen, open
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (w, npts, TY_REAL)
+ call amovkr (1., Memr[w], npts)
+
+ # Open the ICFIT procedures, get the fitting parameters, and
+ # set the fitting limits.
+
+ call ic_open (ic)
+ call clgstr ("function", Memc[str], SZ_LINE)
+ call ic_pstr (ic, "function", Memc[str])
+ call ic_puti (ic, "order", clgeti ("order"))
+ call clgstr ("sample", Memc[str], SZ_LINE)
+ call ic_pstr (ic, "sample", Memc[str])
+ call ic_puti (ic, "naverage", clgeti ("naverage"))
+ call ic_puti (ic, "niterate", clgeti ("niterate"))
+ call ic_putr (ic, "low", clgetr ("low_reject"))
+ call ic_putr (ic, "high", clgetr ("high_reject"))
+ call ic_putr (ic, "grow", clgetr ("grow"))
+ call ic_putr (ic, "xmin", min (x[1], x[npts]))
+ call ic_putr (ic, "xmax", max (x[1], x[npts]))
+ call ic_pstr (ic, "xlabel", "Pixel")
+ call ic_pstr (ic, "ylabel", "Overscan")
+
+ # If the fitting is done interactively set the GTOOLS and GIO
+ # pointers. Also "learn" the fitting parameters since they may
+ # be changed when fitting interactively.
+
+ call sprintf (Memc[str], SZ_LINE,
+ "Fit overscan vector for %s interactively")
+ call pargstr (image)
+ call set_interactive (Memc[str], interactive)
+ if ((interactive == YES) || (interactive == ALWAYSYES)) {
+ gt = gt_init ()
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan vector for %s from section [%d:%d,%d:%d]\n")
+ call pargstr (image)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call gt_sets (gt, GTTITLE, Memc[str])
+ call gt_sets (gt, GTTYPE, "line")
+ call gt_setr (gt, GTXMIN, x[1])
+ call gt_setr (gt, GTXMAX, x[npts])
+ call clgstr ("graphics", Memc[str], SZ_FNAME)
+ gp = gopen (Memc[str], NEW_FILE, STDGRAPH)
+
+ call icg_fit (ic, gp, "cursor", gt, cv, x, overscan, Memr[w], npts)
+
+ call ic_gstr (ic, "function", Memc[str], SZ_LINE)
+ call clpstr ("function", Memc[str])
+ call clputi ("order", ic_geti (ic, "order"))
+ call ic_gstr (ic, "sample", Memc[str], SZ_LINE)
+ call clpstr ("sample", Memc[str])
+ call clputi ("naverage", ic_geti (ic, "naverage"))
+ call clputi ("niterate", ic_geti (ic, "niterate"))
+ call clputr ("low_reject", ic_getr (ic, "low"))
+ call clputr ("high_reject", ic_getr (ic, "high"))
+ call clputr ("grow", ic_getr (ic, "grow"))
+
+ call gclose (gp)
+ call gt_free (gt)
+ } else
+ call ic_fit (ic, cv, x, overscan, Memr[w], npts, YES, YES, YES, YES)
+
+ # Make a log of the fit in the plot file if given.
+ call clgstr ("plotfile", Memc[str], SZ_LINE)
+ call xt_stripwhite (Memc[str])
+ if (Memc[str] != EOS) {
+ fd = open (Memc[str], APPEND, BINARY_FILE)
+ gp = gopen ("stdvdm", NEW_FILE, fd)
+ gt = gt_init ()
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan vector for %s from section [%d:%d,%d:%d]\n")
+ call pargstr (image)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call gt_sets (gt, GTTITLE, Memc[str])
+ call gt_sets (gt, GTTYPE, "line")
+ call gt_setr (gt, GTXMIN, 1.)
+ call gt_setr (gt, GTXMAX, real (npts))
+ call icg_graphr (ic, gp, gt, cv, x, overscan, Memr[w], npts)
+ call gclose (gp)
+ call close (fd)
+ call gt_free (gt)
+ }
+
+ # Replace the raw overscan vector with the smooth fit.
+ call cvvector (cv, x, overscan, npts)
+
+ # Finish up.
+ call ic_closer (ic)
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# TRIM_OVERSCAN -- Trim the overscan vector.
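+# For example (illustrative values only), start=51 and npts=200 return
+# x = 51, 52, ..., 250 and overscan[i] = data[50+i].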
+
+procedure trim_overscan (data, npts, start, x, overscan)
+
+real data[ARB] # Full overscan vector
+int npts # Length of trimmed vector
+int start # Trim start
+real x[npts] # Trimmed pixel coordinates (returned)
+real overscan[npts] # Trimmed overscan vector (returned)
+
+int i, j
+
+begin
+ do i = 1, npts {
+ j = start + i - 1
+ x[i] = j
+ overscan[i] = data[j]
+ }
+end
diff --git a/noao/imred/ccdred/src/setproc.x b/noao/imred/ccdred/src/setproc.x
new file mode 100644
index 00000000..06c7977b
--- /dev/null
+++ b/noao/imred/ccdred/src/setproc.x
@@ -0,0 +1,77 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_PROC -- Set the processing parameter structure pointer.
+
+procedure set_proc (in, out, ccd)
+
+pointer in # Input IMIO pointer
+pointer out # Output IMIO pointer
+pointer ccd # CCD structure (returned)
+
+int clgwrd(), clscan(), nscan()
+real clgetr()
+pointer sp, str
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Allocate the ccd structure.
+ call calloc (ccd, LEN_CCD, TY_STRUCT)
+
+ IN_IM(ccd) = in
+ OUT_IM(ccd) = out
+ COR(ccd) = NO
+ CORS(ccd, FIXPIX) = NO
+ CORS(ccd, OVERSCAN) = NO
+ CORS(ccd, TRIM) = NO
+ READAXIS(ccd) = clgwrd ("readaxis",Memc[str],SZ_LINE,"|line|columns|")
+ MINREPLACE(ccd) = clgetr ("minreplace")
+
+ CALCTYPE(ccd) = TY_REAL
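+ # The "pixeltype" parameter is scanned as two words; the first word
+ # (the output pixel type) is skipped here and the second selects the
+ # calculation type: 'r' for real or 's' for short.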
+ if (clscan ("pixeltype") != EOF) {
+ call gargwrd (Memc[str], SZ_LINE)
+ call gargwrd (Memc[str], SZ_LINE)
+ if (nscan() == 2) {
+ if (Memc[str] == 'r')
+ CALCTYPE(ccd) = TY_REAL
+ else if (Memc[str] == 's')
+ CALCTYPE(ccd) = TY_SHORT
+ else
+ call error (1, "Invalid calculation datatype")
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# FREE_PROC -- Free the processing structure pointer.
+
+procedure free_proc (ccd)
+
+pointer ccd # CCD structure
+
+begin
+ # Unmap calibration images.
+ if (MASK_IM(ccd) != NULL)
+ call imunmap (MASK_IM(ccd))
+ if (ZERO_IM(ccd) != NULL)
+ call ccd_unmap (ZERO_IM(ccd))
+ if (DARK_IM(ccd) != NULL)
+ call ccd_unmap (DARK_IM(ccd))
+ if (FLAT_IM(ccd) != NULL)
+ call ccd_unmap (FLAT_IM(ccd))
+ if (ILLUM_IM(ccd) != NULL)
+ call ccd_unmap (ILLUM_IM(ccd))
+ if (FRINGE_IM(ccd) != NULL)
+ call ccd_unmap (FRINGE_IM(ccd))
+
+ # Free memory
+ if (OVERSCAN_VEC(ccd) != NULL)
+ call mfree (OVERSCAN_VEC(ccd), TY_REAL)
+ if (MASK_FP(ccd) != NULL)
+ call xt_fpfree (MASK_FP(ccd))
+ call mfree (ccd, TY_STRUCT)
+end
diff --git a/noao/imred/ccdred/src/setsections.x b/noao/imred/ccdred/src/setsections.x
new file mode 100644
index 00000000..80e61e49
--- /dev/null
+++ b/noao/imred/ccdred/src/setsections.x
@@ -0,0 +1,113 @@
+include <imhdr.h>
+include <mwset.h>
+include "ccdred.h"
+
+# SET_SECTIONS -- Set the data section, ccd section, trim section and
+# bias section. Also set the WCS.
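+# The data section gives the portion of the raw image containing data, the
+# trim section the portion to keep, the bias section the overscan region,
+# and the ccd section the CCD coordinates of the data section. The defaults
+# are the full image (datasec, biassec), the data section (trimsec), and
+# the size of the data section (ccdsec).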
+
+procedure set_sections (ccd)
+
+pointer ccd # CCD structure (returned)
+
+pointer sp, str, mw, lterm, mw_openim()
+int nc, nl, c1, c2, cs, l1, l2, ls, ndim, mw_stati()
+bool streq()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+
+ # The default data section is the entire image.
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (IN_IM(ccd), "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ IN_C1(ccd) = c1
+ IN_C2(ccd) = c2
+ IN_L1(ccd) = l1
+ IN_L2(ccd) = l2
+
+ # The default trim section is the data section.
+ # Defer limit checking until actually used.
+ c1 = IN_C1(ccd)
+ c2 = IN_C2(ccd)
+ l1 = IN_L1(ccd)
+ l2 = IN_L2(ccd)
+ call clgstr ("trimsec", Memc[str], SZ_LINE)
+ if (streq (Memc[str], "image"))
+ call hdmgstr (IN_IM(ccd), "trimsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs!=1)||(ls!=1))
+ call error (0, "Error in TRIMSEC parameter")
+ TRIM_C1(ccd) = c1
+ TRIM_C2(ccd) = c2
+ TRIM_L1(ccd) = l1
+ TRIM_L2(ccd) = l2
+
+ # The default bias section is the whole image.
+ # Defer limit checking until actually used.
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ call clgstr ("biassec", Memc[str], SZ_LINE)
+ if (streq (Memc[str], "image"))
+ call hdmgstr (IN_IM(ccd), "biassec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs!=1)||(ls!=1))
+ call error (0, "Error in BIASSEC parameter")
+ BIAS_C1(ccd) = c1
+ BIAS_C2(ccd) = c2
+ BIAS_L1(ccd) = l1
+ BIAS_L2(ccd) = l2
+
+ # The default ccd section is the size of the data section.
+ c1 = 1
+ c2 = IN_C2(ccd) - IN_C1(ccd) + 1
+ l1 = 1
+ l2 = IN_L2(ccd) - IN_L1(ccd) + 1
+ call hdmgstr (IN_IM(ccd), "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ CCD_C1(ccd) = c1
+ CCD_C2(ccd) = c2
+ CCD_L1(ccd) = l1
+ CCD_L2(ccd) = l2
+ if ((IN_C2(ccd)-IN_C1(ccd) != CCD_C2(ccd)-CCD_C1(ccd)) ||
+ (IN_L2(ccd)-IN_L1(ccd) != CCD_L2(ccd)-CCD_L1(ccd)))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ # The default output data section is the input data section.
+ OUT_C1(ccd) = IN_C1(ccd)
+ OUT_C2(ccd) = IN_C2(ccd)
+ OUT_L1(ccd) = IN_L1(ccd)
+ OUT_L2(ccd) = IN_L2(ccd)
+
+ # Set the physical WCS to be CCD coordinates.
+ mw = mw_openim (IN_IM(ccd))
+ ndim = mw_stati (mw, MW_NPHYSDIM)
+ call salloc (lterm, ndim * (1 + ndim), TY_REAL)
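+ # The lterm buffer holds the ndim offset (LTV) elements followed by the
+ # ndim x ndim matrix (LTM) elements, in the order passed to mw_gltermr
+ # and mw_sltermr; the offsets and diagonal scales set below map image
+ # pixels to CCD (physical) coordinates.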
+ call mw_gltermr (mw, Memr[lterm+ndim], Memr[lterm], ndim)
+ Memr[lterm] = IN_C1(ccd) - CCD_C1(ccd)
+ Memr[lterm+1] = IN_L1(ccd) - CCD_L1(ccd)
+ Memr[lterm+ndim] = 1. / cs
+ Memr[lterm+ndim+1] = 0.
+ Memr[lterm+ndim+ndim] = 0.
+ Memr[lterm+ndim+ndim+1] = 1. / ls
+ call mw_sltermr (mw, Memr[lterm+ndim], Memr[lterm], ndim)
+ call mw_saveim (mw, IN_IM(ccd))
+ call mw_saveim (mw, OUT_IM(ccd))
+ call mw_close (mw)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/settrim.x b/noao/imred/ccdred/src/settrim.x
new file mode 100644
index 00000000..65d5d09c
--- /dev/null
+++ b/noao/imred/ccdred/src/settrim.x
@@ -0,0 +1,99 @@
+include <imhdr.h>
+include <imset.h>
+include "ccdred.h"
+
+# SET_TRIM -- Set the trim parameters.
+#
+# 1. Return immediately if the trim correction is not requested or
+# if the image has been previously corrected.
+# 2. Determine the trim section. This may be specified directly or
+# indirectly through the image header or symbol table.
+# 3. Parse the trim section and apply it to the output image.
+# 4. If the image is trimmed then log the operation and reset the output
+# image size.
+
+procedure set_trim (ccd)
+
+pointer ccd # CCD structure
+
+int xt1, xt2, yt1, yt2
+int nc, nl, c1, c2, l1, l2
+pointer sp, str, image
+bool clgetb(), ccdflag()
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("trim") || ccdflag (IN_IM(ccd), "trim"))
+ return
+
+ # Check trim section.
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+ c1 = TRIM_C1(ccd)
+ c2 = TRIM_C2(ccd)
+ l1 = TRIM_L1(ccd)
+ l2 = TRIM_L2(ccd)
+ if ((c1 < 1) || (c2 > nc) || (l1 < 1) || (l2 > nl)) {
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (image, SZ_LINE, TY_CHAR)
+ call imstats (IN_IM(ccd), IM_IMAGENAME, Memc[image], SZ_FNAME)
+ call sprintf (Memc[str], SZ_LINE,
+ "Error in trim section: image=%s[%d,%d], trimsec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ # If no processing is desired print trim section and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Trim section is [%d:%d,%d:%d].\n")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ return
+ }
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
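+ # xt1/yt1 are the amounts the trim section cuts into the data section
+ # on the low side (>= 0) and xt2/yt2 on the high side (<= 0). The ccd
+ # and input data sections are shrunk by these amounts and the output
+ # section is re-expressed relative to the trimmed origin.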
+ xt1 = max (0, c1 - IN_C1(ccd))
+ xt2 = min (0, c2 - IN_C2(ccd))
+ yt1 = max (0, l1 - IN_L1(ccd))
+ yt2 = min (0, l2 - IN_L2(ccd))
+
+ CCD_C1(ccd) = CCD_C1(ccd) + xt1
+ CCD_C2(ccd) = CCD_C2(ccd) + xt2
+ CCD_L1(ccd) = CCD_L1(ccd) + yt1
+ CCD_L2(ccd) = CCD_L2(ccd) + yt2
+ IN_C1(ccd) = IN_C1(ccd) + xt1
+ IN_C2(ccd) = IN_C2(ccd) + xt2
+ IN_L1(ccd) = IN_L1(ccd) + yt1
+ IN_L2(ccd) = IN_L2(ccd) + yt2
+ OUT_C1(ccd) = IN_C1(ccd) - c1 + 1
+ OUT_C2(ccd) = IN_C2(ccd) - c1 + 1
+ OUT_L1(ccd) = IN_L1(ccd) - l1 + 1
+ OUT_L2(ccd) = IN_L2(ccd) - l1 + 1
+ IM_LEN(OUT_IM(ccd),1) = c2 - c1 + 1
+ IM_LEN(OUT_IM(ccd),2) = l2 - l1 + 1
+
+ CORS(ccd, TRIM) = YES
+ COR(ccd) = YES
+
+ call sprintf (Memc[str], SZ_LINE, "Trim data section is [%d:%d,%d:%d]")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "trim", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/setzero.x b/noao/imred/ccdred/src/setzero.x
new file mode 100644
index 00000000..610aeee7
--- /dev/null
+++ b/noao/imred/ccdred/src/setzero.x
@@ -0,0 +1,141 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_ZERO -- Set parameters for zero level correction.
+# 1. Return immediately if the zero level correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the zero level correction image. Return an error if not found.
+# 3. If the zero level image has not been processed call ZEROPROC.
+# 4. Set the processing flag.
+# 5. Log the operation (to user, logfile, and output image header).
+
+procedure set_zero (ccd)
+
+pointer ccd # CCD structure
+
+int nscan, nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+pointer sp, str, image, im, ccd_cache()
+bool clgetb(), ccdflag(), ccdcheck()
+int ccdtypei(), ccdnscan()
+errchk cal_image, ccd_cache, ccdproc
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("zerocor") || ccdflag (IN_IM(ccd), "zerocor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the zero level correction image.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), ZERO, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print zero correction image and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Zero level correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the zero image if necessary.
+ # If nscan > 1 then the zero may not yet exist so create it
+ # from the unscanned zero.
+
+ iferr (im = ccd_cache (Memc[image], ZERO)) {
+ call cal_image (IN_IM(ccd), ZERO, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], ZERO)
+ if (ccdcheck (im, ZERO)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], ZERO)
+ }
+ call scancor (Memc[str], Memc[image], nscan, INDEF)
+ im = ccd_cache (Memc[image], ZERO)
+ }
+
+ if (ccdcheck (im, ZERO)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], ZERO)
+ im = ccd_cache (Memc[image], ZERO)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
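+ # Map the input CCD section into zero image pixel coordinates using
+ # the offset between the zero image ccdsec and datasec origins.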
+ ZERO_IM(ccd) = im
+ ZERO_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ ZERO_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ ZERO_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ ZERO_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ CORS(ccd, ZEROCOR) = Z
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE, "Zero level correction image is %s")
+ call pargstr (Memc[image])
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "zerocor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/sigma.gx b/noao/imred/ccdred/src/sigma.gx
new file mode 100644
index 00000000..8b59f1f6
--- /dev/null
+++ b/noao/imred/ccdred/src/sigma.gx
@@ -0,0 +1,89 @@
+$for (sr)
+# SIGMA -- Compute sigma line from image lines with rejection.
+
+procedure sigma$t (data, nimages, mean, sigma, npts)
+
+pointer data[nimages] # Data vectors
+int nimages # Number of data vectors
+$if (datatype == sil)
+real mean[npts] # Mean vector
+real sigma[npts] # Sigma vector (returned)
+$else
+PIXEL mean[npts] # Mean vector
+PIXEL sigma[npts] # Sigma vector (returned)
+$endif
+int npts # Number of points in each vector
+
+$if (datatype == sil)
+real val, sig, pixval
+$else
+PIXEL val, sig, pixval
+$endif
+int i, j, n, n1
+
+begin
+ n = nimages - 1
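+ # Start with nimages-1 so the divisor is the number of degrees of
+ # freedom about the given mean; each INDEF (rejected) pixel lowers it
+ # by one.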
+ do i = 1, npts {
+ val = mean[i]
+ sig = 0.
+ n1 = n
+ do j = 1, nimages {
+ pixval = Mem$t[data[j]+i-1]
+ if (IS_INDEF (pixval))
+ n1 = n1 - 1
+ else
+ sig = sig + (pixval - val) ** 2
+ }
+ if (n1 > 0)
+ sigma[i] = sqrt (sig / n1)
+ else
+ sigma[i] = 0.
+ }
+end
+
+
+# WTSIGMA -- Compute scaled and weighted sigma line from image lines with
+# rejection.
+
+procedure wtsigma$t (data, scales, zeros, wts, nimages, mean, sigma, npts)
+
+pointer data[nimages] # Data vectors
+real scales[nimages] # Scale factors
+real zeros[nimages] # Zero levels
+real wts[nimages] # Weights
+int nimages # Number of data vectors
+$if (datatype == sil)
+real mean[npts] # Mean vector
+real sigma[npts] # Sigma vector (returned)
+real val, sig, pixval
+$else
+PIXEL mean[npts] # Mean vector
+PIXEL sigma[npts] # Sigma vector (returned)
+PIXEL val, sig, pixval
+$endif
+int npts # Number of points in each vector
+
+int i, j, n
+real sumwts
+
+begin
+ do i = 1, npts {
+ val = mean[i]
+ n = 0
+ sig = 0.
+ sumwts = 0.
+ do j = 1, nimages {
+ pixval = Mem$t[data[j]+i-1]
+ if (!IS_INDEF (pixval)) {
+ n = n + 1
+ sig = sig + wts[j]*(pixval/scales[j]-zeros[j]-val) ** 2
+ sumwts = sumwts + wts[j]
+ }
+ }
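+ # Weighted variance of the scaled pixels about the given mean with an
+ # n/(n-1) small-sample correction.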
+ if (n > 1)
+ sigma[i] = sqrt (sig / sumwts * n / (n - 1))
+ else
+ sigma[i] = 0.
+ }
+end
+$endfor
diff --git a/noao/imred/ccdred/src/t_badpixim.x b/noao/imred/ccdred/src/t_badpixim.x
new file mode 100644
index 00000000..3a44dfa0
--- /dev/null
+++ b/noao/imred/ccdred/src/t_badpixim.x
@@ -0,0 +1,114 @@
+include <imhdr.h>
+
+# T_BADPIXIMAGE -- Create a bad pixel image mask from a bad pixel file.
+
+procedure t_badpiximage ()
+
+pointer bpfile # Bad pixel file
+pointer bpimage # Bad pixel image
+pointer template # Template image
+short goodval, badval # Good and bad values
+
+int i, nc, nl, c1, c2, l1, l2, fd, x1, x2, xstep, y1, y2, ystep
+pointer sp, str, im, im1
+
+short clgets()
+bool ccdflag()
+pointer immap(), impl2s(), imps2s()
+int open(), fscan(), nscan(), stridxs(), strmatch()
+errchk open, immap
+
+begin
+ call smark (sp)
+ call salloc (bpfile, SZ_FNAME, TY_CHAR)
+ call salloc (bpimage, SZ_FNAME, TY_CHAR)
+ call salloc (template, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get task parameters.
+ call clgstr ("fixfile", Memc[bpfile], SZ_FNAME)
+ call clgstr ("template", Memc[template], SZ_FNAME)
+ call clgstr ("image", Memc[bpimage], SZ_FNAME)
+ goodval = clgets ("goodvalue")
+ badval = clgets ("badvalue")
+
+ # Open the files and abort on an error.
+ fd = open (Memc[bpfile], READ_ONLY, TEXT_FILE)
+ im1 = immap (Memc[template], READ_ONLY, 0)
+ im = immap (Memc[bpimage], NEW_COPY, im1)
+
+ # Set the output image.
+ IM_PIXTYPE(im) = TY_SHORT
+ call sprintf (IM_TITLE(im), SZ_IMTITLE,
+ "Bad pixel image from bad pixel file %s")
+ call pargstr (Memc[bpfile])
+
+ # Set the good pixel values.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ do i = 1, nl
+ call amovks (goodval, Mems[impl2s(im,i)], nc)
+
+ # Set the bad pixel values. By default the bad pixel coordinates
+ # refer to the image directly but if the word "untrimmed" appears
+ # in a comment then the coordinates refer to the untrimmed image.
+ # This is the same algorithm as used in SETFIXPIX for CCDPROC.
+
+ x1 = 1
+ xstep = 1
+ y1 = 1
+ ystep = 1
+ while (fscan (fd) != EOF) {
+ call gargwrd (Memc[str], SZ_LINE)
+ if (Memc[str] == '#') {
+ call gargstr (Memc[str], SZ_LINE)
+ if (strmatch (Memc[str], "{untrimmed}") != 0) {
+ if (ccdflag (im, "trim")) {
+ call hdmgstr (im, "trim", Memc[str], SZ_LINE)
+ i = stridxs ("[", Memc[str])
+ if (i != 0) {
+ x1 = 1
+ x2 = IM_LEN(im,1)
+ xstep = 1
+ y1 = 1
+ y2 = IM_LEN(im,2)
+ ystep = 1
+ call ccd_section (Memc[str+i-1], x1, x2, xstep,
+ y1, y2, ystep)
+ }
+ }
+ }
+ next
+ }
+
+ call reset_scan()
+ call gargi (c1)
+ call gargi (c2)
+ call gargi (l1)
+ call gargi (l2)
+ if (nscan() != 4) {
+ if (nscan() == 2) {
+ l1 = c2
+ c2 = c1
+ l2 = l1
+ } else
+ next
+ }
+
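+ # Convert the (possibly untrimmed) bad pixel limits to image pixels:
+ # lower limits round up, upper limits round down, and the results are
+ # clipped to the image dimensions.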
+ c1 = max (1, (c1 - x1 + xstep - 1) / xstep + 1)
+ c2 = min (nc, (c2 - x1) / xstep + 1)
+ l1 = max (1, (l1 - y1 + ystep - 1) / ystep + 1)
+ l2 = min (nl, (l2 - y1) / ystep + 1)
+
+ if ((c1 > c2) || (l1 > l2))
+ next
+
+ i = (c2 - c1 + 1) * (l2 - l1 + 1)
+ call amovks (badval, Mems[imps2s(im,c1,c2,l1,l2)], i)
+ }
+
+ # Finish up.
+ call imunmap (im)
+ call imunmap (im1)
+ call close (fd)
+end
diff --git a/noao/imred/ccdred/src/t_ccdgroups.x b/noao/imred/ccdred/src/t_ccdgroups.x
new file mode 100644
index 00000000..225589e5
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdgroups.x
@@ -0,0 +1,258 @@
+include <error.h>
+include <math.h>
+
+# Group type definitions.
+define GROUPS "|position|title|date|ccdtype|subset|"
+define POSITION 1 # Group by position
+define TITLE 2 # Group by title
+define DATE 3 # Group by date
+define CCDTYPE 4 # Group by ccdtype
+define SUBSET 5 # Group by subset
+
+define NALLOC 10 # Allocate memory in this size block
+
+# T_CCDGROUPS -- Group images into files based on parameters with common values.
+# The output consists of files containing the image names of images from the
+# input image list which have the same group type such as position, date,
+# or title.
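+# Group files are named <root><number> for position, title, and date
+# grouping and <root><suffix> for ccdtype and subset grouping, where the
+# suffix is the ccdtype or subset string.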
+
+procedure t_ccdgroups ()
+
+int images # List of images
+pointer root # Output group root name
+int group # Group type
+real radius # Position radius
+bool verbose # Verbose output (package parameter)
+
+int ngroup, fd, ntitles, npositions, ndates, ccdtype
+pointer im, sp, image, output, suffix, titles, positions, dates
+
+bool clgetb()
+real clgetr()
+int position_group(), title_group(), date_group()
+int imtopenp(), imtgetim(), open(), clgwrd()
+errchk set_input, position_group, title_group, date_group, open
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (root, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (suffix, SZ_FNAME, TY_CHAR)
+
+ # Get the task parameters.
+ images = imtopenp ("images")
+ call clgstr ("output", Memc[root], SZ_FNAME)
+ group = clgwrd ("group", Memc[image], SZ_FNAME, GROUPS)
+ radius = clgetr ("radius")
+ call clgstr ("instrument", Memc[image], SZ_FNAME)
+ if (Memc[image] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[image])
+ verbose = clgetb ("verbose")
+
+ # Loop through the images and place them into groups.
+ positions = NULL
+ npositions = 0
+ titles = NULL
+ ntitles = 0
+ dates = NULL
+ ndates = 0
+ while (imtgetim (images, Memc[image], SZ_FNAME) != EOF) {
+ call set_input (Memc[image], im, ccdtype)
+ if (im == NULL)
+ next
+
+ iferr {
+ switch (group) {
+ case POSITION:
+ ngroup = position_group (im, positions, npositions, radius)
+ case TITLE:
+ ngroup = title_group (im, titles, ntitles)
+ case DATE:
+ ngroup = date_group (im, dates, ndates)
+ }
+
+ # Define the output group file.
+ switch (group) {
+ case POSITION, TITLE, DATE:
+ call sprintf (Memc[output], SZ_FNAME, "%s%d")
+ call pargstr (Memc[root])
+ call pargi (ngroup)
+ case CCDTYPE:
+ call ccdtypes (im, Memc[suffix], SZ_FNAME)
+ call sprintf (Memc[output], SZ_FNAME, "%s%d")
+ call pargstr (Memc[root])
+ call pargstr (Memc[suffix])
+ case SUBSET:
+ call ccdsubset (im, Memc[suffix], SZ_FNAME)
+ call sprintf (Memc[output], SZ_FNAME, "%s%d")
+ call pargstr (Memc[root])
+ call pargstr (Memc[suffix])
+ }
+
+ # Print the operation if verbose.
+ if (verbose) {
+ call printf ("%s --> %s\n")
+ call pargstr (Memc[image])
+ call pargstr (Memc[output])
+ }
+
+ # Enter the image in the appropriate group file.
+ fd = open (Memc[output], APPEND, TEXT_FILE)
+ call fprintf (fd, "%s\n")
+ call pargstr (Memc[image])
+ call close (fd)
+ } then
+ call erract (EA_WARN)
+
+ call imunmap (im)
+ }
+
+ # Finish up.
+ call imtclose (images)
+ if (positions != NULL)
+ call mfree (positions, TY_REAL)
+ if (titles != NULL)
+ call mfree (titles, TY_CHAR)
+ if (dates != NULL)
+ call mfree (dates, TY_CHAR)
+ call sfree (sp)
+end
+
+
+# TITLE_GROUP -- Group images by title.
+
+int procedure title_group (im, titles, ntitles)
+
+pointer im # Image
+pointer titles # Pointer to title strings
+int ntitles # Number of titles
+
+int i, nalloc
+pointer sp, title, ptr
+bool streq()
+errchk hdmgstr
+
+begin
+ call smark (sp)
+ call salloc (title, SZ_LINE, TY_CHAR)
+ call hdmgstr (im, "title", Memc[title], SZ_LINE)
+
+ for (i=1; i<=ntitles; i=i+1) {
+ ptr = titles + (i - 1) * SZ_LINE
+ if (streq (Memc[title], Memc[ptr]))
+ break
+ }
+ if (i > ntitles) {
+ if (i == 1) {
+ nalloc = NALLOC
+ call malloc (titles, nalloc * SZ_LINE, TY_CHAR)
+ } else if (i > nalloc) {
+ nalloc = nalloc + NALLOC
+ call realloc (titles, nalloc * SZ_LINE, TY_CHAR)
+ }
+ ptr = titles + (i - 1) * SZ_LINE
+ call strcpy (Memc[title], Memc[ptr], SZ_LINE-1)
+ ntitles = i
+ }
+
+ call sfree (sp)
+ return (i)
+end
+
+
+# POSITION_GROUP -- Group by RA and DEC position. The RA is in hours and
+# the DEC is in degrees. The radius is in seconds of arc.
+
+int procedure position_group (im, positions, npositions, radius)
+
+pointer im # Image
+pointer positions # Positions
+int npositions # Number of positions
+real radius # Matching radius
+
+real ra, dec, dra, ddec, r, hdmgetr()
+int i, nalloc
+pointer ptr
+errchk hdmgetr
+
+begin
+ ra = hdmgetr (im, "ra")
+ dec = hdmgetr (im, "dec")
+
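+ # Compare against the previous group positions with a flat-sky
+ # separation: wrap the RA difference into [-12, 12] hours, convert to
+ # arc degrees at this declination (15 degrees per hour times cos dec),
+ # and form the distance in arcseconds.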
+ for (i=1; i<=npositions; i=i+1) {
+ ptr = positions + 2 * i - 2
+ dra = ra - Memr[ptr]
+ ddec = dec - Memr[ptr+1]
+ if (dra > 12.)
+ dra = dra - 24.
+ if (dra < -12.)
+ dra = dra + 24.
+ dra = dra * cos (DEGTORAD (dec)) * 15.
+ r = sqrt (dra ** 2 + ddec ** 2) * 3600.
+ if (r < radius)
+ break
+ }
+ if (i > npositions) {
+ if (i == 1) {
+ nalloc = NALLOC
+ call malloc (positions, nalloc * 2, TY_REAL)
+ } else if (i > nalloc) {
+ nalloc = nalloc + NALLOC
+ call realloc (positions, nalloc * 2, TY_REAL)
+ }
+ ptr = positions + 2 * i - 2
+ Memr[ptr] = ra
+ Memr[ptr+1] = dec
+ npositions = i
+ }
+
+ return (i)
+end
+
+
+# DATE_GROUP -- Group by date.
+
+int procedure date_group (im, dates, ndates)
+
+pointer im # Image
+pointer dates # Pointer to date strings
+int ndates # Number of dates
+
+int i, nalloc, stridxs()
+pointer sp, date, ptr
+bool streq()
+errchk hdmgstr
+
+begin
+ call smark (sp)
+ call salloc (date, SZ_LINE, TY_CHAR)
+ call hdmgstr (im, "date-obs", Memc[date], SZ_LINE)
+
+ # Strip time if present.
+ i = stridxs ("T", Memc[date])
+ if (i > 0)
+ Memc[date+i-1] = EOS
+
+ for (i=1; i<=ndates; i=i+1) {
+ ptr = dates + (i - 1) * SZ_LINE
+ if (streq (Memc[date], Memc[ptr]))
+ break
+ }
+ if (i > ndates) {
+ if (i == 1) {
+ nalloc = NALLOC
+ call malloc (dates, nalloc * SZ_LINE, TY_CHAR)
+ } else if (i > nalloc) {
+ nalloc = nalloc + NALLOC
+ call realloc (dates, nalloc * SZ_LINE, TY_CHAR)
+ }
+ ptr = dates + (i - 1) * SZ_LINE
+ call strcpy (Memc[date], Memc[ptr], SZ_LINE-1)
+ ndates = i
+ }
+
+ call sfree (sp)
+ return (i)
+end
diff --git a/noao/imred/ccdred/src/t_ccdhedit.x b/noao/imred/ccdred/src/t_ccdhedit.x
new file mode 100644
index 00000000..a7fd9121
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdhedit.x
@@ -0,0 +1,87 @@
+include <error.h>
+
+define TYPES "|string|real|integer|"
+define SVAL 1 # String value
+define RVAL 2 # Real value
+define IVAL 3 # Integer value
+
+# T_CCDHEDIT -- Add, delete, or change CCD image header parameters.
+# This task differs from HEDIT in that it uses the CCD instrument translation
+# file.
+
+procedure t_ccdhedit ()
+
+int list # List of CCD images
+pointer param # Parameter name
+int type # Parameter type
+pointer sval # Parameter value
+pointer instrument # Instrument file
+
+int ip, ival, imtopenp(), imtgetim(), clgwrd(), ctoi(), ctor()
+real rval
+bool streq()
+pointer sp, im, immap()
+errchk hdmpstr, hdmputr, hdmputi
+
+begin
+ call smark (sp)
+ call salloc (param, SZ_LINE, TY_CHAR)
+ call salloc (sval, SZ_LINE, TY_CHAR)
+ call salloc (instrument, SZ_FNAME, TY_CHAR)
+
+ # Get the task parameters.
+ list = imtopenp ("images")
+ call clgstr ("parameter", Memc[param], SZ_LINE)
+ type = clgwrd ("type", Memc[sval], SZ_LINE, TYPES)
+ call clgstr ("value", Memc[sval], SZ_LINE)
+ call clgstr ("instrument", Memc[instrument], SZ_FNAME)
+ call xt_stripwhite (Memc[sval])
+
+ # Open the instrument translation file.
+ call hdmopen (Memc[instrument])
+
+ # If the parameter is IMAGETYP then change the parameter value from
+ # the package form to the image form using the inverse mapping in the
+ # translation file.
+
+ if (streq (Memc[param], "imagetyp"))
+ call hdmparm (Memc[sval], Memc[sval], SZ_LINE)
+
+ # Edit each image in the input list.
+ while (imtgetim (list, Memc[instrument], SZ_FNAME) != EOF) {
+ iferr (im = immap (Memc[instrument], READ_WRITE, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ # If the parameter value is null then delete the entry.
+ if (Memc[sval] == EOS) {
+ iferr (call hdmdelf (im, Memc[param]))
+ call erract (EA_WARN)
+
+ # Otherwise add the parameter of the specified type.
+ } else {
+ switch (type) {
+ case SVAL:
+ call hdmpstr (im, Memc[param], Memc[sval])
+ case RVAL:
+ ip = 1
+ if (ctor (Memc[sval], ip, rval) == 0)
+ call error (0, "Parameter value is not a number")
+ call hdmputr (im, Memc[param], rval)
+ case IVAL:
+ ip = 1
+ if (ctoi (Memc[sval], ip, ival) == 0)
+ call error (0, "Parameter value is not a number")
+ call hdmputi (im, Memc[param], ival)
+ }
+ }
+
+ call imunmap (im)
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (list)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/t_ccdinst.x b/noao/imred/ccdred/src/t_ccdinst.x
new file mode 100644
index 00000000..e98763fd
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdinst.x
@@ -0,0 +1,667 @@
+include <imhdr.h>
+include <imio.h>
+include <error.h>
+include "ccdtypes.h"
+
+define HELP1 "noao$imred/ccdred/src/ccdinst1.key"
+define HELP2 "noao$imred/ccdred/src/ccdinst2.key"
+define HELP3 "noao$imred/ccdred/src/ccdinst3.key"
+
+define LEVELS "|basic|common|all|"
+
+define CMDS "|quit|?|help|show|instrument|imheader|read|write|newimage\
+ |translate|imagetyp|subset|exptime|darktime|fixfile|biassec\
+ |ccdsec|datasec|trimsec|darkcor|fixpix|flatcor|fringcor\
+ |illumcor|overscan|readcor|scancor|trim|zerocor|ccdmean\
+ |fringscl|illumflt|mkfringe|mkillum|skyflat|ncombine\
+ |date-obs|dec|ra|title|next|nscanrow|"
+
+define QUIT 1 # Quit
+define QUESTION 2 # Help
+define HELP 3 # Help
+define SHOW 4 # Show current translations
+define INST 5 # Show instrument file
+define IMHEADER 6 # Print image header
+define READ 7 # Read instrument file
+define WRITE 8 # Write instrument file
+define NEWIMAGE 9 # Change image
+define TRANSLATE 10 # Translate image type
+define IMAGETYPE 11 # Image type
+define SUBSET 12 # Subset parameter
+define EXPTIME 13 # Exposure time
+define DARKTIME 14 # Dark time
+define FIXFILE 15 # Bad pixel file
+define BIASSEC 16 # Bias section
+define CCDSEC 17 # CCD section
+define DATASEC 18 # Data section
+define TRIMSEC 19 # Trim section
+define DARKCOR 20 # Dark count flag
+define FIXPIX 21 # Bad pixel flag
+define FLATCOR 22 # Flat field flag
+define FRINGCOR 23 # Fringe flag
+define ILLUMCOR 24 # Illumination flag
+define OVERSCAN 25 # Overscan flag
+define READCOR 26 # Readout flag
+define SCANCOR 27 # Scan mode flag
+define NSCANROW 42 # Number of scan rows
+define TRIM 28 # Trim flag
+define ZEROCOR 29 # Zero level flag
+define CCDMEAN 30 # CCD mean value
+define FRINGSCL 31 # Fringe scale value
+define ILLUMFLT 32 # Illumination flat flag
+define MKFRINGE 33 # Illumination flag
+define MKILLUM 34 # Illumination flag
+define SKYFLAT 35 # Sky flat flag
+define NCOMBINE 36 # NCOMBINE parameter
+define DATEOBS 37 # Date
+define DEC 38 # Dec
+define RA 39 # RA
+define TITLE 40 # Title
+define NEXT 41 # Next image
+
+# T_CCDINST -- Check and modify instrument translations
+
+procedure t_ccdinst ()
+
+int list, level, ncmd, imtopenp(), imtgetim(), scan(), access(), clgwrd()
+pointer sp, image, inst, ssfile, im, immap()
+bool update, clgetb()
+errchk delete, hdmwrite
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (inst, SZ_FNAME, TY_CHAR)
+ call salloc (ssfile, SZ_FNAME, TY_CHAR)
+
+ # Get the task parameters, open the translation file, set defaults.
+ list = imtopenp ("images")
+ call clgstr ("instrument", Memc[inst], SZ_FNAME)
+ call clgstr ("ssfile", Memc[ssfile], SZ_FNAME)
+ level = clgwrd ("parameters", Memc[image], SZ_FNAME, LEVELS)
+ if (Memc[image] == EOS)
+ call error (1, "No 'parameters' file value specified.")
+ call hdmopen (Memc[inst])
+ ncmd = NEXT
+ update = false
+
+ # Process each image.
+ while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+ iferr (im = immap (Memc[image], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ if (clgetb ("edit"))
+ call ccdinst_edit (im, Memc[image], Memc[inst], Memc[ssfile],
+ level, ncmd, update)
+ else
+ call ccdinst_hdr (im, Memc[image], Memc[inst], Memc[ssfile],
+ level)
+ call imunmap (im)
+ if (ncmd == QUIT)
+ break
+ }
+
+ # Update instrument file if necessary.
+ if (update) {
+ call printf ("Update instrument file %s (%b)? ")
+ call pargstr (Memc[inst])
+ call pargb (update)
+ call flush (STDOUT)
+ if (scan() != EOF)
+ call gargb (update)
+ if (update) {
+ iferr {
+ if (access (Memc[inst], 0, 0) == YES)
+ call delete (Memc[inst])
+ call hdmwrite (Memc[inst], NEW_FILE)
+ } then
+ call erract (EA_WARN)
+ }
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (list)
+ call sfree (sp)
+end
+
+
+# CCDINST_EDIT -- Main instrument file editor loop.
+# This returns the last command (quit or next) and the update flag.
+# The image name may also be changed.
+
+procedure ccdinst_edit (im, image, inst, ssfile, level, ncmd, update)
+
+pointer im # Image pointer
+char image[SZ_FNAME] # Image name
+char inst[SZ_FNAME] # Instrument file
+char ssfile[SZ_FNAME] # Subset file
+int level # Parameter level
+int ncmd # Last command
+bool update # Update?
+
+bool strne()
+int scan(), nscan(), strdic(), access()
+pointer sp, cmd, key, def, imval, im1, immap()
+errchk delete, hdmwrite
+
+begin
+ call smark (sp)
+ call salloc (cmd, SZ_LINE, TY_CHAR)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+ call salloc (def, SZ_LINE, TY_CHAR)
+ call salloc (imval, SZ_LINE, TY_CHAR)
+
+ call sscan ("show")
+ repeat {
+ call gargwrd (Memc[cmd], SZ_LINE)
+ ncmd = strdic (Memc[cmd], Memc[cmd], SZ_LINE, CMDS)
+ switch (ncmd) {
+ case NEXT, QUIT:
+ break
+ case QUESTION, HELP:
+ if (level == 1)
+ call pagefile (HELP1, "ccdinstrument")
+ else if (level == 2)
+ call pagefile (HELP2, "ccdinstrument")
+ else if (level == 3)
+ call pagefile (HELP3, "ccdinstrument")
+ case SHOW:
+ call ccdinst_hdr (im, image, inst, ssfile, level)
+ case INST:
+ call hdmwrite ("STDOUT", APPEND)
+ call printf ("\n")
+ case IMHEADER:
+ call ccdinst_i (im, image)
+ case READ:
+ call gargwrd (Memc[imval], SZ_LINE)
+ if (nscan() < 2)
+ call ccdinst_g ("Instrument file", inst, Memc[imval])
+ if (update)
+ call printf ("WARNING: Previous changes lost\n")
+ call hdmclose ()
+ update = false
+ if (strne (inst, Memc[imval])) {
+ iferr (call hdmopen (Memc[imval])) {
+ call erract (EA_WARN)
+ call hdmopen (inst)
+ } else {
+ call ccdinst_hdr (im, image, inst, ssfile, level)
+ update = true
+ }
+ }
+ case WRITE:
+ call gargwrd (Memc[imval], SZ_LINE)
+ if (nscan() < 2)
+ call ccdinst_g ("Instrument file", inst, Memc[imval])
+ iferr {
+ if (access (Memc[imval], 0, 0) == YES)
+ call delete (Memc[imval])
+ call hdmwrite (Memc[imval], NEW_FILE)
+ update = false
+ } then
+ call erract (EA_WARN)
+ case NEWIMAGE:
+ call gargwrd (Memc[imval], SZ_LINE)
+ if (nscan() < 2)
+ call ccdinst_g ("New image name", image, Memc[imval])
+ if (strne (image, Memc[imval])) {
+ iferr (im1 = immap (Memc[imval], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ im1 = NULL
+ }
+ if (im1 != NULL) {
+ call imunmap (im)
+ im = im1
+ call strcpy (Memc[imval], image, SZ_FNAME)
+ call ccdinst_hdr (im, image, inst, ssfile, level)
+ }
+ }
+ case TRANSLATE:
+ call ccdtypes (im, Memc[cmd], SZ_LINE)
+ call hdmgstr (im, "imagetyp", Memc[imval], SZ_LINE)
+
+ call gargwrd (Memc[def], SZ_FNAME)
+ if (nscan() < 2) {
+ call printf ("CCDRED image type for '%s' (%s): ")
+ call pargstr (Memc[imval])
+ call pargstr (Memc[cmd])
+ call flush (STDOUT)
+ if (scan() != EOF)
+ call gargwrd (Memc[def], SZ_FNAME)
+ if (nscan() == 0)
+ call strcpy (Memc[cmd], Memc[def], SZ_LINE)
+ }
+ if (strdic (Memc[def], Memc[def], SZ_LINE, CCDTYPES) == 0) {
+ call printf ("Unknown CCDRED image type\n")
+ call strcpy (Memc[cmd], Memc[def], SZ_LINE)
+ }
+ if (strne (Memc[def], Memc[cmd])) {
+ call hdmpname (Memc[imval], Memc[def])
+ call ccdinst_p (im, "imagetyp",
+ Memc[key], Memc[def], Memc[imval])
+ update = true
+ }
+ case IMAGETYPE:
+ call ccdinst_e (im, "image type", "imagetyp",
+ Memc[key], Memc[def], Memc[imval], update)
+ case SUBSET:
+ call ccdinst_e (im, "subset parameter", "subset",
+ Memc[key], Memc[def], Memc[imval], update)
+ case EXPTIME:
+ call ccdinst_e (im, "exposure time", "exptime",
+ Memc[key], Memc[def], Memc[imval], update)
+ case DARKTIME:
+ call ccdinst_e (im, "dark time", "darktime",
+ Memc[key], Memc[def], Memc[imval], update)
+ case FIXFILE:
+ call ccdinst_e (im, "bad pixel file", "fixfile",
+ Memc[key], Memc[def], Memc[imval], update)
+ case BIASSEC:
+ call ccdinst_e (im, "bias section", "biassec",
+ Memc[key], Memc[def], Memc[imval], update)
+ case CCDSEC:
+ call ccdinst_e (im, "original CCD section", "ccdsec",
+ Memc[key], Memc[def], Memc[imval], update)
+ case DATASEC:
+ call ccdinst_e (im, "data section", "datasec",
+ Memc[key], Memc[def], Memc[imval], update)
+ case TRIMSEC:
+ call ccdinst_e (im, "trim section", "trimsec",
+ Memc[key], Memc[def], Memc[imval], update)
+ case DARKCOR:
+ call ccdinst_e (im, "dark count flag", "darkcor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case FIXPIX:
+ call ccdinst_e (im, "bad pixel flag", "fixpix",
+ Memc[key], Memc[def], Memc[imval], update)
+ case FLATCOR:
+ call ccdinst_e (im, "flat field flag", "flatcor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case FRINGCOR:
+ call ccdinst_e (im, "fringe flag", "fringcor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case ILLUMCOR:
+ call ccdinst_e (im, "illumination flag", "illumcor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case OVERSCAN:
+ call ccdinst_e (im, "overscan flag", "overscan",
+ Memc[key], Memc[def], Memc[imval], update)
+ case READCOR:
+ call ccdinst_e (im, "read correction flag", "readcor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case SCANCOR:
+ call ccdinst_e (im, "scan mode flag", "scancor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case NSCANROW:
+ call ccdinst_e (im, "scan mode rows", "nscanrow",
+ Memc[key], Memc[def], Memc[imval], update)
+ case TRIM:
+ call ccdinst_e (im, "trim flag", "trim",
+ Memc[key], Memc[def], Memc[imval], update)
+ case ZEROCOR:
+ call ccdinst_e (im, "zero level flag", "zerocor",
+ Memc[key], Memc[def], Memc[imval], update)
+ case CCDMEAN:
+ call ccdinst_e (im, "mean value", "ccdmean",
+ Memc[key], Memc[def], Memc[imval], update)
+ case FRINGSCL:
+ call ccdinst_e (im, "fringe scale", "fringscl",
+ Memc[key], Memc[def], Memc[imval], update)
+ case ILLUMFLT:
+ call ccdinst_e (im, "illumination flat image", "illumflt",
+ Memc[key], Memc[def], Memc[imval], update)
+ case MKFRINGE:
+ call ccdinst_e (im, "fringe image", "mkfringe",
+ Memc[key], Memc[def], Memc[imval], update)
+ case MKILLUM:
+ call ccdinst_e (im, "illumination image", "mkillum",
+ Memc[key], Memc[def], Memc[imval], update)
+ case SKYFLAT:
+ call ccdinst_e (im, "sky flat image", "skyflat",
+ Memc[key], Memc[def], Memc[imval], update)
+ case NCOMBINE:
+ call ccdinst_e (im, "number of images combined", "ncombine",
+ Memc[key], Memc[def], Memc[imval], update)
+ case DATEOBS:
+ call ccdinst_e (im, "date of observation", "date-obs",
+ Memc[key], Memc[def], Memc[imval], update)
+ case DEC:
+ call ccdinst_e (im, "declination", "dec",
+ Memc[key], Memc[def], Memc[imval], update)
+ case RA:
+ call ccdinst_e (im, "ra", "ra",
+ Memc[key], Memc[def], Memc[imval], update)
+ case TITLE:
+ call ccdinst_e (im, "title", "title",
+ Memc[key], Memc[def], Memc[imval], update)
+ default:
+ if (nscan() > 0)
+ call eprintf ("Unrecognized or ambiguous command\007\n")
+ }
+ call printf ("ccdinstrument> ")
+ call flush (STDOUT)
+ } until (scan() == EOF)
+
+ call sfree (sp)
+end
+
+
+# CCDINST_HDR -- Print the current instrument translations for an image.
+
+procedure ccdinst_hdr (im, image, inst, ssfile, level)
+
+pointer im # Image pointer
+char image[SZ_FNAME] # Image name
+char inst[SZ_FNAME] # Instrument file
+char ssfile[SZ_FNAME] # Subset file
+int level # Parameter level
+
+pointer sp, key, def, ccdval, imval
+
+begin
+ call smark (sp)
+ call salloc (key, SZ_FNAME, TY_CHAR)
+ call salloc (def, SZ_LINE, TY_CHAR)
+ call salloc (ccdval, SZ_LINE, TY_CHAR)
+ call salloc (imval, SZ_LINE, TY_CHAR)
+
+ # General stuff
+ call printf ("Image: %s\n")
+ call pargstr (image)
+ call printf ("Instrument file: %s\n")
+ call pargstr (inst)
+ call printf ("Subset file: %s\n")
+ call pargstr (ssfile)
+
+ # Table labels
+ call printf ("\n%-8s %-8s %-8s %-8s %-8s\n")
+ call pargstr ("CCDRED")
+ call pargstr ("IMAGE")
+ call pargstr ("DEFAULT")
+ call pargstr ("CCDRED")
+ call pargstr ("IMAGE")
+ call printf ("%-8s %-8s %-8s %-8s %-8s\n")
+ call pargstr ("PARAM")
+ call pargstr ("KEYWORD")
+ call pargstr ("VALUE")
+ call pargstr ("VALUE")
+ call pargstr ("VALUE")
+ call printf ("---------------------------------------")
+ call printf ("---------------------------------------\n")
+
+ # Print the translations. Entries beyond the basic set are printed
+ # only at the higher parameter levels.
+ call ccdinst_p (im, "imagetyp", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "subset", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "exptime", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "darktime", Memc[key], Memc[def], Memc[imval])
+ if (level > 1) {
+ call printf ("\n")
+ call ccdinst_p (im, "biassec", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "trimsec", Memc[key], Memc[def], Memc[imval])
+ call printf ("\n")
+ call ccdinst_p (im, "fixpix", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "overscan", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "trim", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "zerocor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "darkcor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "flatcor", Memc[key], Memc[def], Memc[imval])
+ }
+ if (level > 2) {
+ call ccdinst_p (im, "datasec", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "ccdsec", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "fixfile", Memc[key], Memc[def], Memc[imval])
+ call printf ("\n")
+ call ccdinst_p (im, "illumcor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "fringcor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "readcor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "scancor", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "nscanrow", Memc[key], Memc[def], Memc[imval])
+ call printf ("\n")
+ call ccdinst_p (im, "illumflt", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "mkfringe", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "mkillum", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "skyflat", Memc[key], Memc[def], Memc[imval])
+ call printf ("\n")
+ call ccdinst_p (im, "ccdmean", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "fringscl", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "ncombine", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "date-obs", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "dec", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "ra", Memc[key], Memc[def], Memc[imval])
+ call ccdinst_p (im, "title", Memc[key], Memc[def], Memc[imval])
+ }
+
+ call printf ("\n")
+ call flush (STDOUT)
+ call sfree (sp)
+end
+
+
+# CCDINST_P -- Print the translation for the specified translation name.
+
+procedure ccdinst_p (im, name, key, def, value)
+
+pointer im # Image pointer
+char name[SZ_FNAME] # CCDRED name
+char key[SZ_FNAME] # Image header keyword
+char def[SZ_LINE] # Default value
+char value[SZ_LINE] # Value
+
+int i, strdic(), hdmaccf()
+bool bval, ccdflag()
+
+begin
+ i = strdic (name, key, SZ_FNAME, CMDS)
+ if (i == 0)
+ return
+
+ # Get the translation image keyword, default value, and image value.
+ call hdmname (name, key, SZ_FNAME)
+ call hdmgdef (name, def, SZ_LINE)
+ call hdmgstr (im, name, value, SZ_LINE)
+ if (value[1] == EOS)
+ call strcpy ("?", value, SZ_LINE)
+
+ switch (i) {
+ case IMAGETYPE:
+ call printf ("%-8s %-8s %-8s")
+ call pargstr (name)
+ call pargstr (key)
+ call pargstr (def)
+ call ccdtypes (im, def, SZ_LINE)
+ call printf (" %-8s %-.39s\n")
+ call pargstr (def)
+ call pargstr (value)
+ case SUBSET:
+ call printf ("%-8s %-8s %-8s")
+ call pargstr (name)
+ call pargstr (key)
+ call pargstr (def)
+ call ccdsubset (im, def, SZ_LINE)
+ call printf (" %-8s %-.39s\n")
+ call pargstr (def)
+ call pargstr (value)
+ case FIXPIX, OVERSCAN, TRIM, ZEROCOR, DARKCOR, FLATCOR, ILLUMCOR,
+ FRINGCOR, READCOR, SCANCOR, ILLUMFLT, MKFRINGE, MKILLUM,
+ SKYFLAT:
+ bval = ccdflag (im, name)
+ if (hdmaccf (im, name) == NO)
+ call strcpy ("?", value, SZ_LINE)
+ call printf ("%-8s %-8s %-8s %-8b %-.39s\n")
+ call pargstr (name)
+ call pargstr (key)
+ call pargstr (def)
+ call pargb (bval)
+ call pargstr (value)
+ default:
+ call printf ("%-8s %-8s %-8s %-8s")
+ call pargstr (name)
+ call pargstr (key)
+ call pargstr (def)
+ call pargstr (value)
+ if (hdmaccf (im, name) == NO)
+ call strcpy ("?", value, SZ_LINE)
+ call printf (" %-.39s\n")
+ call pargstr (value)
+ }
+end
+
+
+# CCDINST_E -- Edit a single translation entry.
+# This checks for parameters on the command line and if missing queries.
+# The default value may only be changed on the command line.
+
+procedure ccdinst_e (im, prompt, name, key, def, imval, update)
+
+pointer im # Image pointer
+char prompt[ARB] # Parameter prompt name
+char name[SZ_FNAME] # CCDRED name
+char key[SZ_FNAME] # Image header keyword
+char def[SZ_LINE] # Default value
+char imval[SZ_LINE] # Value
+bool update # Update translation file?
+
+bool strne()
+int i, scan(), nscan()
+pointer sp, oldkey, olddef
+
+begin
+ call smark (sp)
+ call salloc (oldkey, SZ_FNAME, TY_CHAR)
+ call salloc (olddef, SZ_LINE, TY_CHAR)
+
+ # Get command line values
+ call gargwrd (key, SZ_FNAME)
+ call gargwrd (def, SZ_LINE)
+
+ # Get current values
+ call hdmname (name, Memc[oldkey], SZ_FNAME)
+ call hdmgdef (name, Memc[olddef], SZ_LINE)
+
+ # Query for keyword if needed.
+ i = nscan()
+ if (i < 2) {
+ call printf ("Image keyword for %s (%s): ")
+ call pargstr (prompt)
+ call pargstr (Memc[oldkey])
+ call flush (STDOUT)
+ if (scan() != EOF)
+ call gargwrd (key, SZ_FNAME)
+ if (nscan() == 0)
+ call strcpy (Memc[oldkey], key, SZ_FNAME)
+ }
+ if (i < 3) {
+ #call printf ("Default %s (%s): ")
+ # call pargstr (prompt)
+ # call pargstr (Memc[olddef])
+ #call flush (STDOUT)
+ #if (scan() != EOF)
+ # call gargwrd (def, SZ_LINE)
+ #if (nscan() == 0)
+ call strcpy (Memc[olddef], def, SZ_LINE)
+ }
+
+ # Update only if the new value is different from the old value.
+ if (strne (key, Memc[oldkey])) {
+ call hdmpname (name, key)
+ update = true
+ }
+ if (strne (def, Memc[olddef])) {
+ call hdmpdef (name, def)
+ update = true
+ }
+
+ # Print the revised translation.
+ call ccdinst_p (im, name, key, def, imval)
+ call sfree (sp)
+end
+
+
+# CCDINST_G -- General procedure to prompt for value.
+
+procedure ccdinst_g (prompt, def, val)
+
+char prompt[ARB] # Prompt
+char def[ARB] # Default value
+char val[SZ_LINE] # Value
+
+int scan(), nscan()
+
+begin
+ call printf ("%s (%s): ")
+ call pargstr (prompt)
+ call pargstr (def)
+ call flush (STDOUT)
+ if (scan() != EOF)
+ call gargwrd (val, SZ_FNAME)
+ if (nscan() == 0)
+ call strcpy (def, val, SZ_LINE)
+end
+
+
+define USER_AREA Memc[($1+IMU-1)*SZ_STRUCT + 1]
+
+# CCDINST_I -- Print the user area of the image if it has nonzero length
+# and contains only ASCII values. This is copied from the code for
+# IMHEADER. It differs in including the OBJECT keyword, using a temporary
+# file to page the header, and omitting the leading blanks.
+
+procedure ccdinst_i (im, image)
+
+pointer im # image descriptor
+char image[ARB] # image name
+
+pointer sp, tmp, lbuf, ip
+int in, out, ncols, min_lenuserarea
+int open(), stropen(), getline(), envgeti()
+
+begin
+ call smark (sp)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (lbuf, SZ_LINE, TY_CHAR)
+
+ # Open user area in header.
+ min_lenuserarea = (LEN_IMDES + IM_LENHDRMEM(im) - IMU) * SZ_STRUCT - 1
+ in = stropen (USER_AREA(im), min_lenuserarea, READ_ONLY)
+ ncols = envgeti ("ttyncols")
+
+ # Open temporary output file.
+ call mktemp ("tmp$", Memc[tmp], SZ_FNAME)
+ iferr (out = open (Memc[tmp], NEW_FILE, TEXT_FILE)) {
+ call erract (EA_WARN)
+ call sfree (sp)
+ return
+ }
+
+ # Copy standard header records.
+ call fprintf (out, "OBJECT = '%s'\n")
+ call pargstr (IM_TITLE(im))
+
+ # Copy header records to the output, stripping any trailing
+ # whitespace and clipping at the right margin.
+
+ while (getline (in, Memc[lbuf]) != EOF) {
+ for (ip=lbuf; Memc[ip] != EOS && Memc[ip] != '\n'; ip=ip+1)
+ ;
+ while (ip > lbuf && Memc[ip-1] == ' ')
+ ip = ip - 1
+ if (ip - lbuf > ncols)
+ ip = lbuf + ncols
+ Memc[ip] = '\n'
+ Memc[ip+1] = EOS
+
+ call putline (out, Memc[lbuf])
+ }
+ call putline (out, "\n")
+
+ call close (in)
+ call close (out)
+
+ call pagefile (Memc[tmp], image)
+ call delete (Memc[tmp])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/t_ccdlist.x b/noao/imred/ccdred/src/t_ccdlist.x
new file mode 100644
index 00000000..1b438b27
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdlist.x
@@ -0,0 +1,325 @@
+include <imhdr.h>
+include <error.h>
+include "ccdtypes.h"
+
+define SZ_CCDLINE 80 # Size of line for output
+
+
+# T_CCDLIST -- List CCD image information and processing status.
+#
+# Each input image of the specified image type is listed in a one-line
+# short format, a name-only format, or a longer format. The image name,
+# size, pixel type, image type, subset ID, processing flags, and title
+# are printed on one line. For the long format, details of the processing
+# operations are also printed.
+
+procedure t_ccdlist ()
+
+int list, ccdtype
+bool names, lformat
+pointer sp, image, im
+
+bool clgetb()
+int imtopenp(), imtgetim()
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+
+ # Get the task parameters and open the translation file.
+ list = imtopenp ("images")
+ names = clgetb ("names")
+ lformat = clgetb ("long")
+ call clgstr ("instrument", Memc[image], SZ_FNAME)
+ if (Memc[image] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[image])
+
+ # List each image.
+ while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+ # Map the image and the instrument header translation.
+ # Check the image type.
+ call set_input (Memc[image], im, ccdtype)
+ if (im == NULL)
+ next
+
+ # Select the output format.
+ if (names) {
+ call printf ("%s\n")
+ call pargstr (Memc[image])
+ } else if (lformat) {
+ call shortlist (Memc[image], ccdtype, im)
+ call longlist (im, ccdtype)
+ } else
+ call shortlist (Memc[image], ccdtype, im)
+ call flush (STDOUT)
+
+ call imunmap (im)
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (list)
+ call sfree (sp)
+end
+
+
+# SHORTLIST -- List the one line short format consisting of the image name,
+# image size, pixel type, image type, subset ID, processing flags, and
+# title.
+
+procedure shortlist (image, ccdtype, im)
+
+char image # Image name
+int ccdtype # CCD image type
+pointer im # IMIO pointer
+
+bool ccdflag()
+pointer sp, str, subset
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_CCDLINE, TY_CHAR)
+ call salloc (subset, SZ_CCDLINE, TY_CHAR)
+
+ # Get the image type and subset ID.
+ call ccdtypes (im, Memc[str], SZ_CCDLINE)
+ call ccdsubset (im, Memc[subset], SZ_CCDLINE)
+
+ # List the image name, size, pixel type, image type, and subset.
+ call printf ("%s[%d,%d][%s][%s][%d]")
+ call pargstr (image)
+ call pargi (IM_LEN(im,1))
+ call pargi (IM_LEN(im,2))
+ call pargtype1 (IM_PIXTYPE(im))
+ call pargstr (Memc[str])
+ call pargstr (Memc[subset])
+
+ # Format and list the processing flags.
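+ # One letter per completed step: B=fixpix, O=overscan, T=trim,
+ # Z=zerocor, D=darkcor, F=flatcor, I=illumcor, Q=fringcor.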
+ Memc[str] = EOS
+ if (ccdflag (im, "fixpix"))
+ call strcat ("B", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "overscan"))
+ call strcat ("O", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "trim"))
+ call strcat ("T", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "zerocor"))
+ call strcat ("Z", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "darkcor"))
+ call strcat ("D", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "flatcor"))
+ call strcat ("F", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "illumcor"))
+ call strcat ("I", Memc[str], SZ_CCDLINE)
+ if (ccdflag (im, "fringcor"))
+ call strcat ("Q", Memc[str], SZ_CCDLINE)
+ if (Memc[str] != EOS) {
+ call printf ("[%s]")
+ call pargstr (Memc[str])
+ }
+
+ # List the title.
+ call printf (":%s\n")
+ call pargstr (IM_TITLE(im))
+
+ call sfree (sp)
+end
+
+
+# LONGLIST -- Add the long format listing.
+# List some instrument parameters and information about each processing
+# step indicated by the processing parameters. If the processing step has
+# not been done yet, indicate this and the parameters to be used.
+
+procedure longlist (im, ccdtype)
+
+pointer im # IMIO pointer
+int ccdtype # CCD image type
+
+real rval, hdmgetr()
+pointer sp, instr, outstr
+bool clgetb(), ccdflag(), streq()
+define done_ 99
+
+begin
+ call smark (sp)
+ call salloc (instr, SZ_LINE, TY_CHAR)
+ call salloc (outstr, SZ_LINE, TY_CHAR)
+
+ # List some image parameters.
+ Memc[outstr] = EOS
+ ifnoerr (rval = hdmgetr (im, "exptime")) {
+ call sprintf (Memc[instr], SZ_LINE, " exposure=%d")
+ call pargr (rval)
+ call strcat (Memc[instr], Memc[outstr], SZ_LINE)
+ }
+ ifnoerr (rval = hdmgetr (im, "darktime")) {
+ call sprintf (Memc[instr], SZ_LINE, " darktime=%d")
+ call pargr (rval)
+ call strcat (Memc[instr], Memc[outstr], SZ_LINE)
+ }
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+
+ # List the processing strings.
+ if (ccdflag (im, "fixpix")) {
+ call hdmgstr (im, "fixpix", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("fixpix")) {
+ call clgstr ("fixfile", Memc[outstr], SZ_LINE)
+ if (streq (Memc[outstr], "image"))
+ call hdmgstr (im, "fixfile", Memc[outstr], SZ_LINE)
+ if (Memc[outstr] != EOS) {
+ call printf (" [TO BE DONE] Bad pixel file is %s\n")
+ call pargstr (Memc[outstr])
+ } else
+ call printf (
+ " [TO BE DONE] Bad pixel file needs to be specified\n")
+ }
+
+ if (ccdflag (im, "overscan")) {
+ call hdmgstr (im, "overscan", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("overscan")) {
+ call clgstr ("biassec", Memc[outstr], SZ_LINE)
+ if (streq (Memc[outstr], "image"))
+ call hdmgstr (im, "biassec", Memc[outstr], SZ_LINE)
+ call printf (" [TO BE DONE] Overscan strip is %s\n")
+ call pargstr (Memc[outstr])
+ }
+
+ if (ccdflag (im, "trim")) {
+ call hdmgstr (im, "trim", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("trim")) {
+ call clgstr ("trimsec", Memc[outstr], SZ_LINE)
+ if (streq (Memc[outstr], "image"))
+ call hdmgstr (im, "trimsec", Memc[outstr], SZ_LINE)
+ call printf (" [TO BE DONE] Trim image section is %s\n")
+ call pargstr (Memc[outstr])
+ }
+
+ if (ccdtype == ZERO) {
+ if (ccdflag (im, "readcor")) {
+ call hdmgstr (im, "readcor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("readcor"))
+ call printf (
+ " [TO BE DONE] Convert to readout format\n")
+ goto done_
+ }
+ if (ccdflag (im, "zerocor")) {
+ call hdmgstr (im, "zerocor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("zerocor"))
+ call printf (" [TO BE DONE] Zero level correction\n")
+
+ if (ccdtype == DARK)
+ goto done_
+ if (ccdflag (im, "darkcor")) {
+ call hdmgstr (im, "darkcor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("darkcor"))
+ call printf (" [TO BE DONE] Dark count correction\n")
+
+ if (ccdtype == FLAT) {
+ if (ccdflag (im, "scancor")) {
+ call hdmgstr (im, "scancor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("scancor"))
+ call printf (
+ " [TO BE DONE] Convert to scan format\n")
+ if (ccdflag (im, "skyflat")) {
+ call hdmgstr (im, "skyflat", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ }
+ if (ccdflag (im, "illumflt")) {
+ call hdmgstr (im, "illumflt", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ }
+ goto done_
+ }
+ if (ccdflag (im, "flatcor")) {
+ call hdmgstr (im, "flatcor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("flatcor"))
+ call printf (" [TO BE DONE] Flat field correction\n")
+
+ if (ccdtype == ILLUM) {
+ if (ccdflag (im, "mkillum")) {
+ call hdmgstr (im, "mkillum", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else
+ call printf (
+ " [TO BE DONE] Convert to illumination correction\n")
+ goto done_
+ }
+ if (ccdflag (im, "illumcor")) {
+ call hdmgstr (im, "illumcor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("illumcor"))
+ call printf (" [TO BE DONE] Illumination correction\n")
+
+ if (ccdtype == FRINGE)
+ goto done_
+ if (ccdflag (im, "fringcor")) {
+ call hdmgstr (im, "fringecor", Memc[outstr], SZ_LINE)
+ call printf (" %s\n")
+ call pargstr (Memc[outstr])
+ } else if (clgetb ("fringecor"))
+ call printf (" [TO BE DONE] Fringe correction\n")
+
+done_
+ call sfree (sp)
+end
+
+
+# PARGTYPE1 -- Convert an integer type code into a string, and output the
+# string with PARGSTR to FMTIO. Taken from IMHEADER.
+
+procedure pargtype1 (dtype)
+
+int dtype
+
+begin
+ switch (dtype) {
+ case TY_UBYTE:
+ call pargstr ("ubyte")
+ case TY_BOOL:
+ call pargstr ("bool")
+ case TY_CHAR:
+ call pargstr ("char")
+ case TY_SHORT:
+ call pargstr ("short")
+ case TY_USHORT:
+ call pargstr ("ushort")
+ case TY_INT:
+ call pargstr ("int")
+ case TY_LONG:
+ call pargstr ("long")
+ case TY_REAL:
+ call pargstr ("real")
+ case TY_DOUBLE:
+ call pargstr ("double")
+ case TY_COMPLEX:
+ call pargstr ("complex")
+ case TY_POINTER:
+ call pargstr ("pointer")
+ case TY_STRUCT:
+ call pargstr ("struct")
+ default:
+ call pargstr ("unknown datatype")
+ }
+end
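
For reference, the one-line "short" listing assembled by shortlist() above can be restated as a small Python sketch. This is illustrative only and is not part of the IRAF source: the flag letters mirror the ccdflag() tests in the code, while the sample image name, dimensions, and header values are invented.

    # Illustrative sketch of the CCDLIST short format (not IRAF code).
    FLAGS = [("fixpix", "B"), ("overscan", "O"), ("trim", "T"), ("zerocor", "Z"),
             ("darkcor", "D"), ("flatcor", "F"), ("illumcor", "I"), ("fringcor", "Q")]

    def short_line(name, ncols, nlines, pixtype, imagetype, subset, done, title):
        # 'done' is the set of processing keywords already in the image header.
        flags = "".join(letter for key, letter in FLAGS if key in done)
        line = "%s[%d,%d][%s][%s][%s]" % (name, ncols, nlines, pixtype,
                                          imagetype, subset)
        if flags:
            line += "[%s]" % flags
        return line + ":" + title

    print(short_line("obj012", 352, 512, "short", "object", "V",
                     {"overscan", "trim", "flatcor"}, "N4472 V 300s"))
    # obj012[352,512][short][object][V][OTF]:N4472 V 300s
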
diff --git a/noao/imred/ccdred/src/t_ccdmask.x b/noao/imred/ccdred/src/t_ccdmask.x
new file mode 100644
index 00000000..d5d074cb
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdmask.x
@@ -0,0 +1,384 @@
+include <imhdr.h>
+
+
+define MAXBUF 500000 # Maximum pixel buffer
+
+define PLSIG 30.9 # Low percentile
+define PHSIG 69.1 # High percentile
+
+
+# T_CCDMASK -- Create a bad pixel mask from CCD images.
+# Deviant pixels relative to a local median and sigma are detected and
+# written to a pixel mask file. There is a special algorithm for detecting
+# long column oriented features typical of CCD defects. This task
+# is intended for use on flat fields or, even better, the ratio of
+# two flat fields at different exposure levels.
+
+procedure t_ccdmask ()
+
+pointer image # Input image
+pointer mask # Output mask
+int ncmed, nlmed # Median box size
+int ncsig, nlsig # Sigma box size
+real lsig, hsig # Threshold sigmas
+int	ngood			# Minimum good pixel sequence
+short linterp # Mask value for line interpolation
+short cinterp # Mask value for column interpolation
+short eqinterp # Mask value for equal interpolation
+
+int i, j, c1, c2, c3, c4, nc, nl, ncstep, nc1
+pointer sp, in, out, inbuf, outbuf
+real clgetr()
+int clgeti(), nowhite(), strmatch()
+pointer immap(), imgs2r(), imps2s(), imgl2s(), impl2s()
+errchk	immap, imgs2r, imps2s, imgl2s, impl2s, cm_mask
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (mask, SZ_FNAME, TY_CHAR)
+
+ # Get parameters.
+ call clgstr ("image", Memc[image], SZ_FNAME)
+ call clgstr ("mask", Memc[mask], SZ_FNAME)
+ ncmed = clgeti ("ncmed")
+ nlmed = clgeti ("nlmed")
+ ncsig = clgeti ("ncsig")
+ nlsig = clgeti ("nlsig")
+ lsig = clgetr ("lsigma")
+ hsig = clgetr ("hsigma")
+ ngood = clgeti ("ngood")
+ linterp = clgeti ("linterp")
+ cinterp = clgeti ("cinterp")
+ eqinterp = clgeti ("eqinterp")
+
+ # Force a pixel list format.
+ i = nowhite (Memc[mask], Memc[mask], SZ_FNAME)
+ if (strmatch (Memc[mask], ".pl$") == 0)
+ call strcat (".pl", Memc[mask], SZ_FNAME)
+
+ # Map the input and output images.
+ in = immap (Memc[image], READ_ONLY, 0)
+ out = immap (Memc[mask], NEW_COPY, in)
+
+ # Go through the input in large blocks of columns. If the
+ # block is smaller than the whole image overlap the blocks
+ # so the median only has boundaries at the ends of the image.
+ # Set the mask values based on the distances to the nearest
+ # good pixels.
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ ncstep = max (1, MAXBUF / nl - ncmed)
+
+ outbuf = NULL
+ do i = 1, nc, ncstep {
+ c1 = i
+ c2 = min (nc, i + ncstep - 1)
+ c3 = max (1, c1 - ncmed / 2)
+ c4 = min (nc, c2 + ncmed / 2)
+ nc1 = c4 - c3 + 1
+ inbuf = imgs2r (in, c3, c4, 1, nl)
+ if (outbuf == NULL)
+ call malloc (outbuf, nc1*nl, TY_SHORT)
+ else
+ call realloc (outbuf, nc1*nl, TY_SHORT)
+	    call aclrs (Mems[outbuf], nc1*nl)
+ call cm_mask (Memr[inbuf], Mems[outbuf], nc1, nl, c1-c3+1,
+ c2-c3+1, ncmed, nlmed, ncsig, nlsig, lsig, hsig, ngood)
+ call cm_interp (Mems[outbuf], nc1, nl, c1-c3+1, c2-c3+1, nc,
+ linterp, cinterp, eqinterp)
+ do j = 1, nl
+ call amovs (Mems[outbuf+(j-1)*nc1+c1-c3],
+ Mems[imps2s(out,c1,c2,j,j)], c2-c1+1)
+ }
+ call mfree (outbuf, TY_SHORT)
+
+ call imunmap (out)
+ call imunmap (in)
+
+ # If the image was searched in blocks we need another pass to find
+ # the lengths of bad pixel regions along lines since they may
+ # span the block edges. Previously the mask values were set
+ # to the column lengths so in this pass we can just look at
+ # whole lines sequentially.
+
+ if (nc1 != nc) {
+ out = immap (Memc[mask], READ_WRITE, 0)
+ do i = 1, nl {
+ inbuf = imgl2s (out, i)
+ outbuf = impl2s (out, i)
+ call cm_interp1 (Mems[inbuf], Mems[outbuf], nc, nl,
+ linterp, cinterp, eqinterp)
+ }
+ call imunmap (out)
+ }
+
+ call sfree (sp)
+end
+
+
+# CM_MASK -- Compute the mask image.
+# A local background is computed using moving box medians to avoid
+# contaminating bad pixels. The local sigma is computed in blocks (not in a
+# moving box, for efficiency) by using a percentile point of the sorted
+# pixel values to estimate the width of the distribution uncontaminated by
+# bad pixels. Once the background and sigma are known, deviant pixels are
+# found by using sigma threshold factors. Sums of pixels along columns are
+# checked at various scales, from single pixels to whole columns, with the
+# sigma level set appropriately. This provides sensitivity to weaker column
+# features such as CCD traps.
+
+procedure cm_mask (data, bp, nc, nl, nc1, nc2, ncmed, nlmed, ncsig, nlsig,
+ lsig, hsig, ngood)
+
+real data[nc,nl] #I Pixel array
+short bp[nc,nl] #U Bad pixel array (0=good, 1=bad)
+int nc, nl #I Number of columns and lines
+int nc1, nc2 #I Columns to compute
+int ncmed, nlmed #I Median box size
+int ncsig, nlsig #I Sigma box size
+real lsig, hsig #I Threshold sigmas
+int ngood #I Minimum good pixel sequence
+
+int i, j, k, l, m, nsum, plsig, phsig, jsig
+real back, sigma, sum1, sum2, low, high, amedr()
+pointer sp, bkg, sig, work, bp1, ptr
+
+begin
+ call smark (sp)
+ call salloc (bkg, nl, TY_REAL)
+ call salloc (sig, nl/nlsig, TY_REAL)
+ call salloc (work, max (ncsig*nlsig, ncmed*nlmed), TY_REAL)
+ call salloc (bp1, nl, TY_SHORT)
+
+ bkg = bkg - 1
+ sig = sig - 1
+
+ i = nlsig * ncsig
+ plsig = nint (PLSIG*i/100.-1)
+ phsig = nint (PHSIG*i/100.-1)
+
+ do i = nc1, nc2 {
+
+ # Compute median background. This is a moving median.
+ l = min (nc, i+ncmed/2)
+ l = max (1, l-ncmed+1)
+ do j = 1, nl {
+ k = min (nl, j+nlmed/2)
+ k = max (1, k-nlmed+1)
+ ptr = work
+ do m = k, k+nlmed-1 {
+ call amovr (data[l,m], Memr[ptr], ncmed)
+ ptr = ptr + ncmed
+ }
+ back = amedr (Memr[work], ncmed * nlmed)
+ Memr[bkg+j] = back
+ }
+
+ # Compute sigmas from percentiles. This is done in blocks.
+ if (mod (i-nc1, ncsig) == 0 && i<nc-ncsig+1) {
+ do j = 1, nl-nlsig+1, nlsig {
+ ptr = work
+ do k = j, j+nlsig-1 {
+ call amovr (data[i,k], Memr[ptr], ncsig)
+ ptr = ptr + ncsig
+ }
+ call asrtr (Memr[work], Memr[work], ncsig*nlsig)
+ sigma = Memr[work+phsig] - Memr[work+plsig]
+ jsig = (j+nlsig-1) / nlsig
+ Memr[sig+jsig] = sigma**2
+ }
+ }
+
+ # Single pixel iterative rejection.
+ k = 0
+ do j = 1, nl {
+ if (bp[i,j] == 1)
+ k = k + 1
+ else {
+ jsig = min ((j+nlsig-1)/nlsig, nl/nlsig)
+ back = Memr[bkg+j]
+ sigma = sqrt (Memr[sig+jsig])
+ low = back - lsig * sigma
+ high = back + hsig * sigma
+ if (data[i,j] < low || data[i,j] > high) {
+ bp[i,j] = 1
+ k = k + 1
+ }
+ }
+ }
+ if (k == nl)
+ next
+
+ # Reject over column sums at various scales.
+ # Ignore previously rejected pixels.
+
+ l = 2
+ while (l <= nl) {
+ do j = 1, nl
+ Mems[bp1+j-1] = bp[i,j]
+ sum1 = 0
+ sum2 = 0
+ nsum = 0
+ k = 1
+ do j = k, l-1 {
+ if (bp[i,j] == 1)
+ next
+ jsig = min ((j+nlsig-1)/nlsig, nl/nlsig)
+ sum1 = sum1 + data[i,j] - Memr[bkg+j]
+ sum2 = sum2 + Memr[sig+jsig]
+ nsum = nsum + 1
+ }
+ do j = l, nl {
+ if (bp[i,j] == 0) {
+ jsig = min ((j+nlsig-1)/nlsig, nl/nlsig)
+ sum1 = sum1 + data[i,j] - Memr[bkg+j]
+ sum2 = sum2 + Memr[sig+jsig]
+ nsum = nsum + 1
+ }
+ if (nsum > 0) {
+ sigma = sqrt (sum2)
+ low = -lsig * sigma
+ high = hsig * sigma
+ if (sum1 < low || sum1 > high)
+ do m = k, j
+ bp[i,m] = 1
+ }
+ if (Mems[bp1+k-1] == 0) {
+ jsig = min ((k+nlsig-1)/nlsig, nl/nlsig)
+ sum1 = sum1 - data[i,k] + Memr[bkg+k]
+ sum2 = sum2 - Memr[sig+jsig]
+ nsum = nsum - 1
+ }
+ k = k + 1
+ }
+
+ if (l == nl)
+ break
+ else if (l < 10)
+ l = l + 1
+ else
+ l = min (l * 2, nl)
+ }
+
+ # Coalesce small good regions along columns.
+ if (ngood > 1) {
+ for (k=1; k<=nl && bp[i,k]!=0; k=k+1)
+ ;
+ while (k < nl) {
+ for (l=k+1; l<=nl && bp[i,l]==0; l=l+1)
+ ;
+ if (l-k < ngood)
+ do j = k, l-1
+ bp[i,j] = 1
+ for (k=l+1; k<=nl && bp[i,k]!=0; k=k+1)
+ ;
+ }
+ }
+ }
+
+ call sfree (sp)
+end
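
As a rough illustration of the single-pixel rejection step above, the following NumPy/SciPy sketch (not part of the IRAF source) computes a moving-median background, estimates the sigma from the 30.9/69.1 percentile spread of the residuals, and flags pixels outside the thresholds. The IRAF routine estimates the sigma in blocks of the raw pixel values and also tests column sums at multiple scales; both refinements are omitted here, and the parameter values are assumptions.

    # Illustrative sketch only -- a simplified, global version of the
    # single-pixel rejection done in CM_MASK above.
    import numpy as np
    from scipy.ndimage import median_filter

    def flag_deviant_pixels(data, ncmed=7, nlmed=7, lsigma=6.0, hsigma=6.0):
        back = median_filter(data, size=(nlmed, ncmed))   # moving-median background
        resid = data - back
        plo, phi = np.percentile(resid, [30.9, 69.1])     # PLSIG / PHSIG points
        sigma = phi - plo                                 # width of the core distribution
        bad = (resid < -lsigma * sigma) | (resid > hsigma * sigma)
        return bad.astype(np.int16)                       # 1 = bad, 0 = good

    mask = flag_deviant_pixels(np.random.normal(1.0, 0.01, (64, 64)))
    print(mask.sum(), "pixels flagged")
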
+
+
+# CM_INTERP -- Compute the lengths of bad regions along columns and lines.
+# If only part of the image is buffered, set the pixel mask values
+# to the column lengths so that a later pass can compare these values against
+# the full line lengths. If the whole image is buffered then both
+# the column and line lengths can be determined and the mask values
+# set based on these lengths.
+
+procedure cm_interp (bp, nc, nl, nc1, nc2, ncimage, linterp, cinterp, eqinterp)
+
+short bp[nc,nl] #U Bad pixel array
+int nc, nl #I Number of columns and lines
+int nc1, nc2 #I Columns to compute
+int ncimage #I Number of columns in image
+short linterp #I Mask value for line interpolation
+short cinterp #I Mask value for column interpolation
+short eqinterp #I Mask value for equal interpolation
+
+int i, j, k, l, m, n
+
+begin
+ do i = nc1, nc2 {
+
+ # Set values to column length.
+ for (k=1; k<=nl && bp[i,k]==0; k=k+1)
+ ;
+ while (k <= nl) {
+ for (l=k+1; l<=nl && bp[i,l]!=0; l=l+1)
+ ;
+ m = l - k
+ do j = k, l-1
+ bp[i,j] = m
+ for (k=l+1; k<=nl && bp[i,k]==0; k=k+1)
+ ;
+ }
+ }
+
+ # Set values to minimum axis length for interpolation.
+ if (nc == ncimage) {
+ do j = 1, nl {
+ for (k=1; k<=nc && bp[k,j]==0; k=k+1)
+ ;
+ while (k <= nc) {
+ for (l=k+1; l<=nc && bp[l,j]!=0; l=l+1)
+ ;
+ m = l - k
+ do i = k, l-1 {
+ n = bp[i,j]
+ if (n > m || n == nl)
+ bp[i,j] = linterp
+ else if (n < m)
+ bp[i,j] = cinterp
+ else
+ bp[i,j] = eqinterp
+ }
+ for (k=l+1; k<=nc && bp[k,j]==0; k=k+1)
+ ;
+ }
+ }
+ }
+end
+
+
+# CM_INTERP1 -- Set the mask values based on the column and line lengths
+# of the bad pixel regions. If this routine is called the pixel mask
+# is open READ/WRITE and the pixel mask values have been previously set
+# to the column lengths. So here we just need to compute the line
+# lengths across the entire image and reset the mask values to the
+# appropriate interpolation mask code.
+
+procedure cm_interp1 (in, out, nc, nl, linterp, cinterp, eqinterp)
+
+short in[nc] #I Bad pixel array with column length codes
+short out[nc] #O Bad pixel array with interp axis codes
+int nc, nl #I Image dimensions
+short linterp #I Mask value for line interpolation
+short cinterp #I Mask value for column interpolation
+short eqinterp #I Mask value for equal interpolation
+
+int i, j, l, m, n
+
+begin
+ for (j=1; j<=nc && in[j]==0; j=j+1)
+ out[j] = 0
+ while (j < nc) {
+ for (l=j+1; l<=nc && in[l]!=0; l=l+1)
+ ;
+ m = l - j
+ do i = j, l-1 {
+ n = in[i]
+ if (n > m || n == nl)
+ out[i] = linterp
+ else if (n < m)
+ out[i] = cinterp
+ else
+ out[i] = eqinterp
+ }
+ for (j=l+1; j<=nc && in[j]==0; j=j+1)
+ out[j] = 0
+ }
+end
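
The interpolation-axis bookkeeping in CM_INTERP and CM_INTERP1 reduces to comparing, for each bad pixel, the length of the bad run through it along the column with the length along the line, and marking the shorter direction for later interpolation. The Python sketch below, illustrative only and not part of the IRAF source, shows that comparison; the mask code values used are placeholders, not the task defaults.

    # Illustrative sketch only: run lengths and the interpolation-axis choice
    # used by CM_INTERP/CM_INTERP1 above.
    def run_lengths(flags):
        # For a 1-D list of 0/1 flags, return the length of the contiguous
        # run of 1s containing each position (0 where the pixel is good).
        out, i, n = [0] * len(flags), 0, len(flags)
        while i < n:
            if flags[i]:
                j = i
                while j < n and flags[j]:
                    j += 1
                for k in range(i, j):
                    out[k] = j - i
                i = j
            else:
                i += 1
        return out

    def interp_code(col_len, line_len, linterp=1, cinterp=2, eqinterp=3):
        if col_len > line_len:
            return linterp      # run is shorter along the line: interpolate along lines
        if col_len < line_len:
            return cinterp      # run is shorter along the column: interpolate along columns
        return eqinterp

    col = run_lengths([0, 1, 1, 1, 0])
    print(col, interp_code(col[2], 1))    # [0, 3, 3, 3, 0] 1
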
diff --git a/noao/imred/ccdred/src/t_ccdproc.x b/noao/imred/ccdred/src/t_ccdproc.x
new file mode 100644
index 00000000..31e9ae6e
--- /dev/null
+++ b/noao/imred/ccdred/src/t_ccdproc.x
@@ -0,0 +1,176 @@
+include <imhdr.h>
+include <error.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+define CACHEUNIT 1000000. # Units of max_cache parameter
+
+# T_CCDPROC -- Process CCD images
+#
+# This is the main procedure for processing CCD images. The images are
+# corrected for bad pixels, overscan levels, zero levels, dark counts,
+# flat field response, illumination errors, and fringe response. They
+# may also be trimmed. The input is a list of images to be processed.
+# Each image must match any image type requested. The checking of
+# whether to apply each correction, getting the required parameters, and
+# logging the operations is left to separate procedures, one for each
+# correction. The actual processing is done by a specialized procedure
+# designed to be very efficient. These procedures may also process
+# calibration images if necessary. There are two data type paths; one
+# for short pixel types and one for all other pixel types (usually
+# real).
+
+procedure t_ccdproc ()
+
+int list # List of CCD images to process
+int	outlist			# List of output images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+int max_cache # Maximum image cache size
+
+bool clgetb()
+real clgetr()
+int imtopenp(), imtgetim(), imtlen()
+pointer sp, input, output, str, in, out, ccd
+errchk set_input, set_output, ccddelete, cal_open
+errchk set_fixpix, set_zero, set_dark, set_flat, set_illum, set_fringe
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get input and output lists and check they make sense.
+ list = imtopenp ("images")
+ outlist = imtopenp ("output")
+ if (imtlen (outlist) > 0 && imtlen (outlist) != imtlen (list))
+ call error (1, "Input and output lists do not match")
+
+ # Get instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ if (Memc[input] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (list)
+ if (imtlen (list) < 3)
+ max_cache = 0.
+ else
+ max_cache = CACHEUNIT * clgetr ("max_cache")
+ call ccd_open (max_cache)
+
+ # Process each image.
+ while (imtgetim (list, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s:\n")
+ call pargstr (Memc[input])
+ }
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ # Set output image.
+ if (imtlen (outlist) == 0)
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ else if (imtgetim (outlist, Memc[output], SZ_FNAME) == EOF)
+ call error (1, "Premature end of output list")
+ call set_output (in, out, Memc[output])
+
+ # Set processing parameters applicable to all images.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+
+ # Set processing parameters for the standard CCD image types.
+ switch (ccdtype) {
+ case ZERO:
+ case DARK:
+ call set_zero (ccd)
+ case FLAT:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ CORS(ccd, MINREP) = YES
+ case ILLUM:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ case OBJECT, COMP:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ iferr {
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ } then
+ call erract (EA_WARN)
+ default:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ iferr {
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ } then
+ call erract (EA_WARN)
+ CORS(ccd, FINDMEAN) = YES
+ }
+
+ # Do the processing if the COR flag is set.
+
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ call imunmap (in)
+ call imunmap (out)
+ if (imtlen (outlist) == 0) {
+ # Replace the input image by the corrected image.
+ iferr (call ccddelete (Memc[input])) {
+ call imdelete (Memc[output])
+ call error (1,
+ "Can't delete or make backup of original image")
+ }
+ call imrename (Memc[output], Memc[input])
+ }
+ } else {
+ # Delete the output image.
+ call imunmap (in)
+ iferr (call imunmap (out))
+ ;
+ iferr (call imdelete (Memc[output]))
+ ;
+ }
+ call free_proc (ccd)
+
+ # Do special processing on certain image types.
+ if (imtlen (outlist) == 0) {
+ switch (ccdtype) {
+ case ZERO:
+ call readcor (Memc[input])
+ case FLAT:
+ call ccdmean (Memc[input])
+ }
+ } else {
+ switch (ccdtype) {
+ case ZERO:
+ call readcor (Memc[output])
+ case FLAT:
+ call ccdmean (Memc[output])
+ }
+ }
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (list)
+ call imtclose (outlist)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
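
The switch statement in T_CCDPROC can be read as a small dispatch table: every image gets the section/trim/fixpix/overscan handling, and the calibration corrections stacked on top depend on the CCD type. The Python summary below is illustrative only; the type names follow ccdtypes.h, and everything else is a paraphrase of the code above rather than IRAF source.

    # Illustrative summary only -- not IRAF code.
    COMMON = ["sections", "trim", "fixpix", "overscan"]

    EXTRA = {
        "zero":   [],
        "dark":   ["zerocor"],
        "flat":   ["zerocor", "darkcor"],          # plus FINDMEAN/MINREP bookkeeping
        "illum":  ["zerocor", "darkcor", "flatcor"],
        "object": ["zerocor", "darkcor", "flatcor", "illumcor", "fringecor"],
        "comp":   ["zerocor", "darkcor", "flatcor", "illumcor", "fringecor"],
    }

    def corrections(ccdtype):
        # Unknown types get the full object-like list, as in the default branch.
        return COMMON + EXTRA.get(ccdtype, EXTRA["object"])

    print(corrections("flat"))
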
diff --git a/noao/imred/ccdred/src/t_combine.x b/noao/imred/ccdred/src/t_combine.x
new file mode 100644
index 00000000..66c14089
--- /dev/null
+++ b/noao/imred/ccdred/src/t_combine.x
@@ -0,0 +1,653 @@
+include <imhdr.h>
+include <error.h>
+include <syserr.h>
+include <mach.h>
+include "ccdred.h"
+include "icombine.h"
+
+
+# T_COMBINE -- Combine CCD images.
+# This task is a copy of IMAGES.IMCOMBINE except that it recognizes the
+# CCD types and can group images by AMP and SUBSET. It also uses header
+# keyword translation for the exposure times.
+
+procedure t_combine ()
+
+pointer images # Images
+pointer extns # Image extensions for each subset
+pointer subsets # Subsets
+pointer nimages # Number of images in each subset
+int nsubsets # Number of subsets
+pointer outroot # Output root image name
+pointer plroot # Output pixel list root name
+pointer sigroot # Output root sigma image name
+pointer logfile # Log filename
+bool delete # Delete input images?
+
+int i
+pointer sp, output, plfile, sigma
+
+bool clgetb()
+int clgeti(), clgwrd()
+real clgetr()
+
+include "icombine.com"
+
+begin
+ call smark (sp)
+ call salloc (outroot, SZ_FNAME, TY_CHAR)
+ call salloc (plroot, SZ_FNAME, TY_CHAR)
+ call salloc (sigroot, SZ_FNAME, TY_CHAR)
+ call salloc (logfile, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (plfile, SZ_FNAME, TY_CHAR)
+ call salloc (sigma, SZ_FNAME, TY_CHAR)
+ call salloc (gain, SZ_FNAME, TY_CHAR)
+ call salloc (snoise, SZ_FNAME, TY_CHAR)
+ call salloc (rdnoise, SZ_FNAME, TY_CHAR)
+ call salloc (logfile, SZ_FNAME, TY_CHAR)
+
+ # Open the header translation which is needed to determine the
+ # amps, subsets and ccdtypes. Get the input images.
+	# There must be at least one image in order to continue.
+
+ call clgstr ("instrument", Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[output])
+ call cmb_images (images, extns, subsets, nimages, nsubsets)
+ if (nsubsets == 0)
+ call error (0, "No images to combine")
+
+ # Get task parameters. Some additional parameters are obtained later.
+ call clgstr ("output", Memc[outroot], SZ_FNAME)
+ call clgstr ("plfile", Memc[plroot], SZ_FNAME)
+ call clgstr ("sigma", Memc[sigroot], SZ_FNAME)
+ call clgstr ("logfile", Memc[logfile], SZ_FNAME)
+ call xt_stripwhite (Memc[outroot])
+ call xt_stripwhite (Memc[sigroot])
+ call xt_stripwhite (Memc[logfile])
+
+ project = clgetb ("project")
+ combine = clgwrd ("combine", Memc[output], SZ_FNAME, COMBINE)
+ reject = clgwrd ("reject", Memc[output], SZ_FNAME, REJECT)
+ blank = clgetr ("blank")
+ call clgstr ("gain", Memc[gain], SZ_FNAME)
+ call clgstr ("rdnoise", Memc[rdnoise], SZ_FNAME)
+ call clgstr ("snoise", Memc[snoise], SZ_FNAME)
+ lthresh = clgetr ("lthreshold")
+ hthresh = clgetr ("hthreshold")
+ lsigma = clgetr ("lsigma")
+ hsigma = clgetr ("hsigma")
+ grow = clgeti ("grow")
+ mclip = clgetb ("mclip")
+ sigscale = clgetr ("sigscale")
+ delete = clgetb ("delete")
+
+ # Check parameters, map INDEFs, and set threshold flag
+ if (IS_INDEFR (blank))
+ blank = 0.
+ if (IS_INDEFR (lsigma))
+ lsigma = MAX_REAL
+ if (IS_INDEFR (hsigma))
+ hsigma = MAX_REAL
+ if (IS_INDEFI (grow))
+ grow = 0
+ if (IS_INDEF (sigscale))
+ sigscale = 0.
+
+ if (IS_INDEF(lthresh) && IS_INDEF(hthresh))
+ dothresh = false
+ else {
+ dothresh = true
+ if (IS_INDEF(lthresh))
+ lthresh = -MAX_REAL
+ if (IS_INDEF(hthresh))
+ hthresh = MAX_REAL
+ }
+
+ # This is here for backward compatibility.
+ if (clgetb ("clobber"))
+ call error (1, "Clobber option is no longer supported")
+
+ # Combine each input subset.
+ do i = 1, nsubsets {
+ # Set the output, pl, and sigma image names with subset extension.
+
+ call strcpy (Memc[outroot], Memc[output], SZ_FNAME)
+ call sprintf (Memc[output], SZ_FNAME, "%s%s")
+ call pargstr (Memc[outroot])
+ call pargstr (Memc[Memi[extns+i-1]])
+
+ call strcpy (Memc[plroot], Memc[plfile], SZ_FNAME)
+ if (Memc[plfile] != EOS) {
+ call sprintf (Memc[plfile], SZ_FNAME, "%s%s")
+ call pargstr (Memc[plroot])
+ # Use this if we can append pl files.
+ #call pargstr (Memc[Memi[extns+i-1]])
+ call pargstr (Memc[Memi[subsets+i-1]])
+ }
+
+ call strcpy (Memc[sigroot], Memc[sigma], SZ_FNAME)
+ if (Memc[sigma] != EOS) {
+ call sprintf (Memc[sigma], SZ_FNAME, "%s%s")
+ call pargstr (Memc[sigroot])
+ call pargstr (Memc[Memi[extns+i-1]])
+ }
+
+ # Combine all images from the (subset) list.
+ iferr (call icombine (Memc[Memi[images+i-1]], Memi[nimages+i-1],
+ Memc[output], Memc[plfile], Memc[sigma],
+ Memc[logfile], NO, delete)) {
+ call erract (EA_WARN)
+ }
+ call mfree (Memi[images+i-1], TY_CHAR)
+ call mfree (Memi[extns+i-1], TY_CHAR)
+ call mfree (Memi[subsets+i-1], TY_CHAR)
+ }
+
+ # Finish up.
+ call mfree (images, TY_POINTER)
+ call mfree (extns, TY_POINTER)
+ call mfree (subsets, TY_POINTER)
+ call mfree (nimages, TY_INT)
+ call hdmclose ()
+ call sfree (sp)
+end
+
+
+# CMB_IMAGES -- Get images from a list of images.
+# The images are filtered by ccdtype and sorted by amplifier and subset.
+# The allocated lists must be freed by the caller.
+
+procedure cmb_images (images, extns, subsets, nimages, nsubsets)
+
+pointer images # Pointer to lists of subsets (allocated)
+pointer extns # Image extensions for each subset (allocated)
+pointer subsets # Subset names (allocated)
+pointer nimages # Number of images in subset (allocated)
+int nsubsets # Number of subsets
+
+int list # List of input images
+bool doamps # Divide input into subsets by amplifier?
+bool dosubsets # Divide input into subsets by subset parameter?
+bool extend # Add extensions to output image names?
+
+int i, nimage, ccdtype
+pointer sp, type, image, extn, subset, str, ptr, im
+#int imtopenp(), imtlen(), imtgetim(), ccdtypecl(), ccdtypes()
+int imtopenp(), imtlen(), imtgetim()
+pointer immap()
+bool clgetb(), streq()
+
+begin
+ # Get the input image list and check that there is at least one image.
+ nsubsets = 0
+ list = imtopenp ("input")
+ nimage = imtlen (list)
+ if (nimage == 0) {
+ call imtclose (list)
+ return
+ }
+
+ # Determine whether to divide images into subsets and append extensions.
+ #doamps = clgetb ("amps")
+ doamps = false
+ dosubsets = clgetb ("subsets")
+ #extend = clgetb ("extensions")
+ extend = true
+
+ call smark (sp)
+ call salloc (type, SZ_FNAME, TY_CHAR)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (extn, SZ_FNAME, TY_CHAR)
+ call salloc (subset, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_FNAME, TY_CHAR)
+
+ # Go through the input list and eliminate images not satisfying the
+ # CCD image type. Separate into subsets if desired. Create image
+ # and subset lists.
+
+ #ccdtype = ccdtypecl ("ccdtype", Memc[type], SZ_FNAME)
+ ccdtype = 0
+ call clgstr ("ccdtype", Memc[type], SZ_FNAME)
+ call xt_stripwhite (Memc[type])
+
+ while (imtgetim (list, Memc[image], SZ_FNAME)!=EOF) {
+ iferr (im = immap (Memc[image], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+ #ccdtype = ccdtypes (im, Memc[str], SZ_FNAME)
+ call ccdtypes (im, Memc[str], SZ_FNAME)
+ if (Memc[type] != EOS && !streq (Memc[str], Memc[type]))
+ next
+
+ Memc[extn] = EOS
+ Memc[subset] = EOS
+ if (doamps) {
+ #call ccdamp (im, Memc[str], SZ_FNAME)
+ Memc[str] = EOS
+ if (extend)
+ call strcat (Memc[str], Memc[extn], SZ_FNAME)
+ call strcat (Memc[str], Memc[subset], SZ_FNAME)
+ }
+ if (dosubsets) {
+ call ccdsubset (im, Memc[str], SZ_FNAME)
+ call strcat (Memc[str], Memc[extn], SZ_FNAME)
+ call strcat (Memc[str], Memc[subset], SZ_FNAME)
+ }
+ for (i=1; i <= nsubsets; i=i+1)
+ if (streq (Memc[subset], Memc[Memi[subsets+i-1]]))
+ break
+
+ if (i > nsubsets) {
+ if (nsubsets == 0) {
+ call malloc (images, nimage, TY_POINTER)
+ call malloc (extns, nimage, TY_POINTER)
+ call malloc (subsets, nimage, TY_POINTER)
+ call malloc (nimages, nimage, TY_INT)
+ } else if (mod (nsubsets, nimage) == 0) {
+ call realloc (images, nsubsets+nimage, TY_POINTER)
+ call realloc (extns, nsubsets+nimage, TY_POINTER)
+ call realloc (subsets, nsubsets+nimage, TY_POINTER)
+ call realloc (nimages, nsubsets+nimage, TY_INT)
+ }
+ nsubsets = i
+ call malloc (ptr, SZ_FNAME, TY_CHAR)
+ call strcpy (Memc[image], Memc[ptr], SZ_FNAME-1)
+ Memi[images+i-1] = ptr
+ call malloc (ptr, SZ_FNAME, TY_CHAR)
+ call strcpy (Memc[extn], Memc[ptr], SZ_FNAME)
+ Memi[extns+i-1] = ptr
+ call malloc (ptr, SZ_FNAME, TY_CHAR)
+ call strcpy (Memc[subset], Memc[ptr], SZ_FNAME)
+ Memi[subsets+i-1] = ptr
+ Memi[nimages+i-1] = 1
+ } else {
+ ptr = Memi[images+i-1]
+ nimage = Memi[nimages+i-1] + 1
+ call realloc (ptr, nimage * SZ_FNAME, TY_CHAR)
+ Memi[images+i-1] = ptr
+ Memi[nimages+i-1] = nimage
+ ptr = ptr + (nimage - 1) * SZ_FNAME
+ call strcpy (Memc[image], Memc[ptr], SZ_FNAME-1)
+ }
+
+ call imunmap (im)
+ }
+ call realloc (images, nsubsets, TY_POINTER)
+ call realloc (extns, nsubsets, TY_POINTER)
+ call realloc (subsets, nsubsets, TY_POINTER)
+ call realloc (nimages, nsubsets, TY_INT)
+ call imtclose (list)
+ call sfree (sp)
+end
+
+
+# ICOMBINE -- Combine the CCD images in a list.
+# This procedure maps the images, sets the output dimensions and datatype,
+# opens the logfile, and sets IMIO parameters. It attempts to adjust
+# buffer sizes and memory requirements for maximum efficiency.
+
+procedure icombine (images, nims, output, plfile, sigma, logfile, stack,
+ delete)
+
+char images[SZ_FNAME-1, nims] # Input images
+int nims # Number of images in list
+char output[ARB] # Output image
+char plfile[ARB] # Pixel list file
+char sigma[ARB] # Output sigma image
+char logfile[ARB] # Log filename
+int stack # Stack input images?
+bool delete # Delete input images?
+
+char errstr[SZ_LINE]
+int i, j, nimages, intype, bufsize, maxsize, memory, oldsize, stack1, err
+pointer sp, sp1, in, out[3], offsets, temp, key, tmp
+
+int getdatatype()
+real clgetr()
+char clgetc()
+int clgeti(), begmem(), errget(), open(), ty_max(), sizeof()
+pointer immap(), ic_plfile()
+errchk ic_imstack, immap, ic_plfile, ic_setout, ccddelete
+
+include "icombine.com"
+
+define retry_ 98
+define done_ 99
+
+begin
+ call smark (sp)
+
+ # Set number of images to combine.
+ if (project) {
+ if (nims > 1) {
+ call sfree (sp)
+ call error (1, "Cannot project combine a list of images")
+ }
+ tmp = immap (images[1,1], READ_ONLY, 0); out[1] = tmp
+ if (IM_NDIM(out[1]) == 1)
+ call error (1, "Can't project one dimensional images")
+ nimages = IM_LEN(out[1],IM_NDIM(out[1]))
+ call imunmap (out[1])
+ } else
+ nimages = nims
+
+ # Convert the nkeep parameter if needed.
+ # Convert the pclip parameter to a number of pixels rather than
+ # a fraction. This number stays constant even if pixels are
+	# rejected. The numbers of low and high pixels rejected, however,
+	# are converted to fractions of the valid pixels.
+
+ nkeep = clgeti ("nkeep")
+ if (nkeep < 0)
+ nkeep = max (0, nimages + nkeep)
+
+ if (reject == PCLIP) {
+ pclip = clgetr ("pclip")
+ if (pclip == 0.)
+ call error (1, "Pclip parameter may not be zero")
+ if (IS_INDEFR (pclip))
+ pclip = -0.5
+
+ i = nimages / 2.
+ if (abs (pclip) < 1.)
+ pclip = pclip * i
+ if (pclip < 0.)
+ pclip = min (-1, max (-i, int (pclip)))
+ else
+ pclip = max (1, min (i, int (pclip)))
+ }
+
+ if (reject == MINMAX) {
+ flow = clgetr ("nlow")
+ fhigh = clgetr ("nhigh")
+ if (IS_INDEFR (flow))
+ flow = 0
+ if (IS_INDEFR (fhigh))
+ fhigh = 0
+
+ if (flow >= 1)
+ flow = flow / nimages
+ if (fhigh >= 1)
+ fhigh = fhigh / nimages
+ i = flow * nimages
+ j = fhigh * nimages
+ if (i + j == 0)
+ reject = NONE
+ else if (i + j >= nimages) {
+ call eprintf ("Bad minmax rejection parameters\n")
+ call sfree (sp)
+ return
+ }
+ }
+
+ # Map the input images.
+ bufsize = 0
+ stack1 = stack
+retry_
+ iferr {
+ out[1] = NULL
+ out[2] = NULL
+ out[3] = NULL
+ icm = NULL
+ logfd = NULL
+
+ call smark (sp1)
+ if (stack1 == YES) {
+ call salloc (temp, SZ_FNAME, TY_CHAR)
+ call mktemp ("tmp", Memc[temp], SZ_FNAME)
+ call ic_imstack (images, nimages, Memc[temp])
+ project = true
+ }
+
+ # Map the input image(s).
+ if (project) {
+ if (stack1 == YES) {
+ tmp = immap (Memc[temp], READ_ONLY, 0); out[1] = tmp
+ } else {
+ tmp = immap (images[1,1], READ_ONLY, 0); out[1] = tmp
+ }
+ nimages = IM_LEN(out[1],IM_NDIM(out[1]))
+ call calloc (in, nimages, TY_POINTER)
+ call amovki (out[1], Memi[in], nimages)
+ } else {
+ call calloc (in, nimages, TY_POINTER)
+ do i = 1, nimages {
+ tmp = immap (images[1,i], READ_ONLY, 0); Memi[in+i-1] = tmp
+ }
+ }
+
+ # Map the output image and set dimensions and offsets.
+ tmp = immap (output, NEW_COPY, Memi[in]); out[1] = tmp
+ if (stack1 == YES) {
+ call salloc (key, SZ_FNAME, TY_CHAR)
+ do i = 1, nimages {
+ call sprintf (Memc[key], SZ_FNAME, "stck%04d")
+ call pargi (i)
+ call imdelf (out[1], Memc[key])
+ }
+ }
+ call salloc (offsets, nimages*IM_NDIM(out[1]), TY_INT)
+ call ic_setout (Memi[in], out, Memi[offsets], nimages)
+
+ # Determine the highest precedence datatype and set output datatype.
+ intype = IM_PIXTYPE(Memi[in])
+ do i = 2, nimages
+ intype = ty_max (intype, IM_PIXTYPE(Memi[in+i-1]))
+ IM_PIXTYPE(out[1]) = getdatatype (clgetc ("outtype"))
+ if (IM_PIXTYPE(out[1]) == ERR)
+ IM_PIXTYPE(out[1]) = intype
+
+ # Open pixel list file if given.
+ if (plfile[1] != EOS) {
+ tmp = ic_plfile (plfile, NEW_COPY, out[1]); out[2] = tmp
+ } else
+ out[2] = NULL
+
+ # Open the sigma image if given.
+ if (sigma[1] != EOS) {
+ tmp = immap (sigma, NEW_COPY, out[1]); out[3] = tmp
+ IM_PIXTYPE(out[3]) = ty_max (TY_REAL, IM_PIXTYPE(out[1]))
+ call sprintf (IM_TITLE(out[3]), SZ_IMTITLE,
+ "Combine sigma images for %s")
+ call pargstr (output)
+ } else
+ out[3] = NULL
+
+	    # This is done here to work around a problem with adding a keyword
+	    # to a NEW_COPY header and then using that header in a NEW_COPY.
+
+ # Open masks.
+ call ic_mopen (Memi[in], out, nimages)
+
+ # Open the log file.
+ logfd = NULL
+ if (logfile[1] != EOS) {
+ iferr (logfd = open (logfile, APPEND, TEXT_FILE)) {
+ logfd = NULL
+ call erract (EA_WARN)
+ }
+ }
+
+ if (bufsize == 0) {
+ # Set initial IMIO buffer size based on the number of images
+ # and maximum amount of working memory available. The buffer
+ # size may be adjusted later if the task runs out of memory.
+ # The FUDGE factor is used to allow for the size of the
+ # program, memory allocator inefficiencies, and any other
+ # memory requirements besides IMIO.
+
+ bufsize = 1
+ do i = 1, IM_NDIM(out[1])
+ bufsize = bufsize * IM_LEN(out[1],i)
+ bufsize = bufsize * sizeof (intype)
+ bufsize = min (bufsize, DEFBUFSIZE)
+ memory = begmem ((nimages + 1) * bufsize, oldsize, maxsize)
+ memory = min (memory, int (FUDGE * maxsize))
+ bufsize = memory / (nimages + 1)
+ }
+
+ # Combine the images. If an out of memory error occurs close all
+ # images and files, divide the IMIO buffer size in half and try
+ # again.
+
+ switch (intype) {
+ case TY_SHORT:
+ call icombines (Memi[in], out, Memi[offsets], nimages,
+ bufsize)
+ default:
+ call icombiner (Memi[in], out, Memi[offsets], nimages,
+ bufsize)
+ }
+ } then {
+ err = errget (errstr, SZ_LINE)
+ if (icm != NULL)
+ call ic_mclose (nimages)
+ if (!project) {
+ do j = 2, nimages
+ if (Memi[in+j-1] != NULL)
+ call imunmap (Memi[in+j-1])
+ }
+ if (out[2] != NULL) {
+ call imunmap (out[2])
+ call imdelete (plfile)
+ }
+ if (out[3] != NULL) {
+ call imunmap (out[3])
+ call imdelete (sigma)
+ }
+ if (out[1] != NULL) {
+ call imunmap (out[1])
+ call imdelete (output)
+ }
+ if (Memi[in] != NULL)
+ call imunmap (Memi[in])
+ if (logfd != NULL)
+ call close (logfd)
+
+ switch (err) {
+ case SYS_MFULL:
+ bufsize = bufsize / 2
+ call sfree (sp1)
+ goto retry_
+ case SYS_FTOOMANYFILES, SYS_IKIOPIX:
+ if (!project) {
+ stack1 = YES
+ call sfree (sp1)
+ goto retry_
+ }
+ if (stack1 == YES)
+ call imdelete (Memc[temp])
+ call fixmem (oldsize)
+ call sfree (sp1)
+ call error (err, errstr)
+ default:
+ if (stack1 == YES)
+ call imdelete (Memc[temp])
+ call fixmem (oldsize)
+ call sfree (sp1)
+ call error (err, errstr)
+ }
+ }
+
+ # Unmap all the images, close the log file, and restore memory.
+ # The input images must be unmapped first to insure that there
+ # is a FD for the output images since the headers are opened to
+ # update them. However, the order of the NEW_COPY pointers must
+ # be preserved; i.e. the output depends on the first input image,
+ # and the extra output images depend on the output image.
+
+ if (!project) {
+ do i = 2, nimages {
+ if (Memi[in+i-1] != NULL) {
+ call imunmap (Memi[in+i-1])
+ if (delete)
+ call ccddelete (images[1,i])
+ }
+ }
+ }
+ if (out[2] != NULL)
+ call imunmap (out[2])
+ if (out[3] != NULL)
+ call imunmap (out[3])
+ if (out[1] != NULL)
+ call imunmap (out[1])
+ if (Memi[in] != NULL)
+ call imunmap (Memi[in])
+ if (stack1 == YES)
+ call imdelete (Memc[temp])
+ if (delete)
+ call ccddelete (images[1,1])
+ if (logfd != NULL)
+ call close (logfd)
+ if (icm != NULL)
+ call ic_mclose (nimages)
+
+ call fixmem (oldsize)
+ call sfree (sp)
+end
+
+
+# TY_MAX -- Return the datatype of highest precedence.
+
+int procedure ty_max (type1, type2)
+
+int type1, type2 # Datatypes
+
+int i, j, type, order[8]
+data order/TY_SHORT,TY_USHORT,TY_INT,TY_LONG,TY_REAL,TY_DOUBLE,TY_COMPLEX,TY_REAL/
+
+begin
+ for (i=1; (i<=7) && (type1!=order[i]); i=i+1)
+ ;
+ for (j=1; (j<=7) && (type2!=order[j]); j=j+1)
+ ;
+ type = order[max(i,j)]
+
+ # Special case of mixing short and unsigned short.
+ if (type == TY_USHORT && type1 != type2)
+ type = TY_INT
+
+ return (type)
+end
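
The precedence rule in TY_MAX can be restated in a few lines of Python. This sketch is illustrative only; it mirrors the data statement above, including the fallback to real for unrecognized types and the promotion of mixed short/ushort to int.

    # Illustrative sketch of the TY_MAX precedence rule (not IRAF code).
    ORDER = ["short", "ushort", "int", "long", "real", "double", "complex"]

    def ty_max(t1, t2):
        # Types not in the list fall back to "real", matching the eighth
        # element of the data statement above.
        i = ORDER.index(t1) if t1 in ORDER else ORDER.index("real")
        j = ORDER.index(t2) if t2 in ORDER else ORDER.index("real")
        t = ORDER[max(i, j)]
        if t == "ushort" and t1 != t2:
            return "int"            # mixing short and unsigned short
        return t

    print(ty_max("short", "ushort"), ty_max("int", "real"))   # int real
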
+
+
+# IC_PLFILE -- Map pixel list file
+# This routine strips any image extensions and then adds .pl.
+
+pointer procedure ic_plfile (plfile, mode, refim)
+
+char plfile[ARB] # Pixel list file name
+int mode # Image mode
+pointer refim # Reference image
+pointer pl # IMIO pointer (returned)
+
+int i, strlen()
+bool	streq()
+pointer sp, str, immap()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_FNAME, TY_CHAR)
+
+ call imgimage (plfile, Memc[str], SZ_FNAME)
+
+ # Strip any existing extensions
+ i = strlen(Memc[str])
+ switch (Memc[str+i-1]) {
+ case 'h':
+ if (i > 3 && Memc[str+i-4] == '.')
+ Memc[str+i-4] = EOS
+ case 'l':
+ if (i > 2 && streq (Memc[str+i-3], ".pl"))
+ Memc[str+i-3] = EOS
+ }
+
+ call strcat (".pl", Memc[str], SZ_FNAME)
+ pl = immap (Memc[str], NEW_COPY, refim)
+ call sfree (sp)
+ return (pl)
+end
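
IC_PLFILE's filename handling amounts to stripping a trailing ".??h" image extension or an existing ".pl" before appending ".pl". The Python restatement below is illustrative only, with made-up file names.

    # Illustrative sketch of the IC_PLFILE name handling (not IRAF code).
    def pl_name(plfile):
        name = plfile
        if name.endswith("h") and len(name) > 3 and name[-4] == ".":
            name = name[:-4]            # strip a ".??h" image extension, e.g. ".imh"
        elif name.endswith(".pl"):
            name = name[:-3]            # strip an existing ".pl"
        return name + ".pl"

    print(pl_name("flatmask.imh"), pl_name("flatmask.pl"), pl_name("flatmask"))
    # flatmask.pl flatmask.pl flatmask.pl
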
diff --git a/noao/imred/ccdred/src/t_mkfringe.x b/noao/imred/ccdred/src/t_mkfringe.x
new file mode 100644
index 00000000..d3e2e82d
--- /dev/null
+++ b/noao/imred/ccdred/src/t_mkfringe.x
@@ -0,0 +1,191 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+# T_MKFRINGECOR -- CL task to make fringe correction images. The large scale
+# background of each input image is subtracted from the image to obtain
+# the output fringe correction image. The images are first processed if needed.
+
+procedure t_mkfringecor()
+
+int listin # List of input CCD images
+int listout # List of output CCD images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+
+bool clgetb(), streq()
+int imtopenp(), imtgetim()
+pointer sp, input, output, tmp, str, in, out, ccd
+errchk set_input, set_output, ccddelete
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the lists and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ listin = imtopenp ("input")
+ listout = imtopenp ("mkfringecor.output")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ if (Memc[input] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (NULL)
+ call ccd_open (0)
+
+ # Process each image.
+ while (imtgetim (listin, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s: mkfringecor\n")
+ call pargstr (Memc[input])
+ }
+
+ # Set input and output images. Use temporary image if needed.
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ if (imtgetim (listout, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (streq (Memc[input], Memc[output]))
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ else
+ call strcpy (Memc[output], Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+
+ # Process image as a flat field image.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ call set_illum (ccd)
+
+ # Do the processing.
+	    if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (Memc[input], Memc[output])) {
+ call ccddelete (Memc[input])
+ call imrename (Memc[tmp], Memc[input])
+ } else
+ call strcpy (Memc[output], Memc[input], SZ_FNAME)
+ } else {
+ # Delete the temporary output image. Make a copy if needed.
+ call imunmap (in)
+ call imunmap (out)
+ call imdelete (Memc[tmp])
+ }
+ call free_proc (ccd)
+
+ # Do special processing.
+ call mkfringecor (Memc[input], Memc[output])
+ if (!streq (Memc[input], Memc[output]))
+ call ccdcopy (Memc[input], Memc[output])
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (listin)
+ call imtclose (listout)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
+
+
+# MKFRINGECOR -- Given an input image which has been processed, make the output
+# fringe correction image.
+
+procedure mkfringecor (input, output)
+
+char input[SZ_FNAME] # Input image
+char output[SZ_FNAME] # Output image
+
+int i, nc, nl
+pointer sp, str, illum, tmp, in, im, out, out1
+bool clgetb(), ccdflag(), streq()
+pointer immap(), imgl2r(), impl2r()
+errchk immap, ccddelete
+
+begin
+ # Check if this operation has been done.
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "mkfringe")) {
+ call imunmap (in)
+ return
+ }
+
+ # Print operation if not processing.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Make fringe correction\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (illum, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+
+ # Make the illumination image.
+ call imunmap (in)
+ call strcpy (input, Memc[tmp], SZ_FNAME)
+ call mktemp ("tmp", Memc[illum], SZ_FNAME)
+ call mkillumination (Memc[tmp], Memc[illum], NO, NO)
+
+ in = immap (input, READ_ONLY, 0)
+ im = immap (Memc[illum], READ_ONLY, 0)
+
+ # Create the temporary output.
+ if (streq (input, output)) {
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+ out1 = in
+ } else {
+ call set_output (in, out, output)
+ out1 = out
+ }
+
+ # Subtract the illumination from input image.
+ nc = IM_LEN(out,1)
+ nl = IM_LEN(out,2)
+ do i = 1, nl
+ call asubr (Memr[imgl2r(in,i)], Memr[imgl2r(im,i)],
+ Memr[impl2r(out,i)], nc)
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE, "Fringe correction created")
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out1, Memc[str])
+ call hdmpstr (out, "mkfringe", Memc[str])
+ call hdmpstr (out, "imagetyp", "fringe")
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (im)
+ call imunmap (out)
+ call imdelete (Memc[illum])
+ if (streq (input, output)) {
+ call ccddelete (input)
+ call imrename (Memc[tmp], input)
+ } else
+ call strcpy (output, input, SZ_FNAME)
+ call sfree (sp)
+end
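
The arithmetic of MKFRINGECOR is a per-line subtraction of the smoothed large-scale background (the temporary "illumination" image) from the processed input, leaving only the fringe pattern. The NumPy sketch below is illustrative only: it uses a plain boxcar where the IRAF smoothing also sigma-clips objects, and the box size and test data are assumptions.

    # Illustrative sketch only -- the subtraction done line by line with asubr().
    import numpy as np
    from scipy.ndimage import uniform_filter

    def make_fringe(image, boxsize=101):
        background = uniform_filter(image, size=boxsize, mode="nearest")
        return image - background

    rng = np.random.default_rng(0)
    frame = 100.0 + np.sin(np.arange(256) / 5.0) + rng.normal(0, 0.5, (256, 256))
    print(round(make_fringe(frame).mean(), 3))    # close to zero: background removed
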
diff --git a/noao/imred/ccdred/src/t_mkillumcor.x b/noao/imred/ccdred/src/t_mkillumcor.x
new file mode 100644
index 00000000..e9113f01
--- /dev/null
+++ b/noao/imred/ccdred/src/t_mkillumcor.x
@@ -0,0 +1,108 @@
+include "ccdred.h"
+
+# T_MKILLUMCOR -- Make flat field illumination correction images.
+#
+# The input flat field images are processed and smoothed to obtain
+# illumination correction images. These illumination correction images
+# are used to correct already processed images for illumination effects
+# introduced by the flat field.
+
+procedure t_mkillumcor()
+
+int listin # List of input CCD images
+int listout # List of output CCD images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+
+bool clgetb(), streq()
+int imtopenp(), imtgetim()
+pointer sp, input, output, tmp, str, in, out, ccd
+errchk set_input, set_output, ccddelete
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the lists and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ listin = imtopenp ("input")
+ listout = imtopenp ("mkillumcor.output")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ if (Memc[input] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (NULL)
+ call ccd_open (0)
+
+ # Process each image.
+ while (imtgetim (listin, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s: mkillumcor\n")
+ call pargstr (Memc[input])
+ }
+
+ # Set input and output images.
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ if (imtgetim (listout, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (streq (Memc[input], Memc[output]))
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ else
+ call strcpy (Memc[output], Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+
+ # Process image as an illumination image.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+ call set_zero (ccd)
+ call set_dark (ccd)
+ CORS(ccd, FINDMEAN) = YES
+
+ # Do the processing if the COR flag is set.
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Replace the input image by the corrected image.
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (Memc[input], Memc[output])) {
+ call ccddelete (Memc[input])
+ call imrename (Memc[tmp], Memc[input])
+ } else
+ call strcpy (Memc[output], Memc[input], SZ_FNAME)
+ } else {
+ # Make a copy if necessary.
+ call imunmap (in)
+ call imunmap (out)
+ call imdelete (Memc[tmp])
+ }
+ call free_proc (ccd)
+
+ # Do special processing.
+ call mkillumination (Memc[input], Memc[output], YES, YES)
+ if (!streq (Memc[input], Memc[output]))
+ call ccdcopy (Memc[input], Memc[output])
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (listin)
+ call imtclose (listout)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
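
MKILLUMCOR, like the other tasks in this group, supports in-place operation: when no output name is supplied the corrected result is written to a temporary image, the original is deleted (or backed up) by ccddelete, and the temporary is renamed over it. The Python sketch below illustrates that pattern only; the file names and the trivial "process" step are placeholders, not IRAF code.

    # Illustrative sketch of the in-place update pattern (not IRAF code).
    import os, shutil, tempfile

    def process_in_place(path, process, backup=None):
        fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
        os.close(fd)
        process(path, tmp)              # stand-in for doproc()/set_header()
        if backup:
            shutil.copy2(path, backup)  # ccddelete() can keep a backup copy
        os.replace(tmp, path)           # the imrename() step

    with open("example.dat", "w") as f:     # scratch file for the demo
        f.write("data\n")
    process_in_place("example.dat", lambda src, dst: shutil.copy2(src, dst))
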
diff --git a/noao/imred/ccdred/src/t_mkillumft.x b/noao/imred/ccdred/src/t_mkillumft.x
new file mode 100644
index 00000000..ecb66a8e
--- /dev/null
+++ b/noao/imred/ccdred/src/t_mkillumft.x
@@ -0,0 +1,229 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+# T_MKILLUMFLAT -- Make illumination corrected flat field images.
+#
+# The input flat field images are processed and smoothed to obtain the
+# illumination pattern. The illumination pattern is then divided out
+# of the input image to make the output illumination corrected flat field
+# image.
+
+procedure t_mkillumflat()
+
+int listin # List of input CCD images
+int listout # List of output CCD images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+
+bool clgetb(), streq()
+int imtopenp(), imtgetim()
+pointer sp, input, output, tmp, str, in, out, ccd
+errchk set_input, set_output, ccddelete
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the lists and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ listin = imtopenp ("input")
+ listout = imtopenp ("mkillumflat.output")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (NULL)
+ call ccd_open (0)
+
+ # Process each image.
+ while (imtgetim (listin, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s: mkillumflat\n")
+ call pargstr (Memc[input])
+ }
+
+ # Set input and output images. Use temporary image if needed.
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ if (imtgetim (listout, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (streq (Memc[input], Memc[output]))
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ else
+ call strcpy (Memc[output], Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+
+ # Process image as a flat field image.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+ call set_zero (ccd)
+ call set_dark (ccd)
+
+ # Do the processing.
+	    if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (Memc[input], Memc[output])) {
+ call ccddelete (Memc[input])
+ call imrename (Memc[tmp], Memc[input])
+ } else
+ call strcpy (Memc[output], Memc[input], SZ_FNAME)
+ } else {
+ # Delete the temporary output image. Make a copy if needed.
+ call imunmap (in)
+ call imunmap (out)
+ call imdelete (Memc[tmp])
+ }
+ call free_proc (ccd)
+
+ # Do special processing.
+ call mkillumflat (Memc[input], Memc[output])
+ if (!streq (Memc[input], Memc[output]))
+ call ccdcopy (Memc[input], Memc[output])
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (listin)
+ call imtclose (listout)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
+
+
+# MKILLUMFLAT -- Take the processed input image and make the illumination
+# corrected flat field output image. The illumination pattern is created
+# as a temporary image and then applied to the input flat field
+# image to make the final output flat field image. If the input and
+# output names are the same, the operation is done in place.
+
+procedure mkillumflat (input, output)
+
+char input[SZ_FNAME] # Input image
+char output[SZ_FNAME] # Output image
+
+int i, nc, nl
+real scale
+long time
+pointer sp, str, illum, tmp, in, im, out, out1, data
+
+bool clgetb(), ccdflag(), streq()
+int hdmgeti()
+real hdmgetr(), clgetr(), divzero()
+pointer immap(), imgl2r(), impl2r()
+errchk immap, ccddelete
+extern divzero()
+
+real	rdivzero		# Result for division by zero
+int ndivzero # Number of zero divisions
+common /cdivzero/ rdivzero, ndivzero
+
+begin
+ # Check if this operation has been done.
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "illumflt")) {
+ call imunmap (in)
+ return
+ }
+
+ # Print operation if not processing.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Remove illumination\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ # Get and set task parameters for division by zero.
+ rdivzero = clgetr ("divbyzero")
+ ndivzero = 0
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (illum, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+
+ # Make the illumination image.
+ call imunmap (in)
+ call strcpy (input, Memc[tmp], SZ_FNAME)
+ call mktemp ("tmp", Memc[illum], SZ_FNAME)
+ call mkillumination (Memc[tmp], Memc[illum], NO, NO)
+
+ in = immap (input, READ_ONLY, 0)
+ im = immap (Memc[illum], READ_ONLY, 0)
+ iferr (scale = hdmgetr (im, "ccdmean"))
+ scale = 1.
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (time < IM_MTIME(im))
+ scale = 1.
+
+ # Create the temporary output.
+ if (streq (input, output)) {
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+ out1 = in
+ } else {
+ call set_output (in, out, output)
+ out1 = out
+ }
+
+ # Divide the illumination and flat field images with scaling.
+ nc = IM_LEN(out,1)
+ nl = IM_LEN(out,2)
+ do i = 1, nl {
+ data = impl2r (out, i)
+ call advzr (Memr[imgl2r(in,i)], Memr[imgl2r(im,i)],
+ Memr[data], nc, divzero)
+ if (scale != 1.)
+ call amulkr (Memr[data], scale, Memr[data], nc)
+ }
+
+ # Log the operation.
+ if (ndivzero > 0) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Warning: %d divisions by zero replaced by %g")
+ call pargi (ndivzero)
+ call pargr (rdivzero)
+ call ccdlog (out1, Memc[str])
+ }
+ call sprintf (Memc[str], SZ_LINE, "Removed illumination from flat")
+ call sprintf (Memc[str], SZ_LINE,
+ "Illumination flat created from %s")
+ call pargstr (input)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out1, Memc[str])
+ call hdmpstr (out, "illumflt", Memc[str])
+ call hdmpstr (out, "imagetyp", "flat")
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (im)
+ call imunmap (out)
+ call imdelete (Memc[illum])
+
+ # The input name is changed to the output name for further processing.
+ if (streq (input, output)) {
+ call ccddelete (input)
+ call imrename (Memc[tmp], input)
+ } else
+ call strcpy (output, input, SZ_FNAME)
+ call sfree (sp)
+end
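
The pixel arithmetic in MKILLUMFLAT divides the processed flat by its smoothed illumination pattern, substitutes a fixed value wherever the divisor is zero (the divbyzero parameter), and rescales by the illumination's mean so the flat keeps its normalization. The NumPy sketch below is illustrative only; in the IRAF code the scale comes from the ccdmean keyword and the zero divisions are counted and logged.

    # Illustrative sketch of the MKILLUMFLAT division (not IRAF code).
    import numpy as np

    def illum_flat(flat, illum, divbyzero=1.0):
        scale = illum.mean()                      # stand-in for the ccdmean keyword
        out = np.full_like(flat, divbyzero, dtype=float)
        np.divide(flat, illum, out=out, where=illum != 0)
        return out * scale

    flat = np.full((4, 4), 2.0)
    illum = np.full((4, 4), 0.5)
    illum[0, 0] = 0.0                             # force one division by zero
    print(illum_flat(flat, illum))
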
diff --git a/noao/imred/ccdred/src/t_mkskycor.x b/noao/imred/ccdred/src/t_mkskycor.x
new file mode 100644
index 00000000..fa3f3cd4
--- /dev/null
+++ b/noao/imred/ccdred/src/t_mkskycor.x
@@ -0,0 +1,694 @@
+include <imhdr.h>
+include <imset.h>
+include <mach.h>
+include "ccdred.h"
+
+define MINSIGMA 1. # Minimum sigma
+define NITERATE 10 # Maximum number of clipping iterations
+
+# T_MKSKYCOR -- Make sky illumination correction images.
+#
+# The input images are processed and smoothed to obtain an illumination
+# correction image. This task is a version of T_CCDPROC which treats the images
+# as illumination images regardless of their CCD image type.
+
+procedure t_mkskycor()
+
+int listin # List of input CCD images
+int listout # List of output CCD images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+
+bool flatcor, ccdflag(), clgetb(), streq()
+int imtopenp(), imtgetim()
+pointer sp, input, output, tmp, str, in, out, ccd
+errchk set_input, set_output, ccddelete
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the lists and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ listin = imtopenp ("input")
+ listout = imtopenp ("mkskycor.output")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ if (Memc[input] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (NULL)
+ call ccd_open (0)
+
+ # Process each image.
+ while (imtgetim (listin, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s: mkskycor\n")
+ call pargstr (Memc[input])
+ }
+
+ # Set input and output images.
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ if (imtgetim (listout, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (streq (Memc[input], Memc[output]))
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ else
+ call strcpy (Memc[output], Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+
+ # Process image as an illumination image.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+
+ # Do the processing if the COR flag is set.
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Replace the input image by the corrected image.
+ flatcor = ccdflag (out, "flatcor")
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (Memc[input], Memc[output])) {
+ call ccddelete (Memc[input])
+ call imrename (Memc[tmp], Memc[input])
+ } else
+ call strcpy (Memc[output], Memc[input], SZ_FNAME)
+ } else {
+ # Delete the temporary output image.  Make a copy if needed.
+ flatcor = ccdflag (out, "flatcor")
+ call imunmap (in)
+ call imunmap (out)
+ call imdelete (Memc[tmp])
+ }
+ call free_proc (ccd)
+
+ # Do special processing.
+ if (!flatcor) {
+ call eprintf (
+ "%s: WARNING - Image should be flat fielded first\n")
+ call pargstr (Memc[input])
+ }
+ call mkillumination (Memc[input], Memc[output], NO, YES)
+ if (!streq (Memc[input], Memc[output]))
+ call ccdcopy (Memc[input], Memc[output])
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (listin)
+ call imtclose (listout)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
+
+
+# MKILLUMINATION -- Make illumination images.
+#
+# The images are boxcar smoothed to obtain the large scale illumination.
+# Objects in the images are excluded from the average by sigma clipping.
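+# The smoothing box sizes come from the task parameters xboxmin, xboxmax,
+# yboxmin, and yboxmax; values less than 1 are interpreted as fractions of
+# the image dimensions.  If inverse=YES the reciprocal of the illumination
+# is returned, with divisions by zero replaced by the "divbyzero" value.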
+
+procedure mkillumination (input, output, inverse, log)
+
+char input[SZ_FNAME] # Input image
+char output[SZ_FNAME] # Output image
+int inverse # Return inverse of illumination
+int log # Add log info?
+
+real xbminr # Minimum size of X smoothing box
+real ybminr # Minimum size of Y smoothing box
+real xbmaxr # Maximum size of X smoothing box
+real ybmaxr # Maximum size of Y smoothing box
+bool clip # Sigma clip
+real lowsigma # Low sigma clip
+real highsigma # High sigma clip
+
+int xbmin, ybmin, xbmax, ybmax
+pointer sp, str, tmp, in, out, out1
+
+bool clgetb(), ccdflag(), streq()
+real clgetr()
+pointer immap()
+errchk immap, ccddelete
+
+real rdivzero # Result for division by zero
+int ndivzero # Number of zero divisions
+common /cdivzero/ rdivzero, ndivzero
+
+begin
+ # Check if this operation has been done. Unfortunately this requires
+ # mapping the image.
+
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "mkillum")) {
+ call imunmap (in)
+ return
+ }
+
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Convert %s to illumination correction\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ # Get task parameters
+ xbminr = clgetr ("xboxmin")
+ ybminr = clgetr ("yboxmin")
+ xbmaxr = clgetr ("xboxmax")
+ ybmaxr = clgetr ("yboxmax")
+ clip = clgetb ("clip")
+ if (clip) {
+ lowsigma = max (MINSIGMA, clgetr ("lowsigma"))
+ highsigma = max (MINSIGMA, clgetr ("highsigma"))
+ }
+ if (inverse == YES)
+ rdivzero = clgetr ("divbyzero")
+ ndivzero = 0
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+
+ # Create output.
+ if (streq (input, output)) {
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+ out1 = in
+ } else {
+ call set_output (in, out, output)
+ out1 = out
+ }
+
+ if (xbminr < 1.)
+ xbminr = xbminr * IM_LEN(in,1)
+ if (ybminr < 1.)
+ ybminr = ybminr * IM_LEN(in,2)
+ if (xbmaxr < 1.)
+ xbmaxr = xbmaxr * IM_LEN(in,1)
+ if (ybmaxr < 1.)
+ ybmaxr = ybmaxr * IM_LEN(in,2)
+
+ xbmin = max (1, min (IM_LEN(in,1), nint (min (xbminr, xbmaxr))))
+ xbmax = max (1, min (IM_LEN(in,1), nint (max (xbminr, xbmaxr))))
+ ybmin = max (1, min (IM_LEN(in,2), nint (min (ybminr, ybmaxr))))
+ ybmax = max (1, min (IM_LEN(in,2), nint (max (ybminr, ybmaxr))))
+
+ if (clip)
+ call illumination (in, out, xbmin, ybmin, xbmax, ybmax,
+ lowsigma, highsigma, inverse)
+ else
+ call qillumination (in, out, xbmin, ybmin, xbmax, ybmax, inverse)
+
+ # Log the operation.
+ if (log == YES) {
+ if (ndivzero > 0) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Warning: %d divisions by zero replaced by %g")
+ call pargi (ndivzero)
+ call pargr (rdivzero)
+ call ccdlog (out1, Memc[str])
+ }
+ call sprintf (Memc[str], SZ_LINE,
+ "Illumination correction created from %s")
+ call pargstr (input)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out1, Memc[str])
+ }
+ call hdmpstr (out, "mkillum", Memc[str])
+ call hdmpstr (out, "imagetyp", "illum")
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (input, output)) {
+ call ccddelete (input)
+ call imrename (Memc[tmp], input)
+ } else
+ call strcpy (output, input, SZ_FNAME)
+ call sfree (sp)
+end
+
+
+# ILLUMINATION -- Make illumination correction image with clipping.
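+#
+# The y box grows by two lines per output line from the minimum to the
+# maximum size, moves across the middle of the image at the maximum size,
+# and shrinks again near the top.  At each step the accumulated line sum
+# is smoothed in x by AGBOXCAR, and deviant pixels are rejected by
+# BOXCLEAN so that they do not bias the moving sum.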
+
+procedure illumination (in, out, xbmin, ybmin, xbmax, ybmax, low, high, inverse)
+
+pointer in # Pointer to the input image
+pointer out # Pointer to the output image
+int xbmin, ybmin # Minimum dimensions of the boxcar
+int xbmax, ybmax # Maximum dimensions of the boxcar
+real low, high # Clipping sigma thresholds
+int inverse # Return inverse of illumination?
+
+real scale, ccdmean
+int i, ncols, nlines, linein, lineout, ybox2, nrej
+pointer sp, ptr, ptrs, data, sum, avg, output
+
+long clktime()
+int boxclean()
+real asumr(), divzero()
+pointer imgl2r(), impl2r()
+extern divzero()
+
+begin
+ # Set up an array of linepointers and accumulators
+ ncols = IM_LEN(out,1)
+ nlines = IM_LEN(out,2)
+ call smark (sp)
+ call salloc (ptrs, ybmax, TY_POINTER)
+ call salloc (sum, ncols, TY_REAL)
+ call salloc (avg, ncols, TY_REAL)
+ if (inverse == YES)
+ call salloc (output, ncols, TY_REAL)
+ else
+ output = avg
+
+ # Set input buffers.
+ if (ybmax < nlines)
+ call imseti (in, IM_NBUFS, ybmax)
+
+ # Get the first average over the minimum y box.
+ call aclrr (Memr[sum], ncols)
+ linein = 0
+ while (linein < ybmin) {
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[data], Memr[sum], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+ }
+ ybox2 = ybmin
+ scale = ybmin
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax, scale)
+
+ # Iteratively clean the initial lines.
+ ptr = ptrs
+ if (ybox2 != ybmax)
+ ptr = ptr + 1
+ do i = 1, NITERATE {
+ nrej = 0
+ do lineout = 1, linein {
+ data = Memi[ptr+lineout-1]
+ nrej = nrej + boxclean (Memr[data], Memr[avg], Memr[sum],
+ ncols, low, high)
+ }
+ if (nrej > 0)
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax,
+ scale)
+ else
+ break
+ }
+
+ # Output the minimum smoothing y box.
+ if (inverse == YES)
+ call arczr (1., Memr[avg], Memr[output], ncols, divzero)
+ ybox2 = (ybmin + 1) / 2
+ lineout = 0
+ while (lineout < ybox2) {
+ lineout = lineout + 1
+ call amovr (Memr[output], Memr[impl2r(out, lineout)], ncols)
+ }
+ ccdmean = ybox2 * asumr (Memr[output], ncols)
+
+ # Increase the y box size by steps of 2 until the maximum size.
+ while (linein < ybmax) {
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+ scale = scale + 1
+
+ nrej = boxclean (Memr[data], Memr[avg], Memr[sum], ncols,
+ low, high)
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax, scale)
+
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+
+ nrej = boxclean (Memr[data], Memr[avg], Memr[sum], ncols, low, high)
+ scale = scale + 1
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax, scale)
+
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ if (inverse == YES)
+ call arczr (1., Memr[avg], Memr[data], ncols, divzero)
+ else
+ call amovr (Memr[avg], Memr[data], ncols)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # For each line subtract the last line from the sum, add the
+ # next line to the sum, and output a line.
+
+ while (linein < nlines) {
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ Memi[ptr] = data
+
+ nrej = boxclean (Memr[data], Memr[avg], Memr[sum], ncols, low, high)
+
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax, scale)
+
+ if (inverse == YES)
+ call arczr (1., Memr[avg], Memr[data], ncols, divzero)
+ else
+ call amovr (Memr[avg], Memr[data], ncols)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # Decrease the y box in steps of 2 until minimum y box.
+ while (lineout < nlines - ybox2) {
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+ scale = scale - 2
+
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call agboxcar (Memr[sum], Memr[data], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[data], Memr[data], ncols, divzero)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # Output the last lines of the minimum y box size.
+ call agboxcar (Memr[sum], Memr[avg], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[avg], Memr[output], ncols, divzero)
+ ybox2 = nlines - lineout
+ while (lineout < nlines) {
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call amovr (Memr[output], Memr[data], ncols)
+ }
+ ccdmean = ccdmean + ybox2 * asumr (Memr[output], ncols)
+
+ # Write scale factor out.
+ ccdmean = ccdmean / (ncols * nlines)
+ call hdmputr (out, "ccdmean", ccdmean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ # Free buffers
+ call sfree (sp)
+end
+
+
+# QILLUMINATION -- Quick (no clipping) illumination correction image.
+
+procedure qillumination (in, out, xbmin, ybmin, xbmax, ybmax, inverse)
+
+pointer in # pointer to the input image
+pointer out # pointer to the output image
+int xbmin, ybmin # Minimum dimensions of the boxcar
+int xbmax, ybmax # Maximum dimensions of the boxcar
+int inverse # return inverse of illumination
+
+real scale, ccdmean
+int ncols, nlines, linein, lineout, ybox1
+pointer sp, ptr, ptrs, data, sum, output
+
+long clktime()
+real asumr(), divzero()
+pointer imgl2r(), impl2r()
+extern divzero()
+
+begin
+ # Set up an array of linepointers and accumulators
+ ncols = IM_LEN(out,1)
+ nlines = IM_LEN(out,2)
+
+ call smark (sp)
+ call salloc (ptrs, ybmax, TY_POINTER)
+ call salloc (sum, ncols, TY_REAL)
+ call salloc (output, ncols, TY_REAL)
+
+ # Set input buffers.
+ if (ybmax < nlines)
+ call imseti (in, IM_NBUFS, ybmax)
+
+ # Accumulate the minimum y box.
+ call aclrr (Memr[sum], ncols)
+ linein = 0
+ while (linein < ybmin) {
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[data], Memr[sum], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+ }
+
+ # Output the minimum y box.
+ ybox1 = (ybmin + 1) / 2
+ scale = ybmin
+ call agboxcar (Memr[sum], Memr[output], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[output], Memr[output], ncols, divzero)
+ lineout = 0
+ while (lineout < ybox1) {
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call amovr (Memr[output], Memr[data], ncols)
+ }
+ ccdmean = ybox1 * asumr (Memr[output], ncols)
+
+ # Increase the y box size by steps of 2 until the maximum size.
+ while (linein < ybmax) {
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+ linein = linein + 1
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ ptr = ptrs + mod (linein, ybmax)
+ Memi[ptr] = data
+
+ scale = scale + 2
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call agboxcar (Memr[sum], Memr[data], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[data], Memr[data], ncols, divzero)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # For each line subtract the last line from the sum, add the
+ # next line to the sum, and output a line.
+
+ while (linein < nlines) {
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+ data = imgl2r (in, linein)
+ call aaddr (Memr[sum], Memr[data], Memr[sum], ncols)
+ Memi[ptr] = data
+
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call agboxcar (Memr[sum], Memr[data], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[data], Memr[data], ncols, divzero)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # Decrease the y box in steps of 2 until minimum y box.
+ while (lineout < nlines - ybox1) {
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+ linein = linein + 1
+ ptr = ptrs + mod (linein, ybmax)
+ data = Memi[ptr]
+ call asubr (Memr[sum], Memr[data], Memr[sum], ncols)
+
+ lineout = lineout + 1
+ scale = scale - 2
+ data = impl2r (out, lineout)
+ call agboxcar (Memr[sum], Memr[data], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[data], Memr[data], ncols, divzero)
+ ccdmean = ccdmean + asumr (Memr[data], ncols)
+ }
+
+ # Output the last lines of the minimum y box size.
+ call agboxcar (Memr[sum], Memr[output], ncols, xbmin, xbmax, scale)
+ if (inverse == YES)
+ call arczr (1., Memr[output], Memr[output], ncols, divzero)
+ ybox1 = nlines - lineout
+ while (lineout < nlines) {
+ lineout = lineout + 1
+ data = impl2r (out, lineout)
+ call amovr (Memr[output], Memr[data], ncols)
+ }
+ ccdmean = ccdmean + ybox1 * asumr (Memr[output], ncols)
+
+ # Write scale factor out.
+ ccdmean = ccdmean / (ncols * nlines)
+ call hdmputr (out, "ccdmean", ccdmean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ # Free buffers
+ call sfree (sp)
+end
+
+
+# AGBOXCAR -- Vector growing boxcar smooth.
+# This implements the growing box algorithm, which differs from normal
+# boxcar smoothing in that the box size is not fixed.
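+# The x box grows from the minimum size at the left edge of a line to the
+# maximum size, moves across the line at fixed size, and shrinks again at
+# the right edge.  Each output value is the box sum divided by the number
+# of pixels in the box, xbox * ybox, where ybox is the current number of
+# image lines accumulated in the input sum.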
+
+procedure agboxcar (in, out, ncols, xbmin, xbmax, ybox)
+
+real in[ncols] # Sum of ybox lines
+real out[ncols] # Boxcar smoothed output
+int ncols # Number of columns
+int xbmin, xbmax # Boxcar size in x
+real ybox # Boxcar size in y
+
+int colin, colout, lastcol, npix, xbmin2
+real sum, output
+
+begin
+ xbmin2 = (xbmin + 1) / 2
+ colin = 0
+ sum = 0.
+ while (colin < xbmin) {
+ colin = colin + 1
+ sum = sum + in[colin]
+ }
+
+ npix = xbmin * ybox
+ output = sum / npix
+ colout = 0
+ while (colout < xbmin2) {
+ colout = colout + 1
+ out[colout] = output
+ }
+
+ while (colin < xbmax) {
+ colin = colin + 1
+ sum = sum + in[colin]
+ colin = colin + 1
+ sum = sum + in[colin]
+ npix = npix + 2 * ybox
+ colout = colout + 1
+ out[colout] = sum / npix
+ }
+
+ lastcol = 0
+ while (colin < ncols) {
+ colin = colin + 1
+ lastcol = lastcol + 1
+ sum = sum + in[colin] - in[lastcol]
+ colout = colout + 1
+ out[colout] = sum / npix
+ }
+
+ while (colout < ncols - xbmin2) {
+ lastcol = lastcol + 1
+ sum = sum - in[lastcol]
+ lastcol = lastcol + 1
+ sum = sum - in[lastcol]
+ npix = npix - 2 * ybox
+ colout = colout + 1
+ out[colout] = sum / npix
+ }
+
+ output = sum / npix
+ while (colout < ncols) {
+ colout = colout + 1
+ out[colout] = output
+ }
+end
+
+
+# BOXCLEAN -- Reject data values from the sum for the next boxcar average
+# whose residuals from the current boxcar average fall outside the minimum
+# and maximum residual limits.  This excludes deviant data from the moving
+# average before it enters the average.
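+# Rejected pixels are replaced by the current box average in the data line
+# and their residuals are removed from the moving sum; the rms of the
+# residuals over the whole line sets the clipping limits.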
+
+int procedure boxclean (data, boxavg, sum, ncols, low, high)
+
+real data[ncols] # Data line
+real boxavg[ncols] # Box average line
+real sum[ncols] # Moving sum
+int ncols # Number of columns
+real low # Low clipping factor
+real high # High clipping factor
+
+int i, nrej
+real rms, resid, minresid, maxresid
+
+begin
+ rms = 0.
+ do i = 1, ncols
+ rms = rms + (data[i] - boxavg[i]) ** 2
+ rms = sqrt (rms / ncols)
+ minresid = -low * rms
+ maxresid = high * rms
+
+ nrej = 0
+ do i = 1, ncols {
+ resid = data[i] - boxavg[i]
+ if ((resid < minresid) || (resid > maxresid)) {
+ data[i] = boxavg[i]
+ sum[i] = sum[i] - resid
+ nrej = nrej + 1
+ }
+ }
+
+ return (nrej)
+end
+
+
+# DIVZERO -- Error action for division by zero.
+
+real procedure divzero (x)
+
+real x # Value to be inverted
+
+real rdivzero # Result for division by zero
+int ndivzero # Number of zero divisions
+common /cdivzero/ rdivzero, ndivzero
+
+begin
+ ndivzero = ndivzero + 1
+ return (rdivzero)
+end
diff --git a/noao/imred/ccdred/src/t_mkskyflat.x b/noao/imred/ccdred/src/t_mkskyflat.x
new file mode 100644
index 00000000..02696905
--- /dev/null
+++ b/noao/imred/ccdred/src/t_mkskyflat.x
@@ -0,0 +1,215 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+
+# T_MKSKYFLAT -- Apply a sky observation to a flat field to remove the
+# residual illumination pattern.
+
+procedure t_mkskyflat()
+
+int listin # List of input CCD images
+int listout # List of output CCD images
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+
+bool flatcor, ccdflag(), clgetb(), streq()
+int imtopenp(), imtgetim()
+pointer sp, input, output, tmp, str, in, out, ccd
+errchk set_input, set_output, ccddelete
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the lists and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ listin = imtopenp ("input")
+ listout = imtopenp ("mkskyflat.output")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ if (Memc[input] == EOS)
+ call error (1, "No 'instrument' translation file specified.")
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+
+ # Force flat fields even if flatcor=no
+ flatcor = clgetb ("flatcor")
+ call clputb ("flatcor", true)
+ call cal_open (NULL)
+ call ccd_open (0)
+ call clputb ("flatcor", flatcor)
+
+ # Process each image.
+ while (imtgetim (listin, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s: mkskyflat\n")
+ call pargstr (Memc[input])
+ }
+
+ # Set input and output images. Use temporary image if needed.
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ if (imtgetim (listout, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (Memc[output] == EOS)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (streq (Memc[input], Memc[output]))
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ else
+ call strcpy (Memc[output], Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+
+ # Process image as an illumination image.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+
+ # Do the processing.
+ if (CORS(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Finish up
+ flatcor = ccdflag (out, "flatcor")
+ call imunmap (in)
+ call imunmap (out)
+ if (streq (Memc[input], Memc[output])) {
+ call ccddelete (Memc[input])
+ call imrename (Memc[tmp], Memc[input])
+ } else
+ call strcpy (Memc[output], Memc[input], SZ_FNAME)
+ } else {
+ # Delete the temporary output image. Make a copy if needed.
+ flatcor = ccdflag (out, "flatcor")
+ call imunmap (in)
+ call imunmap (out)
+ call imdelete (Memc[tmp])
+ }
+ call free_proc (ccd)
+
+ # Do special processing.
+ if (!flatcor) {
+ call eprintf (
+ "%s: WARNING - Image should be flat fielded first\n")
+ call pargstr (Memc[input])
+ }
+ call mkillumination (Memc[input], Memc[output], NO, YES)
+ call mkskyflat (Memc[input], Memc[output])
+ if (!streq (Memc[input], Memc[output]))
+ call ccdcopy (Memc[input], Memc[output])
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (listin)
+ call imtclose (listout)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
+
+
+# MKSKYFLAT -- Make a sky flat by multiplying the input illumination image
+# by the flat field.
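+# The product is divided by the flat field mean (the "ccdmean" keyword,
+# when present) so that the sky flat has approximately the same
+# normalization as the illumination image.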
+
+procedure mkskyflat (input, output)
+
+char input[SZ_FNAME] # Input image
+char output[SZ_FNAME] # Output image
+
+int i, nc, nl
+long time
+real scale
+pointer sp, str, flat, tmp, in, im, out, out1, data
+
+int hdmgeti()
+bool clgetb(), ccdflag(), streq()
+real hdmgetr()
+pointer immap(), imgl2r(), impl2r()
+errchk immap, ccddelete
+
+begin
+ # Check if this operation has been done.
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "skyflat")) {
+ call imunmap (in)
+ return
+ }
+
+ # Print operation if not processing.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Convert %s to sky flat\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (flat, SZ_FNAME, TY_CHAR)
+ call salloc (tmp, SZ_FNAME, TY_CHAR)
+
+ # Get the flat field.
+ call cal_image (in, FLAT, 1, Memc[flat], SZ_FNAME)
+ im = immap (Memc[flat], READ_ONLY, 0)
+ iferr (scale = hdmgetr (im, "ccdmean"))
+ scale = 1.
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (time < IM_MTIME(im))
+ scale = 1.
+
+ # Create the temporary output.
+ if (streq (input, output)) {
+ call mktemp ("tmp", Memc[tmp], SZ_FNAME)
+ call set_output (in, out, Memc[tmp])
+ out1 = in
+ } else {
+ call set_output (in, out, output)
+ out1 = out
+ }
+
+ # Multiply the illumination and flat field images with scaling.
+ nc = IM_LEN(out,1)
+ nl = IM_LEN(out,2)
+ do i = 1, nl {
+ data = impl2r (out, i)
+ call amulr (Memr[imgl2r(in,i)], Memr[imgl2r(im,i)],
+ Memr[data], nc)
+ if (scale != 1.)
+ call adivkr (Memr[data], scale, Memr[data], nc)
+ }
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Sky flat created from %s and %s")
+ call pargstr (input)
+ call pargstr (Memc[flat])
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out1, Memc[str])
+ call hdmpstr (out, "skyflat", Memc[str])
+ call hdmpstr (out, "imagetyp", "flat")
+
+ # Finish up
+ call imunmap (in)
+ call imunmap (im)
+ call imunmap (out)
+ if (streq (input, output)) {
+ call ccddelete (input)
+ call imrename (Memc[tmp], input)
+ } else
+ call strcpy (output, input, SZ_FNAME)
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/src/t_skyreplace.x b/noao/imred/ccdred/src/t_skyreplace.x
new file mode 100644
index 00000000..9bd2e9d0
--- /dev/null
+++ b/noao/imred/ccdred/src/t_skyreplace.x
@@ -0,0 +1,301 @@
+include <imhdr.h>
+
+
+# T_SKYREPLACE -- Replace objects by sky.  This is development code and is
+# not used in the package.  It is here to be worked on further when an image
+# display interface is added.
+
+procedure t_skyreplace ()
+
+char image[SZ_FNAME] # Image to be modified
+
+char graph[SZ_LINE], display[SZ_LINE], cmd[SZ_LINE]
+pointer im, immap()
+int clgeti(), wcs, key, clgcur(), nrep, skyreplace()
+real wx, wy, xc, yc, r, s
+
+begin
+ call clgstr ("image", image, SZ_FNAME)
+ call sprintf (graph, SZ_LINE, "contour %s")
+ call pargstr (image)
+ call sprintf (display, SZ_LINE, "display %s %d")
+ call pargstr (image)
+ call pargi (clgeti ("frame"))
+
+ im = immap (image, READ_WRITE, 0)
+ while (clgcur ("cursor",wx, wy, wcs, key, cmd, SZ_LINE) != EOF) {
+ switch (key) {
+ case 'a':
+ r = sqrt ((wx - xc) ** 2 + (wy - yc) ** 2)
+ s = 2 * r
+ case 'b':
+ nrep = skyreplace (im, xc, yc, r, s)
+ case 'c':
+ xc = wx
+ yc = wy
+ case 'd':
+ call imunmap (im)
+ call clcmdw (display)
+ im = immap (image, READ_WRITE, 0)
+ case 'g':
+ call imunmap (im)
+ call clcmdw (graph)
+ im = immap (image, READ_WRITE, 0)
+ case 'q':
+ break
+ default:
+ call printf ("\007")
+ }
+ }
+
+ call imunmap (im)
+end
+
+
+define NSKY 100 # Minimum number of sky points
+
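+# SKYREPLACE -- Replace pixels within the object aperture by values drawn
+# at random from the sky pixels in the surrounding square annulus.  The sky
+# pixels are collected by quadrant, sorted, and clipped at three sigma about
+# the mean of the central 80% before the replacement values are drawn.  The
+# number of replaced pixels is returned.
+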
+int procedure skyreplace (im, xc, yc, r, s)
+
+pointer im # IMIO pointer
+real xc, yc # Object center
+real r # Object aperture radius
+real s # Sky aperture radius
+
+real avg, sigma, urand(), mode, find_mode()
+long seed
+int xlen, ylen, nx, nx1, nx2, ny, ny1, ny2, ntotal, nobj, nallsky, nsky[4]
+int i, j, x1, x2, x3, x4, y1, y2, y3, y4, y
+pointer sp, allsky, sky[4], ptr1, ptr2
+pointer datain, dataout, imgs2r(), imps2r()
+
+begin
+ xlen = IM_LEN(im,1)
+ ylen = IM_LEN(im,2)
+ x1 = max (1, int (xc - s))
+ x4 = min (xlen, int (xc + s + 0.5))
+ y1 = max (1, int (yc - s))
+ y4 = min (ylen, int (yc + s + 0.5))
+ nx = x4 - x1 + 1
+ ny = y4 - y1 + 1
+ ntotal = nx * ny
+
+ x2 = max (1, int (xc - r))
+ x3 = min (xlen, int (xc + r + 0.5))
+ y2 = max (1, int (yc - r))
+ y3 = min (ylen, int (yc + r + 0.5))
+ nx1 = (x3 - x2 + 1)
+ ny1 = (y3 - y2 + 1)
+ nobj = nx1 * ny1
+ nallsky = ntotal - nobj
+
+ if ((nallsky < NSKY) || (nobj < 1))
+ return (0)
+
+ call smark (sp)
+ call salloc (allsky, nallsky, TY_REAL)
+ datain = imgs2r (im, x1, x4, y1, y4)
+ dataout = imps2r (im, x2, x3, y2, y3)
+ ptr2 = allsky
+
+ # First quadrant
+ x2 = max (1, int (xc - r))
+ x3 = min (xlen, int (xc + 0.5))
+ y2 = max (1, int (yc - r))
+ y3 = min (ylen, int (yc + 0.5))
+ nx1 = x3 - x1 + 1
+ nx2 = x3 - x2
+ ny1 = y2 - y1
+ ny2 = y3 - y2 + 1
+ nsky[1] = nx1 * ny1 + nx2 * ny2
+ sky[1] = ptr2
+
+ if (nsky[1] > 0) {
+ ptr1 = datain
+ for (y=y1; y<y2; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx1)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx1
+ }
+ for (; y<=y3; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx2)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx2
+ }
+ }
+
+ # Second quadrant
+ x2 = max (1, int (xc + 1.5))
+ x3 = min (xlen, int (xc + r + 0.5))
+ y2 = max (1, int (yc - r))
+ y3 = min (ylen, int (yc + 0.5))
+ nx1 = x4 - x2 + 1
+ nx2 = x4 - x3
+ ny1 = y2 - y1
+ ny2 = y3 - y2 + 1
+ nsky[2] = nx1 * ny1 + nx2 * ny2
+ sky[2] = ptr2
+
+ if (nsky[2] > 0) {
+ ptr1 = datain + x2 - x1
+ for (y=y1; y<y2; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx1)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx1
+ }
+ ptr1 = ptr1 + x3 - x2 + 1
+ for (; y<=y3; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx2)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx2
+ }
+ }
+
+ # Third quadrant
+ x2 = max (1, int (xc - r))
+ x3 = min (xlen, int (xc + 0.5))
+ y2 = max (1, int (yc + 1.5))
+ y3 = min (ylen, int (yc + r + 0.5))
+ nx1 = x3 - x2
+ nx2 = x3 - x1 + 1
+ ny1 = y3 - y2 + 1
+ ny2 = y4 - y3
+ nsky[3] = nx1 * ny1 + nx2 * ny2
+ sky[3] = ptr2
+
+ if (nsky[3] > 0) {
+ ptr1 = datain + (y2 - y1) * nx
+ for (y=y2; y<=y3; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx1)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx1
+ }
+ for (; y<=y4; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx2)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx2
+ }
+ }
+
+ # Fourth quadrant
+ x2 = max (1, int (xc + 1.5))
+ x3 = min (xlen, int (xc + r + 0.5))
+ y2 = max (1, int (yc + 1.5))
+ y3 = min (ylen, int (yc + r + 0.5))
+ nx1 = x4 - x3
+ nx2 = x4 - x2 + 1
+ ny1 = y3 - y2 + 1
+ ny2 = y4 - y3
+ nsky[4] = ny1 * nx1 + ny2 * nx2
+ sky[4] = ptr2
+
+ if (nsky[4] > 0) {
+ ptr1 = datain + (y2 - y1) * nx + x3 - x1 + 1
+ for (y=y2; y<=y3; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx1)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx1
+ }
+ ptr1 = ptr1 - (x3 - x2 + 1)
+ for (; y<=y4; y=y+1) {
+ call amovr (Memr[ptr1], Memr[ptr2], nx2)
+ ptr1 = ptr1 + nx
+ ptr2 = ptr2 + nx2
+ }
+ }
+
+ # This part is for doing a gradient correction. It is not implemented.
+# if ((nsky[1]>NSKY)&&(nsky[2]>NSKY)&&(nsky[3]>NSKY)&&(nsky[4]>NSKY)) {
+# call asrtr (Memr[sky[1]], Memr[sky[1]], nsky[1])
+# call asrtr (Memr[sky[2]], Memr[sky[2]], nsky[2])
+# call asrtr (Memr[sky[3]], Memr[sky[3]], nsky[3])
+# call asrtr (Memr[sky[4]], Memr[sky[4]], nsky[4])
+
+ # Add a gradient correction here.
+
+# seed = dataout
+# do i = dataout, dataout+nobj-1 {
+# j = 4 * urand (seed) + 1
+# k = 0.95 * nsky[j] * urand (seed)
+# Memr[i] = Memr[sky[j]+k]
+# }
+# } else {
+ call asrtr (Memr[allsky], Memr[allsky], nallsky)
+
+ # Find the mean and sigma excluding the outer 20%
+ x1 = 0.1 * nallsky
+ x2 = 0.9 * nallsky
+ call aavgr (Memr[allsky+x1-1], x2-x1+1, avg, sigma)
+ mode = find_mode (Memr[allsky], nallsky, nallsky / 20)
+ call printf ("Mean = %g, Median = %g, Mode = %g\n")
+ call pargr (avg)
+ call pargr (Memr[allsky+nallsky/2-1])
+ call pargr (mode)
+ for (x1=0; (x1<nallsky)&&(Memr[allsky+x1]<avg-3*sigma); x1=x1+1)
+ ;
+ for (x2=nallsky-1; (x2>0)&&(Memr[allsky+x2]>avg+3*sigma); x2=x2-1)
+ ;
+ nx = x2 - x1 - 1
+
+ seed = dataout
+ do i = dataout, dataout+nobj-1 {
+ j = nx * urand (seed) + x1
+ Memr[i] = Memr[allsky+j]
+ }
+# }
+
+ call sfree (sp)
+ return (nobj)
+end
+
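+# FIND_MODE -- Estimate the mode of sorted data.  A straight line is fitted
+# to each successive bin of n points and the bin with the minimum slope,
+# i.e. the densest part of the sorted distribution, is found.  The data
+# value at the center of that bin is returned as the mode estimate.
+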
+real procedure find_mode (data, npts, n)
+
+real data[npts] # Data
+int npts # Number of data points
+int n # Bin size
+
+int x, xlast, xmin
+real sumx, sumy, sumxx, sumxy, a, amin
+pointer sp, slope
+
+begin
+ call smark (sp)
+ call salloc (slope, npts - n, TY_REAL)
+
+ sumx = 0.
+ sumy = 0.
+ sumxx = 0.
+ sumxy = 0.
+
+ x = 0
+ xlast = 0
+ while (x < n) {
+ x = x + 1
+ sumx = sumx + x
+ sumy = sumy + data[x]
+ sumxx = sumxx + x ** 2
+ sumxy = sumxy + x * data[x]
+ }
+ amin = (n * sumxy - sumx * sumy) / (n * sumxx - sumx ** 2)
+ xmin = (x + xlast) / 2
+ Memr[slope] = amin
+
+ while (x < npts - n) {
+ x = x + 1
+ xlast = xlast + 1
+ sumx = sumx + x - xlast
+ sumy = sumy + data[x] - data[xlast]
+ sumxx = sumxx + x * x - xlast * xlast
+ sumxy = sumxy + x * data[x] - xlast * data[xlast]
+
+ a = (n * sumxy - sumx * sumy) / (n * sumxx - sumx ** 2)
+ if (a < amin) {
+ amin = a
+ xmin = (x + xlast) / 2
+ }
+ Memr[slope+xlast] = a
+ }
+
+ call gplotv (Memr[slope+11], npts-2*n-22, 1., real (npts-2*n-22), "")
+ call sfree (sp)
+ return (data[xmin])
+end
diff --git a/noao/imred/ccdred/src/timelog.x b/noao/imred/ccdred/src/timelog.x
new file mode 100644
index 00000000..7a8d969f
--- /dev/null
+++ b/noao/imred/ccdred/src/timelog.x
@@ -0,0 +1,29 @@
+include <time.h>
+
+
+# TIMELOG -- Prepend a time stamp to the given string.
+#
+# For the purpose of history logging, prepend a short time stamp to the
+# given string. Note that the input string is modified.
+
+procedure timelog (str, max_char)
+
+char str[max_char] # String to be time stamped
+int max_char # Maximum characters in string
+
+pointer sp, time, temp
+long clktime()
+
+begin
+ call smark (sp)
+ call salloc (time, SZ_DATE, TY_CHAR)
+ call salloc (temp, max_char, TY_CHAR)
+
+ call cnvdate (clktime(0), Memc[time], SZ_DATE)
+ call sprintf (Memc[temp], max_char, "%s %s")
+ call pargstr (Memc[time])
+ call pargstr (str)
+ call strcpy (Memc[temp], str, max_char)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/ccdred/x_ccdred.x b/noao/imred/ccdred/x_ccdred.x
new file mode 100644
index 00000000..f651b668
--- /dev/null
+++ b/noao/imred/ccdred/x_ccdred.x
@@ -0,0 +1,15 @@
+task badpiximage = t_badpiximage,
+ ccdgroups = t_ccdgroups,
+ ccdhedit = t_ccdhedit,
+ ccdinstrument = t_ccdinst,
+ ccdlist = t_ccdlist,
+ ccdmask = t_ccdmask,
+ ccdproc = t_ccdproc,
+ qccdproc = t_ccdproc,
+ combine = t_combine,
+ mkfringecor = t_mkfringecor,
+ mkillumcor = t_mkillumcor,
+ mkillumflat = t_mkillumflat,
+ mkimage = t_mkimage,
+ mkskycor = t_mkskycor,
+ mkskyflat = t_mkskyflat
diff --git a/noao/imred/ccdred/zerocombine.cl b/noao/imred/ccdred/zerocombine.cl
new file mode 100644
index 00000000..6fb9613b
--- /dev/null
+++ b/noao/imred/ccdred/zerocombine.cl
@@ -0,0 +1,48 @@
+# ZEROCOMBINE -- Process and combine zero level CCD images.
+
+procedure zerocombine (input)
+
+string input {prompt="List of zero level images to combine"}
+file output="Zero" {prompt="Output zero level name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="minmax" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="zero" {prompt="CCD image type to combine"}
+bool process=no {prompt="Process images before combining?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="none" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=0 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=0. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+ ccdproc (ims, output="", ccdtype=ccdtype, noproc=no)
+
+ # Combine the zero level images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=no, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/crutil/crutil.cl b/noao/imred/crutil/crutil.cl
new file mode 100644
index 00000000..89e7ff3e
--- /dev/null
+++ b/noao/imred/crutil/crutil.cl
@@ -0,0 +1,18 @@
+#{ CRUTIL -- Cosmic Ray Utility Package
+
+package crutil
+
+reset crusrc = "crutil$src/"
+
+task crcombine = crusrc$crcombine.cl
+task crnebula = crusrc$crnebula.cl
+task crfix = crusrc$crfix.cl
+task credit = crusrc$credit.cl
+
+task cosmicrays,
+ craverage,
+ crgrow,
+ crmedian = crusrc$x_crutil.e
+
+
+clbye()
diff --git a/noao/imred/crutil/crutil.hd b/noao/imred/crutil/crutil.hd
new file mode 100644
index 00000000..0b6ee225
--- /dev/null
+++ b/noao/imred/crutil/crutil.hd
@@ -0,0 +1,17 @@
+# Help directory for the CRUTIL package.
+
+$doc = "./doc/"
+$crusrc = "./src/"
+
+revisions sys=crusrc$Revisions
+
+overview hlp=doc$overview.hlp
+
+cosmicrays hlp=doc$cosmicrays.hlp, src=crusrc$t_cosmicrays.x
+craverage hlp=doc$craverage.hlp, src=crusrc$t_craverage.x
+crcombine hlp=doc$crcombine.hlp, src=crusrc$crcombine.cl
+credit hlp=doc$credit.hlp, src=crusrc$credit.cl
+crfix hlp=doc$crfix.hlp, src=crusrc$crfix.cl
+crgrow hlp=doc$crgrow.hlp, src=crusrc$t_crgrow.x
+crmedian hlp=doc$crmedian.hlp, src=crusrc$t_crmedian.x
+crnebula hlp=doc$crnebula.hlp, src=crusrc$crnebula.cl
diff --git a/noao/imred/crutil/crutil.men b/noao/imred/crutil/crutil.men
new file mode 100644
index 00000000..f6117e5f
--- /dev/null
+++ b/noao/imred/crutil/crutil.men
@@ -0,0 +1,10 @@
+ cosmicrays - Remove cosmic rays using flux ratio algorithm
+ craverage - Detect CRs against average and avoid objects
+ crcombine - Combine multiple exposures to eliminate cosmic rays
+ credit - Interactively edit cosmic rays using an image display
+ crfix - Fix cosmic rays in images using cosmic ray masks
+ crgrow - Grow cosmic rays in cosmic ray masks
+ crmedian - Detect and replace cosmic rays with median filter
+ crnebula - Detect and replace cosmic rays in nebular data
+
+ overview - Overview of the package
diff --git a/noao/imred/crutil/crutil.par b/noao/imred/crutil/crutil.par
new file mode 100644
index 00000000..6782c988
--- /dev/null
+++ b/noao/imred/crutil/crutil.par
@@ -0,0 +1,3 @@
+# CRUTIL package parameter file
+
+version,s,h,"V1.5: August 22, 2001"
diff --git a/noao/imred/crutil/doc/cosmicrays.hlp b/noao/imred/crutil/doc/cosmicrays.hlp
new file mode 100644
index 00000000..55a284da
--- /dev/null
+++ b/noao/imred/crutil/doc/cosmicrays.hlp
@@ -0,0 +1,306 @@
+.help cosmicrays Apr98 noao.imred.crutil
+.ih
+NAME
+cosmicrays -- detect and replace cosmic rays
+.ih
+USAGE
+cosmicrays input output
+.ih
+PARAMETERS
+.ls input
+List of input images in which to detect cosmic rays.
+.le
+.ls output
+List of output images in which the detected cosmic rays will be replaced
+by an average of neighboring pixels. If the output image name differs
+from the input image name then a copy of the input image is made with
+the detected cosmic rays replaced. If no output images are specified
+then the input images are modified in place. In place modification of
+an input image also occurs when the output image name is the same as
+the input image name.
+.le
+.ls crmasks = ""
+List of cosmic ray mask files to be created; one for each input image. If no
+file names are given then no cosmic ray mask is created. If an existing
+mask is specified the newly detected cosmic rays will be added.
+.le
+
+.ls threshold = 25.
+Detection threshold above the mean of the surrounding pixels for cosmic
+rays. The threshold will depend on the noise characteristics of the
+image and how weak the cosmic rays may be for detection. A typical value
+is 5 or more times the sigma of the background.
+.le
+.ls fluxratio = 2.
+The ratio (as a percent) of the mean neighboring pixel flux to the candidate
+cosmic ray pixel for rejection. The value depends on the seeing and the
+characteristics of the cosmic rays. Typical values are in the range
+2 to 10 percent. This value may be reset interactively from a plot
+or defined by identifying selected objects as stars or cosmic rays.
+.le
+.ls npasses = 5
+Number of cosmic ray detection passes. Since only the locally strongest
+pixel is considered a cosmic ray, multiple detection passes are needed to
+detect and replace cosmic ray events with multiple neighboring pixels.
+.le
+.ls window = 5
+Size of cosmic ray detection window. A square window of either 5 by 5 or
+7 by 7 is used to detect cosmic rays. The smaller window allows detection
+in the presence of greater background gradients but is less sensitive at
+discriminating multiple event cosmic rays from stars. It is also marginally
+faster.
+.le
+.ls interactive = yes
+Examine parameters interactively? A plot of the mean flux within the
+detection window (x100) vs the flux ratio (x100) is plotted and the user may
+set the flux ratio threshold, delete and undelete specific events, and
+examine specific events. This is useful for new data in which one is
+uncertain of an appropriate flux ratio threshold. Once determined the
+task need not be used interactively.
+.le
+.ls train = no
+Define the flux ratio threshold by using a set of objects identified
+as stars (or other astronomical objects) or cosmic rays?
+.le
+.ls objects = ""
+Cursor list of coordinates of training objects. If null (the null string "")
+then the image display cursor will be read. The user is responsible for first
+displaying the image. Otherwise a file containing cursor coordinates
+may be given. The format of the cursor file is "x y wcs key" where
+x and y are the pixel coordinates, wcs is an arbitrary number such as 1,
+and key may be 's' for star or 'c' for cosmic ray.
+.le
+.ls savefile = ""
+File to save (by appending) the training object coordinates. This is of
+use when the objects are identified using the image display cursor. The
+saved file can then be input as the object cursor list for repeating the
+execution.
+.le
+.ls plotfile
+If a plot file is specified then the graph of the flux ratio (x100) vs
+the mean flux (x100) is recorded as metacode. This may be spooled or examined
+later.
+.le
+.ls graphics = "stdgraph"
+Interactive graphic output device for interactive examination of the
+detection parameters.
+.le
+.ls cursor = ""
+Interactive graphics cursor input. If null the graphics display cursor
+is used, otherwise a file containing cursor input may be specified.
+.le
+.ls answer
+This parameter is used for interactive queries when processing a list of
+images. The responses may be "no", "yes", "NO", or "YES". The upper case
+responses permanently enable or disable the interactive review while
+the lower case responses allow selective examination of certain input
+images. \fIThis parameter should not be specified on the command line.
+If it is then the value will be ignored and the task will act as if
+the answer "yes" is given for each image; i.e. it will enter the interactive
+phase without prompting.\fR
+.le
+.ih
+IMAGE CURSOR COMMANDS
+
+.nf
+? Help
+c Identify the object as a cosmic ray
+s Identify the object as a star
+g Switch to the graphics plot
+q Quit and continue with the cleaning
+.fi
+
+GRAPHICS CURSOR COMMANDS
+
+.nf
+? Help
+a Toggle between showing all candidates and only the training points
+d Mark candidate for replacement (applies to '+' points)
+e Mark candidates in a region for replacement (applies to '+' points)
+q Quit and return to image cursor or replace the selected pixels
+r Redraw the graph
+s Make a surface plot for the candidate nearest the cursor
+t Set the flux ratio threshold at the y cursor position
+u Mark candidate to not be replaced (applies to 'x' points)
+v Mark candidates in a region to not be replaced (applies to 'x' points)
+w Adjust the graph window (see \fBgtools\fR)
+<space> Print the pixel coordinates
+.fi
+
+There are no colon commands except those for the windowing options (type
+:\help or see \fBgtools\fR).
+.ih
+DESCRIPTION
+Cosmic ray events in each input image are detected and replaced by the
+average of the four neighbors. The replacement may be performed
+directly on the input image if no output image is specified or if the
+output image name is the same as the input image name. If a new image
+is created it is a copy of the input image except for the replaced
+pixels.
+Optional output includes
+a plot file showing the parameters of the
+detected cosmic ray candidates and the flux ratio threshold used, a
+cosmic ray mask identifying the cosmic rays found, and
+a file of training objects marked with the image display cursor. The
+cosmic ray mask may be used for display purposes, combined with other
+masks, and with \fBcrfix\fR.
+
+This task may be applied to an image previously processed to detect
+additional cosmic rays.
+
+The cosmic ray detection algorithm consists of the following steps.
+First a pixel must be the brightest pixel within the specified
+detection window (either 5x5 or 7x7). The mean flux in the surrounding
+pixels with the second brightest pixel excluded (which may also be a
+cosmic ray event) is computed and the candidate pixel must exceed this
+mean by the amount specified by the parameter \fIthreshold\fR. A plane
+is fit to the border pixels of the window and the fitted background is
+subtracted. The mean flux (now background subtracted) and the ratio of
+this mean to the cosmic ray candidate (the brightest pixel) are
+computed. The mean flux (x100) and the ratio (x100) are recorded for
+interactive examination if desired.
+
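+As a purely hypothetical numerical illustration (the values below are
+invented for this example, not taken from real data), suppose
+\fIthreshold\fR = 25 and \fIfluxratio\fR = 2, and a candidate pixel of
+1000 with a surrounding mean (second brightest pixel excluded) of 12:
+
+.nf
+    excess over the mean:   1000 - 12 = 988   >  threshold (25)
+    flux ratio:        100 * 12 / 1000 = 1.2  <  fluxratio (2)
+.fi
+
+so the pixel is treated as a cosmic ray and replaced.  A stellar peak of
+similar brightness typically has a much larger surrounding mean, say 200,
+giving a ratio of 20 which is above the threshold, so it is kept.
+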
+Once the list of cosmic ray candidates has been created and a threshold for
+the flux ratio established (by the parameter \fIfluxratio\fR, by the
+"training" method, or by using the graphics cursor in the interactive plot)
+the pixels with ratios below the threshold are replaced in the output by
+the average of the four neighboring pixels (with the second strongest pixel
+in the detection window excluded if it is one of these pixels). Additional
+pixels may then be detected and replaced in further passes as specified by
+the parameter \fInpasses\fR. Note that only pixels in the vicinity of
+replaced pixels need be considered in further passes.
+
+The division between the peaks of real objects and cosmic rays is made
+based on the flux ratio between the mean flux (excluding the center
+pixel and the second strongest pixel) and the candidate pixel. This
+threshold depends on the point spread function and the distribution of
+multiple cosmic ray events and any additional neighboring light caused
+by the events. This threshold is not strongly coupled to small changes
+in the data so that once it is set for a new type of image data it may
+be used for similar images. To set it initially one may examine the
+scatter plot of the flux ratio as a function of the mean flux. This
+may be done interactively or from the optional plot file produced.
+
+After the initial list of cosmic ray candidates has been created, and before
+the cosmic rays are finally replaced, there are two optional steps to allow
+examining the candidates and setting the flux ratio threshold dividing
+cosmic rays from real objects.  The first optional step is to define the flux
+ratio boundary by reference to user specified classifications; that is
+"training". To do this step the \fItrain\fR parameter must be set to yes.
+The user classified objects are specified by a cursor input list. This
+list can be an actual file or the image display cursor as defined by the
+\fIobjects\fR parameter. The \fIsavefile\fR parameter is also used during
+the training to record the objects specified.  The parameter specifies a
+file to which the selected objects are appended.  This is useful when the
+objects are defined with the interactive image display cursor but makes
+little sense when using an input list.
+
+If the \fIobjects\fR parameter is specified as a null string then
+the image display cursor will be repeatedly read until a 'q' is
+entered. The user first displays the image and then when the task
+reads the display cursor the cursor shape will change. The user
+points at objects and types 's' for a star (or other astronomical
+object) and 'c' for a cosmic ray. Note that this input is used
+to search for the matching object in the cosmic ray candidate list
+and so it is possible the selected object is not in the list though
+it is unlikely. The selection will be quietly ignored in that case.
+To exit the interactive selection of training objects type 'q'.
+
+If 'g' is typed a graph of all the candidates is drawn showing
+"flux" vs. "flux ratio" (see below for more). Training objects will
+be shown with a box and the currently set flux ratio threshold will
+also be shown. Exiting the plot will return to entering more training
+objects. The plot will remain and additional objects will immediately
+be shown with a new box. Thus, if one wants to see the training
+objects identified in the plot as one selects them from the image
+display first type a 'g' to draw the initial plot. Also by switching
+to the plot with 'g' allows you to draw surface plots (with 's') or
+get the pixel coordinates of a candidate (the space key) to be
+found in the display using the coordinate readout of the display.
+Note that the display interaction is simpler than might be desired
+because this task does not directly connect to the display.
+
+The most likely use for training is with the interactive image display.
+However one may prepare an input list by other means, one example
+is with \fBrimcursor\fR, and then specify the file name. The savefile
+may also be used a cursor input to repeat the cosmic ray operation
+(but be careful not to have the cursor input and save file be the
+same file!).
+
+The flux ratio threshold is determined from the training objects by
+finding the point with the minimum number of misclassifications
+(stars as cosmic rays or cosmic rays as stars). The threshold is
+set at the lowest value so that it will always go through one of
+the cosmic ray objects. There should be at least one of each type
+of object defined for this to work. The following option of
+examining the cosmic ray candidates and parameters may still be
+used to modify the derived flux ratio threshold. One last point
+about the training objects is that even if some of the points
+lie on the wrong side of the threshold they will remain classified
+as cosmic ray or non-cosmic ray. In other words, any object
+classified by the user will remain in that classification regardless
+of the final flux ratio threshold.
+
+After the training step the user will be queried to examine the candidates
+in the flux vs flux ratio plane if the \fIinteractive\fR flag is set.
+Responses may be made for specific images or for all images by using
+lower or upper case answers respectively. When the parameters are
+examined interactively the user may change the flux ratio threshold
+('t' key). Changes made are stored in the parameter file and, thus,
+learned for further images. Pixels to be deleted are marked by crosses
+and pixels which are peaks of objects are marked by pluses. The user
+may explicitly delete or undelete any point if desired but this is only
+for special cases near the threshold. In the future keys for
+interactive display of the specific detections will be added.
+Currently a surface plot of any candidate may be displayed graphically
+in four 90 degree rotated views using the 's' key. Note that the
+initial graph does not show all the points, some of which are clearly
+cosmic rays because they have negative mean flux or flux ratio. To
+view all data one must rewindow the graph with the 'w' key or ":/"
+commands (see \fBgtools\fR).
+.ih
+EXAMPLES
+1. To replace cosmic rays in a set of images ccd* without training:
+
+.nf
+ cl> cosmicrays ccd* new//ccd*
+ ccd001: Examine parameters interactively? (yes):
+ [A scatter plot graph is made. One can adjust the threshold.]
+ [Looking at a few points using the 's' key can be instructive.]
+ [When done type 'q'.]
+ ccd002: Examine parameters interactively? (yes): NO
+ [No further interactive examination is done.]
+.fi
+
+After cleaning one typically displays the images and possibly blinks them.
+A difference image or mask image may also be created.
+
+2. To use the interactive training method for setting the flux ratio threshold:
+
+.nf
+ # First display the image.
+ cl> display ccd001 1
+ z1 = 123.45 z2= 543.21
+ cl> cosmicrays ccd001 ccd001cr train+
+ [After the cosmic ray candidates are found the image display
+ [cursor will be activated. Mark a cosmic ray with 'c' and
+ [a star with 's'. Type 'g' to get a plot showing the two
+ [points with boxes. Type 'q' to go back to the image display.
+ [As each new object is marked a box will appear in the plot and
+ [the threshold may change. To find the location of an object
+ [seen in the plot use 'g' to go to the graph, space key to find
+ [the pixel coordinates, 'q' to go back to the image display,
+ [and the image display coordinate box to find the object.
+ [When done with the training type 'q'.
+ ccd001: Examine parameters interactively? (yes): no
+.fi
+
+3. To create a cosmic ray mask image, a mask file name must be specified:
+
+.nf
+ cl> cosmicrays ccd001 ccd001 crmask=crccd001
+.fi
+.ih
+SEE ALSO
+crmedian, crnebula, crgrow, crfix, credit, gtools, imedit, rimcursor
+.endhelp
diff --git a/noao/imred/crutil/doc/craverage.hlp b/noao/imred/crutil/doc/craverage.hlp
new file mode 100644
index 00000000..bd4ef7c9
--- /dev/null
+++ b/noao/imred/crutil/doc/craverage.hlp
@@ -0,0 +1,232 @@
+.help craverage Apr98 noao.imred.crutil
+.ih
+NAME
+craverage -- detect CRs and objects using average filter
+.ih
+SYNOPSIS
+\fBCraverage\fR detects cosmic rays and objects using a moving block
+average filter with the central pixel plus some number of additional high
+pixels excluded and a median of an annulus around the block average box.
+It avoids identification of the cores of objects as cosmic rays by
+excluding pixels within the detected objects as cosmic ray candidates.
+.ih
+USAGE
+.nf
+craverage input output
+.fi
+.ih
+PARAMETERS
+.ls input
+List of input images in which to detect cosmic rays and objects.
+.le
+.ls output
+List of output images in which cosmic rays are replaced by the block average
+value excluding the cosmic ray. If no output image name is given then
+no output image will be created.
+.le
+.ls crmask = ""
+List of input and output cosmic ray and object masks. If the mask exists
+then the mask values are used to exclude data pixels from the calculations
+and zero mask values are candidates for cosmic rays or objects.
+Detected cosmic rays and objects are identified in the mask with values
+given by the \fIcrval\fR and \fIobjval\fR parameters. If no output cosmic
+ray mask is given then no mask will be created.
+.le
+.ls average = ""
+List of output block average filtered images. If no image name is given
+then no image will be created.
+.le
+.ls sigma = ""
+List of output sigma images. If no image name is given then no image
+will be created.
+.le
+
+.ls navg = 5 (minimum of 3)
+Square block average filter size given as the number of pixels along an
+edge. The value will be rounded up to an odd value to be symmetrical
+around the center pixel excluded from the average.
+.le
+.ls nrej = 0 (minimum of 0)
+Number of additional highest pixels to exclude, in addition to the
+central pixel, in the block average. The value should be small but it
+is needed to deal with cosmic rays that are bigger than a single pixel.
+.le
+.ls nbkg = 5 (minimum of 1)
+Background annulus width around the box average filter in pixels. The
+median of the pixels in this annulus is used to estimate the background.
+.le
+.ls nsig = 25 (minimum of 10)
+Square box size for empirical sigma estimates given as the number of
+pixels along an edge. The sigma is estimated using percentile points
+of the pixels in the box.  The box should contain of order 100 pixels
+or more.
+.le
+.ls var0 = 0., var1 = 0., var2 = 0.
+Variance coefficients for the variance model. The variance model is
+
+.nf
+ variance = var0 + var1 * data + var2 * data^2
+.fi
+
+where data is the maximum of zero and the average filtered pixel value and
+the variance is in data numbers. All the coefficients must be positive or
+zero. If they are all zero then empirical data sigmas are estimated by a
+percentile method in boxes of size given by \fInsig\fR.
+.le
+
+.ls crval = 1
+Mask value for detected cosmic rays. It is legal for the value to be
+zero to not mark the cosmic rays in the output mask.
+.le
+.ls lcrsig = 10., hcrsig = 5.
+Low and high sigma factors for detecting cosmic rays. These factors
+multiply the computed or estimated sigma at each pixel and these threshold
+values are compared to the difference between the candidate pixel and the
+block average filter value (average of box around the pixel). This only
+applies to pixels where the block average filter value is within a
+specified threshold of the background estimate; i.e. the average value is
+not considered as part of an object.
+.le
+.ls crgrow = 0.
+Cosmic ray growing radius. Pixels detected and marked in the output cosmic
+ray mask by the \fIcrval\fR value are increased in size in the mask (but
+not replaced in the output image) by also flagging all zero valued mask
+pixels within this specified radius with the cosmic ray mask value. This
+is done after the detection phase is complete. The separation between
+pixels is the distance between pixel centers computed as a real value.
+Note a value of at least one is required to affect other mask pixels.
+.le
+
+.ls objval = 0
+Mask value for detected objects. It is legal for the value to be
+zero to not mark the objects in the output mask.
+.le
+.ls lobjsig = 10., hobjsig = 5.
+Low and high sigma factors for detecting objects. These factors multiply
+the computed or estimated sigma at each pixel and these threshold values
+are compared to the difference between the block average filter value and
+the background annulus median. If the values are made very large then
+object detection can be eliminated and cosmic rays will be detected
+everywhere.
+.le
+.ls objgrow = 0.
+Object detection growing radius. Pixels detected and marked in the output
+mask by the \fIobjval\fR value are increased in size in the mask by also
+flagging all zero valued mask pixels within this specified radius with the
+cosmic ray mask value. This is done after the detection phase is complete
+and so object grown pixels are not used in excluding cosmic ray
+candidates. The separation between pixels is the distance between pixel
+centers computed as a real value. Note a value of at least one is
+required to affect other mask pixels.
+.le
+.ih
+DESCRIPTION
+\fBCraverage\fR detects cosmic rays and objects using a moving block
+average filter with the central pixel and a specified number of additional
+highest pixels excluded and a median of an annulus around the block average
+box. It avoids identification of the cores of objects as cosmic rays by
+excluding pixels within the detected objects as cosmic ray candidates.
+
+The block average filter computes the average of pixels in a box with the
+central or target pixel excluded. In addition the \fInrej\fR parameter can
+be used to exclude that number of highest remaining pixels as possible
+contamination from cosmic rays which are larger than one pixel or possibly
+a very nearby additional cosmic ray. The \fInrej\fR value should be kept
+small relative to the total number of pixels in the average so that the
+average will still be elevated over the median in real underlying objects.
+The resulting average is used as the prediction for the value of the target
+pixel. The median of the pixels in a square background annulus around the
+block average box provides the prediction for the background at the target
+pixel.
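+
+The following Python sketch is purely illustrative and not part of IRAF; it
+assumes numpy, an interior target pixel away from the image edges, and
+hypothetical function and array names. It shows how the block average and
+the background annulus median described above might be computed for a
+single target pixel:
+
+.nf
+    import numpy as np
+
+    def avg_and_bkg(img, y, x, navg=5, nbkg=5, nrej=0):
+        # Block average of the navg x navg box with the target pixel
+        # excluded and the nrej highest remaining pixels rejected.
+        h = navg // 2
+        box = img[y-h:y+h+1, x-h:x+h+1].astype(float).ravel()
+        box = np.delete(box, box.size // 2)    # drop the target pixel
+        box.sort()
+        if nrej > 0:
+            box = box[:-nrej]                  # drop the nrej highest values
+        avg = box.mean()
+
+        # Median of the square annulus of width nbkg around the box.
+        k = h + nbkg
+        big = img[y-k:y+k+1, x-k:x+k+1].astype(float)
+        inner = np.zeros(big.shape, dtype=bool)
+        inner[nbkg:-nbkg, nbkg:-nbkg] = True   # the central navg x navg box
+        bkg = np.median(big[~inner])
+        return avg, bkg
+.fi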
+
+The target pixel is considered part of an object if the difference between
+the average value and the median background exceeds a specified threshold.
+If the pixel is NOT considered to be part of an object, it is identified
+as a cosmic ray when the difference between the pixel value and the average
+value exceeds a different specified threshold.
+
+The thresholds are defined in terms of sigma factors, which may be
+different for positive and negative deviations and for object and
+cosmic ray identification. The sigma factors multiply an estimate
+for the statistical sigma of the target pixel. The estimate is
+either based on a noise model or sigma of pixels in a box near the
+target pixel.
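+
+Continuing the sketch above, the classification of a single target pixel
+might look as follows (again illustrative Python only, not IRAF code; the
+sigma value comes from the noise model or empirical estimate described
+below, and the default sigma factors mirror the task parameters):
+
+.nf
+    def classify(pixel, avg, bkg, sigma,
+                 lobjsig=10., hobjsig=5., lcrsig=10., hcrsig=5.):
+        objdiff = avg - bkg             # block average vs. background median
+        if objdiff > hobjsig * sigma or objdiff < -lobjsig * sigma:
+            return "object"             # excluded from cosmic ray detection
+        crdiff = pixel - avg            # target pixel vs. block average
+        if crdiff > hcrsig * sigma or crdiff < -lcrsig * sigma:
+            return "cosmic ray"
+        return "good"
+.fi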
+
+The \fIcrmask\fR parameter specifies a pixel mask for the image. If the
+mask exists then non-zero mask values will be used to exclude pixels from
+the average, background median, and empirical sigma estimates. Also any
+pixels with non-zero mask values will not be altered either in the output
+image or in the final mask. If the mask does not exist then it behaves as
+if all mask values are zero. If all pixels in the average box or median
+annulus are previously flagged then the estimates will be undefined and
+nothing will be done to the output image or mask. Because the task can
+use an input mask to mark pixels not to be considered it can be used
+in an iterative fashion.
+
+The noise model is given by the formula
+
+.nf
+ variance = var0 + var1 * data + var2 * data^2
+.fi
+
+where data is the maximum of zero and the average estimate for the target
+pixel. The coefficients are all given in terms of the data numbers. This
+model can be related to common detector parameters. For CCDs var0 is the
+readout noise expressed as a variance in data numbers and var1 is the
+inverse gain (DN/electrons). The second order coefficient has the
+interpretation of flat field introduced variance.
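+
+As an illustration only, using the CCD coefficients derived in the EXAMPLES
+section below, an average estimate of 900 DN would give
+
+.nf
+    variance = 2.78 + 0.34 * 900 = 308.78 DN^2
+    sigma    = sqrt(308.78)      = approximately 17.6 DN
+.fi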
+
+If all the coefficients are zero then an empirical sigma is estimated as
+follows. The input image is divided into square blocks of size
+\fInsig\fR. The (unmasked) pixel values in a block are sorted and the
+pixel values nearest the 15.9 and 84.1 percentiles are selected. These are
+the one sigma points in a Gaussian distribution. The sigma estimate is the
+difference of these two values divided by two. This algorithm is used to
+avoid contamination of the sigma estimate by the bad pixel values. The
+block size must be at least 10 pixels in each dimension to provide
+sufficient pixels for a good estimate of the percentile points. The sigma
+estimate for a pixel is the sigma from the nearest block. A moving box is
+not used for reasons of efficiency.
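+
+A minimal Python sketch of this percentile estimate for one block (not IRAF
+code; numpy assumed, and "block" is a hypothetical nsig x nsig array of
+unmasked pixel values):
+
+.nf
+    import numpy as np
+
+    def empirical_sigma(block):
+        v = np.sort(np.asarray(block, dtype=float).ravel())
+        lo = v[int(round(0.159 * (v.size - 1)))]    # ~15.9 percentile value
+        hi = v[int(round(0.841 * (v.size - 1)))]    # ~84.1 percentile value
+        return (hi - lo) / 2.                       # one sigma estimate
+.fi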
+
+If an output image name is specified then the output image is produced as a
+copy of the input image but with the identified cosmic ray pixels replaced
+by the average predicted value. Other optional output images are
+the average filtered values and the sigma values.
+
+If a mask is specified the detected cosmic rays will be identified with
+values given by the \fIcrval\fR parameter and object pixels will be
+identified with values given by the \fIobjval\fR parameter. Note that one
+does not need to use an output image and the cosmic rays can be replaced by
+interpolation in the data using the tasks \fIcrfix\fR, \fIfixpix\fR, or
+\fIccdproc\fR.
+
+One final step may be applied to the output mask. The mask values
+identified with the \fIcrval\fR and \fIobjval\fR values may be grown
+by identifying pixel values within a specified radius with the same
+mask value. Note that this step is done at the end and so any pixels
+in a preexisting input mask with the same values will also be grown.
+Also the grown pixels will not affect the output cosmic ray replaced
+image. See \fIcrgrow\fR for a further discussion.
+.ih
+EXAMPLES
+This example illustrates using the \fBcraverage\fR task to
+create a mask with cosmic rays and objects identified and displayed.
+The image is a CCD image with a readout noise of 5 electrons
+and a gain of 3 electrons per data number. This implies variance
+model coefficients of
+
+.nf
+ var0 = (5/3)^2 = 2.78
+ var1 = 1/3 = 0.34
+.fi
+
+.nf
+ cl> display obj001 1 # Display in first frame
+ cl> craverage obj001 "" crmask=mask001 var0=2.78 var1=0.34\
+ >>> crval=1 objval=2
+    cl> display obj001 2 overlay=mask001 ocol="1=green,2=red"
+.fi
+.ih
+SEE ALSO
+cosmicrays, crnebula, median, crfix, crgrow, crmedian
+.endhelp
diff --git a/noao/imred/crutil/doc/crcombine.hlp b/noao/imred/crutil/doc/crcombine.hlp
new file mode 100644
index 00000000..3dddaebe
--- /dev/null
+++ b/noao/imred/crutil/doc/crcombine.hlp
@@ -0,0 +1,35 @@
+.help crcombine Apr98 noao.imred.crutil
+.ih
+NAME
+crcombine -- combine multiple exposures to eliminate cosmic rays
+.ih
+USAGE
+.nf
+crcombine input output
+.fi
+.ih
+PARAMETERS
+See parameters for \fBimcombine\fR.
+.ih
+DESCRIPTION
+This task is a version of \fBimcombine\fR. See the help for that task
+for a description of the parameters and algorithms.
+
+For the purpose of removing cosmic rays the most useful options
+are the "crreject" algorithm and/or combining with a median. Many other
+options may work as well. The best use of this task depends on the
+number of images available. If there are more than a few images the
+images should be combined with an "average" and using a rejection
+algorithm.
+.ih
+EXAMPLES
+1. To combine two images using the gain and read noise parameters in
+the image header:
+
+.nf
+ cl> crcombine obj012,obj013 abc gain=gain rdnoise=rdnoise
+.fi
+.ih
+SEE ALSO
+imcombine
+.endhelp
diff --git a/noao/imred/crutil/doc/credit.hlp b/noao/imred/crutil/doc/credit.hlp
new file mode 100644
index 00000000..f038e5c1
--- /dev/null
+++ b/noao/imred/crutil/doc/credit.hlp
@@ -0,0 +1,39 @@
+.help credit Apr98 noao.imred.crutil
+.ih
+NAME
+credit -- interactively edit cosmic rays using an image display
+.ih
+USAGE
+.nf
+credit input output
+.fi
+.ih
+PARAMETERS
+See parameters for \fBimedit\fR.
+.ih
+DESCRIPTION
+This task is a version of \fBimedit\fR. See the help for that task
+for a description of the parameters and algorithms.
+
+For the purpose of editing cosmic rays the most useful editing option
+is 'b', which replaces cosmic rays within a circular aperture using local
+sky values from a surrounding background annulus. This can be done
+interactively or using a list of positions
+along with the \fIdefault\fR parameter value.
+.ih
+EXAMPLES
+1. To replace cosmic rays interactively.
+
+.nf
+    cl> credit obj012 crobj012
+.fi
+
+2. To use a two column list of positions and remove the cosmic rays using
+the 'b' key algorithm.
+
+.nf
+ cl> credit obj012 crobj012 cursor=crlist.dat display-
+.fi
+.ih
+SEE ALSO
+imedit, epix
+.endhelp
diff --git a/noao/imred/crutil/doc/crfix.hlp b/noao/imred/crutil/doc/crfix.hlp
new file mode 100644
index 00000000..5794a001
--- /dev/null
+++ b/noao/imred/crutil/doc/crfix.hlp
@@ -0,0 +1,48 @@
+.help crfix Apr98 noao.imred.crutil
+.ih
+NAME
+crfix -- fix cosmic rays in images using cosmic ray masks
+.ih
+USAGE
+.nf
+crfix input output masks
+.fi
+.ih
+PARAMETERS
+.ls input
+Input two dimensional image to be "fixed" (modified) by linear interpolation.
+.le
+.ls output
+Output image. If the output image name exactly matches the input
+image name (including extensions) then the image will be modified in place.
+.le
+.ls crmask
+Cosmic ray mask identifying the cosmic rays to be fixed. The mask
+values are zero for good data and non-zero for cosmic rays.
+.le
+.ih
+DESCRIPTION
+The input and output images are specified by the \fIinput\fR and
+\fIoutput\fR parameters. If the input and output image names are
+identical (including extensions) then the image is modified in place. Cosmic
+rays, identified in a cosmic ray mask specified by the \fIcrmask\fR
+parameter, are replaced in the output image by linear interpolation along
+lines or columns using the nearest good pixels. The special mask name
+"BPM" may be used to select a mask name given in the input image header
+under the keyword "BPM".
+
+Cosmic ray pixels are "fixed" by replacing them with values obtained by
+linear interpolation from the nearest pixels not identified as bad.
+The interpolation direction is the shortest length between good pixels
+along columns or lines.
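+
+A minimal Python sketch of this replacement for a single masked pixel (not
+IRAF code; numpy-style 2-D arrays are assumed, the function name is
+hypothetical, and at least one direction is assumed to have good pixels on
+both sides of the masked pixel):
+
+.nf
+    def fix_pixel(img, mask, y, x):
+        # Nearest good pixels on either side of index i along one axis.
+        def span(values, bad, i):
+            lo = i - 1
+            while lo >= 0 and bad[lo]:
+                lo -= 1
+            hi = i + 1
+            while hi < len(values) and bad[hi]:
+                hi += 1
+            return None if lo < 0 or hi >= len(values) else (lo, hi)
+
+        line = span(img[y, :], mask[y, :] != 0, x)    # along the image line
+        col  = span(img[:, x], mask[:, x] != 0, y)    # along the image column
+        # Interpolate along the direction with the shorter good-pixel span.
+        if line is not None and (col is None or
+                                 line[1] - line[0] <= col[1] - col[0]):
+            lo, hi = line
+            f = (x - lo) / float(hi - lo)
+            return (1 - f) * img[y, lo] + f * img[y, hi]
+        lo, hi = col
+        f = (y - lo) / float(hi - lo)
+        return (1 - f) * img[lo, x] + f * img[hi, x]
+.fi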
+.ih
+EXAMPLES
+1. To replace cosmic rays in an image:
+
+.nf
+ cl> crfix obj012 crobj012 crmask012
+.fi
+.ih
+SEE ALSO
+fixpix, crmedian, crnebula, cosmicrays, credit, epix
+.endhelp
diff --git a/noao/imred/crutil/doc/crgrow.hlp b/noao/imred/crutil/doc/crgrow.hlp
new file mode 100644
index 00000000..f3a1ff5c
--- /dev/null
+++ b/noao/imred/crutil/doc/crgrow.hlp
@@ -0,0 +1,55 @@
+.help crgrow Apr98 noao.imred.crutil
+.ih
+NAME
+crgrow -- grow cosmic rays in cosmic ray masks
+.ih
+USAGE
+crgrow input output radius
+.ih
+PARAMETERS
+.ls input
+List of cosmic ray masks to be modified.
+.le
+.ls output
+List of output modified cosmic ray masks. The input and output lists must
+match. If the input and output cosmic ray masks are specified as the same
+then the input mask will be modified in place.
+.le
+.ls radius = 1.
+Replacement radius around cosmic rays.
+If a pixel is within this distance of a cosmic ray pixel
+it is identified by a value of 1 in the output cosmic ray mask. Distances are
+measured between pixel centers, which have integer coordinates.
+.le
+.ls inval = INDEF
+Mask value to be grown. A value of INDEF will grow all non-zero values.
+.le
+.ls outval = INDEF
+Mask value for grown pixels. A value of INDEF will use the value of the
+pixel being grown for the grown pixel value.
+.le
+.ih
+DESCRIPTION
+The cosmic ray pixels, identified by the "inval" parameter, in the input
+mask are located and all unmasked (zero valued) pixels within the specified
+grow radius are set to a value given by the "outval" parameter. The
+distance between pixels is measured as a cartesian logical pixel coordinate
+distance.
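+
+A minimal Python sketch of this growing operation (illustrative only, not
+IRAF code; numpy is assumed and the function name is hypothetical):
+
+.nf
+    import numpy as np
+
+    def grow(mask, radius=1., inval=None, outval=None):
+        out = mask.copy()
+        sel = (mask != 0) if inval is None else (mask == inval)
+        r = int(np.floor(radius))
+        for y, x in zip(*np.nonzero(sel)):
+            val = mask[y, x] if outval is None else outval
+            for dy in range(-r, r + 1):
+                for dx in range(-r, r + 1):
+                    yy, xx = y + dy, x + dx
+                    if (dy*dy + dx*dx <= radius*radius and
+                            0 <= yy < out.shape[0] and
+                            0 <= xx < out.shape[1] and
+                            out[yy, xx] == 0):
+                        out[yy, xx] = val
+        return out
+.fi
+
+With a radius of 1 this grows a "plus" pattern around each flagged pixel,
+and with a radius of 1.5 a full 3x3 box, matching the examples below.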
+.ih
+EXAMPLES
+1. A radius of 1 will grow cosmic rays in a "plus" pattern.
+
+.nf
+ cl> crgrow crmask1 crmask2 1
+.fi
+
+2. A radius of 1.5 will grow cosmic rays in a box pattern. The following
+will modify the input mask.
+
+.nf
+ cl> crgrow crmask crmask 1.5
+.fi
+.ih
+SEE ALSO
+imreplace
+.endhelp
diff --git a/noao/imred/crutil/doc/crmedian.hlp b/noao/imred/crutil/doc/crmedian.hlp
new file mode 100644
index 00000000..3147b676
--- /dev/null
+++ b/noao/imred/crutil/doc/crmedian.hlp
@@ -0,0 +1,157 @@
+.help crmedian Apr98 noao.imred.crutil
+.ih
+NAME
+crmedian -- detect, fix, and flag cosmic rays using median filtering
+.ih
+USAGE
+.nf
+crmedian input output
+.fi
+.ih
+PARAMETERS
+.ls input
+Input image in which to detect cosmic rays.
+.le
+.ls output
+Output image in which cosmic rays are replaced by the median value.
+If no output image name is given then no output image will be created.
+.le
+.ls crmask = ""
+Output cosmic ray mask. Detected cosmic rays (and other deviant pixels)
+are identified in the mask with values of one and good pixels with values
+of zero. If no output cosmic ray mask is given then no mask will be
+created.
+.le
+.ls median = ""
+Output median filtered image. If no image name is given then no output will be
+created.
+.le
+.ls sigma = ""
+Output sigma image. If no image name is given then no output will be
+created.
+.le
+.ls residual = ""
+Output residual image. This is the input image minus the median filtered
+image divided by the sigma image. Thresholds in this image determine the
+cosmic rays detected. If no image name is given then no output will be
+created.
+.le
+.ls var0 = 0., var1 = 0., var2 = 0.
+Variance coefficients for the variance model. The variance model is
+
+.nf
+ variance = var0 + var1 * data + var2 * data^2
+.fi
+
+where data is the maximum of zero and the median pixel value and the variance
+is in data numbers. All the coefficients must be positive or zero. If
+they are all zero then empirical data sigmas are estimated by a percentile
+method in boxes of size given by \fIncsig\fR and \fInlsig\fR.
+.le
+.ls lsigma = 10, hsigma = 3
+Positive sigma factors to use for selecting pixels below and above
+the median level based on the local percentile sigma. Cosmic rays will
+appear above the median level.
+.le
+.ls ncmed = 5, nlmed = 5
+The column and line size of a moving median rectangle used to estimate the
+uncontaminated local image.
+.le
+.ls ncsig = 25, nlsig = 25
+The column and line size of regions used to estimate the uncontaminated
+local sigma using a percentile. The size of the box should contain
+of order 100 pixels or more.
+.le
+.ih
+DESCRIPTION
+\fBCrmedian\fR detects cosmic rays from pixels deviating by a specified
+statistical amount from the median at each pixel. It outputs any set of
+the following: a copy of the input image with cosmic rays replaced by the
+median value, a cosmic ray mask identifying the cosmic rays, the median
+filtered image, a sigma image where each pixel has the estimated sigma, and
+the residual image used in detecting the cosmic rays.
+
+The residual image is computed by subtracting a median filtered version
+of the input data from the unfiltered input data and dividing by an
+estimate of the pixel sigmas. The median filter
+box size is given by the \fIncmed\fR and \fInlmed\fR parameters.
+If a name for the median image is specified the median filtered image
+will be output. The variance at each pixel is determined either from
+a variance model or empirically. If a name for the sigma image is specified
+then the sigma values (the square root of the variance) will be output.
+If a name for the residual image is given then the residual image
+will be output.
+
+The variance model is given by the formula
+
+.nf
+ variance = var0 + var1 * data + var2 * data^2
+.fi
+
+where data is the maximum of zero and the median pixel value and the variance
+is in data numbers. This model can be related to common detector
+parameters. For CCDs var0 is the readout noise expressed as a variance in
+data numbers and var1 is the inverse gain (DN/electrons). The second order
+coefficient has the interpretation of flat field introduced variance.
+
+If all the coefficients are zero then an empirical sigma is estimated
+as follows. The input image is divided into blocks of size
+\fIncsig\fR and \fInlsig\fR. The pixel values in a block are sorted
+and the pixel values nearest the 15.9 and 84.1 percentiles are
+selected. These are the one sigma points in a Gaussian distribution.
+The sigma estimate is the difference of these two values divided by
+two. This algorithm is used to avoid contamination of the sigma
+estimate by the bad pixel values. The block size must be at least 10
+pixels in each dimension to provide sufficient pixels for a good estimate
+of the percentile points. The sigma estimate for a pixel is the sigma
+from the nearest block. A moving box is not used for efficiency.
+
+The residual image is divided by the sigma estimate at each pixel.
+Cosmic rays are identified as those pixels in the residual image with
+values greater than \fIhsigma\fR; deviant low pixels, with residual values
+more than \fIlsigma\fR below zero, are also flagged as bad.
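+
+A compact Python sketch of this detection step (illustrative only, not IRAF
+code; it assumes numpy and scipy and a non-zero noise model, so the
+percentile-based sigma estimate is not shown):
+
+.nf
+    import numpy as np
+    from scipy.ndimage import median_filter
+
+    def crmedian_detect(img, var0, var1, var2=0.,
+                        lsigma=10., hsigma=3., ncmed=5, nlmed=5):
+        med = median_filter(img.astype(float), size=(nlmed, ncmed))
+        data = np.maximum(med, 0.)
+        sigma = np.sqrt(var0 + var1 * data + var2 * data**2)
+        resid = (img - med) / sigma
+        crmask = (resid > hsigma) | (resid < -lsigma)
+        out = np.where(crmask, med, img)    # replace flagged pixels by median
+        return out, crmask.astype(np.int32), med, sigma, resid
+.fi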
+
+If an output image name is specified then the output image is produced as a
+copy of the input image but with the identified cosmic ray pixels replaced
+by the median value. If an output cosmic ray mask is specified a cosmic
+ray mask will be produced with values of zero for good pixels and one for
+bad pixels. The cosmic ray mask is used to display the cosmic ray
+positions found and the cosmic rays can be replaced by interpolation (as
+opposed to the median value) using the task \fIcrfix\fR.
+
+The \fBcrmedian\fR detections are very simple and do not take into account
+real structure on the scale of a pixel. Thus it may clip the cores of
+stars and narrow nebular features in the data. More sophisticated
+algorithms are found in \fBcosmicrays\fR, \fBcraverage\fR, and
+\fBcrnebula\fR. The median, sigma, and residual images are available as
+output to evaluate the various aspects of the algorithm.
+.ih
+EXAMPLES
+This example illustrates using the \fBcrmedian\fR task to
+give a cosmic ray removed image and examining the results with an image
+display. The image is a CCD image with a readout noise of 5 electrons
+and a gain of 3 electrons per data number. This implies variance
+model coefficients of
+
+.nf
+ var0 = (5/3)^2 = 2.78
+ var1 = 1/3 = 0.34
+.fi
+
+.nf
+ cl> display obj001 1 # Display in first frame
+ cl> # Determine output image, cosmic ray mask, and residual image
+ cl> crmedian obj001 crobj001 crmask=mask001 resid=res001\
+ >>> var0=2.78 var1=0.34
+ cl> display crobj001 2 # Display final image
+ cl> display mask001 3 zs- zr- z1=-1 z2=2 # Display mask
+ cl> display res001 4 zs- zr- z1=-5 z2=5 # Display residuals
+.fi
+
+By looking at the residual image the sigma clipping threshold can be
+adjusted and the noise parameters can be tweaked to minimize clipping
+of real extended structure.
+.ih
+SEE ALSO
+cosmicrays, craverage, crnebula, median, crfix, crgrow
+.endhelp
diff --git a/noao/imred/crutil/doc/crnebula.hlp b/noao/imred/crutil/doc/crnebula.hlp
new file mode 100644
index 00000000..174a6771
--- /dev/null
+++ b/noao/imred/crutil/doc/crnebula.hlp
@@ -0,0 +1,139 @@
+.help crnebula Apr98 noao.imred.crutil
+.ih
+NAME
+crnebula -- create a cosmic ray mask from nebular images
+.ih
+USAGE
+.nf
+crnebula input output
+.fi
+.ih
+PARAMETERS
+.ls input
+Input image in which cosmic rays are to be detected.
+.le
+.ls output
+Output image in which cosmic rays are to be replaced by the median.
+If no output image is given (specified as "") then no output image
+is created.
+.le
+.ls crmask = ""
+Output cosmic ray mask identifying the cosmic rays found. The mask
+will have values of one for cosmic rays and zero for non-cosmic rays.
+If no output cosmic ray mask is given (specified as "") then no mask
+is created.
+.le
+.ls residual = ""
+Output residual image. This is the input image minus the median filtered
+image divided by the estimated sigma at each pixel. Thresholds in this
+image determine the cosmic rays detected. If no image name is given then
+no output will be created.
+.le
+.ls rmedresid = ""
+Output image for the difference between the box median filter image and
+the ring median filtered image divided by the estimated sigma at each
+pixel. If no image name is given then no output will be created.
+.le
+.ls var0 = 0., var1 = 0., var2 = 0.
+Variance coefficients for the variance model. The variance model is
+
+.nf
+ variance = var0 + var1 * data + var2 * data^2
+.fi
+
+where data is the maximum of zero and the median pixel value and the variance is in
+data numbers. All the coefficients must be positive or zero. If they are
+all zero then empirical data sigmas are estimated by a percentile method in
+boxes of size given by \fIncsig\fR and \fInlsig\fR.
+.le
+.ls sigmed = 3.
+Sigma clipping factor for the residual image.
+.le
+.ls sigdiff = 3.
+Sigma clipping factor for the residuals between the box median and ring median
+filtered images.
+.le
+.ls mbox = 5
+Box size, in pixels, for the box median filtering.
+.le
+.ls rin = 1.5, rout = 6.
+Inner and outer radii, in pixels, for the ring median filtering.
+.le
+.ls verbose = no
+Print some progress information?
+.le
+.ih
+DESCRIPTION
+This task uses a combination of box median filtering to detect cosmic rays
+and the difference between box and ring median filtering to identify
+regions of fine nebular structure which should not be treated as cosmic
+rays. The output consists of some set of the input image with cosmic rays
+replaced by the median, a cosmic ray mask, the residual image used to
+detect the cosmic rays, and the residual image used to exclude cosmic rays
+in regions of nebular fine structure. The cosmic ray mask may be used
+later with \fBcrgrow\fR and \fBcrfix\fR to grow and remove the cosmic rays
+from the data by interpolation rather than the median.
+
+The algorithm is as follows. The input image is median filtered using a
+box of size given by \fImbox\fR. The residual image between the unfiltered
+and filtered data is computed. The residuals are divided by the estimated
+sigma of the pixel. Cosmic rays are those which are more than \fIsigmed\fR
+above zero in the residual image. This residual image may be output if an
+output name is specified. This part of the algorithm is identical to that
+of the task \fIcrmedian\fR and, in fact, that task is used.
+
+The median residual image not only enhances cosmic rays, it also enhances
+narrow fine structure in the input image. To avoid identifying this
+structure as
+cosmic rays a second filtered residual image is created which
+preferentially identifies this structure over the cosmic rays. The input
+image is filtered using a ring median of specified inner and outer radius.
+The inner radius is slightly larger than the scale of the cosmic rays and
+the outer radius is comparable to the box size of the box median filter. A
+ring filter replaces the center of the ring by the median of the ring. The
+difference between the input and ring median filtered image divided by the
+estimated sigma will then be very similar to the box median residual image both
+where there are cosmic rays and where there is diffuse structure but will
+be different where there are linear fine structure patterns. The
+difference between the median residual image and this ring median residual
+image highlights the regions of fine structure. If an image name is specified
+for the difference of the residual images it will be output.
+
+The difference of the median residual images is used to reject cosmic ray
+candidates found by sigma clipping the box median residual image: any
+candidate is excluded where the residual difference deviates from zero by
+more than \fIsigdiff\fR in either the positive or negative direction.
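+
+A compact Python sketch of this combination (illustrative only, not IRAF
+code; it assumes numpy and scipy, and takes the per-pixel sigma estimate
+described above as an input array):
+
+.nf
+    import numpy as np
+    from scipy.ndimage import median_filter
+
+    def crnebula_mask(img, sigma, sigmed=3., sigdiff=3.,
+                      mbox=5, rin=1.5, rout=6.):
+        img = img.astype(float)
+        boxmed = median_filter(img, size=mbox)          # box median
+        # Ring footprint: rin < distance from center <= rout.
+        r = int(np.ceil(rout))
+        yy, xx = np.mgrid[-r:r+1, -r:r+1]
+        d2 = yy**2 + xx**2
+        ringmed = median_filter(img, footprint=(d2 > rin**2) & (d2 <= rout**2))
+        resid = (img - boxmed) / sigma                  # box median residual
+        rmedresid = (boxmed - ringmed) / sigma          # residual difference
+        # Cosmic rays: high box residuals outside fine-structure regions.
+        return (resid > sigmed) & (np.abs(rmedresid) <= sigdiff)
+.fi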
+
+To understand this algorithm it is recommended that the user save the
+residual and residual difference images and display them and blink against
+the original data.
+.ih
+EXAMPLES
+This example, the same as in \fBcrmedian\fR, illustrates using the
+\fBcrnebula\fR task to give a cosmic ray removed image and examining the
+results with an image display. The image is a CCD image with a readout
+noise of 5 electrons and a gain of 3 electrons per data number. This
+implies variance model coefficients of
+
+.nf
+ var0 = (5/3)^2 = 2.78
+ var1 = 1/3 = 0.34
+.fi
+
+.nf
+ cl> display obj001 1 # Display in first frame
+ cl> # Determine output image, cosmic ray mask, and residual images
+ cl> crnebula obj001 crobj001 crmask=mask001 resid=res001\
+ >>> rmedresid=rmed001 var0=2.78 var1=0.34
+ cl> display crobj001 2 # Display final image
+ cl> display res001 3 zs- zr- z1=-5 z2=5 # Display residuals
+ cl> display rmed001 4 zs- zr- z1=-5 z2=5
+.fi
+
+By looking at the residual image the sigma clipping threshold can be
+adjusted and the noise parameters can be tweaked to minimize clipping
+of real extended structure.
+.ih
+SEE ALSO
+cosmicrays, crmedian, median, rmedian, crfix, crgrow
+.endhelp
diff --git a/noao/imred/crutil/doc/overview.hlp b/noao/imred/crutil/doc/overview.hlp
new file mode 100644
index 00000000..cb4dc3de
--- /dev/null
+++ b/noao/imred/crutil/doc/overview.hlp
@@ -0,0 +1,76 @@
+.help overview Apr98 noao.imred.crutil
+
+.ce
+\fBThe Cosmic Ray Package: CRUTIL\fR
+
+The cosmic ray package provides tools for identifying and removing cosmic
+rays in images. The tasks are:
+
+.nf
+ cosmicrays - Remove cosmic rays using flux ratio algorithm
+ craverage - Detect CRs against average and avoid objects
+ crcombine - Combine multiple exposures to eliminate cosmic rays
+ credit - Interactively edit cosmic rays using an image display
+ crfix - Fix cosmic rays in images using cosmic ray masks
+ crgrow - Grow cosmic rays in cosmic ray masks
+ crmedian - Detect and replace cosmic rays with median filter
+ crnebula - Detect and replace cosmic rays in nebular data
+.fi
+
+The best way to remove cosmic rays is using multiple exposures of the same
+field. When this is done the task \fBcrcombine\fR is used to combine the
+exposures into a final single image with cosmic rays removed. The images
+are scaled (if necessary) to a common data level either by multiplicative
+scaling, an additive background offset, or some combination of both.
+Cosmic rays are then found as pixels which differ by some statistical
+amount from the average or median of the data.
+
+A median is the simplest way to remove cosmic rays. This is an option
+with \fBcrcombine\fR. But this does not make optimal use of the data.
+An average of the pixels remaining after some rejection operation is better.
+If the noise characteristics of the data can be described by a gain and
+read noise then cosmic rays can be optimally rejected using the
+"crreject" algorithm. This works on two or more images. There are
+a number of other rejection algorithms which can be used as described in
+the task help.
+
+The rest of the tasks in the package are used when only a single exposure
+is available. These include interactive editing with \fBcredit\fR. The
+replacement algorithms in this task may also be used non-interactively if
+you have a list of pixel coordinates as input. Other tasks automatically
+identify pixels which are significantly higher than surrounding pixels.
+
+The simplest of these tasks is \fBcrmedian\fR. This replaces
+cosmic rays with a median value and produces a cosmic ray
+mask which is a simple type of integer image where good pixels have a value
+of zero and bad pixels have a non-zero value. The tasks \fBcrgrow\fR and
+\fBcrfix\fR are provided to use this type of cosmic ray mask. The former
+will flag additional pixels within some radius of the flagged pixels in the
+mask. The latter is the basic tool for replacing the identified pixels in
+the data by neighboring data. It uses linear interpolation along lines or
+columns. The median task is simple but it often will flag the cores of
+stars or other small but real features.
+
+The task \fBcraverage\fR is similar to \fBcrmedian\fR in that it compares
+the pixel values against a smoothed version. Instead of a median it uses
+an average with the central pixel excluded. It is more sophisticated
+in that it also compares the average against a larger median to see if
+the region corresponds to an object. Thus it can detect objects and
+the task could be used as a simple object detection task in its own right.
+Because the hardest part of cosmic ray detection from a single image is
+avoiding truncation of the cores of stars this task does not allow cosmic
+rays to be detected where it thinks there is an object. This task is
+also more versatile in allowing separate mask values and in working on a
+list of images.
+
+Somewhat more sophisticated algorithms are available in the tasks
+\fBcosmicrays\fR and \fBcrnebula\fR. These attempt to determine if a
+deviant pixel is the core of a star or part of a linear nebular feature
+respectively.
+
+The best use of these tasks is to experiment and iterate. In particular,
+one may want to iterate a task several times and use both \fBcosmicrays\fR
+and \fBcraverage\fR.
+
+Good hunting!
+.endhelp
diff --git a/noao/imred/crutil/mkpkg b/noao/imred/crutil/mkpkg
new file mode 100644
index 00000000..3a55a03a
--- /dev/null
+++ b/noao/imred/crutil/mkpkg
@@ -0,0 +1,8 @@
+# Make the package.
+
+$call update@src
+$exit
+
+update:
+ $call update@src
+ ;
diff --git a/noao/imred/crutil/src/Revisions b/noao/imred/crutil/src/Revisions
new file mode 100644
index 00000000..a5ef52f1
--- /dev/null
+++ b/noao/imred/crutil/src/Revisions
@@ -0,0 +1,151 @@
+.help revisions Nov99 crutil
+.nf
+t_cosmicrays.x
+    A pointer to an array of pointers was used in one place as a real. This
+ is an error when integer and real arrays are not of the same size; i.e.
+ on 64-bit architectures. (8/2/12, Valdes)
+
+=======
+V2.16.1
+=======
+
+t_craverage.x
+ The pointer for the input mask buffer when using the same mask for both
+ input and output was not being read to include the edge lines which are
+ used during the computation resulting in a segmentation violation.
+ (11/24/04, Valdes)
+
+=======
+V2.12.2
+=======
+
+t_craverage.x
+    To grow an output mask created by the task requires closing the new mask
+ and reopening it READ_WRITE. (10/23/02, Valdes)
+
+=======
+V2.12.1
+=======
+
+=====
+V2.12
+=====
+
+t_craverage.x
+t_crmedian.x
+ The mask name may be !<keyword>. (3/22/02, Valdes)
+
+xtmaskname.x +
+t_craverage.x
+t_crmedian.x
+t_crgrow.x
+mkpkg
+ Modified to allow FITS mask extensions. (3/22/02, Valdes)
+
+mkpkg
+    Added missing mkpkg dependencies for several source files. (12/13/01, MJF)
+
+============================
+CRUTIL V1.5: August 22, 2001
+============================
+
+crcombine.cl
+crcombine.par
+../doc/crcombine.hlp
+ Modified to use new version of IMCOMBINE. (8/22/01, Valdes)
+
+noao$imred/crutil/ +
+ Installed package into the NOAO.IMRED package. (8/22/01, Valdes)
+
+t_craverage.x
+    The nrej feature was done wrong for nrej of 1 or 2. (5/3/01, Valdes)
+
+t_craverage.x
+ 1. The amov and aclr calls were using the wrong buffer start.
+ 2. Added missing imtclose calls.
+ 3. The growing needed to be moved outside the block buffering.
+ (10/4/00, Valdes as diagnosed by Davis)
+
+t_craverage.x
+craverage.par
+../doc/craverage.hlp
+    Added an nrej parameter to exclude additional pixels from the average.
+ This is needed to deal with cosmic rays which are bigger than one
+ pixel or very nearby additional cosmic rays. (9/13/00, Valdes)
+
+========================
+CRUTIL V1.4: Jan 6, 2000
+========================
+
+t_crmedian.x
+ The calculation of the sigma would reference uninitialized data if
+ the image size was not a multiple of the sigma block size.
+ (1/6/00, Valdes)
+
+t_craverage.x
+ The indexing of the pixels to use for the sigma calculation was wrong.
+ (1/6/00, Valdes)
+
+t_craverage.x +
+craverage.par +
+x_crutil.e
+mkpkg
+../crutil.cl
+../crutil.men
+../crutil.hd
+ New task that finds cosmic rays against an average excluding the candidate
+ pixel. It also detects objects and prevents cosmic rays being detected
+ in them (i.e. the cores of stars). (11/30/99, Valdes)
+
+t_crgrow.x
+crgrow.par
+ 1. Broke main grow loop into a subroutine that can be called by other
+ tasks.
+ 2. Added inval and outval parameters to allow selecting mask values.
+ (11/30/99, Valdes)
+
+========================
+CRUTIL V1.3: Oct 19, 1999
+========================
+
+crnebula.cl
+ The rin and rout parameters were not being used and instead
+ the default values were hardwired. (10/19/99, Valdes)
+
+========================
+CRUTIL V1.2: Sep 4, 1998
+========================
+
+t_crmedian.x
+ On images with more than about 500K pixels the median operation is
+ done in overlapping blocks of lines. The amount of overlap is
+ half of the line median size "lmed". The bug is that on output
+ the overlap regions end up being zero. The output i/o is now done
+ in non-overlapping blocks. (9/4/98, Valdes)
+
+=====================
+CRUTIL V1.1: May 1998
+=====================
+
+crexamine.x
+cosmicrays.key +
+cosmicrays.hlp
+ The graphical deletion and undeletion of candidates now includes the
+ keys 'e' and 'v' to delete and undelete from a marked rectangular
+ region. Also the key file was moved to the source directory.
+ (Valdes/Shopbell 5/15/98)
+
+cosmicrays.par
+    Fixed typo in "crmasks" parameter name.
+ (Valdes/Shopbell 5/15/98)
+
+===========
+CRUTIL V1.0
+===========
+
+New package created April 24, 1998.
+
+=======
+V2.11.1
+=======
+.endhelp
diff --git a/noao/imred/crutil/src/cosmicrays.key b/noao/imred/crutil/src/cosmicrays.key
new file mode 100644
index 00000000..beac2835
--- /dev/null
+++ b/noao/imred/crutil/src/cosmicrays.key
@@ -0,0 +1,43 @@
+ COSMIC RAY DETECTION AND REPLACEMENT
+
+
+INTERACTIVE IMAGE CURSOR COMMANDS
+
+When using the image display cursor to define a list of training objects
+the following keystrokes may be given.
+
+? Help
+c Identify the object as a cosmic ray
+s Identify the object as a star
+g Switch to the graphics plot
+q Quit and continue with the cleaning
+
+There are no colon commands.
+
+
+INTERACTIVE GRAPHICS CURSOR COMMANDS
+
+ The graph shows the ratio of the mean background subtracted flux within the
+detection window (excluding the candidate cosmic ray and the second brightest
+pixel) to the flux of the candidate cosmic ray pixel as a function of the
+mean flux. Both coordinates have been multiplied by 100 so the ratio is in
+percent. The peaks of extended objects have high flux ratios while true
+cosmic rays have low ratios. The main purpose of this step is to set the
+flux ratio threshold for discriminating stars and galaxies. The cursor
+keys are:
+
+? Help
+a Toggle between showing all objects and only the training objects
+d	Mark candidate for replacement (applies to '+' points)
+e	Mark candidates in a region for replacement (applies to '+' points)
+q Quit. Returns to training or to replace the selected pixels
+r Redraw the graph
+s Make a surface plot for the candidate nearest the cursor
+t Set the flux ratio threshold at the y cursor position
+u	Mark candidate to not be replaced (applies to 'x' points)
+v	Mark candidates in a region to not be replaced (applies to 'x' points)
+w Adjust the graph window (see GTOOLS)
+<space> Print the pixel coordinates for a candidate
+
+There are no colon commands except those for the windowing options (type
+:\help or see GTOOLS).
diff --git a/noao/imred/crutil/src/cosmicrays.par b/noao/imred/crutil/src/cosmicrays.par
new file mode 100644
index 00000000..9016d9f9
--- /dev/null
+++ b/noao/imred/crutil/src/cosmicrays.par
@@ -0,0 +1,17 @@
+input,s,a,,,,List of images in which to detect cosmic rays
+output,s,a,,,,List of cosmic ray replaced output images (optional)
+crmasks,s,h,"",,,"List of bad pixel masks (optional)
+"
+threshold,r,h,25.,,,Detection threshold above mean
+fluxratio,r,h,2.,,,Flux ratio threshold (in percent)
+npasses,i,h,5,1,,Number of detection passes
+window,s,h,"5","5|7",,"Size of detection window
+"
+interactive,b,h,yes,,,Examine parameters interactively?
+train,b,h,no,,,Use training objects?
+objects,*imcur,h,"",,,Cursor list of training objects
+savefile,f,h,"",,,File to save train objects
+plotfile,f,h,"",,,Plot file
+graphics,s,h,"stdgraph",,,Interactive graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+answer,s,q,,"no|yes|NO|YES",,Review parameters for a particular image?
diff --git a/noao/imred/crutil/src/craverage.par b/noao/imred/crutil/src/craverage.par
new file mode 100644
index 00000000..b67c4585
--- /dev/null
+++ b/noao/imred/crutil/src/craverage.par
@@ -0,0 +1,23 @@
+input,f,a,,,,List of input images
+output,f,a,,,,List of output images
+crmask,f,h,,,,List of output cosmic ray and object masks
+average,f,h,,,,List of output block average filtered images
+sigma,f,h,,,,"List of output sigma images
+"
+navg,i,h,5,3,,Block average box size
+nrej,i,h,0,0,,Number of high pixels to reject from the average
+nbkg,i,h,5,1,,Background annulus width
+nsig,i,h,25,10,,Box size for sigma calculation
+var0,r,h,0.,0.,,Variance coefficient for DN^0 term
+var1,r,h,0.,0.,,Variance coefficient for DN^1 term
+var2,r,h,0.,0.,,"Variance coefficient for DN^2 term
+"
+crval,i,h,1,0,,Mask value for cosmic rays
+lcrsig,r,h,10.,,,Low cosmic ray sigma outside object
+hcrsig,r,h,5.,,,High cosmic ray sigma outside object
+crgrow,r,h,0.,0.,,"Cosmic ray grow radius
+"
+objval,i,h,0,0,,Mask value for objects
+lobjsig,r,h,10.,,,Low object detection sigma
+hobjsig,r,h,5.,,,High object detection sigma
+objgrow,r,h,0.,0.,,Object grow radius
diff --git a/noao/imred/crutil/src/crcombine.cl b/noao/imred/crutil/src/crcombine.cl
new file mode 100644
index 00000000..7c6bdc77
--- /dev/null
+++ b/noao/imred/crutil/src/crcombine.cl
@@ -0,0 +1,17 @@
+# CRCOMBINE -- Reject cosmic rays by combining multiple exposures.
+
+procedure crcombine (input, output)
+
+begin
+ imcombine (input, output, logfile=logfile, combine=combine,
+ reject=reject, scale=scale, zero=zero, statsec=statsec,
+ lsigma=lsigma, hsigma=hsigma, rdnoise=rdnoise, gain=gain,
+ grow=grow, headers=headers, bpmasks=bpmasks, rejmasks=rejmasks,
+ nrejmasks=nrejmasks, expmasks=expmasks, sigmas=sigmas,
+ project=project, outtype=outtype, outlimits=outlimits,
+ offsets=offsets, masktype=masktype, maskvalue=maskvalue,
+ blank=blank, weight=weight, expname=expname,
+ lthreshold=lthreshold, hthreshold=hthreshold, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, snoise=snoise,
+ sigscale=sigscale, pclip=pclip)
+end
diff --git a/noao/imred/crutil/src/crcombine.par b/noao/imred/crutil/src/crcombine.par
new file mode 100644
index 00000000..95ae95ae
--- /dev/null
+++ b/noao/imred/crutil/src/crcombine.par
@@ -0,0 +1,45 @@
+# CRCOMBINE -- Image combine parameters
+
+input,s,a,,,,List of images to combine
+output,s,a,,,,List of output images
+logfile,s,h,"STDOUT",,,"Log file
+
+# Cosmic ray rejection parameters"
+combine,s,h,"average","average|median|sum",,Type of combine operation
+reject,s,h,"crreject","none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip",,Type of rejection
+scale,s,h,"mode",,,Image scaling
+zero,s,h,"none",,,Image zero point offset
+statsec,s,h,"",,,Image section for computing statistics
+lsigma,r,h,10.,0.,,Lower sigma clipping factor
+hsigma,r,h,3.,0.,,Upper sigma clipping factor
+rdnoise,s,h,"0.",,,CCD readout noise (electrons)
+gain,s,h,"1.",,,CCD gain (electrons/DN)
+grow,r,h,0.,0.,,"Radius (pixels) for neighbor rejection
+
+# Additional output"
+headers,s,h,"",,,List of header files (optional)
+bpmasks,s,h,"",,,List of bad pixel masks (optional)
+rejmasks,s,h,"",,,List of rejection masks (optional)
+nrejmasks,s,h,"",,,List of number rejected masks (optional)
+expmasks,s,h,"",,,List of exposure masks (optional)
+sigmas,s,h,"",,,"List of sigma images (optional)
+
+# Additional parameters"
+project,b,h,no,,,Project highest dimension of input images?
+outtype,s,h,"real","short|ushort|integer|long|real|double",,Output image pixel datatype
+outlimits,s,h,"",,,Output limits (x1 x2 y1 y2 ...)
+offsets,f,h,"none",,,Input image offsets
+masktype,s,h,"none","none|goodvalue|badvalue|goodbits|badbits",,Mask type
+maskvalue,r,h,0,,,Mask value
+blank,r,h,0.,,,Value if there are no pixels
+weight,s,h,"none",,,Image weights
+expname,s,h,"",,,Image header exposure time keyword
+lthreshold,r,h,INDEF,,,Lower threshold
+hthreshold,r,h,INDEF,,,Upper threshold
+nlow,i,h,1,0,,minmax: Number of low pixels to reject
+nhigh,i,h,1,0,,minmax: Number of high pixels to reject
+nkeep,i,h,1,,,Minimum to keep (pos) or maximum to reject (neg)
+mclip,b,h,yes,,,Use median in sigma clipping algorithms?
+snoise,s,h,"0.",,,Sensitivity noise (fraction)
+sigscale,r,h,0.1,0.,,Tolerance for sigma clipping scaling corrections
+pclip,r,h,-0.5,,,pclip: Percentile clipping parameter
diff --git a/noao/imred/crutil/src/credit.cl b/noao/imred/crutil/src/credit.cl
new file mode 100644
index 00000000..8b844f33
--- /dev/null
+++ b/noao/imred/crutil/src/credit.cl
@@ -0,0 +1,13 @@
+# CREDIT -- Edit cosmic rays with an image display.
+
+procedure credit (input, output)
+
+begin
+ imedit (input, output, cursor=cursor, logfile=logfile,
+ display=display, autodisplay=autodisplay,
+ autosurface=autosurface, aperture=aperture, radius=radius,
+ search=search, buffer=buffer, width=width, xorder=xorder,
+ yorder=yorder, value=value, sigma=sigma, angh=angh, angv=angv,
+ command=command, graphics=graphics, default=default,
+ fixpix=fixpix)
+end
diff --git a/noao/imred/crutil/src/credit.par b/noao/imred/crutil/src/credit.par
new file mode 100644
index 00000000..a23e0d35
--- /dev/null
+++ b/noao/imred/crutil/src/credit.par
@@ -0,0 +1,22 @@
+input,s,a,,,,Images to be edited
+output,s,a,,,,Output images
+cursor,*imcur,h,"",,,Cursor input
+logfile,s,h,"",,,Logfile for record of cursor commands
+display,b,h,yes,,,Display images?
+autodisplay,b,h,yes,,,Automatic image display?
+autosurface,b,h,no,,,Automatic surface plots?
+aperture,s,h,"circular","|circular|square|",,Aperture type
+radius,r,h,2.,,,Substitution radius
+search,r,h,2.,,,Search radius
+buffer,r,h,1.,0.,,Background buffer width
+width,r,h,2.,1.,,Background width
+xorder,i,h,2,0,,Background x order
+yorder,i,h,2,0,,Background y order
+value,r,h,0.,,,Constant value substitution
+sigma,r,h,INDEF,,,Added noise sigma
+angh,r,h, -33.,,,Horizontal viewing angle (degrees)
+angv,r,h,25.,,,Vertical viewing angle (degrees)
+command,s,h,"display $image 1 erase=$erase fill=yes order=0 >& dev$null",,,Display command
+graphics,s,h,"stdgraph",,,Graphics device
+default,s,h,"b",,,Default option for x-y input
+fixpix,b,h,no,,,Fixpix style input?
diff --git a/noao/imred/crutil/src/crexamine.x b/noao/imred/crutil/src/crexamine.x
new file mode 100644
index 00000000..14e8d589
--- /dev/null
+++ b/noao/imred/crutil/src/crexamine.x
@@ -0,0 +1,626 @@
+include <error.h>
+include <syserr.h>
+include <imhdr.h>
+include <gset.h>
+include <mach.h>
+include <pkg/gtools.h>
+include "crlist.h"
+
+# CR_EXAMINE -- Examine cosmic ray candidates interactively.
+# CR_GRAPH -- Make a graph
+# CR_NEAREST -- Find the nearest cosmic ray to the cursor.
+# CR_DELETE -- Set replace flag for cosmic ray candidate nearest cursor.
+# CR_UNDELETE -- Set no replace flag for cosmic ray candidate nearest cursor.
+# CR_UPDATE -- Change replacement flags, thresholds, and graphs.
+# CR_PLOT -- Make log plot
+
+define HELP "crutil$src/cosmicrays.key"
+define PROMPT "cosmic ray options"
+
+# CR_EXAMINE -- Examine cosmic ray candidates interactively.
+
+procedure cr_examine (cr, gp, gt, im, fluxratio, first)
+
+pointer cr # Cosmic ray list
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer im # Image pointer
+real fluxratio # Flux ratio threshold
+int first # Initial key
+
+char cmd[SZ_LINE]
+int i, newgraph, wcs, key, nc, nl, c1, c2, l1, l2, show
+real wx, wy, x1, y1, x2, y2
+pointer data
+
+int clgcur()
+pointer imgs2r()
+
+begin
+ # Set up the graphics.
+ call gt_sets (gt, GTPARAMS, IM_TITLE(im))
+
+ # Set image limits
+ nc = IM_LEN(im, 1)
+ nl = IM_LEN(im, 2)
+
+ # Enter cursor loop.
+ key = first
+ repeat {
+ switch (key) {
+ case '?': # Print help text.
+ call gpagefile (gp, HELP, PROMPT)
+ case ':': # Colon commands.
+ switch (cmd[1]) {
+ case '/':
+ call gt_colon (cmd, gp, gt, newgraph)
+ default:
+ call printf ("\007")
+ }
+ case 'a': # Toggle show all
+ if (show == 0)
+ show = 1
+ else
+ show = 0
+ newgraph = YES
+ case 'd': # Delete candidate
+ call cr_delete (gp, wx, wy, cr, i, show)
+ case 'e': # Delete candidates in region
+ x1 = wx; y1 = wy
+ call printf ("again:")
+ if (clgcur ("cursor", x2, y2, wcs, key, cmd, SZ_LINE) == EOF)
+ return
+ call cr_delete_reg (gp, x1, y1, x2, y2, cr, show)
+ case 'q': # Quit
+ break
+ case 'r': # Redraw the graph.
+ newgraph = YES
+ case 's': # Make surface plots
+ call cr_nearest (gp, wx, wy, cr, i, show)
+ c1 = max (1, int (Memr[CR_COL(cr)+i-1]) - 5)
+ c2 = min (nc, int (Memr[CR_COL(cr)+i-1]) + 5)
+ l1 = max (1, int (Memr[CR_LINE(cr)+i-1]) - 5)
+ l2 = min (nl, int (Memr[CR_LINE(cr)+i-1]) + 5)
+ data = imgs2r (im, c1, c2, l1, l2)
+ call gclear (gp)
+ call gsview (gp, 0.03, 0.48, 0.53, 0.98)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, -33., 25.)
+ call gsview (gp, 0.53, 0.98, 0.53, 0.98)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, -123., 25.)
+ call gsview (gp, 0.03, 0.48, 0.03, 0.48)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, 57., 25.)
+ call gsview (gp, 0.53, 0.98, 0.03, 0.48)
+ call cr_surface (gp, Memr[data], c2-c1+1, l2-l1+1, 147., 25.)
+ call fprintf (STDERR, "[Type any key to continue]")
+ i = clgcur ("cursor", wx, wy, wcs, key, cmd, SZ_LINE)
+ newgraph = YES
+ case 't': # Set threshold
+ call cr_update (gp, wy, cr, fluxratio, show)
+ call clputr ("fluxratio", fluxratio)
+ case 'u': # Undelete candidate
+ call cr_undelete (gp, wx, wy, cr, i, show)
+ case 'v': # Undelete candidates in region
+ x1 = wx; y1 = wy
+ call printf ("again:")
+ if (clgcur ("cursor", x2, y2, wcs, key, cmd, SZ_LINE) == EOF)
+ return
+ call cr_undelete_reg (gp, x1, y1, x2, y2, cr, show)
+ case 'w':# Window the graph.
+ call gt_window (gt, gp, "cursor", newgraph)
+ case ' ': # Print info
+ call cr_nearest (gp, wx, wy, cr, i, show)
+ call printf ("%d %d\n")
+ call pargr (Memr[CR_COL(cr)+i-1])
+ call pargr (Memr[CR_LINE(cr)+i-1])
+ case 'z': # NOP
+ newgraph = NO
+ default: # Ring bell for unrecognized commands.
+ call printf ("\007")
+ }
+
+ # Update the graph if needed.
+ if (newgraph == YES) {
+ call cr_graph (gp, gt, cr, fluxratio, show)
+ newgraph = NO
+ }
+ } until (clgcur ("cursor", wx, wy, wcs, key, cmd, SZ_LINE) == EOF)
+end
+
+
+# CR_GRAPH -- Make a graph
+
+procedure cr_graph (gp, gt, cr, fluxratio, show)
+
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointers
+pointer cr # Cosmic ray list
+real fluxratio # Flux ratio threshold
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x1, x2, y1, y2
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ call gclear (gp)
+ call gt_ascale (gp, gt, Memr[x+1], Memr[y+1], ncr)
+ call gt_swind (gp, gt)
+ call gt_labax (gp, gt)
+
+ do i = 1, ncr {
+ if ((Memi[flag+i] == NO) || (Memi[flag+i] == ALWAYSNO))
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_PLUS, 2., 2.)
+ else
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_CROSS, 2., 2.)
+ if (Memr[w+i] != 0.)
+ call gmark (gp, Memr[x+i], Memr[y+i], GM_BOX, 2., 2.)
+ }
+
+ call ggwind (gp, x1, x2, y1, y2)
+ call gseti (gp, G_PLTYPE, 2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+
+ call sfree (sp)
+end
+
+
+# CR_NEAREST -- Find the nearest cosmic ray to the cursor.
+
+procedure cr_nearest (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+
+ # Move the cursor to the selected point.
+ call gscur (gp, x2, y2)
+
+ call sfree (sp)
+end
+
+
+# CR_DELETE -- Set replace flag for cosmic ray candidate nearest cursor.
+
+procedure cr_delete (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ nearest = 0
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == YES) || (Memi[flag+i] == ALWAYSYES))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+
+ # Move the cursor to the selected point and mark the deleted point.
+ if (nearest > 0) {
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+ Memi[CR_FLAG(cr)+nearest-1] = ALWAYSYES
+ Memi[CR_WT(cr)+nearest-1] = -1
+ call gscur (gp, x2, y2)
+ call gseti (gp, G_PMLTYPE, 0)
+ y2 = Memr[CR_RATIO(cr)+nearest-1]
+ call gmark (gp, x2, y2, GM_PLUS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x2, y2, GM_CROSS, 2., 2.)
+ }
+
+ call sfree (sp)
+end
+
+
+# CR_DELETE_REG -- Set replace flag for cosmic ray candidates in a region.
+
+procedure cr_delete_reg (gp, wx1, wy1, wx2, wy2, cr, show)
+
+pointer gp # GIO pointer
+real wx1, wy1, wx2, wy2 # Cursor positions
+pointer cr # Cosmic ray list
+int show # Show (0=all, 1=train)
+
+int i, j, ncr
+real x0, y0, x1, y1
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Check order of region points.
+ if (wx1 > wx2) {
+ x0 = wx1
+ wx1 = wx2
+ wx2 = x0
+ }
+ if (wy1 > wy2) {
+ y0 = wy1
+ wy1 = wy2
+ wy2 = y0
+ }
+
+ # Check if point in region.
+ call gctran (gp, wx1, wy1, wx1, wy1, 1, 0)
+ call gctran (gp, wx2, wy2, wx2, wy2, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == YES) || (Memi[flag+i] == ALWAYSYES))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+
+ # Mark the deleted points.
+ if ((x0 > wx1) && (x0 < wx2) && (y0 > wy1) && (y0 < wy2)) {
+ if (index != NULL)
+ j = Memi[index+i]
+ else
+ j = i
+ Memi[CR_FLAG(cr)+j-1] = ALWAYSYES
+ Memi[CR_WT(cr)+j-1] = -1
+ call gscur (gp, x1, y1)
+ call gseti (gp, G_PMLTYPE, 0)
+ y1 = Memr[CR_RATIO(cr)+j-1]
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ }
+ }
+ call sfree (sp)
+end
+
+
+# CR_UNDELETE -- Set no replace flag for cosmic ray candidate nearest cursor.
+
+procedure cr_undelete (gp, wx, wy, cr, nearest, show)
+
+pointer gp # GIO pointer
+real wx, wy # Cursor position
+pointer cr # Cosmic ray list
+int nearest # Index of nearest point (returned)
+int show # Show (0=all, 1=train)
+
+int i, ncr
+real x0, y0, x1, y1, x2, y2, r2, r2min
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Search for nearest point in NDC.
+ nearest = 0
+ r2min = MAX_REAL
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == NO) || (Memi[flag+i] == ALWAYSNO))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ x2 = x1
+ y2 = y1
+ nearest = i
+ }
+ }
+
+ # Move the cursor to the selected point and mark the delete point.
+ if (nearest > 0) {
+ if (index != NULL)
+ nearest = Memi[index+nearest]
+ Memi[CR_FLAG(cr)+nearest-1] = ALWAYSNO
+ Memi[CR_WT(cr)+nearest-1] = 1
+ call gscur (gp, x2, y2)
+
+ call gseti (gp, G_PMLTYPE, 0)
+ y2 = Memr[CR_RATIO(cr)+nearest-1]
+ call gmark (gp, x2, y2, GM_CROSS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x2, y2, GM_PLUS, 2., 2.)
+ }
+
+ call sfree (sp)
+end
+
+
+# CR_UNDELETE_REG -- Set no replace flag for cosmic ray candidates in a region.
+
+procedure cr_undelete_reg (gp, wx1, wy1, wx2, wy2, cr, show)
+
+pointer gp # GIO pointer
+real wx1, wy1, wx2, wy2 # Cursor positions
+pointer cr # Cosmic ray list
+int show # Show (0=all, 1=train)
+
+int i, j, ncr
+real x0, y0, x1, y1
+pointer sp, x, y, w, flag, index
+
+begin
+ call smark (sp)
+
+ call cr_show (show, cr, x, y, w, flag, index, ncr)
+ if (ncr == 0) {
+ call sfree (sp)
+ return
+ }
+
+ # Check order of region points.
+ if (wx1 > wx2) {
+ x0 = wx1
+ wx1 = wx2
+ wx2 = x0
+ }
+ if (wy1 > wy2) {
+ y0 = wy1
+ wy1 = wy2
+ wy2 = y0
+ }
+
+ # Check if point in region.
+ call gctran (gp, wx1, wy1, wx1, wy1, 1, 0)
+ call gctran (gp, wx2, wy2, wx2, wy2, 1, 0)
+ do i = 1, ncr {
+ if ((Memi[flag+i] == NO) || (Memi[flag+i] == ALWAYSNO))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ call gctran (gp, x1, y1, x0, y0, 1, 0)
+
+ # Mark the deleted points.
+ if ((x0 > wx1) && (x0 < wx2) && (y0 > wy1) && (y0 < wy2)) {
+ if (index != NULL)
+ j = Memi[index+i]
+ else
+ j = i
+ Memi[CR_FLAG(cr)+j-1] = ALWAYSNO
+ Memi[CR_WT(cr)+j-1] = 1
+ call gscur (gp, x1, y1)
+ call gseti (gp, G_PMLTYPE, 0)
+ y1 = Memr[CR_RATIO(cr)+j-1]
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ }
+ }
+ call sfree (sp)
+end
+
+
+# CR_UPDATE -- Change replacement flags, thresholds, and graphs.
+
+procedure cr_update (gp, wy, cr, fluxratio, show)
+
+pointer gp # GIO pointer
+real wy # Y cursor position
+pointer cr # Cosmic ray list
+real fluxratio # Flux ratio threshold
+int show # Show (0=all, 1=train)
+
+int i, ncr, flag
+real x1, x2, y1, y2
+pointer x, y, f
+
+begin
+ call gseti (gp, G_PLTYPE, 0)
+ call ggwind (gp, x1, x2, y1, y2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+ fluxratio = wy
+ call gseti (gp, G_PLTYPE, 2)
+ call gline (gp, x1, fluxratio, x2, fluxratio)
+
+ if (show == 1)
+ return
+
+ ncr = CR_NCR(cr)
+ x = CR_FLUX(cr) - 1
+ y = CR_RATIO(cr) - 1
+ f = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ flag = Memi[f+i]
+ if ((flag == ALWAYSYES) || (flag == ALWAYSNO))
+ next
+ x1 = Memr[x+i]
+ y1 = Memr[y+i]
+ if (flag == NO) {
+ if (y1 < fluxratio) {
+ Memi[f+i] = YES
+ call gseti (gp, G_PMLTYPE, 0)
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ }
+ } else {
+ if (y1 >= fluxratio) {
+ Memi[f+i] = NO
+ call gseti (gp, G_PMLTYPE, 0)
+ call gmark (gp, x1, y1, GM_CROSS, 2., 2.)
+ call gseti (gp, G_PMLTYPE, 1)
+ call gmark (gp, x1, y1, GM_PLUS, 2., 2.)
+ }
+ }
+ }
+end
+
+
+# CR_PLOT -- Make log plot
+
+procedure cr_plot (cr, im, fluxratio)
+
+pointer cr # Cosmic ray list
+pointer im # Image pointer
+real fluxratio # Flux ratio threshold
+
+int fd, open(), errcode()
+pointer sp, fname, gp, gt, gopen(), gt_init()
+errchk gopen
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+
+ # Open the plotfile.
+ call clgstr ("plotfile", Memc[fname], SZ_FNAME)
+ iferr (fd = open (Memc[fname], APPEND, BINARY_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ # Set up the graphics.
+ gp = gopen ("stdplot", NEW_FILE, fd)
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "mark")
+ call gt_sets (gt, GTXTRAN, "log")
+ call gt_setr (gt, GTXMIN, 10.)
+ call gt_setr (gt, GTYMIN, 0.)
+	call gt_sets (gt, GTTITLE, "Parameters of cosmic ray candidates")
+ call gt_sets (gt, GTPARAMS, IM_TITLE(im))
+ call gt_sets (gt, GTXLABEL, "Flux")
+ call gt_sets (gt, GTYLABEL, "Flux Ratio")
+
+ call cr_graph (gp, gt, cr, fluxratio, 'r')
+
+ call gt_free (gt)
+ call gclose (gp)
+ call close (fd)
+ call sfree (sp)
+end
+
+
+# CR_SHOW -- Select data to show.
+# This returns pointers to the data. Note the pointers are salloc from
+# the last smark which is done by the calling program.
+
+procedure cr_show (show, cr, x, y, w, flag, index, ncr)
+
+int show #I Data to show (0=all, 1=train)
+pointer cr #I CR data
+pointer x #O Fluxes
+pointer y #O Ratios
+pointer w #O Weights
+pointer flag #O Flags
+pointer index #O Index into CR data (if not null)
+int ncr #O Number of selected data points
+
+int i
+
+begin
+ switch (show) {
+ case 0:
+ ncr = CR_NCR(cr)
+ x = CR_FLUX(cr) - 1
+ y = CR_RATIO(cr) - 1
+ w = CR_WT(cr) - 1
+ flag = CR_FLAG(cr) - 1
+ index = NULL
+ case 1:
+ ncr = CR_NCR(cr)
+ call salloc (x, ncr, TY_REAL)
+ call salloc (y, ncr, TY_REAL)
+ call salloc (w, ncr, TY_REAL)
+ call salloc (flag, ncr, TY_INT)
+ call salloc (index, ncr, TY_INT)
+
+ ncr = 0
+ x = x - 1
+ y = y - 1
+ w = w - 1
+ flag = flag - 1
+ index = index - 1
+
+ do i = 1, CR_NCR(cr) {
+ if (Memr[CR_WT(cr)+i-1] == 0.)
+ next
+ ncr = ncr + 1
+ Memr[x+ncr] = Memr[CR_FLUX(cr)+i-1]
+ Memr[y+ncr] = Memr[CR_RATIO(cr)+i-1]
+ Memr[w+ncr] = Memr[CR_WT(cr)+i-1]
+ Memi[flag+ncr] = Memi[CR_FLAG(cr)+i-1]
+ Memi[index+ncr] = i
+ }
+ }
+end
diff --git a/noao/imred/crutil/src/crfind.x b/noao/imred/crutil/src/crfind.x
new file mode 100644
index 00000000..58850940
--- /dev/null
+++ b/noao/imred/crutil/src/crfind.x
@@ -0,0 +1,305 @@
+include <math/gsurfit.h>
+
+# CR_FIND -- Find cosmic ray candidates.
+# This procedure is an interface to special procedures specific to a given
+# window size.
+
+procedure cr_find (cr, threshold, data, nc, nl, col, line,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+pointer data[ARB] # Data lines
+int nc # Number of columns
+int nl # Number of lines
+int col # First column
+int line # Center line
+pointer sf1, sf2 # Surface fitting
+real x[ARB], y[ARB], z[ARB], w[ARB] # Surface arrays
+
+pointer a, b, c, d, e, f, g
+
+begin
+ switch (nl) {
+ case 5:
+ a = data[1]
+ b = data[2]
+ c = data[3]
+ d = data[4]
+ e = data[5]
+ call cr_find5 (cr, threshold, col, line, Memr[a], Memr[b],
+ Memr[c], Memr[d], Memr[e], nc, sf1, sf2, x, y, z, w)
+ case 7:
+ a = data[1]
+ b = data[2]
+ c = data[3]
+ d = data[4]
+ e = data[5]
+ f = data[6]
+ g = data[7]
+ call cr_find7 (cr, threshold, col, line, Memr[a], Memr[b],
+ Memr[c], Memr[d], Memr[e], Memr[f], Memr[g], nc,
+ sf1, sf2, x, y, z, w)
+ }
+end
+
+
+# CR_FIND7 -- Find cosmic ray candidates in a 7x7 window.
+# This routine finds cosmic ray candidates with the following algorithm.
+# 1. If the pixel is not a local maximum relative to its 48 neighbors
+#    go on to the next pixel.
+# 2. Identify the next strongest pixel in the 7x7 region.
+#    This suspect pixel is excluded in the following.
+# 3. Compute the flux of the 7x7 region excluding the cosmic ray
+#    candidate and the suspect pixel.
+# 4. The candidate must exceed the average flux per pixel by a specified
+#    threshold.  If not, go on to the next pixel.
+# 5. Fit a plane to the border pixels (excluding the suspect pixel).
+# 6. Subtract the background defined by the plane.
+# 7. Determine a replacement value as the average of the four adjacent
+#    pixels (excluding the suspect pixel).
+# 8. Add the pixel to the cosmic ray candidate list.
+
+procedure cr_find7 (cr, threshold, col, line, a, b, c, d, e, f, g, n,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+int col # First column
+int line # Line
+real a[ARB], b[ARB], c[ARB], d[ARB] # Image lines
+real e[ARB], f[ARB], g[ARB] # Image lines
+int n # Number of columns
+pointer sf1, sf2 # Surface fitting
+real x[49], y[49], z[49], w[49] # Surface arrays
+
+real bkgd[49]
+int i1, i2, i3, i4, i5, i6, i7, j, j1, j2
+real p, flux, replace, asumr()
+pointer sf
+
+begin
+ for (i4=4; i4<=n-3; i4=i4+1) {
+ # Must be local maxima.
+ p = d[i4]
+ if (p<a[i4]||p<b[i4]||p<c[i4]||p<e[i4]||p<f[i4]||p<g[i4])
+ next
+ i1 = i4 - 3
+ if (p<a[i1]||p<b[i1]||p<c[i1]||p<d[i1]||p<e[i1]||p<f[i1]||p<g[i1])
+ next
+ i2 = i4 - 2
+ if (p<a[i2]||p<b[i2]||p<c[i2]||p<d[i2]||p<e[i2]||p<f[i2]||p<g[i2])
+ next
+ i3 = i4 - 1
+ if (p<a[i3]||p<b[i3]||p<c[i3]||p<d[i3]||p<e[i3]||p<f[i3]||p<g[i3])
+ next
+ i5 = i4 + 1
+ if (p<a[i5]||p<b[i5]||p<c[i5]||p<d[i5]||p<e[i5]||p<f[i5]||p<g[i5])
+ next
+ i6 = i4 + 2
+ if (p<a[i6]||p<b[i6]||p<c[i6]||p<d[i6]||p<e[i6]||p<f[i6]||p<g[i6])
+ next
+ i7 = i4 + 3
+ if (p<a[i7]||p<b[i7]||p<c[i7]||p<d[i7]||p<e[i7]||p<f[i7]||p<g[i7])
+ next
+
+ # Convert to a single array in surface fitting order.
+ call amovr (a[i1], z[1], 7)
+ z[8] = b[i7]; z[9] = c[i7]; z[10] = d[i7]; z[11] = e[i7]
+ z[12] = f[i7]; z[13] = g[i7]; z[14] = g[i6]; z[15] = g[i5]
+	z[16] = g[i4]; z[17] = g[i3]; z[18] = g[i2]; z[19] = g[i1]
+ z[20] = f[i1]; z[21] = e[i1]; z[22] = d[i1]; z[23] = c[i1]
+ z[24] = b[i1]
+ call amovr (b[i2], z[25], 5)
+ call amovr (c[i2], z[30], 5)
+ call amovr (d[i2], z[35], 5)
+ call amovr (e[i2], z[40], 5)
+ call amovr (f[i2], z[45], 5)
+
+ # Find the highest point excluding the center.
+ j1 = 37; j2 = 1
+ do j = 2, 49 {
+ if (j == j1)
+ next
+ if (z[j] > z[j2])
+ j2 = j
+ }
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 49) - z[j1] - z[j2]) / 47
+
+	# Pixel must exceed the specified threshold.
+ if (p < flux + threshold)
+ next
+
+ # Fit and subtract the background.
+ if (j2 < 25) {
+ w[j2] = 0
+ sf = sf2
+ call gsfit (sf, x, y, z, w, 24, WTS_USER, j)
+ w[j2] = 1
+ } else {
+ sf = sf1
+ call gsrefit (sf, x, y, z, w, j)
+ }
+
+ call gsvector (sf, x, y, bkgd, 49)
+ call asubr (z, bkgd, z, 49)
+ p = z[j1]
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 49) - z[j1] - z[j2]) / 47
+
+ # Determine replacement value from four nearest neighbors again
+ # excluding the most deviant pixels.
+ replace = 0
+ j = 0
+ if (j2 != 32) {
+ replace = replace + c[i4]
+ j = j + 1
+ }
+ if (j2 != 36) {
+ replace = replace + d[i3]
+ j = j + 1
+ }
+ if (j2 != 38) {
+ replace = replace + d[i5]
+ j = j + 1
+ }
+ if (j2 != 42) {
+ replace = replace + e[i4]
+ j = j + 1
+ }
+ replace = replace / j
+
+ # Add pixel to cosmic ray list.
+ flux = 100. * flux
+ call cr_add (cr, col+i4-1, line, flux, flux/p, 0., replace, 0)
+ i4 = i7
+ }
+end
+
+
+# CR_FIND5 -- Find cosmic ray candidates in a 5x5 window.
+# This routine finds cosmic ray candidates with the following algorithm.
+# 1. If the pixel is not a local maximum relative to its 24 neighbors
+#    go on to the next pixel.
+# 2. Identify the next strongest pixel in the 5x5 region.
+#    This suspect pixel is excluded in the following.
+# 3. Compute the flux of the 5x5 region excluding the cosmic ray
+#    candidate and the suspect pixel.
+# 4. The candidate must exceed the average flux per pixel by a specified
+#    threshold.  If not, go on to the next pixel.
+# 5. Fit a plane to the border pixels (excluding the suspect pixel).
+# 6. Subtract the background defined by the plane.
+# 7. Determine a replacement value as the average of the four adjacent
+#    pixels (excluding the suspect pixel).
+# 8. Add the pixel to the cosmic ray candidate list.
+
+procedure cr_find5 (cr, threshold, col, line, a, b, c, d, e, n,
+ sf1, sf2, x, y, z, w)
+
+pointer cr # Cosmic ray list
+real threshold # Detection threshold
+int col # First column
+int line # Line
+real a[ARB], b[ARB], c[ARB], d[ARB], e[ARB] # Image lines
+int n # Number of columns
+pointer sf1, sf2 # Surface fitting
+real x[25], y[25], z[25], w[25] # Surface arrays
+
+real bkgd[25]
+int i1, i2, i3, i4, i5, j, j1, j2
+real p, flux, replace, asumr()
+pointer sf
+
+begin
+ for (i3=3; i3<=n-2; i3=i3+1) {
+ # Must be local maxima.
+ p = c[i3]
+ if (p<a[i3]||p<b[i3]||p<d[i3]||p<e[i3])
+ next
+ i1 = i3 - 2
+ if (p<a[i1]||p<b[i1]||p<c[i1]||p<d[i1]||p<e[i1])
+ next
+ i2 = i3 - 1
+ if (p<a[i2]||p<b[i2]||p<c[i2]||p<d[i2]||p<e[i2])
+ next
+ i4 = i3 + 1
+ if (p<a[i4]||p<b[i4]||p<c[i4]||p<d[i4]||p<e[i4])
+ next
+ i5 = i3 + 2
+ if (p<a[i5]||p<b[i5]||p<c[i5]||p<d[i5]||p<e[i5])
+ next
+
+ # Convert to a single array in surface fitting order.
+ call amovr (a[i1], z[1], 5)
+ z[6] = b[i5]; z[7] = c[i5]; z[8] = d[i5]; z[9] = e[i5]
+ z[10] = e[i4]; z[11] = e[i3]; z[12] = e[i2]; z[13] = e[i1]
+ z[14] = d[i1]; z[15] = c[i1]; z[16] = b[i1]
+ call amovr (b[i2], z[17], 3)
+ call amovr (c[i2], z[20], 3)
+ call amovr (d[i2], z[23], 3)
+
+ # Find the highest point excluding the center.
+ j1 = 21; j2 = 1
+ do j = 2, 25 {
+ if (j == j1)
+ next
+ if (z[j] > z[j2])
+ j2 = j
+ }
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 25) - z[j1] - z[j2]) / 23
+
+	# Pixel must exceed the specified threshold.
+ if (p < flux + threshold)
+ next
+
+ # Fit and subtract the background.
+ if (j2 < 17) {
+ w[j2] = 0
+ sf = sf2
+ call gsfit (sf, x, y, z, w, 16, WTS_USER, j)
+ w[j2] = 1
+ } else {
+ sf = sf1
+ call gsrefit (sf, x, y, z, w, j)
+ }
+
+ call gsvector (sf, x, y, bkgd, 25)
+ call asubr (z, bkgd, z, 25)
+ p = z[j1]
+
+ # Compute the flux excluding the extreme points.
+ flux = (asumr (z, 25) - z[j1] - z[j2]) / 23
+
+ # Determine replacement value from four nearest neighbors again
+ # excluding the most deviant pixels.
+ replace = 0
+ j = 0
+ if (j2 != 18) {
+ replace = replace + b[i3]
+ j = j + 1
+ }
+ if (j2 != 20) {
+ replace = replace + c[i2]
+ j = j + 1
+ }
+ if (j2 != 22) {
+ replace = replace + c[i4]
+ j = j + 1
+ }
+ if (j2 != 24) {
+ replace = replace + d[i3]
+ j = j + 1
+ }
+ replace = replace / j
+
+ # Add pixel to cosmic ray list.
+ flux = 100. * flux
+ call cr_add (cr, col+i3-1, line, flux, flux/p, 0., replace, 0)
+ i3 = i5
+ }
+end
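
The detection recipe in the comments above is easier to follow without the SPP pointer bookkeeping. The sketch below is our own Python illustration of steps 1-4 for the 5x5 case (the function name is ours); it omits the plane background fit, the replacement value, and the column-skipping optimization of the real routine.

    import numpy as np

    def find_candidates_5x5(image, threshold):
        """Yield (col, line, flux, ratio) for pixels passing the local-maximum and flux tests."""
        nl, nc = image.shape
        for l in range(2, nl - 2):
            for c in range(2, nc - 2):
                p = image[l, c]
                win = image[l-2:l+3, c-2:c+3].ravel()
                if p < win.max():                     # step 1: candidate must be the local maximum
                    continue
                rest = np.delete(win, 12)             # drop the candidate (center of the window)
                rest = np.delete(rest, rest.argmax()) # step 2: drop the next strongest (suspect) pixel
                flux = rest.mean()                    # step 3: mean flux of the remaining 23 pixels
                if p < flux + threshold:              # step 4: threshold test
                    continue
                yield c, l, 100.0 * flux, 100.0 * flux / p

A typical call would be list(find_candidates_5x5(img, 100.0)); in the SPP version the flux and ratio are recomputed after the fitted plane has been subtracted.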
diff --git a/noao/imred/crutil/src/crfix.cl b/noao/imred/crutil/src/crfix.cl
new file mode 100644
index 00000000..68947c7a
--- /dev/null
+++ b/noao/imred/crutil/src/crfix.cl
@@ -0,0 +1,20 @@
+# CRFIX -- Replace cosmic rays in an image using a cosmic ray mask.
+
+procedure crfix (input, output, crmask)
+
+file input {prompt="Input image"}
+file output {prompt="Output image"}
+file crmask {prompt="Cosmic ray mask"}
+
+begin
+ file in, out, crm
+
+ in = input
+ out = output
+ crm = crmask
+
+ if (in != out)
+ imcopy (in, out, verbose=no)
+ fixpix (out, crm, linterp="INDEF", cinterp="INDEF", verbose=no,
+ pixels=no)
+end
diff --git a/noao/imred/crutil/src/crgrow.par b/noao/imred/crutil/src/crgrow.par
new file mode 100644
index 00000000..f57eb3cc
--- /dev/null
+++ b/noao/imred/crutil/src/crgrow.par
@@ -0,0 +1,7 @@
+# CRGROW
+
+input,s,a,,,,Input cosmic ray masks
+output,s,a,,,,Output cosmic ray masks
+radius,r,h,1.,,,Grow radius
+inval,i,h,INDEF,,,Input mask value to grow (INDEF for any)
+outval,i,h,INDEF,,,Output grown mask value (INDEF for any)
diff --git a/noao/imred/crutil/src/crlist.h b/noao/imred/crutil/src/crlist.h
new file mode 100644
index 00000000..1ed498a7
--- /dev/null
+++ b/noao/imred/crutil/src/crlist.h
@@ -0,0 +1,17 @@
+define CR_ALLOC 100 # Allocation block size
+define CR_LENSTRUCT 9 # Length of structure
+
+define CR_NCR Memi[$1] # Number of cosmic rays
+define CR_NALLOC Memi[$1+1] # Length of cosmic ray list
+define CR_COL Memi[$1+2] # Pointer to columns
+define CR_LINE Memi[$1+3] # Pointer to lines
+define CR_FLUX Memi[$1+4] # Pointer to fluxes
+define CR_RATIO Memi[$1+5] # Pointer to flux ratios
+define CR_WT Memi[$1+6] # Pointer to training weights
+define CR_REPLACE Memi[$1+7] # Pointer to replacement values
+define CR_FLAG Memi[$1+8] # Pointer to rejection flag
+
+define ALWAYSNO 3
+define ALWAYSYES 4
+
+define CR_RMAX 3. # Maximum radius for matching
diff --git a/noao/imred/crutil/src/crlist.x b/noao/imred/crutil/src/crlist.x
new file mode 100644
index 00000000..bb49fb03
--- /dev/null
+++ b/noao/imred/crutil/src/crlist.x
@@ -0,0 +1,417 @@
+include <error.h>
+include <syserr.h>
+include <gset.h>
+include <pmset.h>
+include "crlist.h"
+
+define HELP "noao$lib/scr/cosmicrays.key"
+define PROMPT "cosmic ray options"
+
+# CR_OPEN -- Open cosmic ray list
+# CR_CLOSE -- Close cosmic ray list
+# CR_ADD -- Add a cosmic ray candidate to cosmic ray list.
+# CR_TRAIN -- Set flux ratio threshold from a training set.
+# CR_FINDTHRESH -- Find flux ratio.
+# CR_WEIGHT -- Compute the training weight at a particular flux ratio.
+# CR_FLAGS -- Set cosmic ray reject flags.
+# CR_BADPIX -- Store cosmic rays in bad pixel list.
+# CR_CRMASK -- Store cosmic rays in cosmic ray mask.
+# CR_REPLACE -- Replace cosmic rays in image with replacement values.
+
+# CR_OPEN -- Open cosmic ray list
+
+procedure cr_open (cr)
+
+pointer cr # Cosmic ray list pointer
+errchk malloc
+
+begin
+ call malloc (cr, CR_LENSTRUCT, TY_STRUCT)
+ call malloc (CR_COL(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_LINE(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_FLUX(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_RATIO(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_WT(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_REPLACE(cr), CR_ALLOC, TY_REAL)
+ call malloc (CR_FLAG(cr), CR_ALLOC, TY_INT)
+ CR_NCR(cr) = 0
+ CR_NALLOC(cr) = CR_ALLOC
+end
+
+
+# CR_CLOSE -- Close cosmic ray list
+
+procedure cr_close (cr)
+
+pointer cr # Cosmic ray list pointer
+
+begin
+ call mfree (CR_COL(cr), TY_REAL)
+ call mfree (CR_LINE(cr), TY_REAL)
+ call mfree (CR_FLUX(cr), TY_REAL)
+ call mfree (CR_RATIO(cr), TY_REAL)
+ call mfree (CR_WT(cr), TY_REAL)
+ call mfree (CR_REPLACE(cr), TY_REAL)
+ call mfree (CR_FLAG(cr), TY_INT)
+ call mfree (cr, TY_STRUCT)
+end
+
+# CR_ADD -- Add a cosmic ray candidate to cosmic ray list.
+
+procedure cr_add (cr, col, line, flux, ratio, wt, replace, flag)
+
+pointer cr # Cosmic ray list pointer
+int col # Column
+int line # Line
+real flux # Flux
+real ratio # Ratio
+real wt # Weight
+real replace # Replacement value
+int flag # Flag value
+
+int ncr
+errchk realloc
+
+begin
+ if (CR_NCR(cr) == CR_NALLOC(cr)) {
+ CR_NALLOC(cr) = CR_NALLOC(cr) + CR_ALLOC
+ call realloc (CR_COL(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_LINE(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_FLUX(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_RATIO(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_WT(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_REPLACE(cr), CR_NALLOC(cr), TY_REAL)
+ call realloc (CR_FLAG(cr), CR_NALLOC(cr), TY_INT)
+ }
+
+ ncr = CR_NCR(cr)
+ CR_NCR(cr) = ncr + 1
+ Memr[CR_COL(cr)+ncr] = col
+ Memr[CR_LINE(cr)+ncr] = line
+ Memr[CR_FLUX(cr)+ncr] = flux
+ Memr[CR_RATIO(cr)+ncr] = ratio
+ Memr[CR_WT(cr)+ncr] = wt
+ Memr[CR_REPLACE(cr)+ncr] = replace
+ Memi[CR_FLAG(cr)+ncr] = flag
+end
+
+
+# CR_TRAIN -- Set flux ratio threshold from a training set.
+
+procedure cr_train (cr, gp, gt, im, fluxratio, fname)
+
+pointer cr #I Cosmic ray list
+pointer gp #I GIO pointer
+pointer gt #I GTOOLS pointer
+pointer im #I IMIO pointer
+real fluxratio #O Flux ratio threshold
+char fname[ARB] #I Save file name
+
+char cmd[10]
+bool gflag
+real x, y, y1, y2, w, r, rmin
+int i, j, n, f, ncr, wcs, key, fd, clgcur(), open(), errcode()
+pointer col, line, ratio, flux, wt, flag
+
+begin
+ # Open save file
+ iferr (fd = open (fname, APPEND, TEXT_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ fd = 0
+ }
+
+ ncr = CR_NCR(cr)
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ flux = CR_FLUX(cr) - 1
+ ratio = CR_RATIO(cr) - 1
+ wt = CR_WT(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ gflag = false
+ n = 0
+ while (clgcur ("objects", x, y, wcs, key, cmd, 10) != EOF) {
+ switch (key) {
+ case '?':
+ call gpagefile (gp, HELP, PROMPT)
+ next
+ case 'q':
+ break
+ case 's':
+ w = 1
+ f = ALWAYSNO
+ case 'c':
+ w = -1
+ f = ALWAYSYES
+ case 'g':
+ if (gflag)
+ call cr_examine (cr, gp, gt, im, fluxratio, 'z')
+ else {
+ if (n > 1)
+ call cr_findthresh (cr, fluxratio)
+ call cr_flags (cr, fluxratio)
+ call cr_examine (cr, gp, gt, im, fluxratio, 'r')
+ gflag = true
+ }
+ next
+ default:
+ next
+ }
+
+ y1 = y - CR_RMAX
+ y2 = y + CR_RMAX
+ for (i=10; i<ncr && y1>Memr[line+i]; i=i+10)
+ ;
+ j = i - 9
+ rmin = (Memr[col+j] - x) ** 2 + (Memr[line+j] - y) ** 2
+ for (i=j+1; i<ncr && y2>Memr[line+i]; i=i+1) {
+ r = (Memr[col+i] - x) ** 2 + (Memr[line+i] - y) ** 2
+ if (r < rmin) {
+ rmin = r
+ j = i
+ }
+ }
+ if (sqrt (rmin) > CR_RMAX)
+ next
+
+ Memr[wt+j] = w
+ Memi[flag+j] = f
+ n = n + 1
+
+ if (gflag) {
+ if (n > 1) {
+ call cr_findthresh (cr, r)
+ call cr_update (gp, r, cr, fluxratio, 0)
+ }
+ call gmark (gp, Memr[flux+j], Memr[ratio+j], GM_BOX, 2., 2.)
+ }
+ if (fd > 0) {
+ call fprintf (fd, "%g %g %d %c\n")
+ call pargr (x)
+ call pargr (y)
+ call pargi (wcs)
+ call pargi (key)
+ }
+ }
+
+ if (fd > 0)
+ call close (fd)
+end
+
+
+# CR_FINDTHRESH -- Find flux ratio.
+
+procedure cr_findthresh (cr, fluxratio)
+
+pointer cr #I Cosmic ray list
+real fluxratio #O Flux ratio threshold
+
+real w, r, rmin, cr_weight()
+int i, ncr
+pointer ratio, wt
+
+begin
+ ncr = CR_NCR(cr)
+ ratio = CR_RATIO(cr) - 1
+ wt = CR_WT(cr) - 1
+
+ fluxratio = Memr[ratio+1]
+ rmin = cr_weight (fluxratio, Memr[ratio+1], Memr[wt+1], ncr)
+ do i = 2, ncr {
+ if (Memr[wt+i] == 0.)
+ next
+ r = Memr[ratio+i]
+ w = cr_weight (r, Memr[ratio+1], Memr[wt+1], ncr)
+ if (w <= rmin) {
+ if (w == rmin)
+ fluxratio = min (fluxratio, r)
+ else {
+ rmin = w
+ fluxratio = r
+ }
+ }
+ }
+end
+
+
+# CR_WEIGHT -- Compute the training weight at a particular flux ratio.
+
+real procedure cr_weight (fluxratio, ratio, wts, ncr)
+
+real fluxratio #I Flux ratio
+real ratio[ARB] #I Ratio Values
+real wts[ARB] #I Weights
+int ncr #I Number of ratio values
+real wt #O Sum of weights
+
+int i
+
+begin
+ wt = 0.
+ do i = 1, ncr {
+ if (ratio[i] > fluxratio) {
+ if (wts[i] < 0.)
+ wt = wt - wts[i]
+ } else {
+ if (wts[i] > 0.)
+ wt = wt + wts[i]
+ }
+ }
+ return (wt)
+end
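
In effect, CR_WEIGHT counts the training objects that a given flux-ratio threshold would misclassify (a weight of +1 marks a candidate the user identified as a star, -1 one identified as a cosmic ray, given the flagging rule in CR_FLAGS below), and CR_FINDTHRESH picks the marked ratio that minimizes that count. The following Python rendering of the logic is ours, a sketch rather than a line-for-line port:

    def misclassified(threshold, ratios, weights):
        """Count training points a flux-ratio threshold would misclassify."""
        bad = 0.0
        for r, w in zip(ratios, weights):
            if r > threshold and w < 0:      # marked cosmic ray that would be kept
                bad -= w
            elif r <= threshold and w > 0:   # marked star that would be rejected
                bad += w
        return bad

    def find_threshold(ratios, weights):
        """Return the marked ratio with the fewest misclassifications (lowest ratio on ties)."""
        best = None
        for r, w in zip(ratios, weights):
            if w == 0:
                continue
            bad = misclassified(r, ratios, weights)
            if best is None or bad < best[0] or (bad == best[0] and r < best[1]):
                best = (bad, r)
        return None if best is None else best[1]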
+
+
+# CR_FLAGS -- Set cosmic ray reject flags.
+
+procedure cr_flags (cr, fluxratio)
+
+pointer cr # Cosmic ray candidate list
+real fluxratio # Rejection limits
+
+int i, ncr
+pointer ratio, flag
+
+begin
+ ncr = CR_NCR(cr)
+ ratio = CR_RATIO(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ if ((Memi[flag+i] == ALWAYSYES) || (Memi[flag+i] == ALWAYSNO))
+ next
+ if (Memr[ratio+i] > fluxratio)
+ Memi[flag+i] = NO
+ else
+ Memi[flag+i] = YES
+ }
+end
+
+
+# CR_BADPIX -- Store cosmic rays in bad pixel list.
+# This is currently a temporary measure until a real bad pixel list is
+# implemented.
+
+procedure cr_badpix (cr, fname)
+
+pointer cr # Cosmic ray list
+char fname[ARB] # Bad pixel file name
+
+int i, ncr, c, l, f, fd, open(), errcode()
+pointer col, line, ratio, flux, flag
+errchk open
+
+begin
+ # Open bad pixel file
+ iferr (fd = open (fname, APPEND, TEXT_FILE)) {
+ if (errcode() != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ ncr = CR_NCR(cr)
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ flux = CR_FLUX(cr) - 1
+ ratio = CR_RATIO(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = 1, ncr {
+ f = Memi[flag+i]
+ if ((f == NO) || (f == ALWAYSNO))
+ next
+
+ c = Memr[col+i]
+ l = Memr[line+i]
+ call fprintf (fd, "%d %d\n")
+ call pargi (c)
+ call pargi (l)
+ }
+ call close (fd)
+end
+
+
+# CR_CRMASK -- Store cosmic rays in cosmic ray mask.
+
+procedure cr_crmask (cr, fname, ref)
+
+pointer cr # Cosmic ray list
+char fname[ARB] # Cosmic ray mask
+pointer ref # Reference image
+
+int i, axlen[7], depth, ncr, f, rl[3,2]
+long v[2]
+pointer sp, title, pm, col, line, flag, pm_newmask()
+errchk pm_loadf, pm_newmask, pm_savef
+
+begin
+ call smark (sp)
+ call salloc (title, SZ_LINE, TY_CHAR)
+
+ pm = pm_newmask (ref, 1)
+ iferr (call pm_loadf (pm, fname, Memc[title], SZ_LINE))
+ ;
+ call pm_gsize (pm, i, axlen, depth)
+
+ ncr = CR_NCR(cr)
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ RL_LEN(rl) = 2
+ RL_AXLEN(rl) = axlen[1]
+ RL_N(rl,2) = 1
+ RL_V(rl,2) = 1
+ RL_X(rl,2) = 1
+
+ do i = 1, ncr {
+ f = Memi[flag+i]
+ if ((f == NO) || (f == ALWAYSNO))
+ next
+
+ v[1] = Memr[col+i]
+ v[2] = Memr[line+i]
+ call pmplri (pm, v, rl, depth, 1, PIX_SRC)
+ }
+
+ call strcpy ("Cosmic ray mask from COSMICRAYS", Memc[title], SZ_LINE)
+ call pm_savef (pm, fname, Memc[title], PM_UPDATE)
+ call pm_close (pm)
+ call sfree (sp)
+end
+
+
+# CR_REPLACE -- Replace cosmic rays in image with replacement values.
+
+procedure cr_replace (cr, offset, im, nreplaced)
+
+pointer cr # Cosmic ray list
+int offset # Offset in list
+pointer im # IMIO pointer of output image
+int nreplaced # Number replaced (for log)
+
+int i, ncr, c, l, f
+real r
+pointer col, line, replace, flag, imps2r()
+
+begin
+ ncr = CR_NCR(cr)
+ if (ncr <= offset)
+ return
+
+ col = CR_COL(cr) - 1
+ line = CR_LINE(cr) - 1
+ replace = CR_REPLACE(cr) - 1
+ flag = CR_FLAG(cr) - 1
+
+ do i = offset+1, ncr {
+ f = Memi[flag+i]
+ if ((f == NO) || (f == ALWAYSNO))
+ next
+
+ c = Memr[col+i]
+ l = Memr[line+i]
+ r = Memr[replace+i]
+ Memr[imps2r (im, c, c, l, l)] = r
+ nreplaced = nreplaced + 1
+ }
+end
diff --git a/noao/imred/crutil/src/crmedian.par b/noao/imred/crutil/src/crmedian.par
new file mode 100644
index 00000000..2baccb05
--- /dev/null
+++ b/noao/imred/crutil/src/crmedian.par
@@ -0,0 +1,15 @@
+input,f,a,,,,Input image
+output,f,a,,,,Output image
+crmask,f,h,,,,Output cosmic ray mask
+median,f,h,,,,Output median image
+sigma,f,h,,,,Output sigma image
+residual,f,h,,,,Output residual image
+var0,r,h,0.,0.,,Variance coefficient for DN^0 term
+var1,r,h,0.,0.,,Variance coefficient for DN^1 term
+var2,r,h,0.,0.,,Variance coefficient for DN^2 term
+lsigma,r,h,10.,,,Low clipping sigma factor
+hsigma,r,h,3.,,,High clipping sigma factor
+ncmed,i,h,5,1,,Column box size for median level calculation
+nlmed,i,h,5,1,,Line box size for median level calculation
+ncsig,i,h,25,10,,Column box size for sigma calculation
+nlsig,i,h,25,10,,Line box size for sigma calculation
diff --git a/noao/imred/crutil/src/crnebula.cl b/noao/imred/crutil/src/crnebula.cl
new file mode 100644
index 00000000..c5f214f9
--- /dev/null
+++ b/noao/imred/crutil/src/crnebula.cl
@@ -0,0 +1,116 @@
+# CRNEBULA -- Cosmic ray cleaning for images with fine nebular structure.
+
+procedure crnebula (input, output)
+
+file input {prompt="Input image"}
+file output {prompt="Output image"}
+file crmask {prompt="Cosmic ray mask"}
+file residual {prompt="Residual median image"}
+file rmedresid {prompt="Residual ring median image"}
+real var0 = 1. {prompt="Variance coefficient for DN^0 term", min=0.}
+real var1 = 0. {prompt="Variance coefficient for DN^1 term", min=0.}
+real var2 = 0. {prompt="Variance coefficient for DN^2 term", min=0.}
+real sigmed = 3 {prompt="Sigma clip factor for residual median"}
+real sigrmed = 3 {prompt="Sigma clip factor for residual ring median"}
+
+int mbox = 5 {prompt="Median box size"}
+real rin = 1.5 {prompt="Inner radius for ring median"}
+real rout = 6 {prompt="Outer radius for ring median"}
+bool verbose = no {prompt="Verbose"}
+
+begin
+ file in, out, med, sig, resid, rmed
+ struct expr
+
+ # Query once for query parameters.
+ in = input
+ out = output
+
+ # Check on output images.
+ if (out != "" && imaccess (out))
+ error (1, "Output image already exists ("//out//")")
+ if (crmask != "" && imaccess (crmask))
+ error (1, "Output mask already exists ("//crmask//")")
+ if (residual != "" && imaccess (residual))
+ error (1, "Output residual image already exists ("//residual//")")
+ if (rmedresid != "" && imaccess (rmedresid))
+ error (1,
+ "Output ring median difference already exists ("//rmedresid//")")
+
+ # Create median results.
+ med = mktemp ("cr")
+ sig = mktemp ("cr")
+ resid = residual
+ if (resid == "")
+ resid = mktemp ("cr")
+ if (verbose)
+ printf ("Creating CRMEDIAN results\n")
+ crmedian (in, "", crmask="", median=med, sigma=sig, residual=resid,
+ var0=var0, var1=var1, var2=var2, lsigma=100., hsigma=sigmed,
+ ncmed=mbox, nlmed=mbox, ncsig=25, nlsig=25)
+
+ # Create ring median filtered image.
+ rmed = mktemp ("cr")
+ rmedian (in, rmed, rin, rout, ratio=1., theta=0., zloreject=INDEF,
+ zhireject=INDEF, boundary="wrap", constant=0., verbose=verbose)
+
+ # Create output images.
+ if (rmedresid != "") {
+ printf ("(a-b)/c\n") | scan (expr)
+ imexpr (expr, rmedresid, med, rmed, sig, dims="auto",
+ intype="auto", outtype="real", refim="auto", bwidth=0,
+ btype="nearest", bpixval=0., rangecheck=yes, verbose=no,
+ exprdb="none")
+ imdelete (rmed, verify-)
+ imdelete (sig, verify-)
+ if (out != "") {
+ if (verbose)
+ printf ("Create output image %s\n", out)
+ printf ("((a<%.3g)||(abs(b)>%.3g)) ? c : d\n",
+ sigmed, sigrmed) | scan (expr)
+ imexpr (expr, out, resid, rmedresid, in, med, dims="auto",
+ intype="auto", outtype="auto", refim="auto", bwidth=0,
+ btype="nearest", bpixval=0., rangecheck=yes, verbose=no,
+ exprdb="none")
+ }
+ if (crmask != "") {
+ if (verbose)
+ printf ("Create cosmic ray mask %s\n", crmask)
+ printf ("((a<%.3g)||(abs(b)>%.3g)) ? 0 : 1\n",
+ sigmed, sigrmed) | scan (expr)
+ set imtype = "pl"
+ imexpr (expr, crmask, resid, rmedresid, dims="auto",
+ intype="auto", outtype="short", refim="auto", bwidth=0,
+ btype="nearest", bpixval=0., rangecheck=yes, verbose=no,
+ exprdb="none")
+ }
+ imdelete (med, verify-)
+ } else {
+ if (out != "") {
+ if (verbose)
+ printf ("Create output image %s\n", out)
+ printf ("((a<%.3g)||(abs((b-c)/d)>%.3g)) ? e : b\n",
+ sigmed, sigrmed) | scan (expr)
+ imexpr (expr, out, resid, med, rmed, sig, in, dims="auto",
+ intype="auto", outtype="auto", refim="auto", bwidth=0,
+ btype="nearest", bpixval=0., rangecheck=yes, verbose=no,
+ exprdb="none")
+ }
+ if (crmask != "") {
+ if (verbose)
+ printf ("Create cosmic ray mask %s\n", crmask)
+ printf ("((a<%.3g)||(abs((b-c)/d)>%.3g)) ? 0 : 1\n",
+ sigmed, sigrmed) | scan (expr)
+ set imtype = "pl"
+ imexpr (expr, crmask, resid, med, rmed, sig, dims="auto",
+ intype="auto", outtype="short", refim="auto", bwidth=0,
+ btype="nearest", bpixval=0., rangecheck=yes, verbose=no,
+ exprdb="none")
+ }
+ imdelete (med, verify-)
+ imdelete (sig, verify-)
+ imdelete (rmed, verify-)
+ }
+ if (residual == "")
+ imdelete (resid, verify-)
+end
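
The two imexpr strings above implement a single replacement rule: a pixel is replaced by the median only where the median residual is significant and the median and ring-median images agree, so fine nebular structure (where they disagree) is left untouched. A numpy sketch of that rule as we read it (array and function names are ours):

    import numpy as np

    def crnebula_replace(data, med, rmed, resid, sig, sigmed=3.0, sigrmed=3.0):
        """Replace by the median only where the residual is high and med and rmed agree."""
        keep = (resid < sigmed) | (np.abs(med - rmed) / sig > sigrmed)
        return np.where(keep, data, med)

Here resid is the sigma-scaled residual image written by CRMEDIAN, sig its sigma image, and sigmed/sigrmed correspond to the task parameters of the same names; the mask expression is identical with 0/1 in place of the pixel values.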
diff --git a/noao/imred/crutil/src/crsurface.x b/noao/imred/crutil/src/crsurface.x
new file mode 100644
index 00000000..32645ff4
--- /dev/null
+++ b/noao/imred/crutil/src/crsurface.x
@@ -0,0 +1,46 @@
+define DUMMY 6
+
+# CR_SURFACE -- Draw a perspective view of a surface. The altitude
+# and azimuth of the viewing angle are variable.
+
+procedure cr_surface(gp, data, ncols, nlines, angh, angv)
+
+pointer gp # GIO pointer
+real data[ncols,nlines] # Surface data to be plotted
+int ncols, nlines # Dimensions of surface
+real angh, angv # Orientation of surface (degrees)
+
+int wkid
+pointer sp, work
+
+int first
+real vpx1, vpx2, vpy1, vpy2
+common /frstfg/ first
+common /noaovp/ vpx1, vpx2, vpy1, vpy2
+
+begin
+ call smark (sp)
+ call salloc (work, 2 * (2 * ncols * nlines + ncols + nlines), TY_REAL)
+
+ # Initialize surface common blocks
+ first = 1
+ call srfabd()
+
+ # Define viewport.
+ call ggview (gp, vpx1, vpx2, vpy1, vpy2)
+
+ # Link GKS to GIO
+ wkid = 1
+ call gopks (STDERR)
+ call gopwk (wkid, DUMMY, gp)
+ call gacwk (wkid)
+
+ call ezsrfc (data, ncols, nlines, angh, angv, Memr[work])
+
+ call gdawk (wkid)
+ # We don't want to close the GIO pointer.
+ #call gclwk (wkid)
+ call gclks ()
+
+ call sfree (sp)
+end
diff --git a/noao/imred/crutil/src/mkpkg b/noao/imred/crutil/src/mkpkg
new file mode 100644
index 00000000..1279b656
--- /dev/null
+++ b/noao/imred/crutil/src/mkpkg
@@ -0,0 +1,38 @@
+# COSMIC RAY CLEANING PACKAGE
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $call crutil
+ ;
+
+install:
+ $move xx_crutil.e noaobin$x_crutil.e
+ ;
+
+crutil:
+ $omake x_crutil.x
+ $link x_crutil.o libpkg.a -lxtools -lcurfit -lgsurfit -lncar -lgks\
+ -o xx_crutil.e
+ ;
+
+libpkg.a:
+ crexamine.x crlist.h <error.h> <gset.h> <mach.h> <pkg/gtools.h>\
+ <imhdr.h>
+ crfind.x <math/gsurfit.h>
+ crlist.x crlist.h <error.h> <gset.h> <pmset.h>
+ crsurface.x
+ t_cosmicrays.x crlist.h <error.h> <math/gsurfit.h> <imhdr.h> <gset.h>\
+ <pkg/gtools.h> <imset.h>
+ t_craverage.x <imhdr.h> <error.h> <mach.h>
+ t_crgrow.x <error.h> <imhdr.h>
+ t_crmedian.x <imhdr.h> <mach.h>
+ xtmaskname.x
+ ;
diff --git a/noao/imred/crutil/src/t_cosmicrays.x b/noao/imred/crutil/src/t_cosmicrays.x
new file mode 100644
index 00000000..fa68a39d
--- /dev/null
+++ b/noao/imred/crutil/src/t_cosmicrays.x
@@ -0,0 +1,329 @@
+include <error.h>
+include <imhdr.h>
+include <imset.h>
+include <math/gsurfit.h>
+include <gset.h>
+include <pkg/gtools.h>
+include "crlist.h"
+
+# T_COSMICRAYS -- Detect and remove cosmic rays in images.
+# A list of images is examined for cosmic rays which are then replaced
+# by values from neighboring pixels. The output image may be the same
+# as the input image. This is the top level procedure which manages
+# the input and output image data. The actual algorithm for detecting
+# cosmic rays is in CR_FIND.
+
+procedure t_cosmicrays ()
+
+int list1 # List of input images to be cleaned
+int list2 # List of output images
+int list3 # List of output bad pixel files
+real threshold # Detection threshold
+real fluxratio # Luminosity boundary for stars
+int npasses # Number of cleaning passes
+int szwin # Size of detection window
+bool train # Use training objects?
+pointer savefile # Save file for training objects
+bool interactive # Examine cosmic ray parameters?
+char ans # Answer to interactive query
+
+int nc, nl, c, c1, c2, l, l1, l2, szhwin, szwin2
+int i, j, k, m, ncr, ncrlast, nreplaced, flag
+pointer sp, input, output, badpix, str, gp, gt, im, in, out
+pointer x, y, z, w, sf1, sf2, cr, data, ptr
+
+bool clgetb(), streq(), strne()
+char clgetc()
+int imtopenp(), imtlen(), imtgetim(), clpopnu(), clgfil(), clgeti()
+real clgetr()
+pointer immap(), impl2r(), imgs2r(), gopen(), gt_init()
+errchk immap, impl2r, imgs2r
+errchk cr_find, cr_examine, cr_replace, cr_plot, cr_crmask
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (badpix, SZ_FNAME, TY_CHAR)
+ call salloc (savefile, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the task parameters. Check that the number of output images
+ # is either zero, in which case the cosmic rays will be removed
+ # in place, or equal to the number of input images.
+
+ list1 = imtopenp ("input")
+ list2 = imtopenp ("output")
+ i = imtlen (list1)
+ j = imtlen (list2)
+ if (j > 0 && j != i)
+ call error (0, "Input and output image lists do not match")
+
+ list3 = clpopnu ("crmasks")
+ threshold = clgetr ("threshold")
+ fluxratio = clgetr ("fluxratio")
+ npasses = clgeti ("npasses")
+ szwin = clgeti ("window")
+ train = clgetb ("train")
+ call clgstr ("savefile", Memc[savefile], SZ_FNAME)
+ interactive = clgetb ("interactive")
+ call clpstr ("answer", "yes")
+ ans = 'y'
+
+ # Set up the graphics.
+ call clgstr ("graphics", Memc[str], SZ_LINE)
+ if (interactive) {
+ gp = gopen (Memc[str], NEW_FILE+AW_DEFER, STDGRAPH)
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "mark")
+ call gt_sets (gt, GTXTRAN, "log")
+ call gt_setr (gt, GTXMIN, 10.)
+ call gt_setr (gt, GTYMIN, 0.)
+	call gt_sets (gt, GTTITLE, "Parameters of cosmic ray candidates")
+ call gt_sets (gt, GTXLABEL, "Flux")
+ call gt_sets (gt, GTYLABEL, "Flux Ratio")
+ }
+
+ # Set up surface fitting. The background points are placed together
+ # at the beginning of the arrays. There are two surface pointers,
+ # one for using the fast refit if there are no points excluded and
+ # one for doing a full fit with points excluded.
+
+ szhwin = szwin / 2
+ szwin2 = szwin * szwin
+ call salloc (data, szwin, TY_INT)
+ call salloc (x, szwin2, TY_REAL)
+ call salloc (y, szwin2, TY_REAL)
+ call salloc (z, szwin2, TY_REAL)
+ call salloc (w, szwin2, TY_REAL)
+
+ k = 0
+ do i = 1, szwin {
+ Memr[x+k] = i
+ Memr[y+k] = 1
+ k = k + 1
+ }
+ do i = 2, szwin {
+ Memr[x+k] = szwin
+ Memr[y+k] = i
+ k = k + 1
+ }
+ do i = szwin-1, 1, -1 {
+ Memr[x+k] = i
+ Memr[y+k] = szwin
+ k = k + 1
+ }
+ do i = szwin-1, 2, -1 {
+ Memr[x+k] = 1
+ Memr[y+k] = i
+ k = k + 1
+ }
+ do i = 2, szwin-1 {
+ do j = 2, szwin-1 {
+ Memr[x+k] = j
+ Memr[y+k] = i
+ k = k + 1
+ }
+ }
+ call aclrr (Memr[z], szwin2)
+ call amovkr (1., Memr[w], 4*szwin-4)
+ call gsinit (sf1, GS_POLYNOMIAL, 2, 2, NO, 1., real(szwin),
+ 1., real(szwin))
+ call gsinit (sf2, GS_POLYNOMIAL, 2, 2, NO, 1., real(szwin),
+ 1., real(szwin))
+ call gsfit (sf1, Memr[x], Memr[y], Memr[z], Memr[w], 4*szwin-4,
+ WTS_USER, j)
+
+ # Process each input image. Either work in place or create a
+ # new output image. If an error mapping the images occurs
+ # issue a warning and go on to the next input image.
+
+ while (imtgetim (list1, Memc[input], SZ_FNAME) != EOF) {
+ if (imtgetim (list2, Memc[output], SZ_FNAME) == EOF)
+ call strcpy (Memc[input], Memc[output], SZ_FNAME)
+ if (clgfil (list3, Memc[badpix], SZ_FNAME) == EOF)
+ Memc[badpix] = EOS
+
+ iferr {
+ in = NULL
+ out = NULL
+ cr = NULL
+
+ # Map the input image. If the output image is
+ # the same as the input image work in place.
+ # Initialize IMIO to use a scrolling buffer of lines.
+
+ if (streq (Memc[input], Memc[output])) {
+ im = immap (Memc[input], READ_WRITE, 0)
+ } else
+ im = immap (Memc[input], READ_ONLY, 0)
+ in = im
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ if ((nl < szwin) || (nc < szwin))
+ call error (0, "Image size is too small")
+ call imseti (in, IM_NBUFS, szwin)
+ call imseti (in, IM_TYBNDRY, BT_NEAREST)
+ call imseti (in, IM_NBNDRYPIX, szhwin)
+
+ # Open the output image if needed.
+ if (strne (Memc[input], Memc[output]))
+ im = immap (Memc[output], NEW_COPY, in)
+ out = im
+
+ # Open a cosmic ray list structure.
+ call cr_open (cr)
+ ncrlast = 0
+ nreplaced = 0
+
+ # Now proceed through the image line by line, scrolling
+ # the line buffers at each step. If creating a new image
+ # also write out each line as it is read. A procedure is
+ # called to find the cosmic ray candidates in the line
+ # and add them to the list maintained by CRLIST.
+ # Note that cosmic rays are not replaced at this point
+ # in order to allow the user to modify the criteria for
+ # a cosmic ray and review the results.
+
+ c1 = 1-szhwin
+ c2 = nc+szhwin
+ do i = 1, szwin-1
+ Memi[data+i] =
+ imgs2r (in, c1, c2, i-szhwin, i-szhwin)
+
+ do l = 1, nl {
+ do i = 1, szwin-1
+ Memi[data+i-1] = Memi[data+i]
+ Memi[data+szwin-1] =
+ imgs2r (in, c1, c2, l+szhwin, l+szhwin)
+ if (out != in)
+ call amovr (Memr[Memi[data+szhwin]+szhwin],
+ Memr[impl2r(out,l)], nc)
+
+ call cr_find (cr, threshold, Memi[data],
+ c2-c1+1, szwin, c1, l,
+ sf1, sf2, Memr[x], Memr[y], Memr[z], Memr[w])
+ }
+ if (interactive && train) {
+ call cr_train (cr, gp, gt, in, fluxratio, Memc[savefile])
+ train = false
+ }
+ call cr_flags (cr, fluxratio)
+
+ # If desired examine the cosmic ray list interactively.
+ if (interactive && ans != 'N') {
+ if (ans != 'Y') {
+ call eprintf ("%s - ")
+ call pargstr (Memc[input])
+ call flush (STDERR)
+ ans = clgetc ("answer")
+ }
+ if ((ans == 'Y') || (ans == 'y'))
+ call cr_examine (cr, gp, gt, in, fluxratio, 'r')
+ }
+
+ # Now replace the selected cosmic rays in the output image.
+
+ call imflush (out)
+ call imseti (out, IM_ADVICE, RANDOM)
+ call cr_replace (cr, ncrlast, out, nreplaced)
+
+ # Do additional passes through the data. We work in place
+ # in the output image. Note that we only have to look in
+ # the vicinity of replaced cosmic rays for secondary
+ # events since we've already looked at every pixel once.
+ # Instead of scrolling through the image we will extract
+ # subrasters around each replaced cosmic ray. However,
+ # we use pointers into the subraster to maintain the same
+ # format expected by CR_FIND.
+
+ if (npasses > 1) {
+ if (out != in)
+ call imunmap (out)
+ call imunmap (in)
+ im = immap (Memc[output], READ_WRITE, 0)
+ in = im
+ out = im
+ call imseti (in, IM_TYBNDRY, BT_NEAREST)
+ call imseti (in, IM_NBNDRYPIX, szhwin)
+
+ for (i=2; i<=npasses; i=i+1) {
+ # Loop through each cosmic ray in the previous pass.
+ ncr = CR_NCR(cr)
+ do j = ncrlast+1, ncr {
+ flag = Memi[CR_FLAG(cr)+j-1]
+ if (flag==NO || flag==ALWAYSNO)
+ next
+ c = Memr[CR_COL(cr)+j-1]
+ l = Memr[CR_LINE(cr)+j-1]
+ c1 = max (1-szhwin, c - (szwin-1))
+ c2 = min (nc+szhwin, c + (szwin-1))
+ k = c2 - c1 + 1
+ l1 = max (1-szhwin, l - (szwin-1))
+ l2 = min (nl+szhwin, l + (szwin-1))
+
+ # Set the line pointers off an image section
+ # centered on a previously replaced cosmic ray.
+
+ ptr = imgs2r (in, c1, c2, l1, l2) - k
+
+ l1 = max (1, l - szhwin)
+ l2 = min (nl, l + szhwin)
+ do l = l1, l2 {
+ do m = 1, szwin
+ Memi[data+m-1] = ptr + m * k
+ ptr = ptr + k
+
+ call cr_find ( cr, threshold, Memi[data],
+ k, szwin, c1, l, sf1, sf2,
+ Memr[x], Memr[y], Memr[z], Memr[w])
+ }
+ }
+ call cr_flags (cr, fluxratio)
+
+ # Replace any new cosmic rays found.
+ call cr_replace (cr, ncr, in, nreplaced)
+ ncrlast = ncr
+ }
+ }
+
+ # Output header log, log, plot, and bad pixels.
+ call sprintf (Memc[str], SZ_LINE,
+ "Threshold=%5.1f, fluxratio=%6.2f, removed=%d")
+ call pargr (threshold)
+ call pargr (fluxratio)
+ call pargi (nreplaced)
+ call imastr (out, "crcor", Memc[str])
+
+ call cr_plot (cr, in, fluxratio)
+ call cr_crmask (cr, Memc[badpix], in)
+
+ call cr_close (cr)
+ if (out != in)
+ call imunmap (out)
+ call imunmap (in)
+ } then {
+ # In case of error clean up and go on to the next image.
+ if (in != NULL) {
+ if (out != NULL && out != in)
+ call imunmap (out)
+ call imunmap (in)
+ }
+ if (cr != NULL)
+ call cr_close (cr)
+ call erract (EA_WARN)
+ }
+ }
+
+ if (interactive) {
+ call gt_free (gt)
+ call gclose (gp)
+ }
+ call imtclose (list1)
+ call imtclose (list2)
+ call clpcls (list3)
+ call gsfree (sf1)
+ call gsfree (sf2)
+ call sfree (sp)
+end
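
The surface-fitting setup above packs the window coordinates so that the 4*szwin-4 border points come first (the plane is fit to just that leading part of the arrays), followed by the interior points. The short Python check below is ours, written only to make the ordering explicit:

    def window_coords(szwin):
        """Window coordinates, border first (as in the setup above), then the interior."""
        coords = [(i, 1) for i in range(1, szwin + 1)]                        # bottom row
        coords += [(szwin, i) for i in range(2, szwin + 1)]                   # right column
        coords += [(i, szwin) for i in range(szwin - 1, 0, -1)]               # top row, reversed
        coords += [(1, i) for i in range(szwin - 1, 1, -1)]                   # left column, reversed
        coords += [(j, i) for i in range(2, szwin) for j in range(2, szwin)]  # interior
        return coords

    for szwin in (5, 7):
        border = 4 * szwin - 4
        coords = window_coords(szwin)
        assert len(coords) == szwin * szwin
        assert len(set(coords[:border])) == border
        assert all(1 in (x, y) or szwin in (x, y) for x, y in coords[:border])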
diff --git a/noao/imred/crutil/src/t_craverage.x b/noao/imred/crutil/src/t_craverage.x
new file mode 100644
index 00000000..f7b82113
--- /dev/null
+++ b/noao/imred/crutil/src/t_craverage.x
@@ -0,0 +1,847 @@
+include <error.h>
+include <imhdr.h>
+include <mach.h>
+
+define MAXBUF 500000 # Maximum pixel buffer
+
+define PLSIG 15.87 # Low percentile
+define PHSIG 84.13 # High percentile
+
+
+# T_CRAVERAGE -- Detect, fix, and flag cosmic rays. Also detect objects.
+# Deviant pixels relative to a local average with the candidate pixel
+# excluded and sigma are detected and replaced by the average value
+# and/or written to a cosmic ray mask. Average values above a the median
+# of a background annulus are detected as objects and cosmic rays are
+# excluded. The object positions may be output in the mask.
+
+procedure t_craverage ()
+
+int inlist # Input image list
+int outlist # Output image list
+int crlist # Output mask list
+int avglist # Output average list
+int siglist # Output sigma list
+int crval # Output cosmic ray mask value
+int objval # Output object mask value
+int navg # Averaging box size
+int nrej # Number of high pixels to reject from average
+int nbkg # Background width
+int nsig # Sigma box size
+real lobjsig, hobjsig # Object threshold sigmas
+real lcrsig, hcrsig # CR threshold sigmas outside of object
+real var0 # Variance coefficient for DN^0 term
+real var1 # Variance coefficient for DN^1 term
+real var2 # Variance coefficient for DN^2 term
+real crgrw # Cosmic ray grow radius
+real objgrw # Object grow radius
+
+int i, nc, nl, nlstep, nbox, l1, l2, l3, l4, nl1, pmmode
+pointer sp, input, output, crmask, crmask1, extname, average, sigma
+pointer in, out, pm, aim, sim
+pointer inbuf, pinbuf, outbuf, pbuf, abuf, sbuf
+
+real clgetr()
+int clgeti(), imtopenp(), imtgetim()
+pointer immap(), imgs2s(), imgs2r(), imps2r(), imps2s()
+errchk immap, imgs2s, imgs2r, imps2r, imps2s, craverage, crgrow, imgstr
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (crmask, SZ_FNAME, TY_CHAR)
+ call salloc (crmask1, SZ_FNAME, TY_CHAR)
+ call salloc (average, SZ_FNAME, TY_CHAR)
+ call salloc (sigma, SZ_FNAME, TY_CHAR)
+ call salloc (extname, SZ_FNAME, TY_CHAR)
+
+ # Get parameters.
+ inlist = imtopenp ("input")
+ outlist = imtopenp ("output")
+ crlist = imtopenp ("crmask")
+ avglist = imtopenp ("average")
+ siglist = imtopenp ("sigma")
+ crval = clgeti ("crval")
+ objval = clgeti ("objval")
+ navg = max (1, clgeti ("navg") / 2)
+ nrej = min (clgeti ("nrej"), navg-1)
+ nbkg = clgeti ("nbkg")
+ nsig = clgeti ("nsig")
+ lobjsig = clgetr ("lobjsig")
+ hobjsig = clgetr ("hobjsig")
+ lcrsig = clgetr ("lcrsig")
+ hcrsig = clgetr ("hcrsig")
+ nbox = 2 * (navg + nbkg) + 1
+ var0 = clgetr ("var0")
+ var1 = clgetr ("var1")
+ var2 = clgetr ("var2")
+ crgrw = clgetr ("crgrow")
+ objgrw = clgetr ("objgrow")
+
+ # Do the input images.
+ Memc[crmask1] = EOS
+ while (imtgetim (inlist, Memc[input], SZ_FNAME) != EOF) {
+ if (imtgetim (outlist, Memc[output], SZ_FNAME) == EOF)
+ Memc[output] = EOS
+ if (imtgetim (crlist, Memc[crmask], SZ_FNAME) == EOF)
+ call strcpy (Memc[crmask1], Memc[crmask], SZ_FNAME)
+ else if (Memc[crmask] == '!')
+ call strcpy (Memc[crmask], Memc[crmask1], SZ_FNAME)
+ if (imtgetim (avglist, Memc[average], SZ_FNAME) == EOF)
+ Memc[average] = EOS
+ if (imtgetim (siglist, Memc[sigma], SZ_FNAME) == EOF)
+ Memc[sigma] = EOS
+
+ # Map the input and output images.
+ iferr {
+ in = NULL; out = NULL; pm = NULL; aim = NULL; sim = NULL
+ inbuf = NULL; pinbuf = NULL; outbuf = NULL; pbuf = NULL;
+ abuf = NULL; sbuf=NULL
+
+ in = immap (Memc[input], READ_ONLY, 0)
+ if (Memc[output] != EOS)
+ out = immap (Memc[output], NEW_COPY, in)
+ if (Memc[crmask] != EOS) {
+ if (Memc[crmask] == '!')
+ call imgstr (in, Memc[crmask+1], Memc[crmask], SZ_FNAME)
+ pmmode = READ_WRITE
+ iferr (call imgstr (in, "extname", Memc[extname], SZ_FNAME))
+ call strcpy ("pl", Memc[extname], SZ_FNAME)
+ call xt_maskname (Memc[crmask], Memc[extname], pmmode,
+ Memc[crmask], SZ_FNAME)
+ iferr (pm = immap (Memc[crmask], pmmode, 0)) {
+ pmmode = NEW_COPY
+ pm = immap (Memc[crmask], pmmode, in)
+ }
+ }
+ if (Memc[average] != EOS)
+ aim = immap (Memc[average], NEW_COPY, in)
+ if (Memc[sigma] != EOS)
+ sim = immap (Memc[sigma], NEW_COPY, in)
+
+ # Go through the input in large blocks of lines. If the
+ # block is smaller than the whole image overlap the blocks
+ # so the average only has boundaries at the ends of the image.
+ # However, the output is done in non-overlapping blocks with
+ # the pointers are adjusted so that addresses can be in the
+ # space of the input block. CRAVERAGE does not address
+ # outside of the output data block. Set the mask values
+ # based on the distances to the nearest good pixels.
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ nlstep = max (1, MAXBUF / nc - nbox)
+
+ do i = 1, nl, nlstep {
+ l1 = i
+ l2 = min (nl, i + nlstep - 1)
+ l3 = max (1, l1 - nbox / 2)
+ l4 = min (nl, l2 + nbox / 2)
+ nl1 = l4 - l3 + 1
+ inbuf = imgs2r (in, 1, nc, l3, l4)
+ if (out != NULL)
+ outbuf = imps2r (out, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (pm != NULL) {
+ if (pmmode == READ_WRITE) {
+ pinbuf = imgs2s (pm, 1, nc, l3, l4)
+ pbuf = imps2s (pm, 1, nc, l1, l2)
+ call amovs (Mems[pinbuf+(l1-l3)*nc],
+ Mems[pbuf], nc*(l2-l1+1))
+ pbuf = pbuf - (l1 - l3) * nc
+ } else {
+ pinbuf = NULL
+ pbuf = imps2s (pm, 1, nc, l1, l2)
+ call aclrs (Mems[pbuf], nc*(l2-l1+1))
+ pbuf = pbuf - (l1 - l3) * nc
+ }
+ }
+ if (aim != NULL)
+ abuf = imps2r (aim, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (sim != NULL)
+ sbuf = imps2r (sim, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (pinbuf == NULL)
+ call craverage (inbuf, outbuf, pbuf, abuf, sbuf,
+ nc, nl1, l1-l3+1, l2-l3+1, navg, nrej, nbkg,
+ var0, var1, var2, nsig, lcrsig, hcrsig,
+ lobjsig, hobjsig, crval, objval)
+ else
+ call craverage1 (inbuf, pinbuf, outbuf, pbuf, abuf,
+ sbuf, nc, nl1, l1-l3+1, l2-l3+1, navg, nrej, nbkg,
+ var0, var1, var2, nsig, lcrsig, hcrsig,
+ lobjsig, hobjsig, crval, objval)
+ }
+
+ # Grow regions if desired. The routines are nops if the
+ # grow is zero.
+
+ if (pm != NULL) {
+ if (pmmode != READ_WRITE) {
+ call imunmap (pm)
+ iferr (pm = immap (Memc[crmask], READ_WRITE, 0))
+ call error (1, "Can't reopen mask for growing")
+ }
+
+ if (crval == objval)
+ call crgrow (pm, max (crgrw, objgrw), crval, crval)
+ else {
+ call crgrow (pm, crgrw, crval, crval)
+ call crgrow (pm, objgrw, objval, objval)
+ }
+ }
+ } then
+ call erract (EA_WARN)
+
+ if (sim != NULL)
+ call imunmap (sim)
+ if (aim != NULL)
+ call imunmap (aim)
+ if (pm != NULL)
+ call imunmap (pm)
+ if (out != NULL)
+ call imunmap (out)
+ call imunmap (in)
+ }
+
+ call imtclose (inlist)
+ call imtclose (outlist)
+ call imtclose (crlist)
+ call imtclose (avglist)
+ call imtclose (siglist)
+
+ call sfree (sp)
+end
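
The detection rule described in the header comment reduces to a per-pixel decision among three cases. The Python fragment below is an illustrative paraphrase (the names and scalar interface are ours); in the SPP the object thresholds are first divided by sqrt(navg2-1) to account for the noise reduction of the average:

    def classify(pixel, avg, bkg, sigma, lcrsig, hcrsig, losig, hosig):
        """avg: local average with the pixel excluded; bkg: annulus median; sigma: noise."""
        if avg < bkg - losig * sigma or avg > bkg + hosig * sigma:
            return "object"        # the whole neighborhood is elevated: leave the pixel alone
        if pixel < avg - lcrsig * sigma or pixel > avg + hcrsig * sigma:
            return "cosmic ray"    # isolated deviant pixel: replace with avg and/or flag
        return "good"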
+
+
+# CRAVERAGE -- Detect, replace, and flag cosmic rays.
+# A local background is computed using moving box averages to avoid
+# contamination by bad pixels.  If a variance model is given it is used;
+# otherwise a local sigma is computed in blocks (not a moving box, for
+# efficiency) from percentile points of the sorted pixel values, which
+# estimate the width of the distribution uncontaminated by bad pixels.  Once
+# the background and sigma are known, deviant pixels are found by applying
+# the sigma threshold factors.
+
+procedure craverage (in, out, pout, aout, sout, nc, nl, nl1, nl2,
+ navg, nrej, nbkg, var0, var1, var2, nsig, lcrsig, hcrsig,
+	lobjsig, hobjsig, crval, objval)
+
+pointer in #I Input data
+pointer out #O Output data
+pointer pout #O Output mask (0=good, 1=bad)
+pointer aout #O Output averages
+pointer sout #O Output sigmas
+int nc, nl #I Number of columns and lines
+int nl1, nl2 #I Lines to compute
+int navg #I Averaging box half-size
+int nrej #I Number of high pixels to reject from average
+int nbkg #I Median background width
+real var0 #I Variance coefficient for DN^0 term
+real var1 #I Variance coefficient for DN^1 term
+real var2 #I Variance coefficient for DN^2 term
+int nsig #I Sigma box size
+real lcrsig, hcrsig #I Threshold sigmas outside of object
+real lobjsig, hobjsig #I Object threshold sigmas
+int crval #I CR mask value
+int objval #I Object mask value
+
+int i, j, c, c1, c2, c3, c4, l, l1, l2, l3, l4, n1, n2
+int navg2, nbkg2, nsig2, plsig, phsig
+real data, avg, bkg, sigma, losig, hosig
+real low, high, cravg(), amedr()
+pointer stack, avgs, bkgs, sigs, work1, work2
+pointer ptr1, ptr2, ip, op, pp, ap, sp
+
+begin
+ navg2 = (2 * navg + 1) ** 2
+ nbkg2 = (2 * (navg + nbkg) + 1) ** 2 - navg2
+ nsig2 = nsig * nsig
+
+ call smark (stack)
+ call salloc (avgs, nc, TY_REAL)
+ call salloc (bkgs, nc, TY_REAL)
+ call salloc (sigs, nc, TY_REAL)
+ call salloc (work1, navg2, TY_REAL)
+ call salloc (work2, max (nsig2, nbkg2), TY_REAL)
+
+ if (var0 != 0. && var1 == 0. && var2 ==0.)
+ call amovkr (sqrt(var0), Memr[sigs], nc)
+
+ avgs = avgs - 1
+ sigs = sigs - 1
+ bkgs = bkgs - 1
+
+ plsig = nint (PLSIG*nsig2/100.-1)
+ phsig = nint (PHSIG*nsig2/100.-1)
+ losig = lobjsig / sqrt (real(navg2-1))
+ hosig = hobjsig / sqrt (real(navg2-1))
+
+ do l = nl1, nl2 {
+ # Compute statistics.
+ l1 = max (1, l-navg-nbkg)
+ l2 = max (1, l-navg)
+ l3 = min (nl, l+navg)
+ l4 = min (nl, l+navg+nbkg)
+ ap = aout + (l - 1) * nc
+ do c = 1, nc {
+ c1 = max (1, c-navg-nbkg)
+ c2 = max (1, c-navg)
+ c3 = min (nc, c+navg)
+ c4 = min (nc, c+navg+nbkg)
+ ptr1 = work1
+ ptr2 = work2
+ n1 = 0
+ n2 = 0
+ do j = l1, l2-1 {
+ ip = in + (j - 1) * nc + c1 - 1
+ do i = c1, c4 {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ ip = ip + 1
+ }
+ }
+ do j = l2, l3 {
+ ip = in + (j - 1) * nc + c1 - 1
+ do i = c1, c2-1 {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ ip = ip + 1
+ }
+ do i = c2, c3 {
+ if (j != l || i != c) {
+ Memr[ptr1] = Memr[ip]
+ n1 = n1 + 1
+ ptr1 = ptr1 + 1
+ }
+ ip = ip + 1
+ }
+ do i = c3+1, c4 {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ ip = ip + 1
+ }
+ }
+ do j = l3+1, l4 {
+ ip = in + (j - 1) * nc + c1 - 1
+ do i = c1, c4 {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ ip = ip + 1
+ }
+ }
+ avg = cravg (Memr[work1], n1, nrej)
+ bkg = amedr (Memr[work2], n2)
+ Memr[bkgs+c] = bkg
+ Memr[avgs+c] = avg
+ if (aout != NULL) {
+ Memr[ap] = avg - bkg
+ ap = ap + 1
+ }
+ }
+
+ # Compute sigmas and output if desired.
+ if (var0 != 0. || var1 != 0. || var2 != 0.) {
+ if (var1 != 0.) {
+ if (var2 != 0.) {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0+var1*data+var2*data**2)
+ }
+ } else {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0 + var1 * data)
+ }
+ }
+ } else if (var2 != 0.) {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0 + var2 * data**2)
+ }
+ }
+ } else {
+ # Compute sigmas from percentiles. This is done in blocks.
+ if (mod (l-nl1, nsig) == 0 && l<nl-nsig+1) {
+ do c = 1, nc-nsig+1, nsig {
+ ptr2 = work2
+ n2 = 0
+ do j = l, l+nsig-1 {
+ ip = in + (j - 1) * nc + c - 1
+ do i = 1, nsig {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ ip = ip + 1
+ }
+ }
+ call asrtr (Memr[work2], Memr[work2], n2)
+ sigma = (Memr[work2+phsig] - Memr[work2+plsig]) / 2.
+ call amovkr (sigma, Memr[sigs+c], nsig)
+ }
+ call amovkr (sigma, Memr[sigs+c], nc-c+1)
+ }
+ }
+ if (sout != NULL) {
+ sp = sout + (l - 1) * nc
+ do c = 1, nc {
+ Memr[sp] = Memr[sigs+c]
+ sp = sp + 1
+ }
+ }
+
+ # Detect, fix, and flag cosmic rays.
+ if (pout == NULL && out == NULL)
+ ;
+ else if (pout == NULL) {
+ ip = in + (l - 1) * nc
+ op = out + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high) {
+ Memr[op] = data
+ } else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high)
+ Memr[op] = avg
+ else
+ Memr[op] = data
+ }
+ ip = ip + 1
+ op = op + 1
+ }
+ } else if (out == NULL) {
+ ip = in + (l - 1) * nc
+ pp = pout + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high)
+ Mems[pp] = objval
+ else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high)
+ Mems[pp] = crval
+ }
+ ip = ip + 1
+ pp = pp + 1
+ }
+ } else {
+ ip = in + (l - 1) * nc
+ op = out + (l - 1) * nc
+ pp = pout + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high) {
+ Memr[op] = data
+ Mems[pp] = objval
+ } else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high) {
+ Memr[op] = avg
+ Mems[pp] = crval
+ } else
+ Memr[op] = data
+ }
+ ip = ip + 1
+ op = op + 1
+ pp = pp + 1
+ }
+ }
+ }
+
+ call sfree (stack)
+end
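
When no variance model is supplied, the per-block sigma above is read off the sorted pixel values at the 15.87 and 84.13 percentiles, which bracket plus and minus one sigma for a Gaussian, so half their separation is a width estimate that a few bad pixels barely move. A short self-contained demonstration (ours, for illustration):

    import numpy as np

    def percentile_sigma(block):
        """Half the 15.87-84.13 percentile separation: a robust Gaussian sigma estimate."""
        lo, hi = np.percentile(block, [15.87, 84.13])
        return (hi - lo) / 2.0

    rng = np.random.default_rng(0)
    block = rng.normal(100.0, 5.0, size=(25, 25))
    block[0, 0] = 1.0e4                        # a single cosmic ray hardly changes the estimate
    print(round(percentile_sigma(block), 2))   # close to 5.0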
+
+
+# CRAVERAGE1 -- Detect, replace, and flag cosmic rays checking input mask.
+# A local background is computed using moving box averages to avoid
+# contamination by bad pixels.  If a variance model is given it is used;
+# otherwise a local sigma is computed in blocks (not a moving box, for
+# efficiency) from percentile points of the sorted pixel values, which
+# estimate the width of the distribution uncontaminated by bad pixels.  Once
+# the background and sigma are known, deviant pixels are found by applying
+# the sigma threshold factors.
+
+procedure craverage1 (in, pin, out, pout, aout, sout, nc, nl, nl1, nl2,
+ navg, nrej, nbkg, var0, var1, var2, nsig, lcrsig, hcrsig,
+	lobjsig, hobjsig, crval, objval)
+
+pointer in #I Input data
+pointer pin #I Pixel mask data
+pointer out #O Output data
+pointer pout #O Output mask (0=good, 1=bad)
+pointer aout #O Output averages
+pointer sout #O Output sigmas
+int nc, nl #I Number of columns and lines
+int nl1, nl2 #I Lines to compute
+int navg #I Averaging box half-size
+int nrej #I Number of high pixels to reject from average
+int nbkg #I Median background width
+real var0 #I Variance coefficient for DN^0 term
+real var1 #I Variance coefficient for DN^1 term
+real var2 #I Variance coefficient for DN^2 term
+int nsig #I Sigma box size
+real lcrsig, hcrsig #I Threshold sigmas outside of object
+real lobjsig, hobjsig #I Object threshold sigmas
+int crval #I CR mask value
+int objval #I Object mask value
+
+int i, j, c, c1, c2, c3, c4, l, l1, l2, l3, l4, n1, n2
+int navg2, nbkg2, nsig2, plsig, phsig
+real data, avg, bkg, sigma, losig, hosig
+real low, high, cravg(), amedr()
+pointer stack, avgs, bkgs, sigs, work1, work2
+pointer ptr1, ptr2, ip, mp, op, pp, ap, sp
+
+begin
+ navg2 = (2 * navg + 1) ** 2
+ nbkg2 = (2 * (navg + nbkg) + 1) ** 2 - navg2
+ nsig2 = nsig * nsig
+
+ call smark (stack)
+ call salloc (avgs, nc, TY_REAL)
+ call salloc (bkgs, nc, TY_REAL)
+ call salloc (sigs, nc, TY_REAL)
+ call salloc (work1, navg2, TY_REAL)
+ call salloc (work2, max (nsig2, nbkg2), TY_REAL)
+
+ if (var0 != 0. && var1 == 0. && var2 ==0.)
+ call amovkr (sqrt(var0), Memr[sigs], nc)
+
+ avgs = avgs - 1
+ sigs = sigs - 1
+ bkgs = bkgs - 1
+
+ losig = lobjsig / sqrt (real(navg2-1))
+ hosig = hobjsig / sqrt (real(navg2-1))
+
+ do l = nl1, nl2 {
+ # Compute statistics.
+ l1 = max (1, l-navg-nbkg)
+ l2 = max (1, l-navg)
+ l3 = min (nl, l+navg)
+ l4 = min (nl, l+navg+nbkg)
+ ap = aout + (l - 1) * nc
+ do c = 1, nc {
+ c1 = max (1, c-navg-nbkg)
+ c2 = max (1, c-navg)
+ c3 = min (nc, c+navg)
+ c4 = min (nc, c+navg+nbkg)
+ ptr1 = work1
+ ptr2 = work2
+ n1 = 0
+ n2 = 0
+ do j = l1, l2-1 {
+ ip = in + (j - 1) * nc + c1 - 1
+ mp = pin + (j - 1) * nc + c1 - 1
+ do i = c1, c4 {
+ if (Mems[mp] == 0) {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ }
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ do j = l2, l3 {
+ ip = in + (j - 1) * nc + c1 - 1
+ mp = pin + (j - 1) * nc + c1 - 1
+ do i = c1, c2-1 {
+ if (Mems[mp] == 0) {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ }
+ ip = ip + 1
+ mp = mp + 1
+ }
+ do i = c2, c3 {
+ if ((j != l || i != c) && Mems[mp] == 0) {
+ Memr[ptr1] = Memr[ip]
+ n1 = n1 + 1
+ ptr1 = ptr1 + 1
+ }
+ ip = ip + 1
+ mp = mp + 1
+ }
+ do i = c3+1, c4 {
+ if (Mems[mp] == 0) {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ }
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ do j = l3+1, l4 {
+ ip = in + (j - 1) * nc + c1 - 1
+ mp = pin + (j - 1) * nc + c1 - 1
+ do i = c1, c4 {
+ if (Mems[mp] == 0) {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ }
+ ip = ip + 1
+ }
+ }
+ if (n1 > 0)
+ avg = cravg (Memr[work1], n1, nrej)
+ else
+ avg = INDEFR
+ if (n2 > 0)
+ bkg = amedr (Memr[work2], n2)
+ else
+ bkg = INDEFR
+ Memr[bkgs+c] = bkg
+ Memr[avgs+c] = avg
+ if (aout != NULL) {
+ if (IS_INDEFR(avg) || IS_INDEFR(bkg))
+ Memr[ap] = 0.
+ else
+ Memr[ap] = avg - bkg
+ ap = ap + 1
+ }
+ }
+
+ # Compute sigmas and output if desired.
+ if (var0 != 0. || var1 != 0. || var2 != 0.) {
+ if (var1 != 0.) {
+ if (var2 != 0.) {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0+var1*data+var2*data**2)
+ }
+ } else {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0 + var1 * data)
+ }
+ }
+ } else if (var2 != 0.) {
+ do c = 1, nc {
+ data = max (0., Memr[avgs+c])
+ Memr[sigs+c] = sqrt (var0 + var2 * data**2)
+ }
+ }
+ } else {
+ # Compute sigmas from percentiles. This is done in blocks.
+ if (mod (l-nl1, nsig) == 0 && l<nl-nsig+1) {
+ do c = 1, nc-nsig+1, nsig {
+ ptr2 = work2
+ n2 = 0
+ do j = l, l+nsig-1 {
+ ip = in + (j - 1) * nc + c - 1
+ mp = pin + (j - 1) * nc + c - 1
+ do i = 1, nsig {
+ if (Mems[mp] == 0) {
+ Memr[ptr2] = Memr[ip]
+ n2 = n2 + 1
+ ptr2 = ptr2 + 1
+ }
+ ip = ip + 1
+ mp = mp + 1
+ }
+ }
+ if (n2 > 10) {
+ call asrtr (Memr[work2], Memr[work2], n2)
+ plsig = nint (PLSIG*n2/100.-1)
+ phsig = nint (PHSIG*n2/100.-1)
+ sigma = (Memr[work2+phsig]-Memr[work2+plsig])/2.
+ } else
+ sigma = INDEFR
+ call amovkr (sigma, Memr[sigs+c], nsig)
+ }
+ call amovkr (sigma, Memr[sigs+c], nc-c+1)
+ }
+ }
+ if (sout != NULL) {
+ sp = sout + (l - 1) * nc
+ do c = 1, nc {
+ sigma = Memr[sigs+c]
+ if (IS_INDEFR(sigma))
+ Memr[sp] = 0.
+ else
+ Memr[sp] = sigma
+ sp = sp + 1
+ }
+ }
+
+ # Detect, fix, and flag cosmic rays.
+ if (pout == NULL && out == NULL)
+ ;
+	else if (pout == NULL) {
+ ip = in + (l - 1) * nc
+ mp = pin + (l - 1) * nc
+ op = out + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ if (!(Mems[mp] != 0 || IS_INDEFR(avg) ||
+ IS_INDEFR(bkg) || IS_INDEFR(sigma))) {
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high) {
+ Memr[op] = data
+ } else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high)
+ Memr[op] = avg
+ else
+ Memr[op] = data
+ }
+ } else
+ Memr[op] = data
+ ip = ip + 1
+ mp = mp + 1
+ op = op + 1
+ }
+ } else if (out == NULL) {
+ ip = in + (l - 1) * nc
+ mp = pin + (l - 1) * nc
+ pp = pout + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ if (!(Mems[mp] != 0 || IS_INDEFR(avg) ||
+ IS_INDEFR(bkg) || IS_INDEFR(sigma))) {
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high)
+ Mems[pp] = objval
+ else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high)
+ Mems[pp] = crval
+ }
+ }
+ ip = ip + 1
+ mp = mp + 1
+ pp = pp + 1
+ }
+ } else {
+ ip = in + (l - 1) * nc
+ mp = pin + (l - 1) * nc
+ op = out + (l - 1) * nc
+ pp = pout + (l - 1) * nc
+ do c = 1, nc {
+ data = Memr[ip]
+ avg = Memr[avgs+c]
+ bkg = Memr[bkgs+c]
+ sigma = Memr[sigs+c]
+ if (!(Mems[mp] != 0 || IS_INDEFR(avg) ||
+ IS_INDEFR(bkg) || IS_INDEFR(sigma))) {
+ low = bkg - losig * sigma
+ high = bkg + hosig * sigma
+ if (avg < low || avg > high) {
+ Memr[op] = data
+ Mems[pp] = objval
+ } else {
+ low = avg - lcrsig * sigma
+ high = avg + hcrsig * sigma
+ if (data < low || data > high) {
+ Memr[op] = avg
+ Mems[pp] = crval
+ } else
+ Memr[op] = data
+ }
+ } else
+ Memr[op] = data
+ ip = ip + 1
+ mp = mp + 1
+ op = op + 1
+ pp = pp + 1
+ }
+ }
+ }
+
+ call sfree (stack)
+end
+
+
+# CRAVG -- Compute average with the highest nrej points excluded.
+# When nrej is greater than 2 the data array will be returned sorted.
+
+real procedure cravg (data, npts, nrej)
+
+real data[npts] #I Input data (will be sorted if nrej>2)
+int npts #I Number of data points
+int nrej #I Number of data points to reject
+
+int i
+real sum, max1, max2, val
+
+begin
+ if (npts <= nrej)
+ return (INDEFR)
+
+ switch (nrej) {
+ case 0:
+ sum = 0.
+ do i = 1, npts
+ sum = sum + data[i]
+ case 1:
+ sum = 0.
+ max1 = data[1]
+ do i = 2, npts {
+ val = data[i]
+ if (val > max1) {
+ sum = sum + max1
+ max1 = val
+ } else
+ sum = sum + val
+ }
+ case 2:
+ sum = 0.
+ max1 = min (data[1], data[2])
+ max2 = max (data[1], data[2])
+ do i = 3, npts {
+ val = data[i]
+ if (val > max1) {
+ sum = sum + max1
+ if (val > max2) {
+ max1 = max2
+ max2 = val
+ } else
+ max1 = val
+ } else
+ sum = sum + val
+ }
+ default:
+ call asrtr (data, data, npts)
+ sum = 0.
+ do i = 1, npts-nrej
+ sum = sum + data[i]
+ }
+
+ return (sum / (npts - nrej))
+end
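
CRAVG is simply a rejected mean; the case analysis above only avoids a sort when nrej is 0, 1, or 2. A few lines of Python give an equivalent result for reference (unlike the SPP version it does not sort the caller's array in place when nrej > 2):

    def cravg(data, nrej):
        """Average of data with the nrej highest values excluded; None if too few points."""
        if len(data) <= nrej:
            return None
        keep = sorted(data)[:len(data) - nrej]
        return sum(keep) / len(keep)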
diff --git a/noao/imred/crutil/src/t_crgrow.x b/noao/imred/crutil/src/t_crgrow.x
new file mode 100644
index 00000000..7316cc76
--- /dev/null
+++ b/noao/imred/crutil/src/t_crgrow.x
@@ -0,0 +1,182 @@
+include <error.h>
+include <imhdr.h>
+
+# T_CRGROW -- Grow cosmic ray mask identifications.
+
+procedure t_crgrow ()
+
+int input # Input masks
+int output # Output masks
+real radius # Radius
+int inval # Input mask value to grow
+int outval # Output grown mask value
+
+pointer sp, inmask, outmask, temp1, temp2
+pointer im, ptr
+
+int imtopenp(), imtlen(), imtgetim()
+bool strne()
+int clgeti()
+real clgetr()
+pointer immap()
+errchk immap, crgrow
+
+begin
+ call smark (sp)
+ call salloc (inmask, SZ_FNAME, TY_CHAR)
+ call salloc (outmask, SZ_FNAME, TY_CHAR)
+ call salloc (temp1, SZ_FNAME, TY_CHAR)
+ call salloc (temp2, SZ_FNAME, TY_CHAR)
+
+ # Task parameters.
+ input = imtopenp ("input")
+ output = imtopenp ("output")
+ radius = max (0., clgetr ("radius"))
+ inval = clgeti ("inval")
+ outval = clgeti ("outval")
+
+ if (imtlen (output) != imtlen (input))
+ call error (1, "Input and output lists do not match")
+
+ # Grow the cosmic ray masks.
+ while (imtgetim (input, Memc[inmask], SZ_FNAME) != EOF) {
+ call strcpy (Memc[inmask], Memc[outmask], SZ_FNAME)
+ if (imtgetim (output, Memc[outmask], SZ_FNAME) == EOF)
+ call error (1, "Output list ended prematurely")
+ if (strne (Memc[inmask], Memc[outmask])) {
+ call imgcluster (Memc[inmask], Memc[temp1], SZ_FNAME)
+ call imgcluster (Memc[outmask], Memc[temp2], SZ_FNAME)
+ iferr (call imcopy (Memc[temp1], Memc[temp2])) {
+ call erract (EA_WARN)
+ next
+ }
+		im = immap (Memc[inmask], READ_ONLY, 0)
+ iferr (call imgstr (im, "extname", Memc[temp1], SZ_FNAME))
+ call strcpy ("pl", Memc[temp1], SZ_FNAME)
+ call imunmap (im)
+ }
+ call xt_maskname (Memc[outmask], Memc[temp1], 0, Memc[outmask],
+ SZ_FNAME)
+
+ if (radius < 1.)
+ next
+
+ iferr {
+ im = NULL
+ ptr = immap (Memc[outmask], READ_WRITE, 0); im = ptr
+ call crgrow (im, radius, inval, outval)
+ } then {
+ call erract (EA_WARN)
+ if (strne (Memc[inmask], Memc[outmask])) {
+ if (im != NULL) {
+ call imunmap (im)
+ iferr (call imdelete (Memc[outmask]))
+ call erract (EA_WARN)
+ }
+ }
+ }
+
+ if (im != NULL)
+ call imunmap (im)
+ }
+
+ call imtclose (output)
+ call imtclose (input)
+ call sfree (sp)
+end
+
+
+# CRGROW -- Grow cosmic rays.
+
+procedure crgrow (im, grow, inval, outval)
+
+pointer im # Mask pointer (Read/Write)
+real grow # Radius (pixels)
+int inval # Input mask value for pixels to grow
+int outval # Output mask value for grown pixels
+
+int i, j, k, l, nc, nl, ngrow, nbufs, val1, val2
+long v1[2], v2[2]
+real grow2, y2
+pointer	buf, buf1, buf2, ptr
+
+int imgnli(), impnli()
+errchk calloc, imgnli, impnli
+
+begin
+ if (grow < 1. || inval == 0)
+ return
+
+ grow2 = grow * grow
+ ngrow = int (grow)
+ buf = NULL
+
+ iferr {
+ if (IM_NDIM(im) > 2)
+ call error (1,
+ "Only one or two dimensional masks are allowed")
+
+ nc = IM_LEN(im, 1)
+ if (IM_NDIM(im) > 1)
+ nl = IM_LEN(im,2)
+ else
+ nl = 1
+
+ # Initialize buffering.
+ nbufs = min (1 + 2 * ngrow, nl)
+ call calloc (buf, nc*nbufs, TY_INT)
+
+ call amovkl (long(1), v1, IM_NDIM(im))
+ call amovkl (long(1), v2, IM_NDIM(im))
+ while (imgnli (im, buf1, v1) != EOF) {
+ j = v1[2] - 1
+ buf2 = buf + mod (j, nbufs) * nc
+ do i = 1, nc {
+ val1 = Memi[buf1]
+ val2 = Memi[buf2]
+ if ((IS_INDEFI(inval) && val1 != 0) || val1 == inval) {
+ do k = max(1,j-ngrow), min (nl,j+ngrow) {
+ ptr = buf + mod (k, nbufs) * nc - 1
+ y2 = (k - j) ** 2
+ do l = max(1,i-ngrow), min (nc,i+ngrow) {
+ if ((l-i)**2 + y2 > grow2)
+ next
+ Memi[ptr+l] = -val1
+ }
+ }
+ } else {
+ if (val2 >= 0)
+ Memi[buf2] = val1
+ }
+ buf1 = buf1 + 1
+ buf2 = buf2 + 1
+ }
+
+ if (j > ngrow) {
+ while (impnli (im, buf2, v2) != EOF) {
+ k = v2[2] - 1
+ buf1 = buf + mod (k, nbufs) * nc
+ do i = 1, nc {
+ val1 = Memi[buf1]
+ if (val1 < 0) {
+ if (IS_INDEFI(outval))
+ Memi[buf2] = -val1
+ else
+ Memi[buf2] = outval
+ } else
+ Memi[buf2] = val1
+			    Memi[buf1] = 0
+ buf1 = buf1 + 1
+ buf2 = buf2 + 1
+ }
+ if (j != nl)
+ break
+ }
+ }
+ }
+ } then
+ call erract (EA_ERROR)
+
+ if (buf != NULL)
+ call mfree (buf, TY_INT)
+end
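
The essence of CRGROW is a circular dilation of selected mask values.  The
sketch below is a simplified Python illustration with hypothetical names; it
ignores the line-buffering scheme used above and, unlike the SPP code, leaves
pixels that already carry some other mask value untouched.

    # Simplified circular mask growing (illustrative only).
    def grow_mask(mask, radius, inval, outval):
        nl, nc = len(mask), len(mask[0])
        r, r2 = int(radius), radius * radius
        out = [row[:] for row in mask]
        for j in range(nl):
            for i in range(nc):
                if mask[j][i] != inval:
                    continue
                for k in range(max(0, j - r), min(nl, j + r + 1)):
                    for l in range(max(0, i - r), min(nc, i + r + 1)):
                        if (k - j) ** 2 + (l - i) ** 2 <= r2 and out[k][l] == 0:
                            out[k][l] = outval   # grow into unflagged pixels
        return out
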
diff --git a/noao/imred/crutil/src/t_crmedian.x b/noao/imred/crutil/src/t_crmedian.x
new file mode 100644
index 00000000..6c7d0fba
--- /dev/null
+++ b/noao/imred/crutil/src/t_crmedian.x
@@ -0,0 +1,417 @@
+include <imhdr.h>
+include <mach.h>
+
+define MAXBUF 500000 # Maximum pixel buffer
+
+define PLSIG 15.87 # Low percentile
+define PHSIG 84.13 # High percentile
+
+
+# T_CRMEDIAN -- Detect, fix, and flag cosmic rays.
+# Deviant pixels relative to a local median and sigma are detected and
+# replaced by the median value and/or written to a cosmic ray mask.
+
+procedure t_crmedian ()
+
+pointer input # Input image
+pointer output # Output image
+pointer crmask # Output mask
+pointer median # Output median
+pointer sigma # Output sigma
+pointer residual # Output residual
+real var0 # Variance coefficient for DN^0 term
+real var1 # Variance coefficient for DN^1 term
+real var2 # Variance coefficient for DN^2 term
+real lsig, hsig # Threshold sigmas
+int ncmed, nlmed # Median box size
+int ncsig, nlsig # Sigma box size
+
+int i, nc, nl, nlstep, l1, l2, l3, l4, nl1
+pointer sp, extname, in, out, pm, mim, sim, rim
+pointer inbuf, outbuf, pmbuf, mbuf, sbuf, rbuf
+real clgetr()
+int clgeti(), nowhite()
+pointer immap(), imgs2r(), imps2r(), imps2s()
+errchk immap, imgs2r, imps2r, imps2s, crmedian, imgstr
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (crmask, SZ_FNAME, TY_CHAR)
+ call salloc (residual, SZ_FNAME, TY_CHAR)
+ call salloc (median, SZ_FNAME, TY_CHAR)
+ call salloc (sigma, SZ_FNAME, TY_CHAR)
+ call salloc (extname, SZ_FNAME, TY_CHAR)
+
+ # Get parameters.
+ call clgstr ("input", Memc[input], SZ_FNAME)
+ call clgstr ("output", Memc[output], SZ_FNAME)
+ call clgstr ("crmask", Memc[crmask], SZ_FNAME)
+ call clgstr ("median", Memc[median], SZ_FNAME)
+ call clgstr ("sigma", Memc[sigma], SZ_FNAME)
+ call clgstr ("residual", Memc[residual], SZ_FNAME)
+ var0 = clgetr ("var0")
+ var1 = clgetr ("var1")
+ var2 = clgetr ("var2")
+ lsig = clgetr ("lsigma")
+ hsig = clgetr ("hsigma")
+ ncmed = clgeti ("ncmed")
+ nlmed = clgeti ("nlmed")
+ ncsig = clgeti ("ncsig")
+ nlsig = clgeti ("nlsig")
+
+ # Map the input and output images.
+ in = NULL; out = NULL; pm = NULL; mim = NULL; sim = NULL; rim = NULL
+ inbuf = NULL; outbuf = NULL; pmbuf = NULL
+ mbuf = NULL; sbuf=NULL; rbuf = NULL
+ in = immap (Memc[input], READ_ONLY, 0)
+ if (nowhite (Memc[output], Memc[output], SZ_FNAME) > 0)
+ out = immap (Memc[output], NEW_COPY, in)
+ if (nowhite (Memc[crmask], Memc[crmask], SZ_FNAME) > 0) {
+ if (Memc[crmask] == '!')
+ call imgstr (in, Memc[crmask+1], Memc[crmask], SZ_FNAME)
+ iferr (call imgstr (in, "extname", Memc[extname], SZ_FNAME))
+ call strcpy ("pl", Memc[extname], SZ_FNAME)
+ call xt_maskname (Memc[crmask], Memc[extname], 0, Memc[crmask],
+ SZ_FNAME)
+ pm = immap (Memc[crmask], NEW_COPY, in)
+ }
+ if (nowhite (Memc[median], Memc[median], SZ_FNAME) > 0)
+ mim = immap (Memc[median], NEW_COPY, in)
+ if (nowhite (Memc[sigma], Memc[sigma], SZ_FNAME) > 0)
+ sim = immap (Memc[sigma], NEW_COPY, in)
+ if (nowhite (Memc[residual], Memc[residual], SZ_FNAME) > 0)
+ rim = immap (Memc[residual], NEW_COPY, in)
+
+	# Go through the input in large blocks of lines.  If the
+	# block is smaller than the whole image, overlap the blocks
+	# so the median only has boundaries at the ends of the image.
+	# However, the output is done in non-overlapping blocks with
+	# the pointers adjusted so that addresses can be in the space
+ # of the input block. CRMEDIAN does not address outside of
+ # the output data block. Set the mask values based on the
+ # distances to the nearest good pixels.
+
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ nlstep = max (1, MAXBUF / nc - nlmed)
+
+ do i = 1, nl, nlstep {
+ l1 = i
+ l2 = min (nl, i + nlstep - 1)
+ l3 = max (1, l1 - nlmed / 2)
+ l4 = min (nl, l2 + nlmed / 2)
+ nl1 = l4 - l3 + 1
+ inbuf = imgs2r (in, 1, nc, l3, l4)
+ if (out != NULL)
+ outbuf = imps2r (out, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (pm != NULL)
+ pmbuf = imps2s (pm, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (mim != NULL)
+ mbuf = imps2r (mim, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (sim != NULL)
+ sbuf = imps2r (sim, 1, nc, l1, l2) - (l1 - l3) * nc
+ if (rim != NULL)
+ rbuf = imps2r (rim, 1, nc, l1, l2) - (l1 - l3) * nc
+ call crmedian (inbuf, outbuf, pmbuf, mbuf, sbuf, rbuf,
+ nc, nl1, l1-l3+1, l2-l3+1, ncmed, nlmed, var0, var1, var2,
+ ncsig, nlsig, lsig, hsig)
+ }
+
+ if (rim != NULL)
+ call imunmap (rim)
+ if (sim != NULL)
+ call imunmap (sim)
+ if (mim != NULL)
+ call imunmap (mim)
+ if (pm != NULL)
+ call imunmap (pm)
+ if (out != NULL)
+ call imunmap (out)
+ call imunmap (in)
+ call sfree (sp)
+end
+
+
+# CRMEDIAN -- Detect, replace, and flag cosmic rays.
+# A local background is computed using moving box medians to avoid
+# contaminating bad pixels.  If a variance model is given then that is
+# used; otherwise a local sigma is computed in blocks (not a moving box,
+# for efficiency) by using a percentile spread of the sorted pixel values to
+# estimate the width of the distribution uncontaminated by bad pixels.  Once
+# the background and sigma are known, deviant pixels are found by using sigma
+# threshold factors.
+
+procedure crmedian (in, out, pout, mout, sout, rout, nc, nl, nl1, nl2,
+ ncmed, nlmed, var0, var1, var2, ncsig, nlsig, lsig, hsig)
+
+pointer in #I Input data
+pointer out #O Output data
+pointer pout #O Output mask (0=good, 1=bad)
+pointer mout #O Output medians
+pointer sout #O Output sigmas
+pointer rout #O Output residuals
+int nc, nl #I Number of columns and lines
+int nl1, nl2 #I Lines to compute
+int ncmed, nlmed #I Median box size
+real	var0			#I Variance coefficient for DN^0 term
+real	var1			#I Variance coefficient for DN^1 term
+real	var2			#I Variance coefficient for DN^2 term
+int ncsig, nlsig #I Sigma box size
+real lsig, hsig #I Threshold sigmas
+
+int i, j, k, l, m, plsig, phsig
+real data, med, sigma, low, high, amedr()
+pointer stack, meds, sigs, work, ptr, ip, op, pp, mp, sp, rp
+
+begin
+ call smark (stack)
+ call salloc (meds, nc, TY_REAL)
+ call salloc (sigs, nc, TY_REAL)
+ call salloc (work, max (ncsig*nlsig, ncmed*nlmed), TY_REAL)
+
+	if (var0 != 0. && var1 == 0. && var2 == 0.)
+ call amovkr (sqrt(var0), Memr[sigs], nc)
+
+ meds = meds - 1
+ sigs = sigs - 1
+
+ i = ncsig * nlsig
+ plsig = nint (PLSIG*i/100.-1)
+ phsig = nint (PHSIG*i/100.-1)
+
+ do i = nl1, nl2 {
+
+ # Compute median and output if desired. This is a moving median.
+ l = min (nl, i+nlmed/2)
+ l = max (1, l-nlmed+1)
+ mp = mout + (i - 1) * nc
+ do j = 1, nc {
+ k = min (nc, j+ncmed/2)
+ k = max (1, k-ncmed+1)
+ ptr = work
+ ip = in + (l - 1) * nc + k - 1
+ do m = l, l+nlmed-1 {
+ call amovr (Memr[ip], Memr[ptr], ncmed)
+ ip = ip + nc
+ ptr = ptr + ncmed
+ }
+ med = amedr (Memr[work], ncmed * nlmed)
+ Memr[meds+j] = med
+ if (mout != NULL) {
+ Memr[mp] = med
+ mp = mp + 1
+ }
+ }
+
+ # Compute sigmas and output if desired.
+ if (var0 != 0. || var1 != 0. || var2 != 0.) {
+ if (var1 != 0.) {
+ if (var2 != 0.) {
+ do j = 1, nc {
+ data = max (0., Memr[meds+j])
+ Memr[sigs+j] = sqrt (var0 + var1*data + var2*data**2)
+ }
+ } else {
+ do j = 1, nc {
+ data = max (0., Memr[meds+j])
+ Memr[sigs+j] = sqrt (var0 + var1 * data)
+ }
+ }
+ } else if (var2 != 0.) {
+ do j = 1, nc {
+ data = max (0., Memr[meds+j])
+ Memr[sigs+j] = sqrt (var0 + var2 * data**2)
+ }
+ }
+ } else {
+ # Compute sigmas from percentiles. This is done in blocks.
+ if (mod (i-nl1, nlsig) == 0 && i<nl-nlsig+1) {
+ do j = 1, nc-ncsig+1, ncsig {
+ ptr = work
+ ip = in + (i - 1) * nc + j - 1
+ do k = i, i+nlsig-1 {
+ call amovr (Memr[ip], Memr[ptr], ncsig)
+ ip = ip + nc
+ ptr = ptr + ncsig
+ }
+ call asrtr (Memr[work], Memr[work], ncsig*nlsig)
+ sigma = (Memr[work+phsig] - Memr[work+plsig]) / 2.
+ call amovkr (sigma, Memr[sigs+j], ncsig)
+ }
+ call amovkr (sigma, Memr[sigs+j], nc-j+1)
+ }
+ }
+ if (sout != NULL) {
+ sp = sout + (i - 1) * nc
+ do j = 1, nc {
+ Memr[sp] = Memr[sigs+j]
+ sp = sp + 1
+ }
+ }
+
+ # Detect, fix, and flag cosmic rays.
+ if (rout == NULL) {
+ if (pout == NULL) {
+ ip = in + (i - 1) * nc
+ op = out + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high)
+ Memr[op] = med
+ else
+ Memr[op] = data
+ ip = ip + 1
+ op = op + 1
+ }
+ } else if (out == NULL) {
+ ip = in + (i - 1) * nc
+ pp = pout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high)
+ Mems[pp] = 1
+ else
+ Mems[pp] = 0
+ ip = ip + 1
+ pp = pp + 1
+ }
+ } else {
+ ip = in + (i - 1) * nc
+ op = out + (i - 1) * nc
+ pp = pout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high) {
+ Memr[op] = med
+ Mems[pp] = 1
+ } else {
+ Memr[op] = data
+ Mems[pp] = 0
+ }
+ ip = ip + 1
+ op = op + 1
+ pp = pp + 1
+ }
+ }
+ } else {
+ if (pout == NULL && out == NULL) {
+ ip = in + (i - 1) * nc
+ rp = rout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ if (sigma > 0.)
+ Memr[rp] = (data - med) / sigma
+ else {
+ if ((data - med) < 0.)
+ Memr[rp] = -MAX_REAL
+ else
+ Memr[rp] = MAX_REAL
+ }
+ ip = ip + 1
+ rp = rp + 1
+ }
+ } else if (pout == NULL) {
+ ip = in + (i - 1) * nc
+ op = out + (i - 1) * nc
+ rp = rout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high)
+ Memr[op] = med
+ else
+ Memr[op] = data
+ if (sigma > 0.)
+ Memr[rp] = (data - med) / sigma
+ else {
+ if ((data - med) < 0.)
+ Memr[rp] = -MAX_REAL
+ else
+ Memr[rp] = MAX_REAL
+ }
+ ip = ip + 1
+ op = op + 1
+ rp = rp + 1
+ }
+ } else if (out == NULL) {
+ ip = in + (i - 1) * nc
+ pp = pout + (i - 1) * nc
+ rp = rout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high)
+ Mems[pp] = 1
+ else
+ Mems[pp] = 0
+ if (sigma > 0.)
+ Memr[rp] = (data - med) / sigma
+ else {
+ if ((data - med) < 0.)
+ Memr[rp] = -MAX_REAL
+ else
+ Memr[rp] = MAX_REAL
+ }
+ ip = ip + 1
+ pp = pp + 1
+ rp = rp + 1
+ }
+ } else {
+ ip = in + (i - 1) * nc
+ op = out + (i - 1) * nc
+ pp = pout + (i - 1) * nc
+ rp = rout + (i - 1) * nc
+ do j = 1, nc {
+ data = Memr[ip]
+ med = Memr[meds+j]
+ sigma = Memr[sigs+j]
+ low = med - lsig * sigma
+ high = med + hsig * sigma
+ if (data < low || data > high) {
+ Memr[op] = med
+ Mems[pp] = 1
+ } else {
+ Memr[op] = data
+ Mems[pp] = 0
+ }
+ if (sigma > 0.)
+ Memr[rp] = (data - med) / sigma
+ else {
+ if ((data - med) < 0.)
+ Memr[rp] = -MAX_REAL
+ else
+ Memr[rp] = MAX_REAL
+ }
+ ip = ip + 1
+ op = op + 1
+ pp = pp + 1
+ rp = rp + 1
+ }
+ }
+ }
+ }
+
+ call sfree (stack)
+end
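
The key idea in CRMEDIAN is a robust local noise estimate: when no variance
model (var0, var1, var2) is supplied, sigma is taken as half the spread between
the 15.87 and 84.13 percentiles of the sorted box values, which equals one
standard deviation for a Gaussian distribution and is insensitive to the bad
pixels themselves.  The Python sketch below, with illustrative names, shows the
per-pixel test; it is not the buffered SPP implementation.

    # Illustrative sketch of the CRMEDIAN per-pixel test.
    def crmedian_flag(data, window, lsig, hsig, var0=0.0, var1=0.0, var2=0.0):
        vals = sorted(window)                    # values in the local median box
        n = len(vals)
        med = vals[n // 2]                       # (upper) median used as background
        if var0 != 0.0 or var1 != 0.0 or var2 != 0.0:
            d = max(0.0, med)
            sigma = (var0 + var1 * d + var2 * d * d) ** 0.5   # variance model
        else:
            lo = vals[max(0, round(0.1587 * n) - 1)]          # ~15.87th percentile
            hi = vals[max(0, round(0.8413 * n) - 1)]          # ~84.13th percentile
            sigma = (hi - lo) / 2.0                           # robust sigma estimate
        bad = data < med - lsig * sigma or data > med + hsig * sigma
        return med, sigma, bad                   # replacement value, noise, CR flag
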
diff --git a/noao/imred/crutil/src/x_crutil.x b/noao/imred/crutil/src/x_crutil.x
new file mode 100644
index 00000000..48408a9f
--- /dev/null
+++ b/noao/imred/crutil/src/x_crutil.x
@@ -0,0 +1,4 @@
+task cosmicrays = t_cosmicrays,
+ craverage = t_craverage,
+ crgrow = t_crgrow,
+ crmedian = t_crmedian
diff --git a/noao/imred/crutil/src/xtmaskname.x b/noao/imred/crutil/src/xtmaskname.x
new file mode 100644
index 00000000..eb132e12
--- /dev/null
+++ b/noao/imred/crutil/src/xtmaskname.x
@@ -0,0 +1,125 @@
+# XT_MASKNAME -- Make a mask name.  This creates a FITS mask extension if
+# possible, otherwise it creates a pixel list file. To create a FITS
+# extension the filename must explicitly select the FITS kernel or the
+# default image type must be a FITS file. The input and output strings
+# may be the same.
+
+procedure xt_maskname (fname, extname, mode, mname, maxchar)
+
+char fname[ARB] #I File name
+char extname[ARB] #I Default pixel mask extension name
+int mode #I Mode
+char mname[maxchar] #O Output mask name
+int maxchar #I Maximum characters in mask name
+
+int i, fits
+pointer sp, temp
+
+bool streq()
+int strmatch(), stridxs(), strldxs(), strncmp()
+int envfind(), access(), imaccess()
+
+begin
+ call smark (sp)
+ call salloc (temp, maxchar, TY_CHAR)
+
+	# Determine whether to use FITS pixel mask extensions.  Setting the
+	# environment variable "masktype" to "pl" forces use of pl masks
+	# even when FITS mask extensions are supported.
+ fits = YES
+ if (fits == YES && envfind ("masktype", Memc[temp], maxchar) > 0) {
+ if (streq (Memc[temp], "pl"))
+ fits = NO
+ }
+ i = strldxs ("]", fname)
+
+ # Check for explicit .pl extension.
+ if (strmatch (fname, ".pl$") > 0)
+ call strcpy (fname, mname, maxchar)
+
+ # Check for explicit mask extension.
+ else if (strmatch (fname, "type=mask") > 0)
+ call strcpy (fname, mname, maxchar)
+ else if (strmatch (fname, "type\\\=mask") > 0)
+ call strcpy (fname, mname, maxchar)
+
+ # Add mask type.
+	else if (i > 0) {
+	    call strcpy (fname, mname, maxchar)
+ if (mode != READ_ONLY) {
+ call strcpy (fname[i], Memc[temp], maxchar)
+ call sprintf (mname[i], maxchar-i, ",type=mask%s")
+ call pargstr (Memc[temp])
+ }
+ i = stridxs ("[", mname)
+ if (i > 0) {
+ call strcpy (mname[i+1], Memc[temp], SZ_FNAME)
+ if (strncmp (mname[i+1], "append", 6)==0 ||
+ strncmp (mname[i+1], "inherit", 7)==0) {
+ call sprintf (mname[i+1], SZ_FNAME-i+1, "%s,%s")
+ call pargstr (extname)
+ call pargstr (Memc[temp])
+ }
+ }
+
+ # Create output from rootname name.
+ } else if (fits == YES) {
+ call strcpy (fname, Memc[temp], SZ_FNAME)
+ if (mode == READ_ONLY) {
+ call sprintf (mname, maxchar, "%s[%s]")
+ call pargstr (Memc[temp])
+ call pargstr (extname)
+ } else {
+ call sprintf (mname, maxchar, "%s[%s,type=mask]")
+ call pargstr (Memc[temp])
+ call pargstr (extname)
+ }
+	} else {
+	    call strcpy (fname, mname, maxchar)
+	    call strcat (".pl", mname, maxchar)
+	}
+
+ # Check if extension name is actually given.
+ i = stridxs ("[", mname)
+ if (i > 0) {
+ call strcpy (mname[i+1], Memc[temp], SZ_FNAME)
+ if (strncmp (mname[i+1], "append", 6)==0 ||
+ strncmp (mname[i+1], "inherit", 7)==0) {
+ call sprintf (mname[i+1], SZ_FNAME-i+1, "%s,%s")
+ call pargstr (extname)
+ call pargstr (Memc[temp])
+ }
+ }
+
+ # Convert to pl form if required.
+ i = stridxs ("[", mname)
+ if (i > 0 && mode == READ_ONLY)
+ fits = imaccess (mname, mode)
+ if (fits == NO && i > 0) {
+ mname[i] = EOS
+ if (mode == NEW_IMAGE) {
+ if (access (mname, 0, 0) == NO) {
+ ifnoerr (call fmkdir (mname))
+ mname[i] = '/'
+ else
+ mname[i] = '.'
+ } else
+ mname[i] = '/'
+ } else {
+ if (access (mname, 0, 0) == NO)
+ mname[i] = '.'
+ else
+ mname[i] = '/'
+ }
+
+ if (strncmp (mname[i+1], "type", 4) == 0 ||
+ strncmp (mname[i+1], "append", 6) == 0 ||
+ strncmp (mname[i+1], "inherit", 7) == 0) {
+ mname[i+1] = EOS
+ call strcat (extname, mname, maxchar)
+ } else {
+ i = stridxs (",]", mname)
+ mname[i] = EOS
+ }
+ call strcat (".pl", mname, maxchar)
+ }
+
+ call sfree (sp)
+end
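
In outline, the routine above turns a root output name into either a FITS mask
extension or a pixel list (.pl) file name.  The Python sketch below covers only
the simple root-name case and is an approximation of the SPP logic (it ignores
the bracketed-section, append/inherit, and fallback-to-.pl handling); the names
and flags are hypothetical.

    # Approximate illustration of the root-name case handled above.
    def mask_name(root, extname="pl", fits=True, read_only=False):
        if root.endswith(".pl") or "type=mask" in root:
            return root                              # already an explicit mask name
        if fits:
            if read_only:
                return "%s[%s]" % (root, extname)    # refer to an existing extension
            return "%s[%s,type=mask]" % (root, extname)
        return root + ".pl"                          # fall back to a pixel list file
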
diff --git a/noao/imred/ctioslit/Revisions b/noao/imred/ctioslit/Revisions
new file mode 100644
index 00000000..28f900fd
--- /dev/null
+++ b/noao/imred/ctioslit/Revisions
@@ -0,0 +1,26 @@
+.help revisions Dec94 noao.imred.ctioslit
+.nf
+
+=====
+V2.12
+=====
+
+imred$ctioslit/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+========
+V2.11.3b
+========
+
+imred$ctioslit/demos/mkdoslit.cl
+ Made the ARTDATA package parameters explicit (4/15/97, Valdes)
+
+imred$ctioslit/sparams.par
+ Changed match from 10. to -3. (4/5/96, Valdes)
+
+imred$ctioslit/ctioslit.cl
+imred$ctioslit/ctioslit.men
+ Added background, illumination, response, apflatten, apnormalize.
+ (12/29/94, Valdes)
+
+.endhelp
diff --git a/noao/imred/ctioslit/calibrate.par b/noao/imred/ctioslit/calibrate.par
new file mode 100644
index 00000000..e09457a2
--- /dev/null
+++ b/noao/imred/ctioslit/calibrate.par
@@ -0,0 +1,13 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,yes,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/ctioslit/ctioslit.cl b/noao/imred/ctioslit/ctioslit.cl
new file mode 100644
index 00000000..abc2a1f3
--- /dev/null
+++ b/noao/imred/ctioslit/ctioslit.cl
@@ -0,0 +1,69 @@
+#{ CTIOSLIT package definition
+
+# Define CTIOSLIT package
+package ctioslit
+
+set demos = "ctioslit$demos/"
+
+# Slitproc
+cl < doslit$doslittasks.cl
+task sparams = "ctioslit$sparams.par"
+
+# Onedspec tasks
+task autoidentify,
+ continuum,
+ deredden,
+ dispcor,
+ dopcor,
+ identify,
+ refspectra,
+ reidentify,
+ sarith,
+ sflip,
+ slist,
+ splot,
+ specplot,
+ specshift = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Different default parameters
+task calibrate,
+ sensfunc,
+ standard = "ctioslit$x_onedspec.e"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ apflatten,
+ apnormalize,
+ aprecenter,
+ apresize,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apdefault = "apextract$apdefault.par"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apflat1 = "apextract$apflat1.par"
+task apnorm1 = "apextract$apflat1.par"
+
+# Longslit tasks
+task illumination,
+ response = "twodspec$longslit/x_longslit.e"
+task background = "generic$background.cl"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Demos
+task demos = "demos$demos.cl"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apflat1, apnorm1, dispcor1, sparams
+
+clbye()
diff --git a/noao/imred/ctioslit/ctioslit.hd b/noao/imred/ctioslit/ctioslit.hd
new file mode 100644
index 00000000..3ce338e7
--- /dev/null
+++ b/noao/imred/ctioslit/ctioslit.hd
@@ -0,0 +1 @@
+# Help directory for the CTIOSLIT package.
diff --git a/noao/imred/ctioslit/ctioslit.men b/noao/imred/ctioslit/ctioslit.men
new file mode 100644
index 00000000..31c74dd0
--- /dev/null
+++ b/noao/imred/ctioslit/ctioslit.men
@@ -0,0 +1,38 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apflatten - Remove overall spectral and profile shapes from flat fields
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ background - Fit and subtract a line or column background
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit and normalize the continuum of multispec spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify arc lines and determine a dispersion function
+ illumination - Determine illumination calibration
+ refspectra - Assign reference spectra to observations
+ reidentify - Reidentify arc lines and determine new dispersion functions
+ response - Determine response calibration
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Copy spectra including aperture selection and format changes
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum headers
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Plot and analyze spectra
+ standard - Identify standard stars to be used in sensitivity calc
+
+ doslit - Process CTIO slit spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/ctioslit/ctioslit.par b/noao/imred/ctioslit/ctioslit.par
new file mode 100644
index 00000000..4ed0fbf0
--- /dev/null
+++ b/noao/imred/ctioslit/ctioslit.par
@@ -0,0 +1,15 @@
+# CTIOSLIT parameter file
+extinction,s,h,onedstds$ctioextinct.dat,,,Extinction file
+caldir,s,h,onedstds$ctionewcal/,,,Standard star calibration directory
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,"",,,Record number extensions
+version,s,h,"CTIOSLIT V3: January 1992"
diff --git a/noao/imred/ctioslit/demos/demoarc1.dat b/noao/imred/ctioslit/demos/demoarc1.dat
new file mode 100644
index 00000000..fa0a179d
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demoarc1.dat
@@ -0,0 +1,38 @@
+ OBJECT = 'First comp ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 60. / actual integration time
+ DARKTIME= 60. / total elapsed time
+ IMAGETYP= 'comp ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:11:30.00 ' / universal time
+ ST = '09:04:54.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:09:03.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '48.760 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDMEAN = 179.398
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/ctioslit/demos/demoarc2.dat b/noao/imred/ctioslit/demos/demoarc2.dat
new file mode 100644
index 00000000..4cd9975d
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demoarc2.dat
@@ -0,0 +1,38 @@
+ OBJECT = 'Last comp ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 60. / actual integration time
+ DARKTIME= 60. / total elapsed time
+ IMAGETYP= 'comp ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:41:30.00 ' / universal time
+ ST = '09:34:54.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:09:03.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '48.760 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDMEAN = 179.398
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/ctioslit/demos/demoobj1.dat b/noao/imred/ctioslit/demos/demoobj1.dat
new file mode 100644
index 00000000..78f3b9ad
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demoobj1.dat
@@ -0,0 +1,37 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/ctioslit/demos/demos.cl b/noao/imred/ctioslit/demos/demos.cl
new file mode 100644
index 00000000..5b065c51
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demos.cl
@@ -0,0 +1,18 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile))
+ cl (< demofile)
+ else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/ctioslit/demos/demos.men b/noao/imred/ctioslit/demos/demos.men
new file mode 100644
index 00000000..c6f83e09
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demos.men
@@ -0,0 +1,4 @@
+ MENU of CTIOSLIT Demonstrations
+
+ doslit - Quick test of DOSLIT (no comments, no delays)
+ mkdoslit - Make DOSLIT test data
diff --git a/noao/imred/ctioslit/demos/demos.par b/noao/imred/ctioslit/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/ctioslit/demos/demostd1.dat b/noao/imred/ctioslit/demos/demostd1.dat
new file mode 100644
index 00000000..78f3b9ad
--- /dev/null
+++ b/noao/imred/ctioslit/demos/demostd1.dat
@@ -0,0 +1,37 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/ctioslit/demos/doslit.cl b/noao/imred/ctioslit/demos/doslit.cl
new file mode 100644
index 00000000..b2ecbde2
--- /dev/null
+++ b/noao/imred/ctioslit/demos/doslit.cl
@@ -0,0 +1,14 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdoslit.cl")
+
+unlearn doslit
+sparams.extras = no
+sparams.coordlist = "linelists$idhenear.dat"
+delete demologfile,demoplotfile verify=no >& dev$null
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdoslit.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/ctioslit/demos/mkdoslit.cl b/noao/imred/ctioslit/demos/mkdoslit.cl
new file mode 100644
index 00000000..b76f467d
--- /dev/null
+++ b/noao/imred/ctioslit/demos/mkdoslit.cl
@@ -0,0 +1,25 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkexample ("longslit", "demoarc1", oseed=5, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc1", "demos$demoarc1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoobj1", oseed=1, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoobj1", "demos$demoobj1.dat", append=no, verbose=no)
+mkexample ("longslit", "demostd1", oseed=2, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demostd1", "demos$demostd1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoarc2", oseed=5, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc2", "demos$demoarc2.dat", append=no, verbose=no)
diff --git a/noao/imred/ctioslit/demos/xgdoslit.dat b/noao/imred/ctioslit/demos/xgdoslit.dat
new file mode 100644
index 00000000..eef7d9f2
--- /dev/null
+++ b/noao/imred/ctioslit/demos/xgdoslit.dat
@@ -0,0 +1,71 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\sctioslit\n
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdoslit\n
+demoobj1\r
+demoarc1,demoarc2\r
+\r
+demostd1\r
+rdnoise\r
+gain\r
+\r
+\r
+5700\r
+6.2\r
+\r
+y\r
+y\r
+y\r
+y\r
+y\r
+^Z
+doslit\sredo+\n
+\n
+\n
+b/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+y\n
+4210\n
+7350\n
+6.2\n
+\n
+n\n
+\n
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+f56\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+Y\n
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+gkimos\sdemoplotfile\snx=3\sny=3\sdev=stdgraph\n
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/ctioslit/sensfunc.par b/noao/imred/ctioslit/sensfunc.par
new file mode 100644
index 00000000..94f84f4a
--- /dev/null
+++ b/noao/imred/ctioslit/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,yes,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,)_.extinction,,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/ctioslit/sparams.par b/noao/imred/ctioslit/sparams.par
new file mode 100644
index 00000000..cfdf1f4f
--- /dev/null
+++ b/noao/imred/ctioslit/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE PARAMETERS -- "
+lower,r,h,-3.,,,Lower aperture limit relative to center
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,1,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none",,,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- BACKGROUND SUBTRACTION PARAMETERS --"
+background,s,h,"fit","none|average|median|minimum|fit",,Background to subtract
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,"Background function"
+b_order,i,h,1,,,"Background function order"
+b_sample,s,h,"-10:-6,6:10",,,"Background sample regions"
+b_naverage,i,h,-100,,,"Background average or median"
+b_niterate,i,h,1,0,,"Background rejection iterations"
+b_low,r,h,3.,0.,,"Background lower rejection sigma"
+b_high,r,h,3.,0.,,"Background upper rejection sigma
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,10.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,1,1,,"Order of dispersion function"
+i_niterate,i,h,1,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
diff --git a/noao/imred/ctioslit/standard.par b/noao/imred/ctioslit/standard.par
new file mode 100644
index 00000000..99b98877
--- /dev/null
+++ b/noao/imred/ctioslit/standard.par
@@ -0,0 +1,21 @@
+input,f,a,,,,Input image file root name
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,no,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/doc/demos.hlp b/noao/imred/doc/demos.hlp
new file mode 100644
index 00000000..35a32b6e
--- /dev/null
+++ b/noao/imred/doc/demos.hlp
@@ -0,0 +1,77 @@
+.help demos Sep90
+.ih
+NAME
+demos -- Demonstration and Test Procedures
+.ih
+PACKAGES
+noao.imred.argus, noao.imred.goldcam, noao.imred.kpcoude.fiber,
+noao.imred.kpcoude.slit, noao.imred.nessie, noao.imred.specred,
+noao.twodspec.longslit
+.ih
+USAGE
+demos demoname
+.ih
+PARAMETERS
+.ls demoname
+Demonstration or test procedure name. Each package may have a different
+set of demonstrations. If the demo name is not specified on the command
+line a menu of names is printed and then the name is queried.
+.le
+.ih
+DESCRIPTION
+Many packages have demonstration and test procedures. These are generally
+based on artificial data and use the \fIstty playback\fR (see \fBstty\fR)
+mechanism of the CL. A playback replaces interactive terminal input
+with previously stored input but otherwise is an actual execution of
+the entered commands.  This allows both demonstrations of various types
+and an actual test of the software on a particular IRAF system.
+
+Generally the \fBdemos\fR procedures create their own data if not present
+from a previous execution. After the procedure is completed the data,
+logfiles, etc. are left so that they may be examined further and
+the user may try some experiments. Thus, it might be useful to create
+a new directory for the demo using \fBmkdir\fR and "cd" to it.
+
+Currently, most of the demos are test procedures which do not contain
+the comments and delays needed to act as a demonstration.  These will
+be added in time.  Also, some of the demos simply create the demo/test
+data, for users who just want some relevant data for experimenting with
+the package.
+
+One should be aware that, since the tasks are actually run, task
+parameters are sometimes changed.
+.ih
+EXAMPLES
+1. From the \fBgoldcam\fR package, list the menu and execute the
+qtest demo.
+
+.nf
+ go> mkdir demo
+ go> cd demo
+ go> demos
+ MENU of GOLDCAM Demonstrations
+
+ qtest - Quick test of GOLDCAM (no comments, no delays)
+
+ Demo name (qtest):
+ <Demo follows>
+.fi
+
+2. From the \fBnessie\fR package create some simple test data.
+
+.nf
+ ne> demos mkqdata
+ Creating image demoobj ...
+ Creating image demoflat ...
+ Creating image demoarc1 ...
+ Creating image demoarc2 ...
+ ne> demos mkqdata
+ ne>
+.fi
+
+Note that the second execution does not create the data again.
+
+.ih
+SEE ALSO
+artdata.mkexamples, ccdred.ccdtest.demo
+.endhelp
diff --git a/noao/imred/doc/revisions.v2.ms b/noao/imred/doc/revisions.v2.ms
new file mode 100644
index 00000000..50036911
--- /dev/null
+++ b/noao/imred/doc/revisions.v2.ms
@@ -0,0 +1,89 @@
+.nr PS 9
+.nr VS 11
+.RP
+.ND
+.TL
+IMRED Package Revisions Summary: IRAF Version 2.10
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+P.O. Box 26732, Tucson, Arizona 85726
+September 1990
+.AB
+This paper summarizes the changes in Version 2 of the IRAF \fBimred\fR
+package which is part of IRAF Version 2.10. The major changes are:
+
+.IP \(bu
+New multifiber reduction packages \fBargus\fR, \fBnessie\fR, and
+\fBkpcoude.fiber\fR.
+.IP \(bu
+New spectrophotometric slit spectra reduction packages \fBgoldcam\fR,
+\fBspecred\fR, and \fBkpcoude.slit\fR.
+.IP \(bu
+New versions of the \fBmsred\fR and \fBechelle\fR packages based on
+the new versions of \fBapextract\fR and \fBonedspec\fR.
+.AE
+.NH
+Introduction
+.PP
+A number of new specialized image reduction packages and new versions
+of the generic echelle and multiobject spectroscopy packages have been
+added to the \fBimred\fR package in IRAF Version 2.10. The new
+subpackages will be made available as external packages prior to
+the release of V2.10. The major changes are:
+
+.IP \(bu
+New multifiber reduction packages \fBargus\fR, \fBnessie\fR, and
+\fBkpcoude.fiber\fR.
+.IP \(bu
+New spectrophotometric slit spectra reduction packages \fBgoldcam\fR,
+\fBspecred\fR, and \fBkpcoude.slit\fR.
+.IP \(bu
+New versions of the \fBmsred\fR and \fBechelle\fR packages based on
+the new versions of \fBapextract\fR and \fBonedspec\fR.
+
+.LP
+In addition, there have been some minor changes in the other
+spectroscopy packages required by changes in the \fBonedspec\fR package.
+.PP
+The new packages are specialized to specific instruments or types of
+data. They contain tasks collected from the various general spectroscopy
+packages which are appropriate for a particular type of data.
+However, the most important contribution of these packages is the
+special reduction tasks, which are streamlined to perform the complete
+calibration and reduction of the data in as simple and automated a
+manner as possible.  The tasks combine operations from both two
+dimensional extraction and one dimensional spectral calibrations
+and collect all the useful parameters in two parameter sets while
+fixing and hiding parameters which are irrelevant.
+.PP
+The new packages are as follows. The \fBargus\fR package is for the
+flat fielding, throughput correction, extraction, dispersion correction,
+and sky correction of data from the CTIO \fIArgus\fR multifiber instrument.
+The \fBnessie\fR package is similar and is for the KPNO \fINessie\fR
+multifiber plugboard instrument. The \fBkpcoude.fiber\fR package is
+specialized for the three fiber (two arc and one object) instrument
+at the KPNO Coude. It is similar to the other multifiber packages
+except there is no sky subtraction.
+.PP
+The other three packages are for sky subtracted extraction,
+dispersion correction, extinction correction, and flux calibration
+of slit instruments. The packages are for the KPNO \fIGoldcam\fR,
+the KPNO Coude, and for the CTIO \fI2DFRUTTI\fR. They are all
+fairly general and could be used for other instruments. They are
+distinguished by choices of default parameters.
+.PP
+There are user's guides for the powerful new reduction tasks in
+the new packages. These are available both as nicely typeset
+documents and as on-line IRAF manual pages.
+.PP
+Tasks from the revised \fBapextract\fR and \fBonedspec\fR packages
+appear in many of the \fBimred\fR packages. In particular the
+\fBechelle\fR and \fBmsred\fR packages are now based on this new
+software.
+.PP
+Some minor changes are the replacement of the \fBspecphot\fR package
+by \fBspecred\fR and the renaming and reorganization of the
+\fBcoude\fR package.
diff --git a/noao/imred/doc/tutor.hlp b/noao/imred/doc/tutor.hlp
new file mode 100644
index 00000000..21684194
--- /dev/null
+++ b/noao/imred/doc/tutor.hlp
@@ -0,0 +1,64 @@
+.help tutor Aug86 noao.imred
+.ih
+NAME
+tutor -- Present tutorial help on a particular package
+.ih
+USAGE
+tutor topic
+.ih
+PARAMETERS
+.ls topic
+Topic for which help is desired. If no topic is given then available
+topics are presented. The topic "all" prints all the topics.
+.le
+.ls package = ""
+Package for which tutorial help is desired. If null then the tutorial
+for the current package is presented.
+.le
+.ls tutordb = "helpdb"
+The filename of the tutorial database to be searched. If the \fIvalue\fR of the
+parameter is the reserved string "helpdb", the actual filename is the value
+of the CL environment variable \fIhelpdb\fR.
+.le
+.ih
+DESCRIPTION
+This task provides a tutorial facility for some of the IRAF packages.
+The tutorial consists of a number of topics
+which are obtained by typing "tutor topic" where topic is one of the
+available topics. If no topic is given then the available topics are
+presented. The topic "all" presents all the topics.
+
+This task is implemented using the \fBhelp\fR task. Therefore,
+modifying the \fBhelp\fR parameters dealing with the device and print
+format will also determine the output of \fBtutor\fR. The tutorial
+topic material is contained in the file Tutorial.hlp for each package.
+The database containing the directory to the tutorial files may or may
+not be the same as the standard help database.
+.ih
+EXAMPLES
+To get started:
+
+ cl> tutor
+
+To get help on reading and writing data tapes:
+
+ cl> tutor dataio
+
+To read all the topics:
+
+ cl> tutor all
+.ih
+BUGS
+Piping the output of \fBtutor\fR to lprint does not work properly because
+\fBhelp\fR is contained in a script.
+.ih
+TUTORIALS
+Tutorials are currently available only for the \fBechelle\fR package.
+This tutorial is in the process of being developed.
+.ih
+SEE ALSO
+.nf
+help
+Individual help pages for all the tasks mentioned in the tutorial.
+.fi
+.endhelp
diff --git a/noao/imred/dtoi/README b/noao/imred/dtoi/README
new file mode 100644
index 00000000..e293c6c8
--- /dev/null
+++ b/noao/imred/dtoi/README
@@ -0,0 +1 @@
+DTOI -- Density to intensity transformation package.
diff --git a/noao/imred/dtoi/Revisions b/noao/imred/dtoi/Revisions
new file mode 100644
index 00000000..e6e955ff
--- /dev/null
+++ b/noao/imred/dtoi/Revisions
@@ -0,0 +1,144 @@
+.help revisions Jun88 noao.imred.dtoi
+.nf
+
+imred$dtoi/selftest.x
+ The 'lut' was declared as TY_INT instead of TY_REAL (5/4/13)
+
+imred$dtoi/spolist.x
+ A TY_INT pointer was being used with Memr[] (4/20/13)
+
+=======
+V2.16
+=======
+
+imred$dtoi/hdtoi.x
+ The hd_fogcalc() procedure was being called with too few arguments
+ (7/12/09, MJF)
+
+=======
+V2.14.1
+=======
+
+imred$dtoi/hdicfit/mkpkg
+ Added missing dependencies. (12/13/01, MJF)
+
+imred$dtoi/mkpkg
+ Added missing dependencies. (10/11/99, Valdes)
+
+=======
+V2.11.2
+=======
+
+imred$dtoi/doc/hdfit.hlp
+imred$dtoi/doc/hdshift.hlp
+imred$dtoi/doc/hdtoi.hlp
+imred$dtoi/doc/spotlist.hlp
+    Fixed minor formatting problems.  (4/22/99, Valdes)
+
+imred$dtoi/hdicggraph.x
+imred$dtoi/hdicebars.x
+ Replace direct access to GTOOLS structure with GTOOLS interface.
+ (12/18/98, Valdes)
+
+=======
+V2.11.1
+=======
+
+imred$dtoi/spotlist.x 7 Sept, 1989 SRo
+ In Suzanne's absence, removed the divide by nfog fog images in
+    In Suzanne's absence, removed the division by nfog (the number of fog
+    images) in hd_fogcalc() after a bug report by Steve Majewski that when
+    multiple fog images are used, the resultant fog values are too low by 1/nfog.
+ be verified by Suzanne on her return.
+
+imred$dtoi/hdicfit/hdicgdelte.x 17 April, 1989 ShJ
+ Procedure icg_dl1 was declared as an integer procedure, although
+    it does not return a function value and was not being called
+ as a function.
+
+imred$dtoi/hdicfit/hdicgcolon.x 27 March, 1989 ShJ
+ Changed only a comment line, to point out that the :ebars command
+ switches the meaning of the error bars between representing
+ standard deviations or weights.
+
+imred$dtoi/spotlist.x 11 May, 1988 ShJ
+ Added parameter "option", which selects a mean or median calculation
+ of spot densities and the fog value. Previously, only a mean
+ calculation was available. The choice of "option" is written to
+ the database.
+
+imred$dtoi/hd_aravr.x 11 May, 1988 ShJ
+ This is a new file, modified from the vops version only in that it
+ puts an upper limit on the number of iterations allowed in the
+ procedure of 10. This should avoid the problem reported by
+ Steve Majewski of nearly saturated pixels "hanging"; they actually
+ were oscillating endlessly in the mean rejection calculation.
+
+imred$dtoi/hdtoi.x 11 May, 1988 ShJ
+    Added parameter "option", which selects a mean or median calculation
+ of the fog value, in those cases where the user supplies a fog
+ image, rather than a fog value. Previously, only a mean calculation
+ was available. This newly calculated value of fog is now also
+ written to the database.
+
+imred$dtoi/hdtoi.x 5 April, 1988 ShJ
+ Modified the way in which hdtoi scales intensities to the "ceiling"
+ parameter. A maximum intensity is calculated, using cveval to
+ evaluate the transformed value of the maximum density above fog.
+ The fog value subtracted is hdtoi.fog, which may or may not
+ equal the value of fog written by spotlist into the database.
+    Previously, the program was incorrectly subtracting the spotlist
+    fog value from the maximum density when calculating the maximum
+    intensity, and therefore also the scale factor used to represent
+    saturated intensities as "ceiling".
+
+imred$dtoi/hdicgundel.x 25 March, 1988 ShJ
+ Procedures icg_u1d and icg_undeleted in this source file were
+ declared as functions but not returning a value. They were not
+ being called as functions, and are now not declared as functions.
+
+imred$dtoi/database.x 14 March, 1988 ShJ
+ Added procedure to put a double precision value: ddb_putd().
+
+imred$dtoi/spotlist.x 14 March, 1988 ShJ
+ Added code for additional input parameter: "maxad", the maximum
+ allowed input value, i.e., the AD units in a saturated pixel. From
+ this value and the "scale" parameter, spotlist calculates the
+    maximum permissible density, and writes this to the database as a
+ double precision value.
+
+imred$dtoi/hdfit.x 14 March, 1988 ShJ
+ Parameter "maxden" was removed, as this value is now calculated
+ precisely by spotlist and can be read from the database.
+
+imred$dtoi/spotlist.par
+imred$dtoi/hdfit.par
+imred$dtoi/doc/spotlist.hlp
+imred$dtoi/doc/hdfit.hlp 14 March, 1988 ShJ
+ Modified help pages and parameter files for spotlist and hdfit to
+ support the above changes to the user interface.
+
+imred$dtoi/hdtoi.x 11 March, 1988 ShJ
+ Bug fixed in hd_aptrans(). The equations for the k75 and k50
+ transformations were incorrect, having a '*' where a '+' should
+ have been. Also, the strncmp procedure was only checking the first
+ character to distinguish between transformation types, in which
+ case k75 and k50 would not be resolved.
+
+imred$dtoi/hdfit.x 10 March, 1988 ShJ
+ Repaired an error in incrementing an offset into the output
+ arrays in hd_rdloge. This error was seen only when more than
+ two databases contributed input values to be fit. This was
+ a trivial arithmetic error that had been overlooked due to
+ insufficient testing. This bug would produce a memory error
+ as values were written into unallocated locations.
+
+imred$dtoi/hdicfit/hdic.com 9 March, 1988 ShJ
+ Reordered the values in the common block, as the largest double
+ precision value was following the integers. The SUN-4 compiler
+ gave warning of this, then padded the double value. Also at this
+ time file hdicgundelete.x was renamed to hdicgundel.x to avoid
+ problems associated with filenames longer than 14 characters. The
+ hdicfit library was completely rebuilt at this time, to insure the
+ new object module would be retrieved.
+.endhelp
diff --git a/noao/imred/dtoi/database.x b/noao/imred/dtoi/database.x
new file mode 100644
index 00000000..cb501472
--- /dev/null
+++ b/noao/imred/dtoi/database.x
@@ -0,0 +1,611 @@
+include <time.h>
+include <ctype.h>
+include <ctotok.h>
+include <finfo.h>
+
+# DATABASE.X -- This file contains source for the database utilities used by
+# the DTOI package. They are based on Frank Valdes's dtext utilities.
+
+# Definition for dbtext structure.
+
+define DT_LEN 5
+
+define DT Memi[$1] # FIO channel
+define DT_NRECS Memi[$1+1] # Number of records
+define DT_OFFSETS Memi[$1+2] # Pointer to record offsets
+define DT_NAMES Memi[$1+3] # Pointer to name indices
+define DT_MAP Memi[$1+4] # Pointer to record names
+
+define DT_OFFSET Meml[DT_OFFSETS($1)+$2-1]
+define DT_NAMEI Memi[DT_NAMES($1)+$2-1]
+define DT_NAME Memc[DT_MAP($1)+DT_NAMEI($1,$2)]
+
+define DT_ALLOC 20 # Allocation block size
+
+
+# DDB_MAP -- Map the database.
+
+pointer procedure ddb_map (database, mode)
+
+char database[ARB] # Database file
+int mode # FIO mode
+
+int i, nrec
+int dt_alloc1, dt_alloc2
+pointer db, str
+
+int open(), fscan(), strlen()
+bool streq()
+long note()
+errchk delete, open
+
+begin
+ call calloc (db, DT_LEN, TY_STRUCT)
+
+ if (mode == NEW_FILE)
+ iferr (call delete (database))
+ ;
+
+ DT(db) = open (database, mode, TEXT_FILE)
+
+ if (mode != READ_ONLY)
+ return (db)
+
+ dt_alloc1 = DT_ALLOC
+ dt_alloc2 = DT_ALLOC * SZ_LINE
+ call malloc (DT_OFFSETS(db), dt_alloc1, TY_LONG)
+ call malloc (DT_NAMES(db), dt_alloc1, TY_INT)
+ call malloc (DT_MAP(db), dt_alloc2, TY_CHAR)
+ call malloc (str, SZ_LINE, TY_CHAR)
+
+ nrec = 1
+ DT_NRECS(db) = 0
+ DT_NAMEI(db, nrec) = 0
+
+ while (fscan (DT(db)) != EOF) {
+ call gargwrd (DT_NAME(db, nrec), SZ_LINE)
+
+ if (streq (DT_NAME(db, nrec), "begin")) {
+ call gargstr (Memc[str], SZ_LINE)
+ for (i=str; IS_WHITE(Memc[i]); i=i+1)
+ ;
+ call strcpy (Memc[i], DT_NAME(db, nrec), SZ_LINE)
+
+ for (i = 1; i < nrec; i = i + 1)
+ if (streq (DT_NAME(db, i), DT_NAME(db, nrec)))
+ break
+
+ if (i < nrec)
+ DT_OFFSET(db, i) = note (DT(db))
+ else {
+ DT_NRECS(db) = nrec
+ DT_OFFSET(db, nrec) = note (DT(db))
+ DT_NAMEI(db, nrec+1) = DT_NAMEI(db, nrec) +
+ strlen (DT_NAME(db, nrec)) + 1
+ nrec = nrec + 1
+ }
+
+ if (nrec == dt_alloc1) {
+ dt_alloc1 = dt_alloc1 + DT_ALLOC
+ call realloc (DT_OFFSETS(db), dt_alloc1, TY_LONG)
+ call realloc (DT_NAMES(db), dt_alloc1, TY_INT)
+ }
+
+ if (DT_NAMEI(db, nrec) + SZ_LINE >= dt_alloc2) {
+ dt_alloc2 = dt_alloc2 + DT_ALLOC * SZ_LINE
+ call realloc (DT_MAP(db), dt_alloc2, TY_CHAR)
+ }
+ }
+ }
+
+ call realloc (DT_MAP(db), DT_NAMEI(db, nrec), TY_CHAR)
+ call realloc (DT_OFFSETS(db), DT_NRECS(db), TY_LONG)
+ call realloc (DT_NAMES(db), DT_NRECS(db), TY_INT)
+
+ return (db)
+end
+
+
+# DDB_UNMAP -- Unmap the database.
+
+procedure ddb_unmap (db)
+
+pointer db # Database file descriptor
+
+begin
+ call close (DT(db))
+ call mfree (DT_MAP(db), TY_CHAR)
+ call mfree (DT_OFFSETS(db), TY_LONG)
+ call mfree (DT_NAMES(db), TY_INT)
+ call mfree (db, TY_STRUCT)
+end
+
+
+# DDB_PREC -- Write a record to the database.
+
+procedure ddb_prec (db, record)
+
+pointer db # Pointer to database
+char record[ARB] # Name of record to enter
+
+pointer sp, entry
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "begin\t%s\n")
+ call pargstr (record)
+
+ call fprintf (DT(db), Memc[entry])
+
+ call sfree (sp)
+end
+
+
+# DDB_PUTR -- Write a real valued field into the current record.
+
+procedure ddb_putr (db, field, value)
+
+pointer db # Pointer to database
+char	field[ARB]		# Name of the field
+real value # Real value to output
+
+pointer sp, entry
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%g\n")
+ call pargstr (field)
+ call pargr (value)
+
+ call fprintf (DT(db), Memc[entry])
+
+ call sfree (sp)
+end
+
+# DDB_PUTD -- Write a double valued field into the current record.
+
+procedure ddb_putd (db, field, value)
+
+pointer db # Pointer to database
+char	field[ARB]		# Name of the field
+double	value			# Double precision value to output
+
+pointer sp, entry
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%g\n")
+ call pargstr (field)
+ call pargd (value)
+
+ call fprintf (DT(db), Memc[entry])
+
+ call sfree (sp)
+end
+
+
+# DDB_PUTI -- Write an integer valued field into the current record.
+
+procedure ddb_puti (db, field, value)
+
+pointer db # Pointer to database
+char	field[ARB]		# Name of the field
+int value # Integer value to output
+
+pointer sp, entry
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%d\n")
+ call pargstr (field)
+ call pargi (value)
+
+ call fprintf (DT(db), Memc[entry])
+
+ call sfree (sp)
+end
+
+
+# DDB_PSTR -- Write a string valued field into the current record.
+
+procedure ddb_pstr (db, field, value)
+
+pointer db # Pointer to database
+char	field[ARB]		# Name of the field
+char value[ARB] # String field
+
+pointer sp, entry
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%s\n")
+ call pargstr (field)
+ call pargstr (value)
+
+ call fprintf (DT(db), Memc[entry])
+
+ call sfree (sp)
+end
+
+
+# DDB_PAR -- Put an array of real values into a field in the current record.
+
+procedure ddb_par (db, field, array, nvals)
+
+pointer db # Pointer to database structure
+char field[ARB] # Name of field to be added
+real array[nvals] # Array of real values
+int nvals # Number of values in array
+
+pointer sp, entry
+int i
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%d\n")
+ call pargstr (field)
+ call pargi (nvals)
+
+ call fprintf (DT(db), Memc[entry])
+
+ do i = 1, nvals {
+ call sprintf (Memc[entry], SZ_LINE, "\t\t%g\n")
+ call pargr (array[i])
+
+ call fprintf (DT(db), Memc[entry])
+ }
+
+ call sfree (sp)
+end
+
+
+# DDB_PAD -- Put an array of double values into a field in the current record.
+
+procedure ddb_pad (db, field, array, nvals)
+
+pointer db # Pointer to database structure
+char field[ARB] # Name of field to be added
+double array[nvals] # Array of double values
+int nvals # Number of values in array
+
+pointer sp, entry
+int i
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%d\n")
+ call pargstr (field)
+ call pargi (nvals)
+
+ call fprintf (DT(db), Memc[entry])
+
+ do i = 1, nvals {
+ call sprintf (Memc[entry], SZ_LINE, "\t\t%g\n")
+ call pargd (array[i])
+
+ call fprintf (DT(db), Memc[entry])
+ }
+
+ call sfree (sp)
+end
+
+
+# DDB_PAI -- Put an array of integer values into a field in the current
+# record.
+
+procedure ddb_pai (db, field, array, nvals)
+
+pointer db # Pointer to database structure
+char field[ARB] # Name of field to be added
+int array[nvals] # Array of integer values
+int nvals # Number of values in array
+
+pointer sp, entry
+int i
+
+begin
+ call smark (sp)
+ call salloc (entry, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[entry], SZ_LINE, "\t%s\t%d\n")
+ call pargstr (field)
+ call pargi (nvals)
+
+ call fprintf (DT(db), Memc[entry])
+
+ do i = 1, nvals {
+ call sprintf (Memc[entry], SZ_LINE, "\t\t%d\n")
+ call pargi (array[i])
+
+ call fprintf (DT(db), Memc[entry])
+ }
+
+ call sfree (sp)
+end
+
+
+# DDB_LOCATE -- Locate a database record.
+
+int procedure ddb_locate (db, name)
+
+pointer db # DTTEXT pointer
+char name[ARB] # Record name
+
+int i
+
+bool streq()
+pointer sp, err_string
+
+begin
+ do i = 1, DT_NRECS(db) {
+ if (streq (name, DT_NAME(db, i)))
+ return (i)
+ }
+
+ # The record was not found
+
+ call smark (sp)
+ call salloc (err_string, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[err_string], SZ_LINE,
+ "DDB_LOCATE: Database record '%s' not found")
+ call pargstr (name)
+
+ call error (21, Memc[err_string])
+ call sfree (sp)
+end
+
+
+# DDB_GSTR -- Get a string field
+
+procedure ddb_gstr (db, record, field, str, maxchar)
+
+pointer db # Database file descriptor
+int record # Database index
+char field[ARB] # Database field
+char str[maxchar] # String value
+int maxchar # Maximum characters for string
+
+char name[SZ_LINE]
+
+pointer sp, esp
+int i, fscan()
+bool streq()
+
+begin
+ if ((record < 1) || (record > DT_NRECS(db)))
+ call error (22, "Database record request out of bounds")
+
+ call seek (DT(db), DT_OFFSET(db, record))
+
+ while (fscan (DT(db)) != EOF) {
+ call gargwrd (name, SZ_LINE)
+
+ if (streq (name, "begin"))
+ break
+ else if (streq (name, field)) {
+ call gargstr (str, maxchar)
+ for (i=1; IS_WHITE(str[i]); i=i+1)
+ ;
+ if (i>1)
+ call strcpy (str[i], str, maxchar)
+ return
+ }
+ }
+
+ call smark (sp)
+ call salloc (esp, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[esp], SZ_LINE,
+ "DDB_GSTR: Database field '%s' not found")
+ call pargstr (field)
+
+ call error (23, Memc[esp])
+ call sfree (sp)
+end
+
+
+# DDB_GETI -- Get an integer field.
+
+int procedure ddb_geti (db, record, field)
+
+pointer db # DTTEXT pointer
+int record # Database index
+char field[ARB] # Database field
+
+real rval
+bool fp_equalr()
+real ddb_getr()
+
+errchk ddb_getr
+
+begin
+ rval = ddb_getr (db, record, field)
+
+ if (fp_equalr (rval, INDEFR))
+ return (INDEFI)
+ else
+ return (int (rval))
+end
+
+
+# DDB_GETR -- Get a real field
+
+real procedure ddb_getr (db, record, field)
+
+pointer db # DTTEXT pointer
+int record # Database index
+char field[ARB] # Database field
+
+pointer sp, esp
+real rval
+char name[SZ_LINE]
+
+int fscan(), nscan()
+bool streq()
+
+begin
+ if ((record < 1) || (record > DT_NRECS(db)))
+ call error (24, "Database record request out of bounds")
+
+ call seek (DT(db), DT_OFFSET(db, record))
+
+ while (fscan (DT(db)) != EOF) {
+ call gargwrd (name, SZ_LINE)
+
+ if (streq (name, "begin"))
+ break
+ else if (streq (name, field)) {
+ call gargr (rval)
+ if (nscan() == 2)
+ return (rval)
+ else
+ call error (25, "Error in database field value")
+ }
+ }
+
+ call smark (sp)
+ call salloc (esp, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[esp], SZ_LINE,
+ "DDB_GET: Database field '%s' not found")
+ call pargstr (field)
+
+ call error (26, Memc[esp])
+ call sfree (sp)
+end
+
+
+# DDB_GAR -- Get a real array field
+
+procedure ddb_gar (db, record, field, array, len_array, npts)
+
+pointer db # DTTEXT pointer
+int record # Database index
+char field[ARB] # Database field
+real array[len_array] # Array values
+int len_array # Length of array
+int npts # Number of values in the field
+
+int i
+double tmp
+pointer sp, dubble
+bool fp_equald()
+
+begin
+ call smark (sp)
+ call salloc (dubble, npts, TY_DOUBLE)
+
+ call ddb_gad (db, record, field, Memd[dubble], len_array, npts)
+ do i = 1, npts {
+ tmp = Memd[dubble+i-1]
+ if (fp_equald (tmp, INDEFD))
+ array[i] = INDEFR
+ else
+ array[i] = real (tmp)
+ }
+
+ call sfree (sp)
+end
+
+
+# DDB_GAD -- Get a double array field
+
+procedure ddb_gad (db, record, field, array, len_array, npts)
+
+pointer db # DTTEXT pointer
+int record # Database index
+char field[ARB] # Database field
+double array[len_array] # Array values
+int len_array # Length of array
+int npts # Number of points in the array
+
+pointer sp, esp
+char name[SZ_LINE]
+int i
+
+int fscan(), nscan()
+bool streq()
+
+begin
+ if ((record < 1) || (record > DT_NRECS(db)))
+ call error (27, "Database record request out of bounds")
+
+ call seek (DT(db), DT_OFFSET(db, record))
+
+ while (fscan (DT(db)) != EOF) {
+ call gargwrd (name, SZ_LINE)
+
+ if (streq (name, "begin"))
+ break
+ else if (streq (name, field)) {
+ call gargi (npts)
+ if (nscan() != 2)
+ call error (28, "Error in database field value")
+
+ npts = min (npts, len_array)
+ for (i = 1; i <= npts; i = i + 1) {
+ if (fscan (DT(db)) == EOF)
+ call error (29, "Error in database field value")
+
+ call gargd (array[i])
+ if (nscan() != 1)
+ call error (30, "Error in database field value")
+ }
+ return
+ }
+ }
+
+ call smark (sp)
+ call salloc (esp, SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[esp], SZ_LINE,
+ "DDB_GA: Database field '%s' not found")
+ call pargstr (field)
+
+ call error (31, Memc[esp])
+ call sfree (sp)
+end
+
+
+# DDB_PTIME -- Put a time string with a comment
+
+procedure ddb_ptime (db)
+
+pointer db # DTTEXT pointer
+
+char timestr[SZ_TIME]
+long time, clktime()
+
+begin
+ time = clktime (0)
+ call cnvtime (time, timestr, SZ_TIME)
+ call fprintf (DT(db), "# %s\n")
+ call pargstr (timestr)
+end
+
+
+# DDB_SCAN -- Scan a line from the database.
+
+int procedure ddb_scan (db)
+
+pointer db # DDB pointer
+int fscan()
+
+begin
+ return (fscan (DT(db)))
+end
diff --git a/noao/imred/dtoi/dematch.par b/noao/imred/dtoi/dematch.par
new file mode 100644
index 00000000..cd7a5cfc
--- /dev/null
+++ b/noao/imred/dtoi/dematch.par
@@ -0,0 +1,8 @@
+# Cl parameters are:
+database,f,a,,,,Database of density information
+wedge,f,a,"",,,Wedge number used for calibration
+filter,f,a,"",,,Filter used for calibration
+emulsion,f,a,"",,,Emulsion used for calibration
+wedgefile,f,h,"noao$lib/hdwedge.dat",,,File containing exposure information
+nskip,i,h,0,,,Number of faint spots skipped
+verbose,b,h,yes,,,Print exposure record from wedgefile
diff --git a/noao/imred/dtoi/dematch.x b/noao/imred/dtoi/dematch.x
new file mode 100644
index 00000000..ff47e90f
--- /dev/null
+++ b/noao/imred/dtoi/dematch.x
@@ -0,0 +1,160 @@
+include <error.h>
+
+# T_DEMATCH -- matches densities listed in the input database to
+# log exposure values retrieved from a system maintained or user
+# provided file. The output matches are added to the input database.
+
+procedure t_dematch ()
+
+pointer filter, emulsion, wedge_db, density_db, db, exp, den, sp
+pointer wedge, expo
+int nskip, rec, nvalues
+
+pointer ddb_map()
+bool clgetb()
+int ddb_locate(), ddb_geti(), clgeti()
+
+begin
+ call smark (sp)
+ call salloc (filter, SZ_FNAME, TY_CHAR)
+ call salloc (emulsion, SZ_FNAME, TY_CHAR)
+ call salloc (wedge_db, SZ_FNAME, TY_CHAR)
+ call salloc (density_db, SZ_FNAME, TY_CHAR)
+ call salloc (wedge, SZ_FNAME, TY_CHAR)
+
+ # Get parameters
+ call clgstr ("database", Memc[density_db], SZ_FNAME)
+ call clgstr ("wedge", Memc[wedge], SZ_FNAME)
+ call clgstr ("filter", Memc[filter], SZ_FNAME)
+ call clgstr ("emulsion", Memc[emulsion], SZ_FNAME)
+ call clgstr ("wedgefile", Memc[wedge_db], SZ_FNAME)
+ nskip = clgeti ("nskip")
+
+ # Retrieve exposure information; one wedge per run
+ call hd_rwedge (Memc[wedge_db], exp, Memc[wedge], Memc[filter],
+ Memc[emulsion], clgetb("verbose"))
+
+ db = ddb_map (Memc[density_db], READ_ONLY)
+ iferr {
+ rec = ddb_locate (db, "density")
+ nvalues = ddb_geti (db, rec, "den_val")
+ } then
+ call error (0, "Error locating density record in database")
+
+ call salloc (den, nvalues, TY_REAL)
+ iferr (call ddb_gar (db, rec, "den_val", Memr[den], nvalues, nvalues))
+ call error (0, "Error reading density information")
+
+ # Close db file before reopening for append.
+ call ddb_unmap (db)
+
+ # Add fields for wedge, filter and plate as "calibrate" record
+ db = ddb_map (Memc[density_db], APPEND)
+ call ddb_prec (db, "calibrate")
+ call ddb_pstr (db, "wedge", Memc[wedge])
+ call ddb_pstr (db, "filter", Memc[filter])
+ call ddb_pstr (db, "emulsion", Memc[emulsion])
+
+ # Exposures are returned in increasing order. Make sure the
+ # exposures are output in the same order as density values.
+
+ call salloc (expo, nvalues, TY_REAL)
+ call amovr (Memr[exp+nskip], Memr[expo], nvalues)
+
+ if (Memr[den] > Memr[den+nvalues-1])
+ call hd_reorderr (Memr[expo], nvalues)
+
+ # Now add exposure values to database
+ call ddb_ptime (db)
+ call ddb_prec (db, "exposure")
+ call ddb_par (db, "log_exp", Memr[expo], nvalues)
+
+ call ddb_unmap (db)
+
+ call mfree (exp, TY_REAL)
+ call sfree (sp)
+end
+
+
+# HD_RWEDGE -- Read wedge information from database file for a given
+# wedge, filter, emulsion combination. A pointer to the extracted
+# exposure values is returned as an argument.
+
+procedure hd_rwedge (db_file, exp, wedge, filter, emulsion, verbose)
+
+char db_file[SZ_FNAME] # Name of database with exposure information
+pointer exp # Pointer to array of exposure values - output
+char wedge[SZ_FNAME] # Wedge number
+char filter[SZ_FNAME] # Filter used
+char emulsion[SZ_FNAME] # Emulsion used
+bool verbose # Print record of exposure information?
+
+pointer db
+char wfe[SZ_FNAME]
+int rec, nvalues, stat, i
+
+pointer ddb_map()
+int ddb_locate(), ddb_geti(), ddb_scan()
+errchk ddb_map, ddb_scan
+
+begin
+ # Convert strings to upper case for matching in database
+ call strupr (wedge)
+ call strupr (filter)
+ call strupr (emulsion)
+
+ db = ddb_map (db_file, READ_ONLY)
+ iferr (rec = ddb_locate (db, wedge))
+ call erract (EA_FATAL)
+
+ # Construct field name of filter/emulsion combination in question
+ call sprintf (wfe, SZ_FNAME, "%s/%s")
+ call pargstr (emulsion)
+ call pargstr (filter)
+
+ # Retrieve exposure values from database file
+ iferr (nvalues = ddb_geti (db, rec, wfe))
+ call erract (EA_FATAL)
+
+ call malloc (exp, nvalues, TY_REAL)
+ stat = ddb_scan (db)
+
+ do i = 1, nvalues {
+ call gargr (Memr[exp+i-1])
+
+ # Calibration values are stored 8 values per line
+ if (mod (i, 8) == 0) {
+ if (nvalues > i)
+ stat = ddb_scan (db)
+ }
+ }
+
+ if (verbose) {
+ call printf ("\nCalibration log exposure values for %s/%s: \n")
+ call pargstr (wedge)
+ call pargstr (wfe)
+
+ do i = 1, nvalues {
+ call printf ("%8g ")
+ call pargr (Memr[exp+i-1])
+ if (mod (i, 8) == 0)
+ call printf ("\n")
+ }
+
+ # Need a newline if the last line didn't have 8 entries.
+ if (mod (nvalues, 8) != 0)
+ call printf ("\n")
+
+ call flush (STDOUT)
+ }
+
+ # Exposures are returned sorted in increasing order. That is,
+ # the exposure for the lightest spot is element one, and the
+ # darkest spot's exposure value is the last array element.
+
+ if (Memr[exp] > Memr[exp+nvalues-1])
+ # Out of order - flip elements
+ call hd_reorderr (Memr[exp], nvalues)
+
+ call ddb_unmap (db)
+end
diff --git a/noao/imred/dtoi/doc/dematch.hlp b/noao/imred/dtoi/doc/dematch.hlp
new file mode 100644
index 00000000..7dffad26
--- /dev/null
+++ b/noao/imred/dtoi/doc/dematch.hlp
@@ -0,0 +1,51 @@
+.help dematch Feb87 imred.dtoi
+.ih
+NAME
+dematch -- match density to log exposure values
+.ih
+USAGE
+dematch database
+.ih
+PARAMETERS
+.ls database
+Database containing density list, probably from \fIspotlist\fR.
+.le
+.ls wedge = "", filter = "", emulsion = ""
+Information used to retrieve log exposure values from \fBwedgefile\fR.
+.le
+.ls wedgefile = "noao$lib/hdwedge.dat"
+Name of file containing wedge intensity information.
+.le
+.ls nskip = 0
+Number of faint spots skipped, used as an offset into the list of
+log exposure values.
+.le
+.ls verbose = yes
+Print the log exposure information to STDOUT as well as to \fBdatabase\fR.
+.le
+.ih
+DESCRIPTION
+Task \fIdematch\fR matches density values to log exposure values. A database
+of density values is input, as well as information needed to
+retrieve log exposure values from a reference file. The two sources of
+information are matched, and the matching log exposure values are added
+as a record in the database.
+
+Parameter \fBnskip\fR tells how many faint spots were not
+included in the density \fBdatabase\fR.  This information is
+used to align the density and exposure values.  It does not matter
+whether the densities are listed in monotonically increasing or
+decreasing order, as long as no spots were omitted between the first
+and last measured.
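+
+The alignment can be illustrated with a short sketch (Python, outside of
+IRAF; the names below are hypothetical and simply mirror the quantities
+described above):
+.nf
+
+    # exposures: full list from the wedge file, lightest spot first
+    # densities: measured spot densities from the database
+    # nskip:     number of faint spots that were not measured
+    def match(densities, exposures, nskip):
+        exp = exposures[nskip:nskip + len(densities)]
+        # If the densities run from dark to light, flip the exposures
+        # so the two lists stay in step.
+        if densities[0] > densities[-1]:
+            exp = exp[::-1]
+        return list(zip(densities, exp))
+
+.fi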
+.ih
+EXAMPLES
+Match densities in db1 to log exposure values for wedge#117
+with a IIIAJ emulsion and a GG385 filter.
+.nf
+
+ cl> dematch db1 wedge=117 filt=gg385 emulsion=IIIAJ
+.fi
+.ih
+SEE ALSO
+spotlist, hdfit, hdtoi
+.endhelp
diff --git a/noao/imred/dtoi/doc/dtoi.ms b/noao/imred/dtoi/doc/dtoi.ms
new file mode 100644
index 00000000..4f999259
--- /dev/null
+++ b/noao/imred/dtoi/doc/dtoi.ms
@@ -0,0 +1,576 @@
+.RP
+.TL
+An Overview of the IRAF DTOI Package
+.AU
+Suzanne Hammond Jacoby
+.AI
+IRAF Group - Central Computer Services
+.K2 "" "" "*"
+February 1987
+.br
+Revised July 1988
+
+.AB
+This document describes the DTOI package, which contains tasks
+for determining and applying a density to intensity transformation to
+photographic data. The transformation is determined from a set
+of calibration spots with known relative intensities. A curve is
+interactively fit to the densities and intensities of the calibration
+spots. The transformation is then applied and a new output image written.
+.AE
+
+.NH
+Introduction
+.PP
+The DTOI package contains tasks for computing and applying a density
+to intensity transformation to photographic data. These tasks perform the
+standard steps in linearizing data: calculating HD data points from
+calibration spots, fitting a curve to these points and applying the HD
+curve to the data. It is also possible to combine related HD curves.
+Communication between the tasks is via text files which the user can
+inspect or modify. It is intended
+to be easy for users to introduce data from outside the DTOI package
+into the processing.
+.PP
+There are currently six tasks in the package. They are:
+
+.ce
+The \fBDTOI\fR Package
+.TS
+center;
+n.
+spotlist \&- Calculate densities and weights of calibration spots.
+dematch \&- Match densities to log exposure values.
+hdfit \&- Fit characteristic curve to density, exposure data.
+hdtoi \&- Apply HD transformation to image data.
+hdshift \&- Align related characteristic curves.
+selftest \&- Test transformation algorithm.
+.TE
+.PP
+The DTOI package does not currently support the self calibration of images,
+but the addition of this capability is planned. This would involve
+determining the HD curve from the data itself, by assuming the point spread
+function scales linearly with intensity.
+.PP
+Upon entering the package, your calibration spots and images to be
+transformed should be on the disk in IRAF image format.
+
+.NH
+Determining the HD Curve Data
+.PP
+To determine the HD curve, you need two sets of data: the
+measured photographic densities of a set of calibration spots and
+the log exposure values corresponding to these measurements. The
+log exposure values must be known a priori. Tasks \fIspotlist\fR and
+\fIdematch\fR are used to assemble these two data sets.
+.NH 2
+SPOTLIST
+.PP
+The first step is to calculate the density of
+the calibration spots, each of which is a separate IRAF image or image
+section. The spot density is either the median of the spot pixels or
+the mean of the pixels when pixels more than a user-specified number of
+standard deviations away from the mean have been rejected. The numbers
+in the spot image must be scaled to density; parameter \fBspotlist.scale\fR
+is used such that density = input_value * scale. Task \fIspotlist\fR also
+calculates the standard deviation of each spot and reports
+the number of good pixels, i.e., the number of pixels not rejected
+when determining the mean density.
+The final product of this task is a record in the database containing a
+density for each spot.  The scale factor used is also written to the
+database; it will be read later in task \fIhdtoi\fR.
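+.PP
+For readers outside the IRAF environment, the rejection scheme can be
+sketched in a few lines of Python (an illustration written for this
+overview, not the package code; the function name and arguments are
+hypothetical, with \fIksig\fR playing the role of the sigma rejection
+parameter):
+.DS
+import statistics
+
+def spot_density(pixels, scale, ksig=3.0, max_iter=10):
+    # Convert raw pixel values to density, then iteratively reject
+    # pixels more than ksig standard deviations from the mean.
+    dens = [p * scale for p in pixels]
+    mean = statistics.mean(dens)
+    sigma = statistics.pstdev(dens)
+    for _ in range(max_iter):
+        kept = [d for d in dens if abs(d - mean) <= ksig * sigma]
+        if len(kept) == len(dens) or len(kept) <= 1:
+            break
+        dens = kept
+        mean = statistics.mean(dens)
+        sigma = statistics.pstdev(dens)
+    return mean, sigma, len(dens)
+.DE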
+.NH 2
+DEMATCH
+.PP
+Log exposure values must be matched to the measured density values. These
+log exposure values must be known a priori and will be read from a file.
+Task \fIdematch\fR retrieves the proper exposure information by
+matching the wedge number, emulsion type and filter used. Once a match
+has been made, the proper log exposure values are written to a record
+in the database.
+.PP
+A database of log exposure values for the NOAO standard wedges is maintained
+in a system file; the wedge/emulsion/filter combinations available are listed
+in the last section of this document. This file can be replaced with one specific
+to any institution; the file name is supplied to task \fIdematch\fR as a
+parameter. In this way the wedge file can be personalized to any application
+and not be lost when the system is updated.
+
+.NH
+Fitting the Curve
+.PP
+The HD curve, or characteristic curve, is a plot of density versus log
+exposure. This curve is determined from the data points generated by
+tasks \fIspotlist\fR and \fIdematch\fR. The objective is to fit
+a curve to these points, such that Log exposure = F(Density). The
+technique available in this package allows the independent variable of the
+fit to be a transformation of the density (log opacitance, for example).
+The log exposure and density values are
+read from the database. If multiple entries for a particular record are
+present in the database, the last one is used.
+.NH 2
+HDFIT
+.PP
+Task \fIhdfit\fR fits a characteristic curve to density and log exposure
+values in preparation for transforming an image from density to intensity.
+Five functional forms of the curve are available:
+.nf
+
+ Power Series
+ Linear Spline
+ Cubic Spline
+ Legendre Polynomials
+ Chebyshev Polynomials
+
+.fi
+.LP
+It is possible to apply a transformation to the
+independent variable (density above fog) prior to the fit. The traditional
+choice is to fit log exposure
+as a function of the log opacitance, rather than density directly. This is
+sometimes referred to as the Baker, or Seidel, function. Transforming
+the density has the effect of stretching the low density data points, which
+tend to be relatively oversampled.
+In the DTOI package, four independent variables are currently available:
+.nf
+
+ Density
+ Log Opacitance
+ K50 - (Kaiser* Transform with alpha = 0.50)
+ K75 - (Kaiser* Transform with alpha = 0.75)
+
+.fi
+.FS
+* Margoshes and Rasberry, Spectrochimica Acta, Vol 24B, p497, (1969)
+.FE
+Any combination of transformation type and fitting function can be used and
+changed interactively. Two combinations of interest are discussed here.
+
+The default fit is a power series fit where the independent variable is
+Log Opacitance. That is:
+.LP
+.EQ
+
+ "Log Exposure = " sum from k=0 to {ncoeff - 1} {A sub k Y sup k}
+
+.EN
+.sp 1
+.EQ
+ "where Y = Log Opacitance = "Log sub 10 (10 sup Density - 1)
+.EN
+.LP
+A fit that is expected to best model a IIIA-J emulsion is a power series
+fit to a K75 transform of the density. That is,
+.LP
+.EQ
+
+ "Log Exposure = "sum from k=0 to {ncoeff - 1} {A sub k Y sup k}
+
+.EN
+.sp 1
+.EQ
+"where Y = K75 transform = Density + 0.75 " Log sub 10 (1 - 10 sup -Density )
+.EN
+.LP
+Over the expected small dynamic range in the variables of the fit, Legendre
+and Chebyshev functions offer no advantage over a simple power series
+form.  The cubic and linear spline fits may follow the data very
+closely, but with typically sparse data sets this is not desirable.  It
+is expected that the power series fit will serve satisfactorily in all cases.
+
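+.PP
+For illustration, the transformation and the power series fit can be
+sketched in Python with NumPy (a sketch written for this overview, not
+the \fIcurfit\fR-based IRAF code; the names below are hypothetical, and
+\fIncoeff\fR corresponds to the \fBorder\fR parameter of \fIhdfit\fR):
+.DS
+import numpy as np
+
+def transform(density, kind="logo"):
+    d = np.asarray(density, dtype=float)      # density above fog
+    if kind == "none":
+        return d
+    if kind == "logo":                         # log opacitance
+        return np.log10(10.0 ** d - 1.0)
+    if kind in ("k50", "k75"):                 # Kaiser transforms
+        alpha = 0.50 if kind == "k50" else 0.75
+        return d + alpha * np.log10(1.0 - 10.0 ** (-d))
+    raise ValueError(kind)
+
+def fit_power_series(density, logexp, ncoeff=4, kind="logo"):
+    # Fit log exposure as a power series in the transformed density.
+    # NumPy returns the highest order coefficient first.
+    return np.polyfit(transform(density, kind), logexp, ncoeff - 1)
+.DE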
+.NH 3
+Interactive Curve Fitting
+.PP
+Task \fIhdfit\fR can be run either interactively or in batch mode.  In
+interactive mode, points in the sample can be added, deleted or edited.
+The weighting values can be changed, as can the fog value, the type of
+transformation and the fitting function. To obtain the best fit possible, interactive
+fitting is recommended. A complete list of the available commands
+is printed here; this list is also available interactively with the
+keystroke '\fL?\fR'.
+.TS
+center;
+c s s w(3.0i)
+c l s.
+
+ DTOI INTERACTIVE CURVE FITTING OPTIONS
+
+\fL?\fR Print options
+\fLa\fR Add the point at the cursor position to the sample
+\fLc\fR Print the coordinates and fit of point nearest the cursor
+\fLd\fR Delete data point nearest the cursor
+\fLf\fR Fit the data and redraw or overplot
+\fLg\fR T{
+Redefine graph keys. Any of the following data types may be along
+either axis:
+T}
+.T&
+l l l.
+ \fLx\fR Independent variable \fLy\fR Dependent variable
+ \fLf\fR Fitted value \fLr\fR Residual (y - f)
+ \fLd\fR Ratio (y / f) \fLn\fR Nonlinear part of y
+ \fLu\fR Density above fog
+
+Graph keys:
+.T&
+c l s.
+
+\fLh\fR h = (x,y) transformed density vs. log exposure
+\fLi\fR i = (y,x) log exposure vs. transformed density
+\fLj\fR j = (x,r) transformed density vs. residuals
+\fLk\fR k = (x,d) transformed density vs. the y(data)/y(fit) ratio
+\fLl\fR l = (y,u) log exposure vs. density above fog (HD Curve)
+
+\fLo\fR Overplot the next graph
+\fLq\fR T{
+Terminate the interactive curve fitting, updating the database file.
+T}
+\fLr\fR Redraw graph
+\fLu\fR Undelete the deleted point nearest the cursor
+\fLw\fR Set the graph window. For help type 'w' followed by '?'.
+\fLx\fR Change the x value of the point nearest the cursor
+\fLy\fR Change the y value of the point nearest the cursor
+\fLz\fR Change the weight of the point nearest the cursor
+
+.T&
+l s s w(3.0i).
+T{
+The parameters are listed or set with the following commands which may be
+abbreviated. To list the value of a parameter type the command alone.
+T}
+
+.T&
+l l s.
+
+\fL:show \fR[\fIfile\fR] Show the values of all the parameters
+\fL:vshow \fR[\fIfile\fR] Show the values of all the parameters verbosely
+\fL:errors \fR[\fIfile\fR] Print the errors of the fit (default STDOUT)
+\fL:reset \fR T{
+Return to original conditions of x, y, wts and npts.
+T}
+\fL:ebars \fR[\fIerrors/weights\fR] T{
+The size of marker type '[hv]ebars' can show either standard deviations or
+relative weights.
+T}
+\fL:function \fR[\fIvalue\fR] T{
+Fitting function (power, chebyshev, legendre, spline3, or spline1)
+T}
+\fL:transform \fR[\fIvalue\fR] Set the transform type (none, logo, k50, k75)
+\fL:fog \fR[\fIvalue\fR] Change the fog level (or ":fog reset")
+\fL:order \fR[\fIvalue\fR] Fitting function order
+\fL:quit \fR Terminate HDFIT without updating database
+\fL:/mark \fRstring T{
+Mark type (point, box, plus, cross, diamond, hline, vline, hebar, vebar, circle)
+T}
+
+.T&
+l s s.
+T{
+Additional commands are available for setting graph formats and manipulating
+the graphics. Use the following commands for help.
+T}
+
+.T&
+l l s.
+\fL:/help\fR Print help for graph formatting option
+\fL:.help\fR Print cursor mode help
+
+.TE
+.PP
+The value of fog can be changed interactively if you have
+reason to override the value written in the database by \fIspotlist\fR.
+You can reset the fog to its original value with the command ":fog reset".
+A common problem with defining the HD curve is that some of
+the calibration spot densities fall below fog.  This is caused either by
+the low signal to noise at low densities or by a poor choice of
+where to scan the fog level.  These points are rejected from the fit
+when a transformation of the density is being made, as the transform cannot
+be evaluated for negative density. If the fog value or transformation
+type is interactively changed so this problem no longer exists,
+the spot densities are restored in the sample.
+
+The parameters of the final fit are written to a database which then
+contains the information
+necessary to reinitialize the curfit package for applying the transformation
+in \fIhdtoi\fR.
+
+.NH
+Applying the Transform
+.PP
+.NH 2
+HDTOI
+.PP
+Once the HD curve has been defined, it is applied to a density image
+in task \fIhdtoi\fR.
+Here the transformation is applied, as described by the fit parameters
+stored in the database. If more than one record of fit parameters is
+present, the last one is used. This means task \fIhdfit\fR can be
+repeated until an acceptable solution is found; the last solution will
+be used by \fIhdtoi\fR. On output, a new output image is written; the
+input image is left intact.
+.PP
+The transformation is accomplished by using a look-up table. All possible
+input values, from the minimum to maximum values found in the image, are
+converted to density using the scale value read from the database, and then
+to intensity using the fit parameters determined by \fIhdfit\fR. The input
+value is then the index into the intensity table:
+intensity = look_up_table (input_value).
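+.PP
+The look-up table step can be sketched as follows (plain Python/NumPy,
+written for this overview rather than taken from \fIhdtoi\fR; it reuses
+the hypothetical transform() helper from the previous sketch, and the
+below-fog handling shown corresponds to the default choice of setting
+such values to 0.0):
+.DS
+import numpy as np
+
+def build_lut(vmin, vmax, scale, fog, coeff, kind, ceiling=30000.0):
+    raw = np.arange(vmin, vmax + 1)           # every possible input value
+    dens = raw * scale - fog                  # density above fog
+    intens = np.zeros(dens.shape)
+    ok = dens > 0.0                           # below-fog values stay 0.0
+    intens[ok] = 10.0 ** np.polyval(coeff, transform(dens[ok], kind))
+    return intens * (ceiling / intens.max())  # saturated density -> ceiling
+
+def apply_lut(image, lut, vmin):
+    # A pixel's raw value, offset by vmin, indexes the table directly.
+    return lut[np.asarray(image, dtype=int) - vmin]
+.DE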
+.PP
+A scaling factor can be applied to the final intensities, as typically
+they will be < 1.0. (The maximum log exposure in the NOAO wedge database
+is 0.0.) By default, a saturated density pixel will be assigned the "ceiling"
+intensity of 30000 and the other pixels are scaled accordingly.
+The user is responsible for choosing a ceiling value
+that will avoid having significant digits truncated.
+The precision
+of the transform is unaffected by scaling the
+final intensities, although caution must be used if the output image
+pixel type is an integer.
+.PP
+The value of fog to be used is entered by the user, and can be either
+a number or a list of file names from which to calculate the fog value.
+The fog value is subtracted from the input image before the transformation
+takes place.
+Again, consider density values below fog.  Two choices are available for
+these densities: the calculated intensity can be set to the constant
+value 0.0, or to -1.0 times the intensity determined for the absolute
+value of the density.
+
+.NH
+Aligning Related HD curves
+.PP
+Calibration data sets from several plates can be combined once a shift
+particular to each set has been removed. "Different spot exposures
+define a series of HD curves which are parallel but mutually separated
+by arbitrary shifts in log exposure, produced by differing lamp intensities
+or exposure times. Generally, Kodak spectroscopic plates can be
+averaged if [1] they come from the same emulsion batch and box, [2]
+they receive identical hypersensitizing, [3] they are exposed similarly and
+[4] they receive the same development." *
+.FS
+* "Averaging Photographic Characteristic Curves", John Kormendy, from
+"ESO Workshop on Two Dimensional Photometry", Edited by P. Crane and
+K.Kjar, p 69, (1980), an ESO Publication.
+.FE
+.NH 2
+HDSHIFT
+.PP
+Procedure \fIhdshift\fR calculates and subtracts a zero point shift to
+bring several related HD curves into alignment. The individual shifts
+are calculated by elimination of the first coefficient (Bevington, eqn 9-3):
+.EQ
+
+a0 = y bar - a sub 1 X bar - a sub 2 X bar sup 2 - ~ ...~ - a sub n X bar sup n
+
+.EN
+Here, the averages over y and X refer to individual calibration set averages;
+the coefficients a1, ... an were previously calculated using data from all
+calibration sets with task \fIhdfit\fR, and stored in the database. The
+a0 term is calculated individually for each database; this term represents
+the zero point shift in log exposure and will be different for each database.
+
+On output, the log exposure values in each database have been
+shifted to the zero point of the first database in the list. The
+log exposure records are now aligned and it would be appropriate
+to run \fIhdfit\fR on the modified database list.
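+.PP
+The bookkeeping can be outlined as follows (a hypothetical Python sketch
+written for this overview; "coeff" holds the shared coefficients a1..an
+from \fIhdfit\fR in increasing order, and each entry of "databases"
+holds one calibration set's transformed densities x and log exposures y):
+.DS
+def zero_point(x, y, coeff):
+    # a0 = ybar - a1*xbar - a2*xbar**2 - ... - an*xbar**n
+    xbar = sum(x) / len(x)
+    ybar = sum(y) / len(y)
+    return ybar - sum(a * xbar ** (k + 1) for k, a in enumerate(coeff))
+
+def hdshift(databases, coeff):
+    a0ref = None
+    for db in databases:
+        a0 = zero_point(db["x"], db["y"], coeff)
+        if a0ref is None:
+            a0ref = a0         # the first database defines the zero point
+        # shift this set's log exposures onto the first set's zero point
+        db["y"] = [y + (a0ref - a0) for y in db["y"]]
+    return databases
+.DE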
+.NH
+Testing the Transformation Algorithm
+.PP
+A test task is included to see if any numerical errors were introduced
+during the density to intensity transformation. It also evaluates
+truncation errors produced when an output image with integer pixels,
+rather than reals, is written.
+.NH 2
+SELFTEST
+.PP
+An intensity vector is generated from a density vector in two different
+ways. The first method uses the density vector and known coefficients
+to compute the intensity. The second method uses the curfit package
+to generate a look up table of intensities as done in task \fIhdtoi\fR. The
+residual of the two vectors is plotted; ideally the difference between
+the 'known' and 'calculated' intensity is zero.
+.PP
+Task \fIselftest\fR also plots intensity as a function of density for
+both integer and real output pixels. The user should investigate the
+plot with the cursor zoom and expand capabilities to determine if
+truncation errors are significant.
+.NH
+The Wedgefile Database
+.PP
+Task \fIdematch\fR reads a database and retrieves log exposure information
+for certain combinations of wedge number, photographic emulsion and filter.
+Those combinations included in the NOAO database are listed in the next
+section, although any calibration data can be included if the values are
+known. To modify the database, it is recommended that
+you generate a new file rather than add records to the existing file. This
+way, the modifications will not be lost when a new version of the IRAF
+system is released.
+
+In the database, the information for each wedge makes up a separate record;
+each record starts with the word \fBbegin\fR. Each record has a title field
+and can have multiple emulsion/filter fields. The number of log exposure
+values must be given, followed by the values written 8 per line. The order
+of the exposure data can be either monotonically increasing or decreasing.
+Here is an example:
+.DS
+begin 115
+ title MAYALL 4-M PF BEFORE 15APR74 (CHROME) [MP1-MP968]
+ IIIAJ/UG2 16
+ 0.000 -0.160 -0.419 -0.671 -0.872 -1.153 -1.471 -1.765
+ -2.106 -2.342 -2.614 -2.876 -3.183 -3.555 -3.911 -4.058
+ IIAO/UG2 16
+ 0.000 -0.160 -0.418 -0.670 -0.871 -1.152 -1.468 -1.761
+ -2.102 -2.338 -2.609 -2.870 -3.176 -3.547 -3.901 -4.047
+
+.DE
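+.LP
+A small reader for records in this format might look like the following
+(a Python sketch written for this document, not the parser used by
+\fIdematch\fR; it assumes the layout shown in the example above):
+.DS
+def read_wedge(path, wedge, field):
+    # Return the log exposure values for one wedge and one
+    # emulsion/filter field, e.g. read_wedge("hdwedge.dat",
+    # "115", "IIIAJ/UG2").
+    values, want, count = [], False, 0
+    for line in open(path):
+        words = line.split()
+        if not words:
+            continue
+        if words[0] == "begin":
+            want = (words[1] == wedge)       # start of a new record
+            count = 0
+        elif want and words[0] == field:
+            count = int(words[1])            # number of values to follow
+        elif want and count > 0:
+            values += [float(w) for w in words]
+            count -= len(words)
+    return values
+.DE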
+.NH 2
+Contents of the NOAO Wedgefile
+.LP
+The following table lists the wedge/emulsion/filter combinations available in
+the NOAO wedgefile database.
+.TS
+center;
+l l s s s
+l l l l l.
+
+\fBWedge 24 CTIO SCHMIDT WESTON TUBE SENSITOMETER. \fR
+ MONO/MONO
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 48 PALOMAR 48-INCH SCHMIDT STEP WEDGE. \fR
+ MONO/MONO
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 84 OLD 84-INCH SPOT SENSITOMETER (1967) \fR
+ MONO/MONO
+
+.TE
+.TS
+l l s s s
+l l l l l.
+\fBWedge 101 SPOT BOX 4, KEPT IN SCHOENING-S LAB. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 115 MAYALL 4-M PF BEFORE 15APR74 (CHROME) [MP1-MP968] \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4770 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 117 CTIO 4-METER P.F. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4770 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 118 CTIO 4-METER CASSEGRAIN \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470 MONO/6900
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 119 SPOT BOX 5, KEPT AT MAYALL 4-METER. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 120 SPOT BOX 6, KEPT AT 2.1-METER. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 121 SPOT BOX 8, KEPT IN SCHOENING'S LAB. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 122 SPOT BOX 7, AVAILABLE AT KPNO NIGHT ASST'S OFFICE \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470C
+.TE
+.TS
+l l s s s
+l l l l l.
+\fBWedge 123 MAYALL 4-M P.F. 15APR74 TO 21MAY74 [MP969-MP1051] \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4770 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 129 MAYALL 4-METER P.F. AFTER 21MAY74 [MP1052--> ] \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 130 MAYALL 4-METER CASS CAMERA. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4760 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 138 TRAVELLING BOX AFTER 06JAN78. \fR
+ IIIAJ/UG2 IIAO/UG2 IIIAJ/*5113 IIAO/*5113
+ IIAO/GG385 IIIAJ/CLEAR IIIAJ/GG385 IIAD/GG495
+ 127/GG495 098/RG610 127/RG610 IVN/RG695
+ MONO/4363 MONO/4770 MONO/5200 MONO/5876
+ MONO/6470
+
+.T&
+l l s s s
+l l l l l.
+\fBWedge 201 TEN UCLA SPOTS (H. FORD, 10JAN78) \fR
+ MONO/MONO
+.TE
diff --git a/noao/imred/dtoi/doc/dtoi.toc b/noao/imred/dtoi/doc/dtoi.toc
new file mode 100644
index 00000000..cd794321
--- /dev/null
+++ b/noao/imred/dtoi/doc/dtoi.toc
@@ -0,0 +1,34 @@
+.LP
+.DS C
+\fBTable of Contents\fR
+.DE
+.sp 3
+1.\h'|0.4i'\fBIntroduction\fP\l'|5.6i.'\0\01
+.sp
+2.\h'|0.4i'\fBDetermining the HD Curve Data \fP\l'|5.6i.'\0\01
+.br
+\h'|0.4i'2.1.\h'|0.9i'SPOTLIST\l'|5.6i.'\0\02
+.br
+\h'|0.4i'2.2.\h'|0.9i'DEMATCH\l'|5.6i.'\0\02
+.sp
+3.\h'|0.4i'\fBFitting the Curve\fP\l'|5.6i.'\0\02
+.br
+\h'|0.4i'3.1.\h'|0.9i'HDFIT\l'|5.6i.'\0\02
+.br
+\h'|0.9i'3.1.1.\h'|1.5i'Interactive Curve Fitting\l'|5.6i.'\0\03
+.sp
+4.\h'|0.4i'\fBApplying the Transform\fP\l'|5.6i.'\0\05
+.br
+\h'|0.4i'4.1.\h'|0.9i'HDTOI\l'|5.6i.'\0\05
+.sp
+5.\h'|0.4i'\fBAligning Related HD curves\fP\l'|5.6i.'\0\06
+.br
+\h'|0.4i'5.1.\h'|0.9i'HDSHIFT\l'|5.6i.'\0\06
+.sp
+6.\h'|0.4i'\fBTesting the Transformation Algorithm\fP\l'|5.6i.'\0\06
+.br
+\h'|0.4i'6.1.\h'|0.9i'SELFTEST\l'|5.6i.'\0\06
+.sp
+7.\h'|0.4i'\fBThe Wedgefile Database\fP\l'|5.6i.'\0\06
+.br
+\h'|0.4i'7.1.\h'|0.9i'Contents of the NOAO Wedgefile\l'|5.6i.'\0\07
diff --git a/noao/imred/dtoi/doc/hdfit.hlp b/noao/imred/dtoi/doc/hdfit.hlp
new file mode 100644
index 00000000..0b55137e
--- /dev/null
+++ b/noao/imred/dtoi/doc/hdfit.hlp
@@ -0,0 +1,79 @@
+.help hdfit Mar88 imred.dtoi
+.ih
+NAME
+hdfit -- fit characteristic curve to density, exposure data
+.ih
+USAGE
+hdfit database
+.ih
+PARAMETERS
+.ls database
+Database[s] containing the density, log exposure information.
+.le
+.ls function = "power"
+Type of curve to fit; chosen from "power", "legendre", "chebyshev",
+"spline1" or "spline3". Abbreviations are permitted.
+.le
+.ls transform = "logopacitance"
+Transformation performed on the density prior to fitting. Chosen from
+"none", "logopacitance", "k50" or "k75".
+.le
+.ls weighting = "none"
+Weights can be assigned to the independent variable for fitting a curve.
+Choices are "none", "user" and "calculated".
+.le
+.ls order = 4
+Order of the fit.
+.le
+.ls interactive = yes
+Fit the data interactively?
+.le
+.ls device = "stdgraph"
+Interactive graphics device.
+.le
+.ls cursor = "stdgcur"
+Source of cursor input.
+.le
+.ih
+DESCRIPTION
+Task \fIhdfit\fR is used to fit a curve to density and log exposure
+values in preparation for transforming an image from density to intensity.
+The log exposure and density are read from \fBdatabase\fR.
+More than one database can be input,
+in which case one curve is fit to the combined data and the results
+written to each database in the list.
+
+Weights can be applied to the independent variable of the fit.
+Weights can be changed interactively, and are initially chosen from
+"none", "user" and "calculated".  Weight values can
+be calculated from the standard deviations, read from \fBdatabase\fR,
+as weight = (normalized density) / sdev. If user weights are to be
+used, they are read from \fBdatabase\fR record "weights" as "wts_vals"
+entries.
+
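+A sketch of the calculated weighting (illustrative Python only, not the
+IRAF code; it assumes "normalized density" means the density scaled by
+the largest density, with "dens" and "sdev" the density and standard
+deviation entries read from \fBdatabase\fR):
+.nf
+
+    dmax = max(dens)
+    weights = [(d / dmax) / s for d, s in zip(dens, sdev)]
+
+.fi
+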
+When \fBinteractive\fR = yes, the HD curve is plotted and the cursor
+made available for interactively examining and altering the fit.
+The fitting function, transformation and order can be modified; data
+points can be added, deleted or edited. Four choices of independent
+variable are available in \fBhdfit\fR by means of the parameter
+\fBtransform\fR.  If no transformation is applied, the independent
+variable is the density itself.  Other choices are the log
+opacitance or a Kaiser transform with alpha = 0.50 or 0.75. The
+default choice is to fit log exposure as a function of the log opacitance;
+this is traditionally known as the Baker-Seidel function.
+.ih
+EXAMPLES
+.nf
+Using the defaults as starting parameters, interactively fit a curve to
+the data points in db1.
+
+ cl> hdfit db1
+
+A sixth order power series function is fit in batch mode to the db1 data.
+
+ cl> hdfit db1 order=6 interactive-
+.fi
+.ih
+SEE ALSO
+spotlist, dematch, hdtoi
+.endhelp
diff --git a/noao/imred/dtoi/doc/hdshift.hlp b/noao/imred/dtoi/doc/hdshift.hlp
new file mode 100644
index 00000000..aaa59063
--- /dev/null
+++ b/noao/imred/dtoi/doc/hdshift.hlp
@@ -0,0 +1,50 @@
+.help hdshift Feb87 imred.dtoi
+.ih
+NAME
+hdshift - calculate and subtract zero point to align HD curves.
+.ih
+USAGE
+hdshift database
+.ih
+PARAMETERS
+.ls database
+Input list of databases containing density, exposure and fit information.
+.le
+.ih
+DESCRIPTION
+For each file in \fBdatabase\fR, procedure \fBhdshift\fR calculates and
+subtracts a zero point shift to bring several related HD curves into
+alignment. The individual shifts are calculated by elimination of the
+first coefficient (Bevington, eqn 9-3):
+.nf
+ _ _ _ _
+ a0 = y - a1*X - a2*X**2 - ... - an*X**n
+
+.fi
+Here, the averages over y and X refer to individual \fBdatabase\fR averages;
+the coefficients a1, ... an were previously calculated using data from all
+\fBdatabase\fRs, in task \fIhdfit\fR, and stored in the database. The
+a0 term is calculated individually for each database; this term represents
+the zero point shift in log exposure and will be different for each database.
+
+On output, the log exposure values in each \fBdatabase\fR have been
+shifted to the zero point of the first database in the list. The
+log exposure records are now aligned and it would be appropriate
+to run task \fIhdfit\fR on the modified \fBdatabase\fR list and
+determine the common solution.
+.ih
+EXAMPLES
+.nf
+
+Shift the curves in four databases to a common zero point.
+
+ cl> hdshift db1,db2,db3,db4
+.fi
+.ih
+SEE ALSO
+hdfit, hdtoi
+.br
+"Averaging Photographic Characteristic Curves", John Kormendy, from
+"ESO Workshop on Two Dimensional Photometry", Edited by P. Crane and
+K.Kjar, p 69, (1980), an ESO Publication.
+.endhelp
diff --git a/noao/imred/dtoi/doc/hdtoi.hlp b/noao/imred/dtoi/doc/hdtoi.hlp
new file mode 100644
index 00000000..bf4355f0
--- /dev/null
+++ b/noao/imred/dtoi/doc/hdtoi.hlp
@@ -0,0 +1,88 @@
+.help hdtoi May88 imred.dtoi
+.ih
+NAME
+hdtoi -- transform images according to hd curve
+.ih
+USAGE
+hdtoi input output database
+.ih
+PARAMETERS
+.ls input
+List of images to be transformed.
+.le
+.ls output
+List of output image names.
+.le
+.ls database
+Name of text database describing HD curve.
+.le
+.ls fog = ""
+Value of fog level, read from database if unspecified.
+.le
+.ls option = "mean"
+Option for calculating fog density when \fBfog\fR is a file list, can be
+either "mean" or "median".
+.le
+.ls sigma = 3.0
+If \fBfog\fR is a file name, and \fBoption\fR = "mean", the mean fog density
+is iteratively calculated using this rejection criterion.
+.le
+.ls floor = 0.0
+Value assigned to density levels below fog; can be either 0.0 or -1.0.
+.le
+.ls ceiling = 30000.
+The final intensities are scaled to this value, such that a saturated
+input density equals \fBceiling\fR on output.
+.le
+.ls datatype = "r"
+Datatype of output image pixels.
+.le
+.ls verbose = yes
+Print log of processing to STDOUT.
+.le
+.ih
+DESCRIPTION
+Task \fIhdtoi\fR transforms one image to another as described by the
+\fBdatabase\fR. There is only one HD curve per run; the same
+transformation is applied to all input images.
+
+The fog value can be obtained in three ways: read from the database, read
+as a floating point number, or calculated from a list of fog images. If
+parameter \fBfog\fR is not specified, the fog value is read from
+\fBdatabase\fR. If \fBfog\fR is specified, it can be entered
+as either a floating point number or as a list of file names. If the
+value cannot be read as a number, it is assumed to be a file name. In that
+case, the density of each file in the fog list is calculated and the
+average of these values is subtracted from \fBinput\fR before processing.
+The algorithm used to calculate the fog density is selected by the
+\fBoption\fR parameter, and is either a "mean" or "median" calculation.
+The fog density can be the mean value after pixels more than the specified
+number of sigma have been rejected, or the median value of all the fog spot
+pixels.
+
+The fog value is subtracted from the input image before the transformation
+takes place. It is possible that some density values will fall below
+the fog level; these values are handled in one of two ways. Values
+below the fog value are set equal to 0.0 when \fBfloor\fR = 0.0.  If
+\fBfloor\fR = -1.0, the resulting intensity is -1.0 times the intensity
+computed for the absolute value of the density.
+
+A scaling factor is applied to the final intensities, as typically
+they will be < 1.0. The \fBceiling\fR parameter is used to specify what
+value a saturated density is transformed to; all intensities are scaled
+to this upper limit. The precision of the transformation is unaffected by
+this parameter, although caution must be used if the output image pixel
+type is an integer. The user is responsible for choosing
+a \fBceiling\fR that avoids the truncation of significant digits.
+.ih
+EXAMPLES
+Convert three density images to intensity images as described in database db1.
+.nf
+
+	cl> hdtoi denin* intim1,intim2,intim3 db1
+.fi
+.ih
+TIME REQUIREMENTS
+Task \fBhdtoi\fR requires 20 cpu seconds to transform a 512 square image, with
+a 12 bit data range, on a VAX 750.
+.ih
+SEE ALSO
+spotlist, dematch, hdfit
+.endhelp
diff --git a/noao/imred/dtoi/doc/selftest.hlp b/noao/imred/dtoi/doc/selftest.hlp
new file mode 100644
index 00000000..329c9099
--- /dev/null
+++ b/noao/imred/dtoi/doc/selftest.hlp
@@ -0,0 +1,81 @@
+.help selftest Feb87 imred.dtoi
+.ih
+NAME
+selftest -- test routine to verify \fIdtoi\fR transformation
+.ih
+USAGE
+selftest nbits
+.ih
+PARAMETERS
+.ls nbits = 12
+Dynamic range of the data to test.
+.le
+.ls device = "stdgraph"
+Plotting device for graphical output.
+.le
+.ls verbose = no
+A table of density, intensity values is printed if \fBverbose\fR = yes.
+.le
+.ls ceiling = 30000.
+Maximum intensity to output.
+.le
+.ls max_raw = 0
+The maximum raw data value. Needed only if \fInbits\fR equals something
+other than 12, 15 or 0.
+.le
+.ls scale = 0.0
+The raw data to density scale factor. Needed only if \fInbits\fR
+equals something other than 12, 15, or 0.
+.le
+
+.ih
+DESCRIPTION
+Task \fIselftest\fR is a test program for the \fIdtoi\fR package. Its
+output can be examined to see if numerical errors are introduced during
+the density to intensity transformation. It also evaluates truncation
+errors produced when an output image with integer pixels is written.
+
+Many different PDS setups can be investigated with task \fBselftest\fR.
+Setting parameter \fInbits\fR = 12
+indicates PDS format data, with data range 0 to 3071. Setting \fInbits\fR = 15
+indicates FITS format data, with data range 0 to 24575. The special value of
+\fInbits\fR = 0 means a small test data range from 1 to 144 is investigated.
+If any other value of \fInbits\fR is entered, the user is queried for the
+max raw data values and the raw data to density scaling factor.
+
+An intensity vector is generated from a density vector in two different ways.
+The first method uses the density vector and known coefficients to compute
+the intensity. The second method uses the curfit package to generate a
+look up table of intensities as done in task \fBHDTOI\fR. The residual
+of the two intensity vectors is plotted. Ideally, the difference between
+the 'known' intensities and 'calculated' intensities is zero.
+
+The second plot output by \fBselftest\fR shows intensity as a function
+of density. Two lines are overplotted; integer intensity versus density
+and real intensity versus density. Because truncation errors are most
+pronounced at low density values, the plot covers only the lowest 5%
+of the density range. The user should investigate the plot with the
+cursor zoom and expand capabilities to determine if truncation errors
+are significant.
+
+In verbose mode, \fBselftest\fR produces a three column table of raw
+data value, density and calculated intensity.
+
+.ih
+EXAMPLES
+
+.nf
+Run task selftest for 12 bit data with plots appearing on the terminal.
+
+ cl> selftest
+
+.fi
+Run selftest in verbose mode, spooling the output to file 'ditable'.  This
+file is then run through the 'fields' task to extract the density and
+intensity columns, which are piped to the 'graph' task.  The result is a
+plot of the look up table.
+.nf
+
+ cl> selftest ver+ > ditable
+ cl> fields ditable 2,3 | graph xlab=Density ylab=Intensity
+.fi
+.endhelp
diff --git a/noao/imred/dtoi/doc/splotlist.hlp b/noao/imred/dtoi/doc/splotlist.hlp
new file mode 100644
index 00000000..43b3f223
--- /dev/null
+++ b/noao/imred/dtoi/doc/splotlist.hlp
@@ -0,0 +1,81 @@
+.help spotlist May88 imred.dtoi
+.ih
+NAME
+spotlist -- calculate densities of calibration spots
+.ih
+USAGE
+spotlist spots fogs database
+.ih
+PARAMETERS
+.ls spots
+List of image files containing the calibration data.
+.le
+.ls fogs
+List of image files containing fog spots.
+.le
+.ls database
+Name for output database.
+.le
+.ls scale = 0.00151 # (4.65 / 3071.)
+The scale factor to convert values in the image files to densities, such
+that scale = density / input_value.
+.le
+.ls maxad = 3071
+The maximum A/D value, that is, the input value corresponding to a
+saturated pixel.
+.le
+.ls option = "mean"
+Option for calculating the spot densities; can be either "mean" or "median".
+.le
+.ls sigma = 3.0
+Rejection criterion for iteratively calculating the mean density.
+.le
+.ih
+DESCRIPTION
+Task \fIspotlist\fR reads calibration spot images and calculates their
+density and standard deviation. Three records are entered in the
+database: density, standard deviation and number of unrejected pixels.
+Each record contains as many entries as calibration spots.
+
+All input values are multiplied by the \fBscale\fR parameter to convert
+them to densities. The value of \fBscale\fR is not critical to the
+reductions; it is provided so that \fIspotlist\fR output can be in the
+familiar units of density. The default value of \fBscale\fR is correct
+for NOAO PDS data written to a PDS format tape. If a FITS format tape was
+written, \fBscale\fR = 0.0001893. These values are appropriate for the PDS
+with its new 15-bit logarithmic A to D converter. The value of \fBscale\fR
+used is also entered in the database.
+
+Parameter \fBmaxad\fR is the integer input value that represents a
+saturated pixel. This value is used by \fIspotlist\fR to accurately
+calculate the density of a saturated pixel, which is then entered in the
+database. This value of "maxden" will later be used by task \fIhdfit\fR
+to normalize the independent variable vector, and by task \fIhdtoi\fR to
+scale the intensity range precisely to a user specified value.
+
+A fog level is calculated from image \fBfogs\fR and entered into
+the database file.  If more than one image is given for \fBfogs\fR,
+a single fog value is calculated from all the fog pixels; the fog images
+to be averaged should be the same size.  The fog level is only
+calculated here, not subtracted.
+
+The \fBspots\fR files are assumed to be ordered such that they are either
+monotonically increasing or decreasing in density, with no omitted spots
+between the first and last measured.  The density can be
+calculated in two ways; the algorithm used is selected by the \fBoption\fR
+parameter.  The density is either the mean spot value after pixels more
+than the specified number of sigma from the mean value have been rejected,
+or the median value of all the spot pixels.
+.ih
+EXAMPLES
+Calculate mean densities of calibration spots which had previously been
+read in from a FITS format tape. The database "db1" will be created.
+
+.nf
+ cl> spotlist spots* fogspot db1 scale=1.893e-4
+.fi
+.ih
+SEE ALSO
+dematch, hdfit, hdtoi
+.endhelp
diff --git a/noao/imred/dtoi/dtoi.cl b/noao/imred/dtoi/dtoi.cl
new file mode 100644
index 00000000..3dd6c42b
--- /dev/null
+++ b/noao/imred/dtoi/dtoi.cl
@@ -0,0 +1,16 @@
+#{
+# DTOI package -- Density to Intensity Transformation Package. (Feb87)
+
+set dtoi = "iraf$noao/imred/dtoi/"
+
+package dtoi
+
+task dematch,
+ hdfit,
+ hdtoi,
+ hdshift,
+ selftest,
+ spotlist = "dtoi$x_dtoi.e"
+
+
+clbye()
diff --git a/noao/imred/dtoi/dtoi.hd b/noao/imred/dtoi/dtoi.hd
new file mode 100644
index 00000000..af554ac2
--- /dev/null
+++ b/noao/imred/dtoi/dtoi.hd
@@ -0,0 +1,11 @@
+# Help directory for the dtoi package.
+
+$doc = "iraf$noao/imred/dtoi/doc/"
+
+dematch hlp=doc$dematch.hlp, src=dematch.x
+hdfit hlp=doc$hdfit.hlp, src=hdfit.x
+hdtoi hlp=doc$hdtoi.hlp, src=hdtoi.x
+hdshift hlp=doc$hdshift.hlp, src=hdshift.x
+spotlist hlp=doc$spotlist.hlp, src=spotlist.x
+selftest hlp=doc$selftest.hlp, src=selftest.x
+revisions sys=Revisions
diff --git a/noao/imred/dtoi/dtoi.men b/noao/imred/dtoi/dtoi.men
new file mode 100644
index 00000000..f22d92eb
--- /dev/null
+++ b/noao/imred/dtoi/dtoi.men
@@ -0,0 +1,6 @@
+ dematch - Match a list of density values to exposure values
+ hdfit - Fit a curve to density, log exposure values
+ hdshift - Align related HD curves
+ hdtoi - Apply DTOI transformation to density image
+ selftest - Self test program to check DTOI transformation
+ spotlist - Generate a list of calibration spot values
diff --git a/noao/imred/dtoi/dtoi.par b/noao/imred/dtoi/dtoi.par
new file mode 100644
index 00000000..494aa73c
--- /dev/null
+++ b/noao/imred/dtoi/dtoi.par
@@ -0,0 +1,2 @@
+version,s,h,"February 13, 1987"
+mode,s,h,ql
diff --git a/noao/imred/dtoi/hd_aravr.x b/noao/imred/dtoi/hd_aravr.x
new file mode 100644
index 00000000..3376264b
--- /dev/null
+++ b/noao/imred/dtoi/hd_aravr.x
@@ -0,0 +1,50 @@
+define MAX_ITERATIONS 10
+include <mach.h>
+
+# HD_ARAV -- Compute the mean and standard deviation of a sample array by
+# iteratively rejecting points further than KSIG from the mean. If the
+# value of KSIG is given as 0.0, a cutoff value will be automatically
+# calculated from the standard deviation and number of points in the sample.
+# The number of pixels remaining in the sample upon termination is returned
+# as the function value.
+#
+# A max_iterations parameter was added to prevent the rejection scheme
+# from oscillating endlessly for nearly saturated pixels. This is the
+# only difference between the vops procedure and hd_aravr. (ShJ 5/88)
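+#
+# When ksig = 0.0 the cutoff used below is the polynomial approximation to
+# Chauvenet's criterion,
+#
+#     k(n) = 0.8895 + 1.1695 * log10(n) - 0.1042 * log10(n)**2
+#
+# and the rejection window is  mean +/- k(n) * sigma.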
+
+int procedure hd_aravr (a, npix, mean, sigma, ksig)
+
+real a[ARB] # input data array
+real mean, sigma, ksig, deviation, lcut, hcut, lgpx
+int npix, niter, ngpix, old_ngpix, awvgr()
+
+begin
+ lcut = -MAX_REAL # no rejection to start
+ hcut = MAX_REAL
+ ngpix = 0
+ niter = 0
+
+ # Iteratively compute mean, sigma and reject outliers until no
+ # more pixels are rejected, or until there are no more pixels,
+ # or until the maximum iterations limit is exceeded.
+
+ repeat {
+ niter = niter + 1
+ old_ngpix = ngpix
+ ngpix = awvgr (a, npix, mean, sigma, lcut, hcut)
+ if (ngpix <= 1)
+ break
+
+ if (ksig == 0.0) { # Chauvenet's relation
+ lgpx = log10 (real(ngpix))
+ deviation = (lgpx * (-0.1042 * lgpx + 1.1695) + .8895) * sigma
+ } else
+ deviation = sigma * abs(ksig)
+
+ lcut = mean - deviation # compute window
+ hcut = mean + deviation
+
+ } until ((old_ngpix == ngpix) || (niter > MAX_ITERATIONS))
+
+ return (ngpix)
+end
diff --git a/noao/imred/dtoi/hdfit.par b/noao/imred/dtoi/hdfit.par
new file mode 100644
index 00000000..7f42aa2a
--- /dev/null
+++ b/noao/imred/dtoi/hdfit.par
@@ -0,0 +1,9 @@
+# Cl parameters for task hdfit are:
+database,f,a,,,,List of database names
+function,s,h,"power",,,Fitting function
+transform,s,h,"logopacitance",,,Independent variable transformation
+order,i,h,4,,,Initial order (ncoeff) of fit
+interactive,b,h,y,,,Interactive fitting flag
+device,s,h,"stdgraph",,,Name of interactive graphics device
+weighting,s,h,"none",,,Type of weighting
+cursor,*gcur,h,"",,,Source of cursor input
diff --git a/noao/imred/dtoi/hdfit.x b/noao/imred/dtoi/hdfit.x
new file mode 100644
index 00000000..04a82f6e
--- /dev/null
+++ b/noao/imred/dtoi/hdfit.x
@@ -0,0 +1,364 @@
+include <math/curfit.h>
+include <imhdr.h>
+include <fset.h>
+include <mach.h>
+include <ctype.h>
+include <error.h>
+include <pkg/gtools.h>
+include <pkg/xtanswer.h>
+include "hdicfit/hdicfit.h"
+
+# T_HDFIT -- Fit a curve. This task fits a characteristic
+# curve to density and log exposure data read from an input
+# database. The interactive curve fitting package is used.
+# The database is updated to contain the values of the fit
+# necessary to reinitialize the curfit package for performing
+# the transformation in hdtoi.
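+#
+# A typical invocation, using the parameter names and defaults from
+# hdfit.par (a sketch for illustration only, not taken from the task
+# documentation), might be:
+#
+#     cl> hdfit db1 function=power transform=logopacitance order=4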
+
+procedure t_hdfit ()
+
+pointer sp, fcn, device, db, gt, exp, wts, save, trans, weight
+pointer dbfile, ic, den, errs
+int db_list, order, interactive, nsave, nvals, wt_type, update
+real ref_fog, real_fog
+double fog, maxden
+
+pointer ddb_map(), gt_init()
+bool clgetb(), fp_equalr(), fp_equald()
+int clpopni(), clgeti(), strncmp(), clgfil(), hd_fit()
+real ic_getr()
+
+begin
+ call smark (sp)
+ call salloc (fcn, SZ_FNAME, TY_CHAR)
+ call salloc (device, SZ_FNAME, TY_CHAR)
+ call salloc (trans, SZ_FNAME, TY_CHAR)
+ call salloc (weight, SZ_FNAME, TY_CHAR)
+ call salloc (dbfile, SZ_FNAME, TY_CHAR)
+
+ # Get cl parameters
+ db_list = clpopni ("database")
+ call clgstr ("function", Memc[fcn], SZ_FNAME)
+ call clgstr ("transform", Memc[trans], SZ_FNAME)
+ order = clgeti ("order")
+
+ # Decide which type of weighting the user wants
+ call clgstr ("weighting", Memc[weight], SZ_FNAME)
+ if (strncmp (Memc[weight], "none", 1) == 0)
+ wt_type = WT_NONE
+ else if (strncmp (Memc[weight], "user", 1) == 0)
+ wt_type = WT_USER
+ else if (strncmp (Memc[weight], "calc", 1) == 0)
+ wt_type = WT_CALC
+ else
+ call error (0, "Unrecognized weighting type")
+
+ # Initialize interactive curve fitting package.
+ gt = gt_init ()
+ call ic_open (ic)
+ call ic_pstr (ic, "function", Memc[fcn])
+ call ic_pstr (ic, "transform", Memc[trans])
+ call ic_puti (ic, "order", order)
+ call ic_pstr (ic, "ylabel", "Log Exposure")
+ call ic_pkey (ic, 5, 'y', 'u')
+
+ if (clgetb ("interactive")) {
+ interactive = YES
+ call clgstr ("device", Memc[device], SZ_FNAME)
+ } else {
+ interactive = NO
+ call strcpy ("", Memc[device], SZ_FNAME)
+ }
+
+ # Read information from each dlog file; accumulate number of values.
+ # The density (not fog subtracted) is returned. The density values
+ # are also sorted in increasing order.
+
+ call hd_rdloge (db_list, exp, den, wts, errs, nvals, fog, maxden,
+ wt_type)
+ if (nvals == 0)
+ call error (1, "T_HDFIT: No data values in sample")
+
+ call hd_sdloge (Memd[den], Memd[exp], Memd[wts], Memd[errs], nvals)
+ call ic_putr (ic, "fog", real(fog))
+ ref_fog = real (fog)
+ call ic_putr (ic, "rfog", ref_fog)
+
+ # Initialize the dtoi/icgfit interface.
+ if (fp_equald (maxden, 0.0D0))
+ call error (1, "Saturated pixel density not initialized")
+ call hdic_init (Memd[den], nvals, maxden)
+
+ update = hd_fit (ic, gt, Memd[den], Memd[exp], Memd[wts], Memd[errs],
+ nvals, save, nsave, Memc[device], interactive)
+
+ if (update == YES) {
+ # Record fit information in (each) database
+ call ic_gstr (ic, "function", Memc[fcn], SZ_FNAME)
+ call ic_gstr (ic, "transform", Memc[trans], SZ_FNAME)
+ real_fog = ic_getr (ic, "fog")
+
+ while (clgfil (db_list, Memc[dbfile], SZ_FNAME) != EOF) {
+ db = ddb_map (Memc[dbfile], APPEND)
+ call ddb_ptime (db)
+ # Add new fog record if it was changed interactively.
+ if (!fp_equalr (real_fog, ref_fog)) {
+ call ddb_prec (db, "fog")
+ call ddb_putr (db, "density", real_fog)
+ }
+
+ call ddb_prec (db, "cv")
+ call ddb_pad (db, "save", Memd[save], nsave)
+ call ddb_pstr (db, "function", Memc[fcn])
+ call ddb_pstr (db, "transformation", Memc[trans])
+
+ call ddb_unmap (db)
+ }
+ }
+
+ call ic_closed (ic)
+ call mfree (save, TY_DOUBLE)
+ call mfree (den, TY_DOUBLE)
+ call mfree (exp, TY_DOUBLE)
+ call mfree (wts , TY_DOUBLE)
+ call mfree (errs, TY_DOUBLE)
+
+ call gt_free (gt)
+ call clpcls (db_list)
+ call sfree (sp)
+end
+
+
+# HD_RDLOGE -- Read log exposure, density, weight and standard deviation
+# information from each dloge database in the input list. Pointers to the
+# four arrays are returned as arguments. The number of values accumulated
+# is also returned; note that the value of nvals is changed upon return, so
+# it should not be given as a constant. If more than one database is being
+# read (as in HDSHIFT applications), the density ABOVE fog is returned and
+# the returned fog value is set to 0.0. The maximum density, the density of
+# a saturated pixel, is read from the first database and returned.
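+#
+# Records read from each database: "fog" (field "density"), "density"
+# (field "den_val"), "exposure" (field "log_exp"), "common" (field
+# "maxden", optional), "standard deviation" (field "sdev_val", optional)
+# and "weight" (field "wts_val", used only with user weighting).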
+
+procedure hd_rdloge (db_list, exp, den, wts, errs, nvals, fog, maxden, wt_type)
+
+int db_list # File descriptor for data base file
+pointer exp # Pointer to exposure array - returned
+pointer den # Pointer to density array - returned
+pointer wts # Pointer to weights array - returned
+pointer errs # Pointer to std deviation array - returned
+int nvals # Number of data pairs read - returned
+double fog # Value of fog read from database - returned
+double maxden # Maximum density, read from db - returned
+int wt_type # Type of weighting
+
+pointer db
+bool sdevrec
+int buflen, off, rec
+int nden, nexp, nwts, nspots, nerrs, nfiles
+char dloge[SZ_FNAME]
+
+pointer ddb_map()
+bool fp_equald()
+int ddb_locate(), ddb_geti(), clgfil(), imtlen()
+real ddb_getr()
+errchk ddb_locate, ddb_gad, malloc, ddb_map, ddb_unmap
+
+begin
+ nvals = 0
+ off = 0
+ buflen = NSPOTS
+ nfiles = imtlen (db_list)
+ maxden = 0.0D0
+
+ # Dynamically allocate memory for arrays; it can be increased later.
+ call malloc (exp, buflen, TY_DOUBLE)
+ call malloc (den, buflen, TY_DOUBLE)
+ call malloc (wts, buflen, TY_DOUBLE)
+ call malloc (errs, buflen, TY_DOUBLE)
+
+ while (clgfil (db_list, dloge, SZ_FNAME) != EOF) {
+ iferr (db = ddb_map (dloge, READ_ONLY)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ # Get fog value to be subtracted from density
+ rec = ddb_locate (db, "fog")
+ fog = double (ddb_getr (db, rec, "density"))
+
+ # Get density array
+ rec = ddb_locate (db, "density")
+ nden = ddb_geti (db, rec, "den_val")
+
+ call ddb_gad (db, rec, "den_val", Memd[den+off], nden, nden)
+ if (nfiles > 1)
+ call asubkd (Memd[den+off], fog, Memd[den+off], nden)
+
+ # Get log exposure array
+ rec = ddb_locate (db, "exposure")
+ nexp = ddb_geti (db, rec, "log_exp")
+ call ddb_gad (db, rec, "log_exp", Memd[exp+off], nexp, nexp)
+
+ # Get saturated pixel density if not already set
+ if (fp_equald (maxden, 0.0D0)) {
+ iferr (rec = ddb_locate (db, "common")){
+ ;
+ } else
+ maxden = double (ddb_getr (db, rec, "maxden"))
+ }
+
+ # Get std deviation array
+ sdevrec = true
+ iferr {
+ rec = ddb_locate (db, "standard deviation")
+ nerrs = ddb_geti (db, rec, "sdev_val")
+ call ddb_gad (db, rec, "sdev_val", Memd[errs+off], nerrs, nerrs)
+ } then {
+ call erract (EA_WARN)
+ call eprintf ("Marker type '[ve]bar' can only show weights\n")
+ call amovkd (0.0D0, Memd[errs+off], nden)
+ sdevrec = FALSE
+ }
+
+ if (wt_type == WT_CALC) {
+ if (sdevrec) {
+ iferr {
+ nspots = min (nden, nexp, nerrs)
+ call adivd (Memd[den+off], Memd[errs+off],
+ Memd[wts+off], nspots)
+ } then {
+ call erract (EA_WARN)
+ call eprintf ("All weights set to 1.0\n")
+ call amovkd (double (1.0), Memd[wts+off], nspots)
+ }
+ } else {
+ nspots = min (nden, nexp)
+ call eprintf ("No sdev record; All weights set to 1.0\n")
+ call amovkd (double (1.0), Memd[wts+off], nspots)
+ }
+ }
+
+ if (wt_type == WT_USER) {
+ # WT_USER: fill "user" weights array
+ iferr {
+ rec = ddb_locate (db, "weight")
+ nwts = ddb_geti (db, rec, "wts_val")
+ nspots = min (nden, nexp, nwts)
+ call ddb_gad (db, rec, "wts_val", Memd[wts+off], nwts, nwts)
+ } then {
+ # Users weights can't be found. Set weights array to 1.0's.
+ call erract (EA_WARN)
+ call eprintf ("All weights set to 1.0\n")
+ nspots = min (nden, nexp)
+ call amovkd (double (1.0), Memd[wts+off], nspots)
+ }
+ }
+
+ if (wt_type == WT_NONE) {
+ # WT_NONE: fill "none" weights array
+ nspots = min (nden, nexp)
+ call amovkd (double (1.0), Memd[wts+off], nspots)
+ }
+
+ # Increment number of counts; reallocate memory if necessary.
+ nvals = nvals + nspots
+ off = off + nspots
+
+ if (nvals > buflen) {
+ buflen = buflen + NSPOTS
+ call realloc (exp, buflen, TY_DOUBLE)
+ call realloc (den, buflen, TY_DOUBLE)
+ call realloc (wts, buflen, TY_DOUBLE)
+ call realloc (errs, buflen, TY_DOUBLE)
+ }
+
+ call ddb_unmap (db)
+ }
+
+ if (nfiles > 1)
+ fog = 0.0D0
+ call clprew (db_list)
+end
+
+
+# HD_SDLOGE -- Sort the log exposure, density, weight and standard deviation
+# information in order of increasing density value. The sorting is done in
+# place. The data arrays are assumed matched on input, that is, exposure[i]
+# matches density[i] with weight[i] and errors[i] for all array entries.
+
+procedure hd_sdloge (density, exposure, weights, errors, nvals)
+
+double density[nvals] # Density array
+double exposure[nvals] # Exposure array
+double weights[nvals] # Weights array
+double errors[nvals] # Standard deviation array
+int nvals # Number of values in data arrays
+
+int i, j
+double temp
+define swap {temp=$1;$1=$2;$2=temp}
+
+begin
+ # Bubble sort - inefficient, but sorting is done only once on
+ # an expected small sample size (16 pts typically).
+
+ for (i = nvals; i > 1; i = i - 1)
+ for (j = 1; j < i; j = j + 1)
+ if (density [j] > density [j+1]) {
+
+ # Out of order; exchange values
+ swap (exposure[j], exposure[j+1])
+ swap ( density[j], density[j+1])
+ swap ( weights[j], weights[j+1])
+ swap ( errors[j], errors[j+1])
+ }
+end
+
+
+# HD_FIT -- Fit the curve to input density, exposure and weight values.
+# The fit can be performed interactively or not.
+
+int procedure hd_fit (ic,
+ gt, den, exp, wts, errs, nvals, save, nsave, dev, interact)
+
+pointer ic
+pointer gt # Graphics tools pointer
+double den[ARB] # Density values
+double exp[ARB] # Exposure values
+double wts[ARB] # Weight array
+double errs[ARB] # Standard deviation array
+int nvals # Number of data pairs to fit
+pointer	save			# Pointer to curfit save buffer - returned
+int	nsave			# Length of save buffer - returned
+char dev[SZ_FNAME] # Interactive graphics device
+int interact # Flag for interactive graphics
+
+pointer gp, cv, sp, x, dum
+pointer gopen()
+int update, dcvstati()
+errchk malloc, gopen
+
+begin
+ if (interact == YES) {
+ gp = gopen (dev, NEW_FILE, STDGRAPH)
+ call icg_fitd (ic, gp, "cursor", gt, cv, den, exp, wts, errs, nvals)
+ call gclose (gp)
+ update = IC_UPDATE(ic)
+
+ } else {
+ # Do fit non-interactively
+ call smark (sp)
+ call salloc (x, nvals, TY_DOUBLE)
+ call salloc (dum, nvals, TY_INT)
+ call hdic_transform (ic, den, wts, Memd[x], wts, Memi[dum], nvals)
+ call ic_fitd (ic, cv, Memd[x], exp, wts, nvals, YES, YES, YES, YES)
+ call sfree (sp)
+ update = YES
+ }
+
+ nsave = (dcvstati (cv, CVORDER)) + 7
+ call malloc (save, nsave, TY_DOUBLE)
+ call dcvsave (cv, Memd[save])
+ call dcvfree (cv)
+
+ return (update)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdic.com b/noao/imred/dtoi/hdicfit/hdic.com
new file mode 100644
index 00000000..959df10e
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdic.com
@@ -0,0 +1,6 @@
+# Common block for the dtoi package / icgfit package interface.
+
+int nraw
+double maxden
+pointer den, big_den
+common /raw/maxden, den, big_den, nraw
diff --git a/noao/imred/dtoi/hdicfit/hdicadd.x b/noao/imred/dtoi/hdicfit/hdicadd.x
new file mode 100644
index 00000000..1fae152d
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicadd.x
@@ -0,0 +1,47 @@
+include "hdicfit.h"
+
+# HDIC_ADDPOINT -- Add a density, exposure, weights point into the sample.
+
+procedure hdic_addpoint (ic,
+ nden, nexp, nwts, den, y, wts, userwts, x, wd, sdev, npts)
+
+pointer ic # Pointer to ic data structure
+real nden # New density value to be added
+real nexp # New exposure value to be added
+double nwts # New weight value to be added
+pointer den # Pointer to existing density array
+pointer y # Pointer to existing exposure array
+pointer wts # Pointer to existing wts array
+pointer userwts # Pointer to existing userwts array
+pointer x # Pointer to existing array of ind variables
+pointer wd # Pointer to flag array for deletion reasons
+pointer sdev # Pointer to standard deviation array
+int npts # Number of points, incremented on output
+
+begin
+ npts = npts + 1
+
+ call realloc (den, npts, TY_DOUBLE)
+ call realloc (y, npts, TY_DOUBLE)
+ call realloc (wts, npts, TY_DOUBLE)
+ call realloc (userwts, npts, TY_DOUBLE)
+ call realloc (x, npts, TY_DOUBLE)
+ call realloc (wd, npts, TY_INT)
+ call realloc (sdev, npts, TY_DOUBLE)
+
+ Memd[den+npts-1] = double (nden)
+ Memd[y +npts-1] = double (nexp)
+ Memd[wts+npts-1] = double (nwts)
+ Memd[userwts+npts-1] = double (nwts)
+ Memi[wd +npts-1] = NDELETE
+ Memd[sdev+npts-1] = ADDED_PT
+
+ # Sort the data and then update the reference vector.
+ call hdic_sort (Memd[den], Memd[y], Memd[wts], Memd[userwts],
+ Memi[wd], Memd[sdev], npts)
+ call hdic_init (Memd[den], npts, Memd[den+npts-1])
+
+ IC_NEWX(ic) = YES
+ IC_NEWY(ic) = YES
+ IC_NEWWTS(ic) = YES
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicclean.x b/noao/imred/dtoi/hdicfit/hdicclean.x
new file mode 100644
index 00000000..6c838123
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicclean.x
@@ -0,0 +1,94 @@
+include <pkg/rg.h>
+include "hdicfit.h"
+
+# IC_CLEAN -- Replace rejected points by the fitted values.
+
+procedure ic_cleand (ic, cv, x, y, w, npts)
+
+pointer ic # ICFIT pointer
+pointer cv # Curfit pointer
+double x[ARB] # Ordinates
+double y[ARB] # Abscissas
+double w[ARB] # Weights
+int npts # Number of points
+
+int i, nclean, newreject
+pointer sp, xclean, yclean, wclean
+double dcveval()
+
+begin
+ if ((IC_LOW(ic) == 0.) && (IC_HIGH(ic) == 0.))
+ return
+
+ # If there has been no subsampling and no sample averaging then the
+ # IC_REJPTS(ic) array already contains the rejected points.
+
+ if (npts == IC_NFIT(ic)) {
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ y[i] = dcveval (cv, x[i])
+ }
+ }
+
+ # If there has been no sample averaging then the rejpts array already
+ # contains indices into the subsampled array.
+
+ } else if (abs(IC_NAVERAGE(ic)) == 1) {
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[IC_YFIT(ic)+i-1] =
+ dcveval (cv, Memd[IC_XFIT(ic)+i-1])
+ }
+ }
+ call rg_unpackd (IC_RG(ic), Memd[IC_YFIT(ic)], y)
+
+ # Because ic_fit rejects points from the fitting data which
+ # has been sample averaged the rejpts array refers to the wrong data.
+ # Do the cleaning using ic_deviant to find the points to reject.
+
+ } else if (RG_NPTS(IC_RG(ic)) == npts) {
+ call amovki (NO, Memi[IC_REJPTS(ic)], npts)
+ call ic_deviantd (cv, x, y, w, Memi[IC_REJPTS(ic)], npts,
+ IC_LOW(ic), IC_HIGH(ic), IC_GROW(ic), NO, IC_NREJECT(ic),
+ newreject)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ y[i] = dcveval (cv, x[i])
+ }
+ }
+
+ # If there is subsampling then allocate temporary arrays for the
+ # subsample points.
+
+ } else {
+ call smark (sp)
+
+ nclean = RG_NPTS(IC_RG(ic))
+ call salloc (xclean, nclean, TY_DOUBLE)
+ call salloc (yclean, nclean, TY_DOUBLE)
+ call salloc (wclean, nclean, TY_DOUBLE)
+
+ call rg_packd (IC_RG(ic), x, Memd[xclean])
+ call rg_packd (IC_RG(ic), y, Memd[yclean])
+ call rg_packd (IC_RG(ic), w, Memd[wclean])
+ call amovki (NO, Memi[IC_REJPTS(ic)], npts)
+
+ call ic_deviantd (cv, Memd[xclean], Memd[yclean],
+ Memd[wclean], Memi[IC_REJPTS(ic)], nclean, IC_LOW(ic),
+ IC_HIGH(ic), IC_GROW(ic), NO, IC_NREJECT(ic), newreject)
+
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[yclean+i-1] = dcveval (cv, Memd[xclean+i-1])
+ }
+ }
+ call rg_unpackd (IC_RG(ic), Memd[yclean], y)
+
+ call sfree (sp)
+ }
+end
+
diff --git a/noao/imred/dtoi/hdicfit/hdicdeviant.x b/noao/imred/dtoi/hdicfit/hdicdeviant.x
new file mode 100644
index 00000000..0c6f8f75
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicdeviant.x
@@ -0,0 +1,116 @@
+include <mach.h>
+include <math/curfit.h>
+
+# IC_DEVIANT -- Find deviant points with large residuals from the fit
+# and reject from the fit.
+#
+# The sigma of the fit residuals is calculated. The rejection thresholds
+# are set at +-reject*sigma. Points outside the rejection threshold are
+# recorded in the reject array.
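+# If fewer than 5 good points are available to compute the sigma the
+# routine returns without rejecting anything. With region growing
+# (grow > 0), points within a distance "grow" in x of a rejected point
+# are also rejected.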
+
+procedure ic_deviantd (cv, x, y, w, rejpts, npts, low_reject, high_reject,
+ grow, refit, nreject, newreject)
+
+pointer cv # Curve descriptor
+double x[ARB] # Input ordinates
+double y[ARB] # Input data values
+double w[ARB] # Weights
+int rejpts[ARB] # Points rejected
+int npts # Number of input points
+real low_reject, high_reject # Rejection thresholds
+real grow # Rejection radius
+int refit # Refit the curve?
+int nreject # Number of points rejected
+int newreject # Number of new points rejected
+
+int i, j, i_min, i_max, ilast
+double sigma, low_cut, high_cut, residual
+pointer sp, residuals
+
+begin
+ # If low_reject and high_reject are zero then simply return.
+ if ((low_reject == 0.) && (high_reject == 0.))
+ return
+
+ # Allocate memory for the residuals.
+ call smark (sp)
+ call salloc (residuals, npts, TY_DOUBLE)
+
+ # Compute the residuals.
+ call dcvvector (cv, x, Memd[residuals], npts)
+ call asubd (y, Memd[residuals], Memd[residuals], npts)
+
+ # Compute the sigma of the residuals. If there are less than
+ # 5 points return.
+
+ j = 0
+ nreject = 0
+ sigma = 0.
+
+ do i = 1, npts {
+ if ((w[i] != 0.) && (rejpts[i] == NO)) {
+ sigma = sigma + Memd[residuals+i-1] ** 2
+ j = j + 1
+ } else if (rejpts[i] == YES)
+ nreject = nreject + 1
+ }
+
+ if (j < 5) {
+ call sfree (sp)
+ return
+ } else
+ sigma = sqrt (sigma / j)
+
+ if (low_reject > 0.)
+ low_cut = -low_reject * sigma
+ else
+ low_cut = -MAX_REAL
+ if (high_reject > 0.)
+ high_cut = high_reject * sigma
+ else
+ high_cut = MAX_REAL
+
+ # Reject the residuals exceeding the rejection limits.
+ # A for loop is used instead of do because with region growing we
+ # want to modify the loop index.
+
+ newreject = 0
+ for (i = 1; i <= npts; i = i + 1) {
+ if ((w[i] == 0.) || (rejpts[i] == YES))
+ next
+
+ residual = Memd[residuals + i - 1]
+ if ((residual > high_cut) || (residual < low_cut)) {
+ i_min = max (1, int (i - grow))
+ i_max = min (npts, int (i + grow))
+
+ # Reject points from the fit and flag them.
+ do j = i_min, i_max {
+ if ((abs (x[i] - x[j]) <= grow) && (w[j] != 0.) &&
+ (rejpts[j] == NO)) {
+ if (refit == YES)
+ call dcvrject (cv, x[j], y[j], w[j])
+ rejpts[j] = YES
+ newreject = newreject + 1
+ ilast = j
+ }
+ }
+ i = ilast
+ }
+ }
+
+ if ((refit == YES) && (newreject > 0)) {
+ call dcvsolve (cv, i)
+
+ switch (i) {
+ case SINGULAR:
+ call eprintf ("ic_reject: Singular solution\n")
+ case NO_DEG_FREEDOM:
+ call eprintf ("ic_reject: No degrees of freedom\n")
+ }
+ }
+
+ nreject = nreject + newreject
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicdosetup.x b/noao/imred/dtoi/hdicfit/hdicdosetup.x
new file mode 100644
index 00000000..c9e117ec
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicdosetup.x
@@ -0,0 +1,104 @@
+include <math/curfit.h>
+include "hdicfit.h"
+
+# IC_DOSETUP -- Setup the fit. This is called at the start of each call
+# to ic_fit to update the fitting parameters if necessary.
+
+procedure ic_dosetupd (ic, cv, x, wts, npts, newx, newwts, newfunction, refit)
+
+pointer ic # ICFIT pointer
+pointer cv # Curfit pointer
+double x[ARB] # Ordinates of data
+double wts[ARB] # Weights
+int npts # Number of points in data
+int newx # New x points?
+int newwts # New weights?
+int newfunction # New function?
+int refit # Use cvrefit?
+
+int ord
+pointer rg_xrangesd()
+extern hd_powerd()
+errchk rg_xrangesd, malloc
+
+begin
+ # Set sample points.
+ if ((newx == YES) || (newwts == YES)) {
+ if (npts == 0)
+ call error (0, "No data points for fit")
+
+ call mfree (IC_XFIT(ic), TY_DOUBLE)
+ call mfree (IC_YFIT(ic), TY_DOUBLE)
+ call malloc (IC_XFIT(ic), npts, TY_DOUBLE)
+
+ call mfree (IC_WTSFIT(ic), TY_DOUBLE)
+ call malloc (IC_WTSFIT(ic), npts, TY_DOUBLE)
+
+ call mfree (IC_REJPTS(ic), TY_INT)
+ call malloc (IC_REJPTS(ic), npts, TY_INT)
+ call amovki (NO, Memi[IC_REJPTS(ic)], npts)
+ IC_NREJECT(ic) = 0
+
+ # Set sample points.
+
+ call rg_free (IC_RG(ic))
+ IC_RG(ic) = rg_xrangesd (Memc[IC_SAMPLE(ic)], x, npts)
+ call rg_order (IC_RG(ic))
+ call rg_merge (IC_RG(ic))
+ call rg_wtbind (IC_RG(ic), abs (IC_NAVERAGE(ic)), x, wts, npts,
+ Memd[IC_XFIT(ic)], Memd[IC_WTSFIT(ic)], IC_NFIT(ic))
+
+ if (IC_NFIT(ic) == 0)
+ call error (0, "No sample points for fit")
+
+ if (IC_NFIT(ic) == npts) {
+ call rg_free (IC_RG(ic))
+ call mfree (IC_XFIT(ic), TY_DOUBLE)
+ call mfree (IC_WTSFIT(ic), TY_DOUBLE)
+ IC_YFIT(ic) = NULL
+ IC_WTSFIT(ic) = NULL
+ } else
+ call malloc (IC_YFIT(ic), IC_NFIT(ic), TY_DOUBLE)
+
+ refit = NO
+ }
+
+ # Set curve fitting parameters.
+
+ if ((newx == YES) || (newfunction == YES)) {
+ if (cv != NULL)
+ call dcvfree (cv)
+
+ switch (IC_FUNCTION(ic)) {
+ case LEGENDRE, CHEBYSHEV:
+ ord = min (IC_ORDER(ic), IC_NFIT(ic))
+ call dcvinit (cv, IC_FUNCTION(ic), ord, double (IC_XMIN(ic)),
+ double (IC_XMAX(ic)))
+ case SPLINE1:
+ ord = min (IC_ORDER(ic), IC_NFIT(ic) - 1)
+ if (ord > 0)
+ call dcvinit (cv, SPLINE1, ord, double (IC_XMIN(ic)),
+ double (IC_XMAX(ic)))
+ else
+ call dcvinit (cv, LEGENDRE, IC_NFIT(ic),
+ double (IC_XMIN(ic)), double (IC_XMAX(ic)))
+ case SPLINE3:
+ ord = min (IC_ORDER(ic), IC_NFIT(ic) - 3)
+ if (ord > 0)
+ call dcvinit (cv, SPLINE3, ord, double (IC_XMIN(ic)),
+ double (IC_XMAX(ic)))
+ else
+ call dcvinit (cv, LEGENDRE, IC_NFIT(ic),
+ double (IC_XMIN(ic)), double (IC_XMAX(ic)))
+ case USERFNC:
+ ord = min (IC_ORDER(ic), IC_NFIT(ic))
+ call dcvinit (cv, USERFNC, ord, double (IC_XMIN(ic)),
+ double (IC_XMAX(ic)))
+ call dcvuserfnc (cv, hd_powerd)
+ default:
+ call error (0, "Unknown fitting function")
+ }
+
+ refit = NO
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicebars.x b/noao/imred/dtoi/hdicfit/hdicebars.x
new file mode 100644
index 00000000..335c161d
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicebars.x
@@ -0,0 +1,217 @@
+include <gset.h>
+include <mach.h>
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+define MSIZE 2.0
+
+# HDIC_EBARS -- Plot data points with their error bars as markers. The
+# independent variable is fog subtracted transformed density.
+
+procedure hdic_ebars (ic, gp, gt, x, y, wts, ebw, npts)
+
+pointer ic # Pointer to ic structure
+pointer gp # Pointer to graphics stream
+pointer gt # Pointer to gtools structure
+double x[ARB] # Array of independent variables
+double y[ARB] # Array of dependent variables
+double wts[ARB] # Array of weights
+double ebw[ARB] # Error bar half width (positive number)
+int npts # Number of points
+
+pointer sp, xr, yr, sz
+int orig_mark, i, xaxis, yaxis, mark, added_mark
+real size, dx, dy, szmk
+int gt_geti()
+bool fp_equald()
+include "hdic.com"
+
+begin
+ call smark (sp)
+
+ xaxis = (IC_AXES (ic, IC_GKEY(ic), 1))
+ yaxis = (IC_AXES (ic, IC_GKEY(ic), 2))
+
+ if (xaxis == 'x' || xaxis == 'u')
+ orig_mark = GM_HEBAR
+ else if (yaxis == 'x' || yaxis == 'u')
+ orig_mark = GM_VEBAR
+ else {
+ call eprintf ("Choose graph type with axes 'u' or 'x'\n")
+ call sfree (sp)
+ return
+ }
+
+ added_mark = GM_CIRCLE
+
+ call salloc (xr, npts, TY_REAL)
+ call salloc (yr, npts, TY_REAL)
+ call salloc (sz, npts, TY_REAL)
+
+ call achtdr (x, Memr[xr], npts)
+ call achtdr (y, Memr[yr], npts)
+ call achtdr (ebw, Memr[sz], npts)
+
+ if (IC_OVERPLOT(ic) == NO) {
+ # Start a new plot
+ call gclear (gp)
+
+ # Set the graph scale and axes
+ call gascale (gp, Memr[xr], npts, 1)
+ call gascale (gp, Memr[yr], npts, 2)
+
+ # If plotting HD curve, set wy2 to maxden, which may have
+ # been updated if a new endpoint was added.
+
+ if ((IC_AXES (ic, IC_GKEY(ic), 1) == 'y') &&
+ (IC_AXES (ic, IC_GKEY(ic), 2) == 'u'))
+ call gswind (gp, INDEF, INDEF, INDEF, real (maxden))
+
+ call gt_swind (gp, gt)
+ call gt_labax (gp, gt)
+ }
+
+ call ggscale (gp, 0.0, 0.0, dx, dy)
+
+ do i = 1, npts {
+ size = Memr[sz+i-1] # Sizes are WCS units; transform them
+ if (gt_geti (gt, GTTRANSPOSE) == NO) {
+ size = size / dx
+ mark = orig_mark
+ szmk = size
+ # Check for added point
+ if (fp_equald (ebw[i], ADDED_PT)) {
+ szmk = MSIZE
+ mark = added_mark
+ }
+
+ # Check for deleted point
+ if (fp_equald (wts[i], 0.0D0)) {
+ szmk = MSIZE
+ mark = mark + GM_CROSS
+ }
+
+ call gmark (gp, Memr[xr+i-1], Memr[yr+i-1], mark, szmk, szmk)
+
+ } else {
+ size = size / dy
+ szmk = size
+ mark = orig_mark
+
+ # Check for added point
+ if (fp_equald (ebw[i], ADDED_PT)) {
+ szmk = MSIZE
+ mark = added_mark
+ }
+
+ # Check for deleted point
+ if (fp_equald (wts[i], 0.0D0)) {
+ szmk = MSIZE
+ mark = mark + GM_CROSS
+ }
+
+ call gmark (gp, Memr[yr+i-1], Memr[xr+i-1], mark, szmk, szmk)
+ }
+ }
+
+ IC_OVERPLOT(ic) = NO
+ call sfree (sp)
+end
+
+
+# HDIC_EBW -- Calculate the error bar half width for plotting points. The
+# widths are returned in WCS (data) units; hdic_ebars scales them for
+# plotting.
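+#
+# When one of the graph axes is of type 'u' the standard deviations are
+# used directly as the half widths; otherwise the half width is
+#
+#     T (den - fog + sdev) - T (den - fog)
+#
+# where T is the transform applied to the independent variable.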
+
+procedure hdic_ebw (ic, density, indv, sdev, ebw, npts)
+
+pointer ic # Pointer to ic structure
+double density[ARB] # Untransformed density NOT fog subtracted
+double indv[ARB] # Transformed density above fog
+double sdev[ARB] # Array of standard deviation values, density units
+double ebw[ARB] # Error bar half width (positive numbers)
+int npts # Number of data points
+
+double fog
+pointer sp, denaf, outwe
+int xaxis, yaxis, i
+bool fp_equald()
+real ic_getr()
+
+begin
+ xaxis = (IC_AXES (ic, IC_GKEY(ic), 1))
+ yaxis = (IC_AXES (ic, IC_GKEY(ic), 2))
+
+ if (xaxis == 'u' || yaxis == 'u') {
+ call amovd (sdev, ebw, npts)
+ return
+ }
+
+ call smark (sp)
+ call salloc (denaf, npts, TY_DOUBLE)
+ call salloc (outwe, npts, TY_DOUBLE)
+
+ fog = double (ic_getr (ic, "fog"))
+
+ call asubkd (density, fog, Memd[denaf], npts)
+ call aaddd (Memd[denaf], sdev, Memd[denaf], npts)
+
+ call hdic_ebtran (Memd[denaf], Memd[outwe], npts, IC_TRANSFORM(ic))
+
+ # Subtract transformed values to get errors. Then check for
+ # added points, which are flagged with ebw=ADDED_PT.
+
+ call asubd (Memd[outwe], indv, ebw, npts)
+ do i = 1, npts {
+ if (fp_equald (sdev[i], ADDED_PT))
+ ebw[i] = ADDED_PT
+ }
+
+ call sfree (sp)
+end
+
+
+# HDIC_EBTRAN -- Apply a transformation, generating a vector of independent
+# variables from a density vector. When called from hdic_ebw the input
+# densities already have the standard deviations added in, so subtracting
+# the transformed densities from the output yields the error bar half
+# widths. Densities of exactly zero are mapped to zero.
+
+procedure hdic_ebtran (density, ind_var, npts, transform)
+
+double density[npts] # Density vector - input
+double ind_var[npts] # Ind variable vector - filled on output
+int npts # Length of data vectors
+int transform # Integer code for transform type
+
+int i
+bool fp_equald()
+
+begin
+ switch (transform) {
+ case HD_LOGO:
+ do i = 1, npts {
+ if (fp_equald (density[i], 0.0))
+ ind_var[i] = 0.0
+ else
+ ind_var[i] = log10 ((10. ** density[i]) - 1.0)
+ }
+ case HD_K75:
+ do i = 1, npts {
+ if (fp_equald (density[i], 0.0))
+ ind_var[i] = 0.0
+ else
+ ind_var[i] = density[i] + 0.75*log10(1.- 10.**(-density[i]))
+ }
+ case HD_K50:
+ do i = 1, npts {
+ if (fp_equald (density[i], 0.0))
+ ind_var[i] = 0.0
+ else
+ ind_var[i] = density[i] + 0.50*log10(1.- 10.**(-density[i]))
+ }
+ case HD_NONE:
+ call amovd (density, ind_var, npts)
+ default:
+ call error (0, "Unrecognized transformation in HDIC_EBTRAN")
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicerrors.x b/noao/imred/dtoi/hdicfit/hdicerrors.x
new file mode 100644
index 00000000..7722e5e2
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicerrors.x
@@ -0,0 +1,143 @@
+include <math/curfit.h>
+include "hdicfit.h"
+
+# IC_ERRORS -- Compute and print error diagnostic information for the fit.
+
+procedure ic_errorsd (ic, file, cv, x, y, wts, npts)
+
+pointer ic # ICFIT pointer
+char file[ARB] # Output file
+pointer cv # Curfit pointer
+double x[ARB] # Ordinates
+double y[ARB] # Abscissas
+double wts[ARB] # Weights
+int npts # Number of data points
+
+int i, n, deleted, ncoeffs, fd
+double chisqr, rms
+pointer sp, fit, wts1, coeffs, errors
+
+int dcvstati(), open()
+double ic_rmsd()
+errchk open()
+
+begin
+ # Open the output file.
+ fd = open (file, APPEND, TEXT_FILE)
+
+ # Determine the number of coefficients and allocate memory.
+ ncoeffs = dcvstati (cv, CVNCOEFF)
+ call smark (sp)
+ call salloc (coeffs, ncoeffs, TY_DOUBLE)
+ call salloc (errors, ncoeffs, TY_DOUBLE)
+
+ if (npts == IC_NFIT(ic)) {
+ # Allocate memory for the fit.
+ n = npts
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (wts, Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Get the coefficients and compute the errors.
+ call dcvvector (cv, x, Memd[fit], n)
+ call dcvcoeff (cv, Memd[coeffs], ncoeffs)
+ call dcverrors (cv, y, Memd[wts1], Memd[fit], n, chisqr,
+ Memd[errors])
+ rms = ic_rmsd (x, y, Memd[fit], Memd[wts1], n)
+
+ } else {
+ # Allocate memory for the fit.
+ n = IC_NFIT(ic)
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (Memd[IC_WTSFIT(ic)], Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Get the coefficients and compute the errors.
+ call dcvvector (cv, Memd[IC_XFIT(ic)], Memd[fit], n)
+ rms = ic_rmsd (Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)],
+ Memd[fit], Memd[wts1], n)
+ call dcvcoeff (cv, Memd[coeffs], ncoeffs)
+ call dcverrors (cv, Memd[IC_YFIT(ic)], Memd[wts1], Memd[fit],
+ n, chisqr, Memd[errors])
+ }
+
+ # Print the error analysis.
+ call fprintf (fd, "total points = %d\nsample points = %d\n")
+ call pargi (npts)
+ call pargi (n)
+ call fprintf (fd, "nrejected = %d\ndeleted = %d\n")
+ call pargi (IC_NREJECT(ic))
+ call pargi (deleted)
+ call fprintf (fd, "RMS = %7.4g\n")
+ call pargd (rms)
+ call fprintf (fd, "square root of reduced chi square = %7.4g\n")
+ call pargd (sqrt (chisqr))
+
+ #call fprintf (fd, "\tcoefficent\terror\n")
+ #do i = 1, ncoeffs {
+ # call fprintf (fd, "\t%10.4e\t%10.4e\n")
+ # call parg$t (Mem$t[coeffs+i-1])
+ # call parg$t (Mem$t[errors+i-1])
+ #}
+
+ # Free allocated memory.
+ call sfree (sp)
+ call close (fd)
+end
+
+
+# IC_RMS -- Compute RMS of points which have not been deleted.
+
+double procedure ic_rmsd (x, y, fit, wts, npts)
+
+double x[ARB] # Ordinates
+double y[ARB] # Abscissas
+double fit[ARB] # Fit
+double wts[ARB] # Weights
+int npts # Number of data points
+
+int i, n
+double resid, rms
+
+begin
+ rms = 0.
+ n = 0
+ do i = 1, npts {
+ if (wts[i] == 0.)
+ next
+ resid = y[i] - fit[i]
+ rms = rms + resid * resid
+ n = n + 1
+ }
+
+ if (n > 0)
+ rms = sqrt (rms / n)
+
+ return (rms)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicfit.h b/noao/imred/dtoi/hdicfit/hdicfit.h
new file mode 100644
index 00000000..89d9c73d
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicfit.h
@@ -0,0 +1,65 @@
+# Definition file for DTOI task hdfit which uses the hdicfit subdirectory.
+
+define NSPOTS 64 # Initially, max number of calibration spots
+define NVALS_FIT 200 # Number of points in vector of fitted points
+define WT_NONE 0 # No weighting used in fit
+define WT_USER 1 # User specifies weighting in fit
+define WT_CALC 2 # Weighting in fit calculated from std dev
+define MIN_DEN EPSILONR # Density used for setting curfit minval
+define HD_NONE 1 # Ind var is density - no transformation
+define HD_LOGO 2 # Ind var is log opacitance
+define HD_K50 3 # Ind var is Kaiser transform w/ alpha=0.50
+define HD_K75 4 # Ind var is Kaiser transform w/ alpha=0.75
+define UDELETE 100 # Point deleted by user flag
+define PDELETE 101 # Point deleted by program
+define NDELETE 102 # Point not deleted
+define ADDED_PT 0.0 # Indication of added point in sdev array
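+
+# The transformations of density D selected by the HD_* codes (see
+# hdic_ebtran) are:
+#
+#     HD_NONE:  D                                 (no transformation)
+#     HD_LOGO:  log10 (10**D - 1)                 (log opacitance)
+#     HD_K50:   D + 0.50 * log10 (1 - 10**-D)
+#     HD_K75:   D + 0.75 * log10 (1 - 10**-D)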
+
+# The ICFIT data structure - modified for use with the DTOI package.
+
+define IC_NGKEYS 5 # Number of graph keys
+define IC_LENSTRUCT 47 # Length of ICFIT structure
+
+# User fitting parameters
+define IC_FUNCTION Memi[$1] # Function type
+define IC_ORDER Memi[$1+1] # Order of function
+define IC_SAMPLE Memi[$1+2] # Pointer to sample string
+define IC_NAVERAGE Memi[$1+3] # Sampling averaging bin
+define	IC_NITERATE	Memi[$1+4]	# Number of rejection iterations
+define IC_TRANSFORM Memi[$1+5] # Type of transformation ** DTOI ONLY **
+define IC_XMIN Memr[P2R($1+6)] # Minimum value for curve
+define IC_XMAX Memr[P2R($1+7)] # Maximum value for curve
+define IC_LOW Memr[P2R($1+8)] # Low rejection value
+define	IC_HIGH		Memr[P2R($1+9)]	# High rejection value
+define IC_GROW Memr[P2R($1+10)]# Rejection growing radius
+
+# ICFIT parameters used for fitting
+define IC_NFIT Memi[$1+11] # Number of fit points
+define IC_NREJECT Memi[$1+12] # Number of rejected points
+define IC_RG Memi[$1+13] # Pointer for ranges
+define IC_XFIT Memi[$1+14] # Pointer to ordinates of fit points
+define IC_YFIT Memi[$1+15] # Pointer to abscissas of fit points
+define IC_WTSFIT Memi[$1+16] # Pointer to weights of fit points
+define IC_REJPTS Memi[$1+17] # Pointer to rejected points
+
+# ICFIT parameters used for interactive graphics
+define IC_NEWX Memi[$1+18] # New x fit points?
+define IC_NEWY Memi[$1+19] # New y points?
+define IC_NEWWTS Memi[$1+20] # New weights?
+define IC_NEWFUNCTION Memi[$1+21] # New fitting function?
+define IC_NEWTRANSFORM Memi[$1+22] # New transform? ** DTOI ONLY **
+define IC_OVERPLOT Memi[$1+23] # Overplot next plot?
+define IC_FITERROR Memi[$1+24] # Error in fit
+define IC_LABELS Memi[$1+25+$2-1]# Graph axis labels
+define IC_UNITS Memi[$1+27+$2-1]# Graph axis units
+
+define IC_FOG Memr[P2R($1+29)]# *** DTOI ONLY *** value of fog level
+define IC_NEWFOG Memi[$1+30] # Flag for change in fog
+define IC_RESET Memi[$1+31] # Flag for resetting variables
+define	IC_UPDATE	Memi[$1+32]	# Flag for updating the database on exit
+define IC_EBARS Memi[$1+33] # Flag for plotting error bars
+define IC_RFOG Memr[P2R($1+34)]# Reference value of fog for resetting
+
+# ICFIT key definitions
+define IC_GKEY Memi[$1+35] # Graph key
+define IC_AXES Memi[$1+36+($2-1)*2+$3-1] # Graph axis codes
diff --git a/noao/imred/dtoi/hdicfit/hdicfit.x b/noao/imred/dtoi/hdicfit/hdicfit.x
new file mode 100644
index 00000000..91e05f9d
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicfit.x
@@ -0,0 +1,80 @@
+include <math/curfit.h>
+include "hdicfit.h"
+
+# IC_FIT -- Fit a function. This is the main fitting task. It uses
+# flags to define changes since the last fit. This allows the most
+# efficient use of the curfit and ranges packages.
+
+procedure ic_fitd (ic, cv, x, y, wts, npts, newx, newy, newwts, newfunction)
+
+pointer ic # ICFIT pointer
+pointer cv # Curfit pointer
+double x[npts] # Ordinates
+double y[npts] # Data to be fit
+double wts[npts] # Weights
+int npts # Number of points
+int newx # New x points?
+int newy # New y points?
+int newwts # New weights?
+int newfunction # New function?
+
+int ier, refit
+errchk ic_dosetupd
+
+begin
+ # Setup the new parameters.
+ call ic_dosetupd (ic, cv, x, wts, npts, newx, newwts, newfunction,
+ refit)
+
+ if (npts == IC_NFIT(ic)) {
+ # If not sampling use the data array directly.
+ if (refit == NO) {
+ call dcvfit (cv, x, y, wts, npts, WTS_USER, ier)
+ } else if (newy == YES)
+ call dcvrefit (cv, x, y, wts, ier)
+
+ } else {
+ # If sampling first form the sample y values.
+ if ((newx == YES) || (newy == YES) || (newwts == YES))
+ call rg_wtbind (IC_RG(ic), IC_NAVERAGE(ic), y, wts, npts,
+ Memd[IC_YFIT(ic)], Memd[IC_WTSFIT(ic)], IC_NFIT(ic))
+ if (refit == NO) {
+ call dcvfit (cv, Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)],
+ Memd[IC_WTSFIT(ic)], IC_NFIT(ic), WTS_USER, ier)
+ } else if (newy == YES)
+ call dcvrefit (cv, Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)],
+ Memd[IC_WTSFIT(ic)], ier)
+ }
+
+ # Check for an error in the fit.
+ switch (ier) {
+ case SINGULAR:
+ call printf ("Singular solution")
+ call flush (STDOUT)
+ case NO_DEG_FREEDOM:
+ call printf ("No degrees of freedom")
+ call flush (STDOUT)
+ return
+ }
+
+ refit = YES
+
+ # Do pixel rejection if desired.
+ if ((IC_LOW(ic) > 0.) || (IC_HIGH(ic) > 0.)) {
+ if (npts == IC_NFIT(ic)) {
+ call ic_rejectd (cv, x, y, wts, Memi[IC_REJPTS(ic)],
+ IC_NFIT(ic), IC_LOW(ic), IC_HIGH(ic), IC_NITERATE(ic),
+ IC_GROW(ic), IC_NREJECT(ic))
+ } else {
+ call ic_rejectd (cv, Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)],
+ Memd[IC_WTSFIT(ic)], Memi[IC_REJPTS(ic)], IC_NFIT(ic),
+ IC_LOW(ic), IC_HIGH(ic), IC_NITERATE(ic), IC_GROW(ic),
+ IC_NREJECT(ic))
+ }
+
+ if (IC_NREJECT(ic) > 0)
+ refit = NO
+ } else
+ IC_NREJECT(ic) = 0
+end
+
diff --git a/noao/imred/dtoi/hdicfit/hdicgaxes.x b/noao/imred/dtoi/hdicfit/hdicgaxes.x
new file mode 100644
index 00000000..4f016c9d
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgaxes.x
@@ -0,0 +1,101 @@
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+# ICG_AXES -- Set axes data.
+# The applications program may set additional axes types.
+
+procedure icg_axesd (ic, gt, cv, axis, x, y, z, npts)
+
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+int axis # Output axis
+double x[npts] # Independent variable
+double y[npts] # Dependent variable
+double z[npts] # Output values
+int npts # Number of points
+
+int i, axistype, gtlabel[2], gtunits[2]
+double a, b, xmin, xmax
+pointer label, units
+
+double dcveval(), icg_dvzd()
+errchk adivd()
+extern icg_dvzd()
+
+data gtlabel/GTXLABEL, GTYLABEL/
+data gtunits/GTXUNITS, GTYUNITS/
+
+begin
+ axistype = IC_AXES(ic, IC_GKEY(ic), axis)
+ switch (axistype) {
+ case 'x': # Independent variable
+ call gt_sets (gt, gtlabel[axis], Memc[IC_LABELS(ic,1)])
+ call gt_sets (gt, gtunits[axis], Memc[IC_UNITS(ic,1)])
+ call amovd (x, z, npts)
+ call gt_sets (gt, GTSUBTITLE, "")
+ case 'y': # Dependent variable
+ call gt_sets (gt, gtlabel[axis], Memc[IC_LABELS(ic,2)])
+ call gt_sets (gt, gtunits[axis], Memc[IC_UNITS(ic,2)])
+ call amovd (y, z, npts)
+ call gt_sets (gt, GTSUBTITLE, "")
+ case 'f': # Fitted values
+ call gt_sets (gt, gtlabel[axis], "fit")
+ call gt_sets (gt, gtunits[axis], Memc[IC_UNITS(ic,2)])
+ call dcvvector (cv, x, z, npts)
+ call gt_sets (gt, GTSUBTITLE, "")
+ case 'r': # Residuals
+ call gt_sets (gt, gtlabel[axis], "residuals")
+ call gt_sets (gt, gtunits[axis], Memc[IC_UNITS(ic,2)])
+ call dcvvector (cv, x, z, npts)
+ call asubd (y, z, z, npts)
+ call gt_sets (gt, GTSUBTITLE, "")
+ case 'd': # Ratio
+ call gt_sets (gt, gtlabel[axis], "ratio")
+ call gt_sets (gt, gtunits[axis], "")
+ call dcvvector (cv, x, z, npts)
+# iferr (call adiv$t (y, z, z, npts))
+ call advzd (y, z, z, npts, icg_dvzd)
+ call gt_sets (gt, GTSUBTITLE, "")
+ case 'n': # Linear component removed
+ call gt_sets (gt, gtlabel[axis], "non-linear component")
+ call gt_sets (gt, gtunits[axis], Memc[IC_UNITS(ic,2)])
+ xmin = IC_XMIN(ic)
+ xmax = IC_XMAX(ic)
+ a = dcveval (cv, double(xmin))
+ b = (dcveval (cv, double(xmax)) - a) / (xmax - xmin)
+ do i = 1, npts
+ z[i] = y[i] - a - b * (x[i] - xmin)
+ call gt_sets (gt, GTSUBTITLE, "")
+ default: # User axes types.
+ call malloc (label, SZ_LINE, TY_CHAR)
+ call malloc (units, SZ_LINE, TY_CHAR)
+ if (axis == 1) {
+ call strcpy (Memc[IC_LABELS(ic,1)], Memc[label], SZ_LINE)
+ call strcpy (Memc[IC_UNITS(ic,1)], Memc[units], SZ_LINE)
+ call amovd (x, z, npts)
+ } else {
+ call strcpy (Memc[IC_LABELS(ic,2)], Memc[label], SZ_LINE)
+ call strcpy (Memc[IC_UNITS(ic,2)], Memc[units], SZ_LINE)
+ call amovd (y, z, npts)
+ }
+ call icg_uaxesd (ic, axistype, cv, x, y, z, npts, Memc[label],
+ Memc[units], SZ_LINE)
+ call gt_sets (gt, gtlabel[axis], Memc[label])
+ call gt_sets (gt, gtunits[axis], Memc[units])
+ call gt_sets (gt, GTSUBTITLE, "HD Curve")
+ call mfree (label, TY_CHAR)
+ call mfree (units, TY_CHAR)
+ }
+end
+
+
+# ICG_DVZ -- Error action to take on zero division.
+
+double procedure icg_dvzd (x)
+
+double x # Numerator
+
+begin
+ return (1.)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgcolon.x b/noao/imred/dtoi/hdicfit/hdicgcolon.x
new file mode 100644
index 00000000..52299df7
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgcolon.x
@@ -0,0 +1,284 @@
+include <error.h>
+include <gset.h>
+include "hdicfit.h"
+
+define EB_WTS 10
+define EB_SDEV 11
+
+# List of colon commands.
+define CMDS "|show|sample|naverage|function|order|low_reject|high_reject|\
+ |niterate|grow|errors|vshow|transform|fog|reset|quit|ebars|"
+
+define SHOW 1 # Show values of parameters
+define SAMPLE 2 # Set or show sample ranges
+define NAVERAGE 3 # Set or show sample averaging or medianing
+define FUNCTION 4 # Set or show function type
+define ORDER 5 # Set or show order
+define LOW_REJECT 6 # Set or show lower rejection factor
+define HIGH_REJECT 7 # Set or show upper rejection factor
+# newline 8
+define NITERATE 9 # Set or show rejection iterations
+define GROW 10 # Set or show rejection growing radius
+define ERRORS 11 # Show errors of fit
+define VSHOW 12 # Show verbose information
+define TRANSFORM 13 # Set or show transformation
+define FOG 14 # Set or show value of fog
+define RESET 15 # Reset x, y, wts, npts to original values
+define QUIT 16 # Terminate without updating database
+define EBARS 17 # Set error bars to represent weights or
+ # standard deviations
+
+# ICG_COLON -- Processes colon commands.
+
+procedure icg_colond (ic, cmdstr, gp, gt, cv, x, y, wts, npts)
+
+pointer ic # ICFIT pointer
+char cmdstr[ARB] # Command string
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer for error listing
+double x[npts], y[npts], wts[npts] # Data arrays for error listing
+int npts # Number of data points
+
+real rval
+char cmd[SZ_LINE]
+int ncmd, ival, ip, junk
+
+int nscan(), strdic(), strncmp(), ctor()
+string funcs "|chebyshev|legendre|spline1|spline3|power|"
+string tform "|none|logopacitance|k50|k75|"
+
+begin
+ # Use formated scan to parse the command string.
+ # The first word is the command and it may be minimum match
+ # abbreviated with the list of commands.
+
+ call sscan (cmdstr)
+ call gargwrd (cmd, SZ_LINE)
+ ncmd = strdic (cmd, cmd, SZ_LINE, CMDS)
+
+ switch (ncmd) {
+ case SHOW:
+ # show: Show the values of the fitting parameters. The terminal
+ # is cleared and paged using the gtools paging procedures.
+
+ call gargwrd (cmd, SZ_LINE)
+ if (nscan() == 1) {
+ call gdeactivate (gp, AW_CLEAR)
+ call ic_show (ic, "STDOUT", gt)
+ call greactivate (gp, AW_PAUSE)
+ } else {
+ iferr (call ic_show (ic, cmd, gt))
+ call erract (EA_WARN)
+ }
+
+ case SAMPLE:
+ # sample: List or set the sample points.
+
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ call printf ("sample = %s\n")
+ call pargstr (Memc[IC_SAMPLE(ic)])
+ } else {
+ call strcpy (cmd, Memc[IC_SAMPLE(ic)], SZ_LINE)
+ IC_NEWX(ic) = YES
+ }
+
+ case NAVERAGE:
+	# naverage: List or set the sample averaging.
+
+ call gargi (ival)
+ if (nscan() == 1) {
+ call printf ("naverage = %d\n")
+ call pargi (IC_NAVERAGE(ic))
+ } else {
+ IC_NAVERAGE(ic) = ival
+ IC_NEWX(ic) = YES
+ }
+
+ case FUNCTION:
+ # function: List or set the fitting function.
+
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ call printf ("function = %s\n")
+ call ic_gstr (ic, "function", cmd, SZ_LINE)
+ call pargstr (cmd)
+ } else {
+ if (strdic (cmd, cmd, SZ_LINE, funcs) > 0) {
+ call ic_pstr (ic, "function", cmd)
+ IC_NEWFUNCTION(ic) = YES
+ } else
+ call printf ("Unknown or ambiguous function")
+ }
+
+ case ORDER:
+ # order: List or set the function order.
+
+ call gargi (ival)
+ if (nscan() == 1) {
+ call printf ("order = %d\n")
+ call pargi (IC_ORDER(ic))
+ } else {
+ IC_ORDER(ic) = ival
+ IC_NEWFUNCTION(ic) = YES
+ }
+
+ case LOW_REJECT:
+ # low_reject: List or set the lower rejection threshold.
+
+ call gargr (rval)
+ if (nscan() == 1) {
+ call printf ("low_reject = %g\n")
+ call pargr (IC_LOW(ic))
+ } else
+ IC_LOW(ic) = rval
+
+ case HIGH_REJECT:
+ # high_reject: List or set the high rejection threshold.
+
+ call gargr (rval)
+ if (nscan() == 1) {
+ call printf ("high_reject = %g\n")
+ call pargr (IC_HIGH(ic))
+ } else
+ IC_HIGH(ic) = rval
+
+ case NITERATE:
+ # niterate: List or set the number of rejection iterations.
+
+ call gargi (ival)
+ if (nscan() == 1) {
+ call printf ("niterate = %d\n")
+ call pargi (IC_NITERATE(ic))
+ } else
+ IC_NITERATE(ic) = ival
+
+ case GROW:
+ # grow: List or set the rejection growing.
+
+ call gargr (rval)
+ if (nscan() == 1) {
+ call printf ("grow = %g\n")
+ call pargr (IC_GROW(ic))
+ } else
+ IC_GROW(ic) = rval
+
+ case ERRORS:
+ call gargwrd (cmd, SZ_LINE)
+ if (nscan() == 1) {
+ call gdeactivate (gp, AW_CLEAR)
+ call ic_show (ic, "STDOUT", gt)
+ call ic_errorsd (ic, "STDOUT", cv, x, y, wts, npts)
+ call greactivate (gp, AW_PAUSE)
+ } else {
+ iferr {
+ call ic_show (ic, cmd, gt)
+ call ic_errorsd (ic, cmd, cv, x, y, wts, npts)
+ } then
+ call erract (EA_WARN)
+ }
+ case VSHOW:
+ # verbose show: Show the values of the fitting parameters.
+ # The terminal is paged using the gtools paging procedure.
+
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ call gdeactivate (gp, AW_CLEAR)
+ call ic_vshowd (ic, "STDOUT", cv, x, y, wts, npts, gt)
+ call greactivate (gp, AW_PAUSE)
+ } else {
+ iferr {
+ call ic_vshowd (ic, cmd, cv, x, y, wts, npts, gt)
+ } then
+ call erract (EA_WARN)
+ }
+ case TRANSFORM:
+ # transform: List or set the transformation type. This
+ # option applies to HDTOI procedures only.
+
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ call printf ("transform = %s\n")
+ call ic_gstr (ic, "transform", cmd, SZ_LINE)
+ call pargstr (cmd)
+ } else {
+ ival= strdic (cmd, cmd, SZ_LINE, tform)
+ if (ival > 0) {
+ call ic_pstr (ic, "transform", cmd)
+ IC_NEWTRANSFORM(ic) = YES
+ IC_NEWX(ic) = YES
+ switch (IC_TRANSFORM(ic)) {
+ case HD_NONE:
+ call ic_pstr (ic, "xlabel", "Density")
+ case HD_LOGO:
+ call ic_pstr (ic, "xlabel",
+ "Log Opacitance: log (10**Den - 1)")
+ case HD_K50:
+ call ic_pstr (ic, "xlabel",
+ "Den + 0.50 * Log (1 - (10 ** -Den))")
+ case HD_K75:
+ call ic_pstr (ic, "xlabel",
+ "Den + 0.75 * Log (1 - (10 ** -Den))")
+ }
+ } else
+ call printf ("Unknown or ambiguous transform")
+ }
+
+ case FOG:
+ # fog: DTOI ONLY - change or reset the value of the fog level
+
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ call printf ("fog = %g\n")
+ call pargr (IC_FOG(ic))
+ } else {
+ if (strncmp (cmd, "reset", 1) == 0)
+ IC_FOG(ic) = IC_RFOG(ic)
+ else {
+ ip = 1
+ junk = ctor (cmd, ip, rval)
+ IC_FOG(ic) = rval
+ }
+ IC_NEWFOG(ic) = YES
+ IC_NEWX(ic) = YES
+ }
+
+ case RESET:
+ # Set flag to reset x, y, wts and npts to original values.
+ IC_RESET(ic) = YES
+ IC_NEWX(ic) = YES
+ IC_NEWY(ic) = YES
+ IC_NEWWTS(ic) = YES
+ IC_NEWFUNCTION(ic) = YES
+ IC_NEWTRANSFORM(ic) = YES
+
+ case QUIT:
+	# Set update flag to NO so the database will not be updated.
+ IC_UPDATE(ic) = NO
+
+ case EBARS:
+ # [HV]BAR marker can indicate either errors or weights
+ call gargwrd (cmd, SZ_LINE)
+ if (cmd[1] == EOS) {
+ if (IC_EBARS(ic) == EB_WTS)
+ call printf ("ebars = Weights\n")
+ else if (IC_EBARS(ic) == EB_SDEV)
+ call printf ("ebars = Errors\n")
+ } else {
+ if (strncmp (cmd, "weights", 1) == 0 ||
+ strncmp (cmd, "WEIGHTS", 1) == 0)
+ IC_EBARS(ic) = EB_WTS
+ else if (strncmp (cmd, "errors", 1) == 0 ||
+ strncmp (cmd, "ERRORS", 1) == 0)
+ IC_EBARS(ic) = EB_SDEV
+ else
+ call printf ("Unrecognized value for ebars '%s'\n")
+ call pargstr (cmd)
+ }
+
+ default:
+ call eprintf ("Unrecognized command '%s'\n")
+ call pargstr (cmd)
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgdelete.x b/noao/imred/dtoi/hdicfit/hdicgdelete.x
new file mode 100644
index 00000000..82c61f70
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgdelete.x
@@ -0,0 +1,81 @@
+include <gset.h>
+include <mach.h>
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+define MSIZE 2. # Mark size
+
+# ICG_DELETE -- Delete data point nearest the cursor.
+# The nearest point to the cursor in NDC coordinates is determined.
+
+procedure icg_deleted (ic, gp, gt, cv, x, y, wts, npts, wx, wy)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+double x[npts], y[npts] # Data points
+double wts[npts] # Weight array
+int npts # Number of points
+real wx, wy # Position to be nearest
+
+int gt_geti()
+pointer sp, xout, yout
+
+begin
+ call smark (sp)
+ call salloc (xout, npts, TY_DOUBLE)
+ call salloc (yout, npts, TY_DOUBLE)
+
+ call icg_axesd (ic, gt, cv, 1, x, y, Memd[xout], npts)
+ call icg_axesd (ic, gt, cv, 2, x, y, Memd[yout], npts)
+ if (gt_geti (gt, GTTRANSPOSE) == NO)
+ call icg_d1d (ic, gp, Memd[xout], Memd[yout], wts, npts, wx, wy)
+ else
+ call icg_d1d (ic, gp, Memd[yout], Memd[xout], wts, npts, wy, wx)
+
+ call sfree (sp)
+end
+
+
+# ICG_D1D - Do the actual delete.
+
+procedure icg_d1d (ic, gp, x, y, wts, npts, wx, wy)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+double x[npts], y[npts] # Data points
+double wts[npts] # Weight array
+int npts # Number of points
+real wx, wy # Position to be nearest
+
+int i, j
+real x0, y0, r2, r2min
+
+begin
+ # Transform world cursor coordinates to NDC.
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+
+	# Search for the nearest point with non-zero weight to the cursor.
+	r2min = MAX_REAL
+	j = 0
+ do i = 1, npts {
+ if (wts[i] == 0.)
+ next
+
+ call gctran (gp, real (x[i]), real (y[i]), x0, y0, 1, 0)
+
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ j = i
+ }
+ }
+
+ # Mark the deleted point with a cross and set the weight to zero.
+ if (j != 0) {
+ call gscur (gp, real (x[j]), real (y[j]))
+ call gmark (gp, real (x[j]), real (y[j]), GM_CROSS, MSIZE, MSIZE)
+ wts[j] = 0.
+ IC_NEWWTS(ic) = YES
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgfit.x b/noao/imred/dtoi/hdicfit/hdicgfit.x
new file mode 100644
index 00000000..b50ed03c
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgfit.x
@@ -0,0 +1,402 @@
+include <error.h>
+include <pkg/gtools.h>
+include <mach.h>
+include <gset.h>
+include "hdicfit.h"
+
+define HELP "noao$lib/scr/hdicgfit.key"
+define PROMPT "hdicfit options"
+define EB_WTS 10
+define EB_SDEV 11
+
+# ICG_FIT -- Interactive curve fitting with graphics. This is the main
+# entry point for the interactive graphics part of the icfit package.
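+#
+# Cursor keys handled below:  ? (help), a (add a point), c (print point
+# coordinates), d (delete a point), f (fit), g (define graph keys),
+# h-l (select graph key), o (overplot), r (redraw), u (undelete a point),
+# w (window the graph), x/y (change a point's coordinate), q (quit),
+# and : (colon commands).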
+
+procedure icg_fitd (ic, gp, cursor, gt, cv, density, loge, owts, osdev, npts)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+char cursor[ARB] # GIO cursor input
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+double density[npts] # Original density, not fog subtracted
+double loge[npts] # Original log exposure values
+double owts[npts] # Original weights array
+double osdev[npts] # Original standard deviation array
+int npts # Number of points
+
+real wx, wy, wwy, oldx, oldy
+int wcs, key, nptso
+char cmd[SZ_LINE]
+
+int i, newgraph, axes[2], linetype
+double x1, newwt
+pointer userwts, x, y, wts, den, whydel, sdev, ebw
+
+int clgcur(), stridxs(), scan(), nscan()
+int icg_nearestd(), gstati()
+double dcveval()
+errchk ic_fitd, malloc
+define redraw_ 91
+
+begin
+ # Allocate memory for the fit and a copy of the weights.
+ # The weights are copied because they are changed when points are
+ # deleted. The input x is the untransformed density, and is used
+ # to generate other types of transforms. Points can be added to
+ # the sample, so the y array and weights array can change as well.
+ # The original number of points is also remembered.
+
+ call malloc (userwts, npts, TY_DOUBLE)
+ call malloc (x, npts, TY_DOUBLE)
+ call malloc (y, npts, TY_DOUBLE)
+ call malloc (wts, npts, TY_DOUBLE)
+ call malloc (den, npts, TY_DOUBLE)
+ call malloc (whydel, npts, TY_INT)
+ call malloc (sdev, npts, TY_DOUBLE)
+ call malloc (ebw, npts, TY_DOUBLE)
+
+ call amovd (owts, Memd[userwts], npts)
+ call amovd (owts, Memd[wts], npts)
+ call amovd (loge, Memd[y], npts)
+ call amovd (density, Memd[den], npts)
+ call amovki (NDELETE, Memi[whydel], npts)
+ call amovd (osdev, Memd[sdev], npts)
+ nptso = npts
+
+ # Initialize
+ IC_OVERPLOT(ic) = NO
+ IC_NEWX(ic) = YES
+ IC_NEWY(ic) = YES
+ IC_NEWWTS(ic) = YES
+ IC_NEWFUNCTION(ic) = YES
+ IC_NEWTRANSFORM(ic) = YES
+ IC_UPDATE(ic) = YES
+ IC_EBARS(ic) = EB_SDEV
+
+ # Read cursor commands.
+
+ key = 'f'
+ axes[1] = IC_AXES(ic, IC_GKEY(ic), 1)
+ axes[2] = IC_AXES(ic, IC_GKEY(ic), 2)
+
+ repeat {
+ switch (key) {
+ case '?': # Print help text.
+ call gpagefile (gp, HELP, PROMPT)
+
+ case 'q': # Terminate cursor loop
+ break
+
+ case ':': # List or set parameters
+ if (stridxs ("/", cmd) == 1)
+ call gt_colon (cmd, gp, gt, newgraph)
+ else
+ call icg_colond (ic, cmd, gp, gt, cv, Memd[x],
+ Memd[y], Memd[wts], npts)
+
+ if (IC_RESET(ic) == YES) {
+ npts = nptso
+ call amovd (owts, Memd[userwts], npts)
+ call amovd (owts, Memd[wts], npts)
+ call amovd (loge, Memd[y], npts)
+ call amovd (density, Memd[den], npts)
+ call amovki (NDELETE, Memi[whydel], npts)
+ call amovd (osdev, Memd[sdev], npts)
+ call hdic_init (density, npts, 0.0)
+ }
+
+ # See if user wants to quit without updating
+ if (IC_UPDATE(ic) == NO) {
+ call mfree (x, TY_DOUBLE)
+ call mfree (y, TY_DOUBLE)
+ call mfree (wts, TY_DOUBLE)
+ call mfree (userwts, TY_DOUBLE)
+ call mfree (den, TY_DOUBLE)
+ call mfree (sdev, TY_DOUBLE)
+ return
+ }
+
+ case 'a': # Add data points to the sample. This is only possible
+ # from an HD curve plot.
+
+ if ((IC_AXES (ic, IC_GKEY(ic), 1) == 'y') &&
+ (IC_AXES (ic, IC_GKEY(ic), 2) == 'u')) {
+
+ # Query for weight after plotting current location
+ # call gt_plot (gp, gt, wx, wy, 1)
+ call gmark (gp, wx, wy, GM_CIRCLE, 2.0, 2.0)
+ newwt = 1.0D0
+ call printf ("Enter weight of new point (%g): ")
+ call pargd (newwt)
+ call flush (STDOUT)
+ if (scan() != EOF) {
+		    call gargd (x1)
+ if (nscan() == 1) {
+ if (!IS_INDEFD (x1)) {
+ newwt = x1
+ }
+ }
+ }
+
+ } else {
+ call eprintf ("Points can be added only from an HD Curve\n")
+ next
+ }
+
+ # Add fog into "density above fog" value read from cursor
+ wwy = wy + IC_FOG (ic)
+ if (wwy < 0.0) {
+ call eprintf (
+ "New density (%g) is below fog and will not be added\n")
+ call pargr (wwy)
+ next
+ }
+
+ # Add point into sample
+ call eprintf ("New Point: density above fog = %.4f, log ")
+ call pargr (wwy)
+ call eprintf ("exposure = %.4f, weight = %.4f\n")
+ call pargr (wx)
+ call pargd (newwt)
+
+ call hdic_addpoint (ic, wwy, wx, newwt, den, y, wts, userwts,
+ x, whydel, sdev, npts)
+
+ call realloc (ebw, npts, TY_DOUBLE)
+
+ call hdic_transform (ic, Memd[den], Memd[userwts], Memd[x],
+ Memd[wts], Memi[whydel], npts)
+
+ case 'c': # Print the positions of data points.
+ i = icg_nearestd (ic, gp, gt, cv, Memd[x], Memd[y], npts,
+ wx, wy)
+
+ if (i != 0) {
+ call printf ("den= %7.4g x= %7.4g exp= %7.4g fit= %7.4g")
+ call pargd (Memd[den+i-1])
+ call pargd (Memd[x+i-1])
+ call pargd (Memd[y+i-1])
+ call pargd (dcveval (cv, Memd[x+i-1]))
+ }
+
+ case 'd': # Delete data points.
+ call icg_deleted (ic, gp, gt, cv, Memd[x], Memd[y], Memd[wts],
+ npts, wx, wy)
+
+ case 'f': # Fit the function and reset the flags.
+ iferr {
+ # Copy new transformed vector, if necessary
+ if (IC_NEWTRANSFORM(ic) == YES || IC_NEWFOG(ic) == YES)
+ call hdic_transform (ic, Memd[den], Memd[userwts],
+ Memd[x], Memd[wts], Memi[whydel], npts)
+
+ call ic_fitd (ic, cv, Memd[x], Memd[y], Memd[wts], npts,
+ IC_NEWX(ic), IC_NEWY(ic), IC_NEWWTS(ic),
+ IC_NEWFUNCTION(ic))
+
+ IC_NEWX(ic) = NO
+ IC_NEWY(ic) = NO
+ IC_NEWWTS(ic) = NO
+ IC_NEWFUNCTION(ic) = NO
+ IC_NEWTRANSFORM(ic) = NO
+ IC_FITERROR(ic) = NO
+ IC_NEWFOG(ic) = NO
+ newgraph = YES
+ } then {
+ IC_FITERROR(ic) = YES
+ call erract (EA_WARN)
+ newgraph = NO
+ }
+
+ case 'g': # Set graph axes types.
+ call printf ("Graph key to be defined: ")
+ call flush (STDOUT)
+ if (scan() == EOF)
+ goto redraw_
+ call gargc (cmd[1])
+
+ switch (cmd[1]) {
+ case '\n':
+ case 'h', 'i', 'j', 'k', 'l':
+ switch (cmd[1]) {
+ case 'h':
+ key = 1
+ case 'i':
+ key = 2
+ case 'j':
+ key = 3
+ case 'k':
+ key = 4
+ case 'l':
+ key = 5
+ }
+
+ call printf ("Set graph axes types (%c, %c): ")
+ call pargc (IC_AXES(ic, key, 1))
+ call pargc (IC_AXES(ic, key, 2))
+ call flush (STDOUT)
+ if (scan() == EOF)
+ goto redraw_
+ call gargc (cmd[1])
+
+ switch (cmd[1]) {
+ case '\n':
+ default:
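+		    # cmd[2] is read twice; the second read overwrites the
+		    # first, skipping one character (normally a blank)
+		    # between the two axis type characters.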
+ call gargc (cmd[2])
+ call gargc (cmd[2])
+ if (cmd[2] != '\n') {
+ IC_AXES(ic, key, 1) = cmd[1]
+ IC_AXES(ic, key, 2) = cmd[2]
+ }
+ }
+ default:
+ call printf ("Not a graph key")
+ }
+
+ case 'h':
+ if (IC_GKEY(ic) != 1) {
+ IC_GKEY(ic) = 1
+ newgraph = YES
+ }
+
+ case 'i':
+ if (IC_GKEY(ic) != 2) {
+ IC_GKEY(ic) = 2
+ newgraph = YES
+ }
+
+ case 'j':
+ if (IC_GKEY(ic) != 3) {
+ IC_GKEY(ic) = 3
+ newgraph = YES
+ }
+
+ case 'k':
+ if (IC_GKEY(ic) != 4) {
+ IC_GKEY(ic) = 4
+ newgraph = YES
+ }
+
+ case 'l':
+ if (IC_GKEY(ic) != 5) {
+ IC_GKEY(ic) = 5
+ newgraph = YES
+ }
+
+ case 'o': # Set overplot flag
+ IC_OVERPLOT(ic) = YES
+
+ case 'r': # Redraw the graph
+ newgraph = YES
+
+ case 'u': # Undelete data points.
+ call icg_undeleted (ic, gp, gt, cv, Memd[x], Memd[y],
+ Memd[wts], Memd[userwts], npts, wx, wy)
+
+ case 'w': # Window graph
+ call gt_window (gt, gp, cursor, newgraph)
+
+ case 'x': # Reset the value of the x point.
+ i = icg_nearestd (ic, gp, gt, cv, Memd[x], Memd[y], npts, wx,
+ wy)
+
+ if (i != 0) {
+ call printf ("Enter new x (%g): ")
+ call pargd (Memd[x+i-1])
+ call flush (STDOUT)
+ if (scan() != EOF) {
+ call gargd (x1)
+ if (nscan() == 1) {
+ if (!IS_INDEF (x1)) {
+ oldx = Memd[x+i-1]
+ oldy = Memd[y+i-1]
+ Memd[x+i-1] = x1
+ call hd_redraw (gp, oldx, oldy, x1, oldy)
+ IC_NEWX(ic) = YES
+ }
+ }
+ }
+ }
+
+ case 'y': # Reset the value of the y point.
+ i = icg_nearestd (ic, gp, gt, cv, Memd[x], Memd[y], npts, wx,
+ wy)
+
+ if (i != 0) {
+ call printf ("Enter new y (%g): ")
+ call pargd (Memd[y+i-1])
+ call flush (STDOUT)
+ if (scan() != EOF) {
+ call gargd (x1)
+ if (nscan() == 1) {
+ if (!IS_INDEF (x1)) {
+ oldx = Memd[x+i-1]
+ oldy = Memd[y+i-1]
+ Memd[y+i-1] = x1
+ call hd_redraw (gp, oldx, oldy, oldx, x1)
+ IC_NEWY(ic) = YES
+ }
+ }
+ }
+ }
+
+ case 'z': # Reset the weight value of the nearest point
+ i = icg_nearestd (ic, gp, gt, cv, Memd[x], Memd[y], npts, wx,
+ wy)
+
+ if (i != 0) {
+ call printf ("Enter new weight (%g): ")
+ call pargd (Memd[wts+i-1])
+ call flush (STDOUT)
+ if (scan() != EOF) {
+ call gargd (x1)
+ if (nscan() == 1) {
+ if (!IS_INDEF (x1)) {
+ Memd[wts+i-1] = x1
+ IC_NEWWTS(ic) = YES
+ }
+ }
+ }
+ }
+
+ default: # Let the user decide on any other keys.
+ call icg_user (ic, gp, gt, cv, wx, wy, wcs, key, cmd)
+ }
+
+ # Redraw the graph if necessary.
+redraw_ if (newgraph == YES) {
+ if (IC_AXES(ic, IC_GKEY(ic), 1) != axes[1]) {
+ axes[1] = IC_AXES(ic, IC_GKEY(ic), 1)
+ call gt_setr (gt, GTXMIN, INDEF)
+ call gt_setr (gt, GTXMAX, INDEF)
+ }
+ if (IC_AXES(ic, IC_GKEY(ic), 2) != axes[2]) {
+ axes[2] = IC_AXES(ic, IC_GKEY(ic), 2)
+ call gt_setr (gt, GTYMIN, INDEF)
+ call gt_setr (gt, GTYMAX, INDEF)
+ }
+
+ # Overplot with a different line type
+ if (IC_OVERPLOT(ic) == YES)
+ linetype = min ((gstati (gp, G_PLTYPE) + 1), 4)
+ else
+ linetype = GL_SOLID
+ call gseti (gp, G_PLTYPE, linetype)
+
+ call hdic_ebw (ic, Memd[den], Memd[x], Memd[sdev], Memd[ebw],
+ npts)
+
+ call icg_graphd (ic, gp, gt, cv, Memd[x], Memd[y], Memd[wts],
+ Memd[ebw], npts)
+
+ newgraph = NO
+ }
+ } until (clgcur (cursor, wx, wy, wcs, key, cmd, SZ_LINE) == EOF)
+
+	call mfree (x, TY_DOUBLE)
+	call mfree (y, TY_DOUBLE)
+	call mfree (wts, TY_DOUBLE)
+	call mfree (userwts, TY_DOUBLE)
+	call mfree (den, TY_DOUBLE)
+	call mfree (sdev, TY_DOUBLE)
+	call mfree (whydel, TY_INT)
+	call mfree (ebw, TY_DOUBLE)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicggraph.x b/noao/imred/dtoi/hdicfit/hdicggraph.x
new file mode 100644
index 00000000..dcd9ade3
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicggraph.x
@@ -0,0 +1,329 @@
+include <mach.h>
+include <gset.h>
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+define MSIZE 2. # Mark size
+define SZ_TPLOT 7 # String length for plot type
+define EB_WTS 10
+define EB_SDEV 11
+define MAX_SZMARKER 4
+
+# ICG_GRAPH -- Graph data and fit.
+
+procedure icg_graphd (ic, gp, gt, cv, x, y, wts, ebw, npts)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointers
+pointer cv # Curfit pointer
+double x[npts] # Independent variable
+double y[npts] # Dependent variable
+double wts[npts] # Weights
+double ebw[npts] # Half the error bar width
+int npts # Number of points
+
+pointer xout, yout
+int nvalues
+errchk malloc
+
+begin
+ call malloc (xout, npts, TY_DOUBLE)
+ call malloc (yout, npts, TY_DOUBLE)
+
+ call icg_axesd (ic, gt, cv, 1, x, y, Memd[xout], npts)
+ call icg_axesd (ic, gt, cv, 2, x, y, Memd[yout], npts)
+ call icg_paramsd (ic, cv, x, y, wts, npts, gt)
+
+ call icg_g1d (ic, gp, gt, Memd[xout], Memd[yout], wts, ebw, npts)
+
+ if (npts != IC_NFIT(ic)) {
+ if ((abs (IC_NAVERAGE(ic)) > 1) || (IC_NREJECT(ic) > 0)) {
+ call realloc (xout, IC_NFIT(ic), TY_DOUBLE)
+ call realloc (yout, IC_NFIT(ic), TY_DOUBLE)
+ call icg_axesd (ic, gt, cv, 1, Memd[IC_XFIT(ic)],
+ Memd[IC_YFIT(ic)], Memd[xout], IC_NFIT(ic))
+ call icg_axesd (ic, gt, cv, 2, Memd[IC_XFIT(ic)],
+ Memd[IC_YFIT(ic)], Memd[yout], IC_NFIT(ic))
+ call icg_g2d (ic, gp, gt, Memd[xout], Memd[yout],
+ IC_NFIT(ic))
+ }
+
+ } else if (IC_NREJECT(ic) > 0)
+ call icg_g2d (ic, gp, gt, Memd[xout], Memd[yout], npts)
+
+ nvalues = NVALS_FIT
+ call icg_gfd (ic, gp, gt, cv, nvalues)
+
+	# Mark the sample regions.
+ call icg_sampled (ic, gp, gt, x, npts, 1)
+
+ call mfree (xout, TY_DOUBLE)
+ call mfree (yout, TY_DOUBLE)
+end
+
+
+# ICG_G1D -- Plot data vector.
+
+procedure icg_g1d (ic, gp, gt, x, y, wts, ebw, npts)
+
+pointer ic # Pointer to ic structure
+pointer gp # Pointer to graphics stream
+pointer gt # Pointer to gtools structure
+double x[npts] # Array of independent variables
+double y[npts] # Array of dependent variables
+double wts[npts] # Array of weights
+double ebw[npts] # Error bar half width in WCS (positive density)
+int npts # Number of points
+
+pointer sp, xr, yr, sz, szmk, gt1, gt2
+char tplot[SZ_TPLOT]
+int i, xaxis, yaxis, symbols[3], markplot
+real size, rmin, rmax, big
+bool fp_equald(), streq(), fp_equalr()
+int strncmp()
+data symbols/GM_PLUS, GM_BOX, GM_CIRCLE/
+include "hdic.com"
+
+begin
+ call smark (sp)
+ call salloc (xr, npts, TY_REAL)
+ call salloc (yr, npts, TY_REAL)
+ call salloc (sz, npts, TY_REAL)
+ call salloc (szmk, npts, TY_REAL)
+
+ call achtdr (x, Memr[xr], npts)
+ call achtdr (y, Memr[yr], npts)
+
+ xaxis = (IC_AXES (ic, IC_GKEY(ic), 1))
+ yaxis = (IC_AXES (ic, IC_GKEY(ic), 2))
+
+ # Set up gtools structure for deleted (gt1) and added (gt2) points
+ call gt_copy (gt, gt1)
+ call gt_setr (gt1, GTXSIZE, MSIZE)
+ call gt_setr (gt1, GTYSIZE, MSIZE)
+ call gt_sets (gt1, GTMARK, "cross")
+
+ call gt_copy (gt, gt2)
+ call gt_setr (gt2, GTXSIZE, MSIZE)
+ call gt_setr (gt2, GTYSIZE, MSIZE)
+ call gt_sets (gt2, GTMARK, "circle")
+
+ markplot = NO
+ call gt_gets (gt, GTTYPE, tplot, SZ_TPLOT)
+ if (strncmp (tplot, "mark", 4) == 0)
+ markplot = YES
+
+ if (IC_OVERPLOT(ic) == NO) {
+ # Start a new plot
+ call gclear (gp)
+
+ # Set the graph scale and axes
+ call gascale (gp, Memr[xr], npts, 1)
+ call gascale (gp, Memr[yr], npts, 2)
+
+ # If plotting HD curve, set wy2 to maxden, which may have
+ # been updated if a new endpoint was added.
+
+ if (xaxis == 'y' && yaxis == 'u')
+ call gswind (gp, INDEF, INDEF, INDEF, real (maxden))
+
+ call gt_swind (gp, gt)
+ call gt_labax (gp, gt)
+ }
+
+ # Calculate size of markers if error bars are being used. If the
+ # weights are being used as the marker size, they are first scaled
+ # between 1.0 and the maximum marker size.
+
+ call gt_gets (gt, GTMARK, tplot, SZ_TPLOT)
+ if (streq (tplot, "hebar") || streq (tplot, "vebar")) {
+ if (IC_EBARS(ic) == EB_WTS) {
+ call achtdr (wts, Memr[sz], npts)
+ call alimr (Memr[sz], npts, rmin, rmax)
+ if (fp_equalr (rmin, rmax))
+ call amovr (Memr[sz], Memr[szmk], npts)
+ else {
+ big = real (MAX_SZMARKER)
+ call amapr (Memr[sz], Memr[szmk], npts,rmin,rmax,1.0, big)
+ }
+ } else {
+ call achtdr (ebw, Memr[sz], npts)
+ call hd_szmk (gp, gt, xaxis, yaxis, tplot, Memr[sz],
+ Memr[szmk], npts)
+ }
+ } else
+ call amovkr (MSIZE, Memr[szmk], npts)
+
+ do i = 1, npts {
+ # Check for deleted point
+ if (fp_equald (wts[i], 0.0D0))
+ call gt_plot (gp, gt1, Memr[xr+i-1], Memr[yr+i-1], 1)
+
+ # Check for added point
+ else if (fp_equald (ebw[i], ADDED_PT))
+ call gt_plot (gp, gt2, Memr[xr+i-1], Memr[yr+i-1], 1)
+
+ else {
+ size = Memr[szmk+i-1]
+ call gt_setr (gt, GTXSIZE, size)
+ call gt_setr (gt, GTYSIZE, size)
+ call gt_plot (gp, gt, Memr[xr+i-1], Memr[yr+i-1], 1)
+ }
+ }
+
+ IC_OVERPLOT(ic) = NO
+	# Free the copied gtools descriptors for deleted and added points.
+	call gt_free (gt1)
+	call gt_free (gt2)
+	call sfree (sp)
+end
+
+
+# HD_SZMK -- Calculate size of error bar markers. This procedure is
+# called when the marker type is hebar or vebar and the marker size is
+# to be the error in the point, not its weight in the fit.
+
+procedure hd_szmk (gp, gt, xaxis, yaxis, mark, insz, outsz, npts)
+
+pointer gp # Pointer to gio structure
+pointer gt # Pointer to gtools structure
+int xaxis, yaxis # Codes for x and y axis types
+char mark[SZ_TPLOT] # Type of marker to use
+real insz[npts] # Standard deviations in WCS units
+real outsz[npts] # Output size array in NDC units
+int npts # Number of points in arrays
+
+int gt_geti()
+char tplot[SZ_TPLOT]
+real dx, dy
+bool streq()
+
+begin
+ # Check validity of axis types
+ if (xaxis != 'x' && xaxis != 'u' && yaxis != 'x' && yaxis != 'u') {
+ call eprintf ("Choose graph type with axes 'u' or 'x'; ")
+ call eprintf ("Using marker size 2.0\n")
+ call amovkr (MSIZE, outsz, npts)
+ return
+ }
+
+ call gt_gets (gt, GTMARK, tplot, SZ_TPLOT)
+ if (streq (tplot, "hebar")) {
+ if (yaxis == 'x' || yaxis == 'u') {
+ call gt_sets (gt, GTMARK, "vebar")
+ call eprintf ("Marker switched to vebar\n")
+ # call flush (STDOUT)
+ }
+ } else if (streq (tplot, "vebar")) {
+ if (xaxis == 'x' || xaxis == 'u') {
+ call gt_sets (gt, GTMARK, "hebar")
+ call eprintf ("Marker switched to hebar\n")
+ # call flush (STDOUT)
+ }
+ }
+
+ # Need to scale standard deviation from density to NDC units
+ call ggscale (gp, 0.0, 0.0, dx, dy)
+ if (gt_geti (gt, GTTRANSPOSE) == NO)
+ call adivkr (insz, dx, outsz, npts)
+ else
+ call adivkr (insz, dy, outsz, npts)
+end
+
+
+# ICG_G2D -- Show sample range and rejected data on plot.
+
+procedure icg_g2d (ic, gp, gt, x, y, npts)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+double x[npts], y[npts] # Data points
+int npts # Number of data points
+
+int i
+pointer sp, xr, yr, gt1
+
+begin
+ call smark (sp)
+ call salloc (xr, npts, TY_REAL)
+ call salloc (yr, npts, TY_REAL)
+ call achtdr (x, Memr[xr], npts)
+ call achtdr (y, Memr[yr], npts)
+
+ call gt_copy (gt, gt1)
+ call gt_sets (gt1, GTTYPE, "mark")
+
+ # Mark the sample points.
+
+ if (abs (IC_NAVERAGE(ic)) > 1) {
+ call gt_sets (gt1, GTMARK, "plus")
+ call gt_setr (gt1, GTXSIZE, real (-abs (IC_NAVERAGE(ic))))
+ call gt_setr (gt1, GTYSIZE, 1.)
+ call gt_plot (gp, gt1, Memr[xr], Memr[yr], npts)
+ }
+
+ # Mark the rejected points.
+
+ if (IC_NREJECT(ic) > 0) {
+ call gt_sets (gt1, GTMARK, "diamond")
+ call gt_setr (gt1, GTXSIZE, MSIZE)
+ call gt_setr (gt1, GTYSIZE, MSIZE)
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ call gt_plot (gp, gt1, Memr[xr+i-1], Memr[yr+i-1], 1)
+ }
+ }
+
+ call gt_free (gt1)
+ call sfree (sp)
+end
+
+
+# ICG_GF[RD] -- Overplot the fit on a graph of the data points. A vector
+# is constructed and evaluated, then plotted.
+
+procedure icg_gfd (ic, gp, gt, cv, npts)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOL pointer
+pointer cv # CURFIT pointer
+int npts # Number of points to plot
+
+pointer sp, xr, yr, x, y, xo, yo, gt1
+int gstati()
+
+begin
+ call smark (sp)
+
+ if (IC_FITERROR(ic) == YES)
+ return
+
+ call salloc (xr, npts, TY_REAL)
+ call salloc (yr, npts, TY_REAL)
+ call salloc (x, npts, TY_DOUBLE)
+ call salloc (y, npts, TY_DOUBLE)
+ call salloc (xo, npts, TY_DOUBLE)
+ call salloc (yo, npts, TY_DOUBLE)
+
+ # Calculate big vector of independent variable values. Note value
+ # of npts can change with this call.
+ call hdic_gvec (ic, Memd[x], npts, IC_TRANSFORM(ic))
+
+ # Calculate vector of fit values.
+ call dcvvector (cv, Memd[x], Memd[y], npts)
+
+ # Convert to user function or transpose axes. Change type to reals
+ # for plotting.
+ call icg_axesd (ic, gt, cv, 1, Memd[x], Memd[y], Memd[xo], npts)
+ call icg_axesd (ic, gt, cv, 2, Memd[x], Memd[y], Memd[yo], npts)
+ call achtdr (Memd[xo], Memr[xr], npts)
+ call achtdr (Memd[yo], Memr[yr], npts)
+
+ call gt_copy (gt, gt1)
+ call gt_sets (gt1, GTTYPE, "line")
+ call gt_seti (gt1, GTLINE, gstati (gp, G_PLTYPE))
+ call gt_plot (gp, gt1, Memr[xr], Memr[yr], npts)
+ call gt_free (gt1)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgnearest.x b/noao/imred/dtoi/hdicfit/hdicgnearest.x
new file mode 100644
index 00000000..8d556273
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgnearest.x
@@ -0,0 +1,72 @@
+include <mach.h>
+include <pkg/gtools.h>
+
+# ICG_NEAREST -- Find the nearest point to the cursor and return the index.
+# The nearest point to the cursor in NDC coordinates is determined.
+# The cursor is moved to the nearest point selected.
+
+int procedure icg_nearestd (ic, gp, gt, cv, x, y, npts, wx, wy)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+double x[npts], y[npts] # Data points
+int npts # Number of points
+real wx, wy # Cursor position
+
+int pt
+pointer sp, xout, yout
+int icg_nd(), gt_geti()
+
+begin
+ call smark (sp)
+ call salloc (xout, npts, TY_DOUBLE)
+ call salloc (yout, npts, TY_DOUBLE)
+
+ call icg_axesd (ic, gt, cv, 1, x, y, Memd[xout], npts)
+ call icg_axesd (ic, gt, cv, 2, x, y, Memd[yout], npts)
+
+ if (gt_geti (gt, GTTRANSPOSE) == NO)
+ pt = icg_nd (gp, Memd[xout], Memd[yout], npts, wx, wy)
+ else
+ pt = icg_nd (gp, Memd[yout], Memd[xout], npts, wy, wx)
+ call sfree (sp)
+
+ return (pt)
+end
+
+
+# ICG_ND -- Find the index of the data point nearest the cursor.  The
+# comparison is done in NDC coordinates.
+
+int procedure icg_nd (gp, x, y, npts, wx, wy)
+
+pointer gp # GIO pointer
+double x[npts], y[npts] # Data points
+int npts # Number of points
+real wx, wy # Cursor position
+
+int i, j
+real x0, y0, r2, r2min
+
+begin
+ # Transform world cursor coordinates to NDC.
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+
+	# Search for nearest point.
+	j = 0
+	r2min = MAX_REAL
+ do i = 1, npts {
+ call gctran (gp, real (x[i]), real (y[i]), x0, y0, 1, 0)
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ j = i
+ }
+ }
+
+ # Move the cursor to the selected point and return the index.
+ if (j != 0)
+ call gscur (gp, real (x[j]), real (y[j]))
+
+ return (j)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgparams.x b/noao/imred/dtoi/hdicfit/hdicgparams.x
new file mode 100644
index 00000000..e502fba1
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgparams.x
@@ -0,0 +1,94 @@
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+# ICG_PARAMS -- Set parameter string.
+
+procedure icg_paramsd (ic, cv, x, y, wts, npts, gt)
+
+pointer ic # ICFIT pointer
+pointer cv # Curfit pointer
+double x[ARB] # Ordinates
+double y[ARB] # Abscissas
+double wts[ARB] # Weights
+int npts # Number of data points
+pointer gt # GTOOLS pointer
+
+double rms
+int i, n, deleted
+pointer sp, fit, wts1, str, params
+double ic_rmsd()
+
+begin
+ call smark (sp)
+
+ if (npts == IC_NFIT(ic)) {
+ # Allocate memory for the fit.
+ n = npts
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (wts, Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Set the fit and compute the RMS error.
+ call dcvvector (cv, x, Memd[fit], n)
+ rms = ic_rmsd (x, y, Memd[fit], Memd[wts1], n)
+
+ } else {
+ # Allocate memory for the fit.
+ n = IC_NFIT(ic)
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (Memd[IC_WTSFIT(ic)], Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Set the fit and compute the rms error.
+ call dcvvector (cv, Memd[IC_XFIT(ic)], Memd[fit], n)
+ rms = ic_rmsd (Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)], Memd[fit],
+ Memd[wts1], n)
+ }
+
+ # Print the parameters and errors.
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (params, 2*SZ_LINE, TY_CHAR)
+
+ call sprintf (Memc[str],SZ_LINE, "function=%s, order=%d, transform=%s")
+ call ic_gstr (ic, "function", Memc[params], 2*SZ_LINE)
+ call pargstr (Memc[params])
+ call pargi (IC_ORDER(ic))
+ call ic_gstr (ic, "transform", Memc[params], SZ_LINE)
+ call pargstr (Memc[params])
+ call sprintf (Memc[params], 2*SZ_LINE,
+ "%s\nfog=%.5f, total=%d, deleted=%d, RMS=%7.4g")
+ call pargstr (Memc[str])
+ call pargr (IC_FOG(ic))
+ call pargi (npts)
+ call pargi (deleted)
+ call pargd (rms)
+ call gt_sets (gt, GTPARAMS, Memc[params])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgredraw.x b/noao/imred/dtoi/hdicfit/hdicgredraw.x
new file mode 100644
index 00000000..d42a0740
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgredraw.x
@@ -0,0 +1,22 @@
+include <gset.h>
+
+define MSIZE 2.0
+
+# HD_REDRAW -- Redraw a data point. The old position marker is erased,
+# the new marker drawn, and the cursor moved to the new location.
+
+procedure hd_redraw (gp, oldx, oldy, newx, newy)
+
+pointer gp # Pointer to graphics stream
+real oldx # Old x coordinate
+real oldy # Old y coordinate
+real newx # New x coordinate
+real newy # New y coordinate
+
+begin
+ call gseti (gp, G_PMLTYPE, GL_CLEAR)
+ call gmark (gp, oldx, oldy, GM_PLUS, MSIZE, MSIZE)
+ call gseti (gp, G_PMLTYPE, GL_SOLID)
+ call gmark (gp, newx, newy, GM_PLUS, MSIZE, MSIZE)
+ call gscur (gp, newx, newy)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgsample.x b/noao/imred/dtoi/hdicfit/hdicgsample.x
new file mode 100644
index 00000000..12eef158
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgsample.x
@@ -0,0 +1,84 @@
+include <gset.h>
+include <pkg/rg.h>
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+# ICG_SAMPLE -- Mark sample.
+
+procedure icg_sampled (ic, gp, gt, x, npts, pltype)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+double x[npts] # Ordinates of graph
+int npts # Number of data points
+int pltype # Plot line type
+
+pointer rg
+int i, axis, pltype1
+real xl, xr, yb, yt, dy
+real x1, x2, y1, y2, y3
+
+int gstati(), stridxs(), gt_geti()
+pointer rg_xrangesd()
+
+begin
+ if (stridxs ("*", Memc[IC_SAMPLE(ic)]) > 0)
+ return
+
+ # Find axis along which the independent data is plotted.
+ if (IC_AXES(ic,IC_GKEY(ic),1) == 'x')
+ axis = 1
+ else if (IC_AXES(ic,IC_GKEY(ic),2) == 'x')
+ axis = 2
+ else
+ return
+
+ if (gt_geti (gt, GTTRANSPOSE) == YES)
+ axis = mod (axis, 2) + 1
+
+ pltype1 = gstati (gp, G_PLTYPE)
+ call gseti (gp, G_PLTYPE, pltype)
+ rg = rg_xrangesd (Memc[IC_SAMPLE(ic)], x, npts)
+
+ switch (axis) {
+ case 1:
+ call ggwind (gp, xl, xr, yb, yt)
+
+ dy = yt - yb
+ y1 = yb + dy / 100
+ y2 = y1 + dy / 20
+ y3 = (y1 + y2) / 2
+
+ do i = 1, RG_NRGS(rg) {
+ x1 = x[RG_X1(rg, i)]
+ x2 = x[RG_X2(rg, i)]
+ if ((x1 > xl) && (x1 < xr))
+ call gline (gp, x1, y1, x1, y2)
+ if ((x2 > xl) && (x2 < xr))
+ call gline (gp, x2, y1, x2, y2)
+ call gline (gp, x1, y3, x2, y3)
+ }
+ case 2:
+ call ggwind (gp, yb, yt, xl, xr)
+
+ dy = yt - yb
+ y1 = yb + dy / 100
+ y2 = y1 + dy / 20
+ y3 = (y1 + y2) / 2
+
+ do i = 1, RG_NRGS(rg) {
+ x1 = x[RG_X1(rg, i)]
+ x2 = x[RG_X2(rg, i)]
+ if ((x1 > xl) && (x1 < xr))
+ call gline (gp, y1, x1, y2, x1)
+ if ((x2 > xl) && (x2 < xr))
+ call gline (gp, y1, x2, y2, x2)
+ call gline (gp, y3, x1, y3, x2)
+ }
+ }
+
+ call gseti (gp, G_PLTYPE, pltype1)
+ call rg_free (rg)
+end
+
diff --git a/noao/imred/dtoi/hdicfit/hdicguaxes.x b/noao/imred/dtoi/hdicfit/hdicguaxes.x
new file mode 100644
index 00000000..f8199c87
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicguaxes.x
@@ -0,0 +1,38 @@
+include "hdicfit.h"
+
+# ICG_UAXIS -- Set user axis.
+
+procedure icg_uaxesd (ic, key, cv, x, y, z, npts, label, units, maxchars)
+
+pointer ic # Pointer to ic structure
+int key # Key for axes
+pointer cv # CURFIT pointer
+double x[npts] # Independent variable
+double y[npts] # Dependent variable
+double z[npts] # Output values
+int npts # Number of points
+char label[maxchars] # Axis label
+char units[maxchars] # Units for axis
+int maxchars # Maximum chars in label
+
+int offset
+double fog
+real ic_getr()
+include "hdic.com"
+
+begin
+ # Axis type 'u' returns the untransformed independent variable
+ # in the z array. That is, the original density values after
+# subtracting the current fog value.  Density values below fog
+# may have been excluded from the transformed vector.
+
+ call strcpy ("Density above fog", label, maxchars)
+ fog = double (ic_getr (ic, "fog"))
+
+ if (npts == nraw)
+ call asubkd (Memd[den], fog, z, npts)
+ else {
+ offset = big_den + (NVALS_FIT - npts)
+ call asubkd (Memd[offset], fog, z, npts)
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgundel.x b/noao/imred/dtoi/hdicfit/hdicgundel.x
new file mode 100644
index 00000000..9a7c68a4
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgundel.x
@@ -0,0 +1,87 @@
+include <gset.h>
+include <mach.h>
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+define MSIZE 2. # Mark size
+
+# ICG_UNDELETE -- Undelete data point nearest the cursor.
+# The nearest point to the cursor in NDC coordinates is determined.
+
+procedure icg_undeleted (ic, gp, gt, cv, x, y, wts, userwts, npts, wx, wy)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+double x[npts], y[npts] # Data points
+double wts[npts], userwts[npts] # Weight arrays
+int npts # Number of points
+real wx, wy # Position to be nearest
+
+pointer sp, xout, yout
+int gt_geti()
+
+begin
+ call smark (sp)
+ call salloc (xout, npts, TY_DOUBLE)
+ call salloc (yout, npts, TY_DOUBLE)
+
+ call icg_axesd (ic, gt, cv, 1, x, y, Memd[xout], npts)
+ call icg_axesd (ic, gt, cv, 2, x, y, Memd[yout], npts)
+ if (gt_geti (gt, GTTRANSPOSE) == NO) {
+ call icg_u1d (ic, gp, Memd[xout], Memd[yout], wts, userwts,
+ npts, wx, wy)
+ } else {
+ call icg_u1d (ic, gp, Memd[yout], Memd[xout], wts, userwts,
+ npts, wy, wx)
+ }
+
+ call sfree (sp)
+end
+
+
+# ICG_U1D -- Do the actual undelete.
+
+procedure icg_u1d (ic, gp, x, y, wts, userwts, npts, wx, wy)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+double x[npts], y[npts] # Data points
+double wts[npts], userwts[npts] # Weight arrays
+int npts # Number of points
+real wx, wy # Position to be nearest
+
+int i, j
+real x0, y0, r2, r2min
+
+begin
+ # Transform world cursor coordinates to NDC.
+ call gctran (gp, wx, wy, wx, wy, 1, 0)
+
+	# Search for the nearest point with zero weight (a deleted point).
+	j = 0
+	r2min = MAX_REAL
+ do i = 1, npts {
+ if (wts[i] != 0.)
+ next
+
+ call gctran (gp, real (x[i]), real (y[i]), x0, y0, 1, 0)
+
+ r2 = (x0 - wx) ** 2 + (y0 - wy) ** 2
+ if (r2 < r2min) {
+ r2min = r2
+ j = i
+ }
+ }
+
+ # Unmark the deleted point and reset the weight.
+ if (j != 0) {
+ call gscur (gp, real (x[j]), real (y[j]))
+ call gseti (gp, G_PMLTYPE, GL_CLEAR)
+ call gmark (gp, real (x[j]), real (y[j]), GM_CROSS, MSIZE, MSIZE)
+ call gseti (gp, G_PMLTYPE, GL_SOLID)
+ call gmark (gp, real (x[j]), real (y[j]), GM_PLUS, MSIZE, MSIZE)
+ wts[j] = userwts[j]
+ IC_NEWWTS(ic) = YES
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicguser.x b/noao/imred/dtoi/hdicfit/hdicguser.x
new file mode 100644
index 00000000..5e070537
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicguser.x
@@ -0,0 +1,17 @@
+# ICG_USER -- User default action
+
+procedure icg_user (ic, gp, gt, cv, wx, wy, wcs, key, cmd)
+
+pointer ic # ICFIT pointer
+pointer gp # GIO pointer
+pointer gt # GTOOLS pointer
+pointer cv # CURFIT pointer
+real wx, wy # Cursor positions
+int wcs # GIO WCS
+int key # Cursor key
+char cmd[ARB] # Cursor command
+
+begin
+ # Ring bell
+ call printf ("\07")
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicgvec.x b/noao/imred/dtoi/hdicfit/hdicgvec.x
new file mode 100644
index 00000000..81909cb8
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicgvec.x
@@ -0,0 +1,74 @@
+include <mach.h>
+include "hdicfit.h"
+
+# HDIC_GVEC -- Get a vector of data to be plotted. The raw density
+# values are stored in common. From these values, all possible transforms
+# can be generated.
+
+procedure hdic_gvec (ic, xout, npts, transform)
+
+pointer ic # Pointer to ic structure
+double xout[npts] # Output vector
+int npts # Desired npoints to output - possibly changed on output
+int transform # Integer code for desired type of transform
+
+pointer sp, bigdenaf
+int i, j, npts_desired
+double fog, dval
+real ic_getr()
+include "hdic.com"
+
+begin
+ npts_desired = npts
+ if (npts_desired != NVALS_FIT)
+ call error (0, "hdicgvec: nvals != NVALS_FIT")
+
+ call smark (sp)
+ call salloc (bigdenaf, npts, TY_DOUBLE)
+
+ j = 1
+
+ fog = double (ic_getr (ic, "fog"))
+ call asubkd (Memd[big_den], fog, Memd[bigdenaf], npts)
+
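+	# Apply the selected transform.  Densities below fog cannot be
+	# transformed by the logarithmic forms and are dropped, reducing
+	# the number of points returned in npts.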
+ switch (transform) {
+ case HD_NONE:
+ call amovd (Memd[bigdenaf], xout, NVALS_FIT)
+
+ case HD_LOGO:
+ do i = 1, npts_desired {
+ dval = Memd[bigdenaf+i-1]
+ if (dval < 0.0D0)
+ npts = npts - 1
+ else {
+ xout[j] = log10 ((10.**dval) - 1.0)
+ j = j + 1
+ }
+ }
+
+ case HD_K75:
+ do i = 1, npts_desired {
+ dval = Memd[bigdenaf+i-1]
+ if (dval < 0.0D0)
+ npts = npts - 1
+ else {
+ xout[j] = dval + 0.75 * log10 (1.0 - (10.** (-dval)))
+ j = j + 1
+ }
+ }
+
+ case HD_K50:
+ do i = 1, npts_desired {
+ dval = Memd[bigdenaf+i-1]
+ if (dval < 0.0D0)
+ npts = npts - 1
+ else {
+ xout[j] = dval + 0.50 * log10 (1.0 - (10.** (-dval)))
+ j = j + 1
+ }
+ }
+
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicinit.x b/noao/imred/dtoi/hdicfit/hdicinit.x
new file mode 100644
index 00000000..d9730051
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicinit.x
@@ -0,0 +1,60 @@
+include <mach.h>
+include "hdicfit.h"
+
+# HDIC_INIT -- Initialize hdfit/icgfit interface.
+
+procedure hdic_init (density, nvalues, dmax)
+
+double density[nvalues] # Reference density above fog values
+int nvalues # Number of values in sample
+double dmax # Maximum possible density
+
+int i
+pointer sp, base
+double xxmax, xxmin, delta_den
+bool fp_equald()
+include "hdic.com"
+errchk malloc
+
+begin
+ call smark (sp)
+
+ if (den == NULL || big_den == NULL) {
+ call malloc (den, nvalues, TY_DOUBLE)
+ call malloc (big_den, NVALS_FIT, TY_DOUBLE)
+ } else if (nvalues != nraw)
+ call realloc (den, nvalues, TY_DOUBLE)
+
+ nraw = nvalues
+ call salloc (base, NVALS_FIT, TY_DOUBLE)
+
+ # Copy density array to pointer location
+ call amovd (density, Memd[den], nraw)
+
+ # Calculate big vector of density values. The points are spaced
+	# linearly in log space, to yield adequate spacing at low density values.
+
+ call alimd (density, nraw, xxmin, xxmax)
+
+ # Put user value for maximum density in common block if it is valid.
+ if (! fp_equald (dmax, 0.0D0))
+ maxden = dmax
+
+ # Make big_den go all the way to maxden, not just xxmax. Make sure
+ # the value of xxmin won't cause the log function to blow up.
+
+	if (xxmin <= 0.0D0)
+	    xxmin = 2.0 * EPSILOND
+
+ delta_den = (log10 (maxden) - log10 (xxmin)) / double (NVALS_FIT - 1)
+
+ do i = 1, NVALS_FIT
+ Memd[big_den+i-1] = log10 (xxmin) + double (i-1) * delta_den
+
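+	# Convert from log space back to density:  big_den = 10 ** big_den.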
+ call amovkd (10.0D0, Memd[base], NVALS_FIT)
+ call aexpd (Memd[base], Memd[big_den], Memd[big_den], NVALS_FIT)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicparams.x b/noao/imred/dtoi/hdicfit/hdicparams.x
new file mode 100644
index 00000000..1840dccd
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicparams.x
@@ -0,0 +1,323 @@
+include "hdicfit.h"
+
+define FUNCTIONS "|chebyshev|legendre|spline3|spline1|power|"
+define TRANSFORMS "|none|logopacitance|k50|k75|"
+
+# IC_OPEN -- Open ICFIT parameter structure.
+
+procedure ic_open (ic)
+
+pointer ic # ICFIT pointer
+errchk malloc
+
+begin
+ # Allocate memory for the package parameter structure.
+ call malloc (ic, IC_LENSTRUCT, TY_STRUCT)
+ call malloc (IC_SAMPLE(ic), SZ_LINE, TY_CHAR)
+ call malloc (IC_LABELS(ic,1), SZ_LINE, TY_CHAR)
+ call malloc (IC_LABELS(ic,2), SZ_LINE, TY_CHAR)
+ call malloc (IC_UNITS(ic,1), SZ_LINE, TY_CHAR)
+ call malloc (IC_UNITS(ic,2), SZ_LINE, TY_CHAR)
+
+ # Set defaults.
+ call ic_pstr (ic, "function", "spline3")
+ call ic_puti (ic, "order", 1)
+ call ic_pstr (ic, "sample", "*")
+ call ic_puti (ic, "naverage", 1)
+ call ic_puti (ic, "niterate", 0)
+ call ic_putr (ic, "low", 0.)
+ call ic_putr (ic, "high", 0.)
+ call ic_putr (ic, "grow", 0.)
+ call ic_pstr (ic, "xlabel", "X")
+ call ic_pstr (ic, "ylabel", "Y")
+ call ic_pstr (ic, "xunits", "")
+ call ic_pstr (ic, "yunits", "")
+ call ic_puti (ic, "key", 1)
+ call ic_pkey (ic, 1, 'x', 'y')
+ call ic_pkey (ic, 2, 'y', 'x')
+ call ic_pkey (ic, 3, 'x', 'r')
+ call ic_pkey (ic, 4, 'x', 'd')
+ call ic_pkey (ic, 5, 'x', 'n')
+
+ # Initialize other parameters
+ IC_RG(ic) = NULL
+ IC_XFIT(ic) = NULL
+ IC_YFIT(ic) = NULL
+ IC_WTSFIT(ic) = NULL
+ IC_REJPTS(ic) = NULL
+end
+
+
+# IC_CLOSER -- Close ICFIT parameter structure.
+
+procedure ic_closer (ic)
+
+pointer ic # ICFIT pointer
+
+begin
+ # Free memory for the package parameter structure.
+ call rg_free (IC_RG(ic))
+ call mfree (IC_XFIT(ic), TY_REAL)
+ call mfree (IC_YFIT(ic), TY_REAL)
+ call mfree (IC_WTSFIT(ic), TY_REAL)
+ call mfree (IC_REJPTS(ic), TY_INT)
+ call mfree (IC_SAMPLE(ic), TY_CHAR)
+ call mfree (IC_LABELS(ic,1), TY_CHAR)
+ call mfree (IC_LABELS(ic,2), TY_CHAR)
+ call mfree (IC_UNITS(ic,1), TY_CHAR)
+ call mfree (IC_UNITS(ic,2), TY_CHAR)
+ call mfree (ic, TY_STRUCT)
+end
+
+
+# IC_CLOSED -- Close ICFIT parameter structure.
+
+procedure ic_closed (ic)
+
+pointer ic # ICFIT pointer
+
+begin
+ # Free memory for the package parameter structure.
+ call rg_free (IC_RG(ic))
+ call mfree (IC_XFIT(ic), TY_DOUBLE)
+ call mfree (IC_YFIT(ic), TY_DOUBLE)
+ call mfree (IC_WTSFIT(ic), TY_DOUBLE)
+ call mfree (IC_REJPTS(ic), TY_INT)
+ call mfree (IC_SAMPLE(ic), TY_CHAR)
+ call mfree (IC_LABELS(ic,1), TY_CHAR)
+ call mfree (IC_LABELS(ic,2), TY_CHAR)
+ call mfree (IC_UNITS(ic,1), TY_CHAR)
+ call mfree (IC_UNITS(ic,2), TY_CHAR)
+ call mfree (ic, TY_STRUCT)
+end
+
+
+# IC_PSTR -- Put string valued parameters.
+
+procedure ic_pstr (ic, param, str)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be put
+char str[ARB] # String value
+
+int i
+pointer ptr
+int strdic()
+bool streq()
+
+begin
+ if (streq (param, "sample"))
+ call strcpy (str, Memc[IC_SAMPLE(ic)], SZ_LINE)
+ else if (streq (param, "function")) {
+ call malloc (ptr, SZ_LINE, TY_CHAR)
+ i = strdic (str, Memc[ptr], SZ_LINE, FUNCTIONS)
+ if (i > 0)
+ IC_FUNCTION(ic) = i
+ call mfree (ptr, TY_CHAR)
+ } else if (streq (param, "transform")) {
+ call malloc (ptr, SZ_LINE, TY_CHAR)
+ i = strdic (str, Memc[ptr], SZ_LINE, TRANSFORMS)
+ if (i > 0)
+ IC_TRANSFORM(ic) = i
+ call mfree (ptr, TY_CHAR)
+ } else if (streq (param, "xlabel"))
+ call strcpy (str, Memc[IC_LABELS(ic,1)], SZ_LINE)
+ else if (streq (param, "ylabel"))
+ call strcpy (str, Memc[IC_LABELS(ic,2)], SZ_LINE)
+ else if (streq (param, "xunits"))
+ call strcpy (str, Memc[IC_UNITS(ic,1)], SZ_LINE)
+ else if (streq (param, "yunits"))
+ call strcpy (str, Memc[IC_UNITS(ic,2)], SZ_LINE)
+ else
+ call error (0, "ICFIT: Unknown parameter")
+end
+
+
+# IC_PUTI -- Put integer valued parameters.
+
+procedure ic_puti (ic, param, ival)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be put
+int ival # Integer value
+
+bool streq()
+
+begin
+ if (streq (param, "naverage"))
+ IC_NAVERAGE(ic) = ival
+ else if (streq (param, "order"))
+ IC_ORDER(ic) = ival
+ else if (streq (param, "niterate"))
+ IC_NITERATE(ic) = ival
+ else if (streq (param, "key"))
+ IC_GKEY(ic) = ival
+ else if (streq (param, "transform"))
+ IC_TRANSFORM(ic) = ival
+ else
+ call error (0, "ICFIT: Unknown parameter")
+end
+
+
+# IC_PKEY -- Put key parameters.
+# Note the key types must be integers not characters.
+
+procedure ic_pkey (ic, key, xaxis, yaxis)
+
+pointer ic # ICFIT pointer
+int key # Key to be defined
+int xaxis # X axis type
+int yaxis # Y axis type
+
+begin
+ IC_AXES(ic, key, 1) = xaxis
+ IC_AXES(ic, key, 2) = yaxis
+end
+
+
+# IC_GKEY -- Get key parameters.
+
+procedure ic_gkey (ic, key, xaxis, yaxis)
+
+pointer ic # ICFIT pointer
+int key # Key to be gotten
+int xaxis # X axis type
+int yaxis # Y axis type
+
+begin
+ xaxis = IC_AXES(ic, key, 1)
+ yaxis = IC_AXES(ic, key, 2)
+end
+
+
+# IC_PUTR -- Put real valued parameters.
+
+procedure ic_putr (ic, param, rval)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be put
+real rval # Real value
+
+bool streq()
+
+begin
+ if (streq (param, "xmin"))
+ IC_XMIN(ic) = rval
+ else if (streq (param, "xmax"))
+ IC_XMAX(ic) = rval
+ else if (streq (param, "low"))
+ IC_LOW(ic) = rval
+ else if (streq (param, "high"))
+ IC_HIGH(ic) = rval
+ else if (streq (param, "grow"))
+ IC_GROW(ic) = rval
+ else if (streq (param, "fog"))
+ IC_FOG(ic) = rval
+ else if (streq (param, "rfog"))
+ IC_RFOG(ic) = rval
+ else
+ call error (0, "ICFIT: Unknown parameter")
+end
+
+
+# IC_GSTR -- Get string valued parameters.
+
+procedure ic_gstr (ic, param, str, maxchars)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be put
+char str[maxchars] # String value
+int maxchars # Maximum number of characters
+
+bool streq()
+
+begin
+ if (streq (param, "sample"))
+ call strcpy (Memc[IC_SAMPLE(ic)], str, maxchars)
+ else if (streq (param, "xlabel"))
+ call strcpy (Memc[IC_LABELS(ic,1)], str, maxchars)
+ else if (streq (param, "ylabel"))
+ call strcpy (Memc[IC_LABELS(ic,2)], str, maxchars)
+ else if (streq (param, "xunits"))
+ call strcpy (Memc[IC_UNITS(ic,1)], str, maxchars)
+ else if (streq (param, "yunits"))
+ call strcpy (Memc[IC_UNITS(ic,2)], str, maxchars)
+ else if (streq (param, "function")) {
+ switch (IC_FUNCTION(ic)) {
+ case 1:
+ call strcpy ("chebyshev", str, maxchars)
+ case 2:
+ call strcpy ("legendre", str, maxchars)
+ case 3:
+ call strcpy ("spline3", str, maxchars)
+ case 4:
+ call strcpy ("spline1", str, maxchars)
+ case 5:
+ call strcpy ("power", str, maxchars)
+ }
+ } else if (streq (param, "transform")) {
+ switch (IC_TRANSFORM(ic)) {
+ case 1:
+ call strcpy ("none", str, maxchars)
+ case 2:
+ call strcpy ("logopacitance", str, maxchars)
+ case 3:
+ call strcpy ("k50", str, maxchars)
+ case 4:
+ call strcpy ("k75", str, maxchars)
+ }
+ } else
+ call error (0, "ICFIT: Unknown parameter")
+end
+
+
+# IC_GETI -- Get integer valued parameters.
+
+int procedure ic_geti (ic, param)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be gotten
+
+bool streq()
+
+begin
+ if (streq (param, "naverage"))
+ return (IC_NAVERAGE(ic))
+ else if (streq (param, "order"))
+ return (IC_ORDER(ic))
+ else if (streq (param, "niterate"))
+ return (IC_NITERATE(ic))
+ else if (streq (param, "key"))
+ return (IC_GKEY(ic))
+ else if (streq (param, "transform"))
+ return (IC_TRANSFORM(ic))
+
+ call error (0, "ICFIT: Unknown parameter")
+end
+
+
+# IC_GETR -- Get real valued parameters.
+
+real procedure ic_getr (ic, param)
+
+pointer ic # ICFIT pointer
+char param[ARB] # Parameter to be put
+
+bool streq()
+
+begin
+ if (streq (param, "xmin"))
+ return (IC_XMIN(ic))
+ else if (streq (param, "xmax"))
+ return (IC_XMAX(ic))
+ else if (streq (param, "low"))
+ return (IC_LOW(ic))
+ else if (streq (param, "high"))
+ return (IC_HIGH(ic))
+ else if (streq (param, "grow"))
+ return (IC_GROW(ic))
+ else if (streq (param, "fog"))
+ return (IC_FOG(ic))
+
+ call error (0, "ICFIT: Unknown parameter")
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicreject.x b/noao/imred/dtoi/hdicfit/hdicreject.x
new file mode 100644
index 00000000..82dd093c
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicreject.x
@@ -0,0 +1,39 @@
+# IC_REJECT -- Reject points with large residuals from the fit.
+#
+# The sigma of the fit residuals is calculated. The rejection thresholds
+# are set at low_reject*sigma and high_reject*sigma. Points outside the
+# rejection threshold are rejected from the fit and flagged in the rejpts
+# array. Finally, the remaining points are refit.
+
+procedure ic_rejectd (cv, x, y, w, rejpts, npts, low_reject, high_reject,
+ niterate, grow, nreject)
+
+pointer cv # Curve descriptor
+double x[npts] # Input ordinates
+double y[npts] # Input data values
+double w[npts] # Weights
+int rejpts[npts] # Points rejected
+int npts # Number of input points
+real low_reject, high_reject # Rejection threshold
+int niterate # Number of rejection iterations
+real grow # Rejection radius
+int nreject # Number of points rejected
+
+int i, newreject
+
+begin
+ # Initialize rejection.
+ nreject = 0
+ call amovki (NO, rejpts, npts)
+
+ if (niterate <= 0)
+ return
+
+ # Find deviant points.
+ do i = 1, niterate {
+ call ic_deviantd (cv, x, y, w, rejpts, npts, low_reject,
+ high_reject, grow, YES, nreject, newreject)
+ if (newreject == 0)
+ break
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicshow.x b/noao/imred/dtoi/hdicfit/hdicshow.x
new file mode 100644
index 00000000..521f04d3
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicshow.x
@@ -0,0 +1,52 @@
+include <pkg/gtools.h>
+include "hdicfit.h"
+
+# IC_SHOW -- Show the values of the parameters.
+
+procedure ic_show (ic, file, gt)
+
+pointer ic # ICFIT pointer
+char file[ARB] # Output file
+pointer gt # GTOOLS pointer
+
+int fd
+pointer str
+int open()
+long clktime()
+errchk open, malloc
+
+begin
+ fd = open (file, APPEND, TEXT_FILE)
+ call malloc (str, SZ_LINE, TY_CHAR)
+
+ call cnvtime (clktime(0), Memc[str], SZ_LINE)
+ call fprintf (fd, "\n# %s\n")
+ call pargstr (Memc[str])
+
+ call gt_gets (gt, GTTITLE, Memc[str], SZ_LINE)
+ call fprintf (fd, "# %s\n")
+ call pargstr (Memc[str])
+
+ call gt_gets (gt, GTYUNITS, Memc[str], SZ_LINE)
+ if (Memc[str] != EOS) {
+ call fprintf (fd, "fit units = %s\n")
+ call pargstr (Memc[str])
+ }
+
+ call ic_gstr (ic, "function", Memc[str], SZ_LINE)
+ call fprintf (fd, "function = %s\n")
+ call pargstr (Memc[str])
+
+ call fprintf (fd, "order = %d\n")
+ call pargi (IC_ORDER(ic))
+
+ call ic_gstr (ic, "transform", Memc[str], SZ_LINE)
+ call fprintf (fd, "transform = %s\n")
+ call pargstr (Memc[str])
+
+ call fprintf (fd, "fog = %g\n")
+ call pargr (IC_FOG(ic))
+
+ call mfree (str, TY_CHAR)
+ call close (fd)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicsort.x b/noao/imred/dtoi/hdicfit/hdicsort.x
new file mode 100644
index 00000000..16d6e0a6
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicsort.x
@@ -0,0 +1,38 @@
+# HDIC_SORT -- Sort the log exposure, density and weight information in order
+# of increasing density value.  The sorting is done in place.  The data
+# arrays are assumed to be matched on input, that is, exposure[i] corresponds
+# to density[i] with weight[i] (and userwts[i]) for all array entries.
+
+procedure hdic_sort (density, exposure, weights, userwts, whydel, sdev, nvals)
+
+double density[nvals] # Density array
+double exposure[nvals] # Log exposure array
+double weights[nvals] # Weights array
+double userwts[nvals] # Reference weights array
+int whydel[nvals] # Flag array of reasons for deletion
+double sdev[nvals] # Array of standard deviations
+int nvals # Number of values to sort
+
+int i, j
+double temp
+define swap {temp=$1;$1=$2;$2=temp}
+int itemp
+define iswap {itemp=$1;$1=$2;$2=itemp}
+
+begin
+	# Bubble sort - inefficient, but sorting is done infrequently on
+	# an expected small sample size (typically 16 points).
+
+ for (i = nvals; i > 1; i = i - 1)
+ for (j = 1; j < i; j = j + 1)
+ if (density [j] > density[j+1]) {
+
+ # Out of order; exchange values
+ swap (exposure[j], exposure[j+1])
+ swap ( density[j], density[j+1])
+ swap ( weights[j], weights[j+1])
+ swap ( userwts[j], userwts[j+1])
+ iswap ( whydel[j], whydel[j+1])
+ swap ( sdev[j], sdev[j+1])
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/hdictrans.x b/noao/imred/dtoi/hdicfit/hdictrans.x
new file mode 100644
index 00000000..0ee89037
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdictrans.x
@@ -0,0 +1,155 @@
+include <mach.h>
+include "hdicfit.h"
+
+# HDIC_TRANSFORM -- Transform density to independent variable of fit. The
+# desired transform is stored in the ic structure. A vector of x values
+# is returned, as is a possibly modified weights array. The minimum and
+# maximum limits of the fit are updated in the ic structure; the labels
+# are set also when IC_NEWTRANSFORM = YES. The fog value is subtracted
+# from the input density array and the transform performed.
+
+procedure hdic_transform (ic, density, userwts, xout, wts, whydel, npts)
+
+pointer ic # Pointer to ic structure
+double density[npts] # Array of original density values
+double userwts[npts] # Array of original weights values
+double xout[npts] # Transformed density above fog (returned)
+double wts[npts] # Input weights array
+int whydel[npts] # Reason for deletion array
+int npts # The number of density points - maybe changed on output
+
+int i
+pointer denaf, sp
+double fog, xxmin, xxmax, dval
+bool fp_equald()
+real ic_getr()
+include "hdic.com"
+
+begin
+ # Allocate space for density above fog array
+ call smark (sp)
+ call salloc (denaf, npts, TY_DOUBLE)
+
+ fog = double (ic_getr (ic, "fog"))
+ call asubkd (density, fog, Memd[denaf], npts)
+
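+	# For the logarithmic transforms, points at or below zero density
+	# above fog cannot be transformed; they are flagged as deleted by
+	# the program (PDELETE) so they can be restored if the fog value
+	# or transform changes.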
+ switch (IC_TRANSFORM(ic)) {
+ case HD_NONE:
+ do i = 1, npts {
+ xout[i] = Memd[denaf+i-1]
+ # In every case, if the point was deleted by the program,
+ # restore it.
+ if (whydel[i] == PDELETE) {
+ wts[i] = userwts[i]
+ whydel[i] = NDELETE
+ }
+ }
+
+ call ic_pstr (ic, "xlabel", "Density above Fog")
+ xxmin = MIN_DEN - fog
+ xxmax = maxden
+ call ic_putr (ic, "xmin", real (xxmin))
+ call ic_putr (ic, "xmax", real (xxmax))
+
+ case HD_LOGO:
+ call ic_pstr (ic, "xlabel", "Log Opacitance: Log (10**Den - 1)")
+ xxmin = log10 ((10. ** (MIN_DEN)) - 1.0)
+ xxmax = log10 ((10. ** (maxden)) - 1.0)
+ call ic_putr (ic, "xmin", real (xxmin))
+ call ic_putr (ic, "xmax", real (xxmax))
+
+ do i = 1, npts {
+ dval = Memd[denaf+i-1]
+ if (dval < 0.0D0 || (fp_equald (dval, 0.0D0))) {
+ xout[i] = dval
+ wts[i] = 0.0D0
+ whydel[i] = PDELETE
+
+ } else {
+ xout[i] = log10 ((10. ** (dval)) - 1.0)
+
+ # If point had been deleted, find out why. It affects the
+ # weights value returned. Only if the point was previously
+ # deleted by the program, restore it; otherwise, leave it
+ # alone.
+
+ if (fp_equald (wts[i], 0.0D0)) {
+ if (whydel[i] == PDELETE) {
+ wts[i] = userwts[i]
+ whydel[i] = NDELETE
+ }
+ } else
+ wts[i] = userwts[i]
+ }
+ }
+
+ case HD_K75:
+ call ic_pstr (ic, "xlabel", "Den + 0.75 * Log (1 - (10 ** -Den))")
+ xxmin = MIN_DEN + 0.75 * log10 (1.0 - (10. ** (-MIN_DEN)))
+ xxmax = maxden + 0.75 * log10 (1.0 - (10. ** (-maxden)))
+ call ic_putr (ic, "xmin", real (xxmin))
+ call ic_putr (ic, "xmax", real (xxmax))
+
+ do i = 1, npts {
+ dval = Memd[denaf+i-1]
+ if (dval < 0.0D0 || (fp_equald (dval, 0.0D0))) {
+ xout[i] = dval
+ wts[i] = 0.0D0
+ whydel[i] = PDELETE
+
+ } else {
+ xout[i] = dval + 0.75 * log10 (1.0 - (10.** (-dval)))
+
+ # If point had been deleted, find out why. It affects the
+ # weights value returned. Only if the point was previously
+ # deleted by the program, restore it; otherwise, leave it
+ # alone.
+
+ if (fp_equald (wts[i], 0.0D0)) {
+		    if (whydel[i] == PDELETE) {
+ wts[i] = userwts[i]
+ whydel[i] = NDELETE
+ }
+ } else
+ wts[i] = userwts[i]
+ }
+ }
+
+ case HD_K50:
+ call ic_pstr (ic, "xlabel", "Den + 0.50 * Log (1 - (10 ** -Den))")
+ xxmin = MIN_DEN + 0.50 * log10 (1.0 - (10. ** (-MIN_DEN)))
+ xxmax = maxden + 0.50 * log10 (1.0 - (10. ** (-maxden)))
+ call ic_putr (ic, "xmin", real (xxmin))
+ call ic_putr (ic, "xmax", real (xxmax))
+
+ do i = 1, npts {
+ dval = Memd[denaf+i-1]
+ if (dval < 0.0D0 || (fp_equald (dval, 0.0D0))) {
+ xout[i] = dval
+ wts[i] = 0.0D0
+ whydel[i] = PDELETE
+
+ } else {
+ xout[i] = dval + 0.50 * log10 (1.0 - (10.** (-dval)))
+
+ # If point had been deleted, find out why. It affects the
+ # weights value returned. Only if the point was previously
+ # deleted by the program, restore it; otherwise, leave it
+ # alone.
+
+ if (fp_equald (wts[i], 0.0D0)) {
+		    if (whydel[i] == PDELETE) {
+ wts[i] = userwts[i]
+ whydel[i] = NDELETE
+ }
+ } else
+ wts[i] = userwts[i]
+ }
+ }
+
+ default:
+ call eprintf ("Unrecognizable Transform\n")
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/dtoi/hdicfit/hdicvshow.x b/noao/imred/dtoi/hdicfit/hdicvshow.x
new file mode 100644
index 00000000..e62f6522
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/hdicvshow.x
@@ -0,0 +1,155 @@
+include <math/curfit.h>
+include "hdicfit.h"
+
+# IC_VSHOW -- Show fit parameters in verbose mode.
+
+procedure ic_vshowd (ic, file, cv, x, y, wts, npts, gt)
+
+pointer ic # ICFIT pointer
+char file[ARB] # Output file
+pointer cv # Curfit pointer
+double x[ARB] # Ordinates
+double y[ARB] # Abscissas
+double wts[ARB] # Weights
+int npts # Number of data points
+pointer gt # Graphics tools pointer
+
+double chisqr, rms
+int i, n, deleted, ncoeffs, fd
+pointer sp, fit, wts1, coeffs, errors
+
+int dcvstati(), open()
+double ic_rmsd()
+errchk open()
+
+begin
+ # Do the standard ic_show option, then add on the verbose part.
+ call ic_show (ic, file, gt)
+
+ if (npts == 0) {
+ call eprintf ("Incomplete output - no data points for fit\n")
+ return
+ }
+
+ # Open the output file.
+ fd = open (file, APPEND, TEXT_FILE)
+
+ # Determine the number of coefficients and allocate memory.
+ ncoeffs = dcvstati (cv, CVNCOEFF)
+
+ call smark (sp)
+ call salloc (coeffs, ncoeffs, TY_DOUBLE)
+ call salloc (errors, ncoeffs, TY_DOUBLE)
+
+ if (npts == IC_NFIT(ic)) {
+ # Allocate memory for the fit.
+ n = npts
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (wts, Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Get the coefficients and compute the errors.
+ call dcvvector (cv, x, Memd[fit], n)
+ call dcvcoeff (cv, Memd[coeffs], ncoeffs)
+ call dcverrors (cv, y, Memd[wts1], Memd[fit], n, chisqr,
+ Memd[errors])
+ rms = ic_rmsd (x, y, Memd[fit], Memd[wts1], n)
+
+ } else {
+ # Allocate memory for the fit.
+ n = IC_NFIT(ic)
+ call salloc (fit, n, TY_DOUBLE)
+ call salloc (wts1, n, TY_DOUBLE)
+
+ # Eliminate rejected points and count deleted points.
+ call amovd (Memd[IC_WTSFIT(ic)], Memd[wts1], n)
+ if (IC_NREJECT(ic) > 0) {
+ do i = 1, npts {
+ if (Memi[IC_REJPTS(ic)+i-1] == YES)
+ Memd[wts1+i-1] = 0.
+ }
+ }
+ deleted = 0
+ do i = 1, n {
+ if (wts[i] == 0.)
+ deleted = deleted + 1
+ }
+
+ # Get the coefficients and compute the errors.
+ call dcvvector (cv, Memd[IC_XFIT(ic)], Memd[fit], n)
+ rms = ic_rmsd (Memd[IC_XFIT(ic)], Memd[IC_YFIT(ic)],
+ Memd[fit], Memd[wts1], n)
+ call dcvcoeff (cv, Memd[coeffs], ncoeffs)
+ call dcverrors (cv, Memd[IC_YFIT(ic)], Memd[wts1], Memd[fit],
+ n, chisqr, Memd[errors])
+ }
+
+ # Print the error analysis.
+ call fprintf (fd, "total points = %d\n")
+ call pargi (npts)
+ call fprintf (fd, "deleted = %d\n")
+ call pargi (deleted)
+ call fprintf (fd, "RMS = %7.4g\n")
+ call pargd (rms)
+ call fprintf (fd, "square root of reduced chi square = %7.4g\n")
+ call pargd (sqrt (chisqr))
+
+	call fprintf (fd, "# \t coefficient\t error\n")
+ do i = 1, ncoeffs {
+ call fprintf (fd, "\t%10.4e\t%10.4e\n")
+ call pargd (Memd[coeffs+i-1])
+ call pargd (Memd[errors+i-1])
+ }
+
+ # Print x,y pairs and weights
+ call ic_listxywd (fd, cv, x, y, wts, npts)
+
+ call sfree (sp)
+ call close (fd)
+end
+
+
+# IC_LISTXYW -- List data as x,y pairs on output with their weights. Used
+# for verbose show procedure. The untransformed density is also output,
+# regardless of what transformation may have been applied.
+
+procedure ic_listxywd (fd, cv, xvals, yvals, weights, nvalues)
+
+int fd # File descriptor of output file
+pointer cv # Pointer to curfit structure
+int nvalues # Number of data values
+double xvals[nvalues] # Array of x data values
+double yvals[nvalues] # Array of y data values
+double weights[nvalues] # Array of weight values
+
+int i
+double dcveval()
+include "hdic.com"
+
+begin
+ call fprintf (fd,"\n#%15t Density %27t X %39t Yfit %51t LogE %63tWts\n")
+
+ do i = 1, nvalues {
+ call fprintf (fd,
+ "%2d %15t%-12.7f%27t%-12.7f%39t%-12.7f%51t%-12.7f%63t%-12.7f\n")
+ call pargi (i)
+ call pargd (Memd[den+i-1])
+ call pargd (xvals[i])
+ call pargd (dcveval (cv, xvals[i]))
+ call pargd (yvals[i])
+ call pargd (weights[i])
+ }
+end
diff --git a/noao/imred/dtoi/hdicfit/mkpkg b/noao/imred/dtoi/hdicfit/mkpkg
new file mode 100644
index 00000000..5fc5031f
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/mkpkg
@@ -0,0 +1,37 @@
+# HDICFIT package. *** Modified from icgfit package for DTOI applications ***
+
+update:
+ $update libhdic.a
+
+libhdic.a:
+
+ hdicclean.x hdicfit.h <pkg/rg.h>
+ hdicdeviant.x <mach.h> <math/curfit.h>
+ hdicdosetup.x hdicfit.h <math/curfit.h>
+ hdicerrors.x hdicfit.h <math/curfit.h>
+ hdicfit.x hdicfit.h <math/curfit.h>
+ hdicgaxes.x hdicfit.h <pkg/gtools.h>
+ hdicgcolon.x hdicfit.h <error.h> <gset.h>
+ hdicgdelete.x hdicfit.h <gset.h> <mach.h> <pkg/gtools.h>
+ hdicgfit.x hdicfit.h <error.h> <pkg/gtools.h> <mach.h> <gset.h>
+ hdicggraph.x hdicfit.h hdic.com <gset.h> <pkg/gtools.h> <mach.h>
+ hdicgnearest.x <mach.h> <pkg/gtools.h>
+ hdicgparams.x hdicfit.h <pkg/gtools.h>
+ hdicgsample.x hdicfit.h <gset.h> <pkg/gtools.h> <pkg/rg.h>
+ hdicguaxes.x hdic.com hdicfit.h
+ hdicgundel.x hdicfit.h <gset.h> <mach.h> <pkg/gtools.h>
+ hdicguser.x
+ hdicparams.x hdicfit.h
+ hdicreject.x
+ hdicshow.x hdicfit.h <pkg/gtools.h>
+ hdicvshow.x hdicfit.h hdic.com <math/curfit.h>
+
+ hdictrans.x hdicfit.h <mach.h> hdic.com
+ hdicgvec.x hdicfit.h <mach.h> hdic.com
+ hdicadd.x hdicfit.h
+ hdicinit.x hdic.com hdicfit.h <mach.h>
+ hdicsort.x
+ userfcn.x <error.h>
+ hdicgredraw.x <gset.h>
+ hdicebars.x hdicfit.h hdic.com <pkg/gtools.h> <gset.h> <mach.h>
+ ;
diff --git a/noao/imred/dtoi/hdicfit/userfcn.x b/noao/imred/dtoi/hdicfit/userfcn.x
new file mode 100644
index 00000000..d5dba4ed
--- /dev/null
+++ b/noao/imred/dtoi/hdicfit/userfcn.x
@@ -0,0 +1,37 @@
+include <error.h>
+
+# HD_POWERR -- Construct the basis functions for a power series function.
+# Invoked from curfit as a user function. Real version.
+
+procedure hd_powerr (x, order, k1, k2, basis)
+
+real x # array of data points
+int order # order of polynomial, order = 1, constant
+real k1, k2 # normalizing constants - unused
+real basis[ARB] # basis functions
+
+int i
+
+begin
+ do i = 1, order
+ iferr (basis[i] = x ** (i-1))
+ call erract (EA_FATAL)
+end
+
+
+# HD_POWERD -- Double version of above.
+
+procedure hd_powerd (x, order, k1, k2, basis)
+
+double x # array of data points
+int order # order of polynomial, order = 1, constant
+double k1, k2 # normalizing constants - unused
+double basis[ARB] # basis functions
+
+int i
+
+begin
+ do i = 1, order
+ iferr (basis[i] = x ** (i-1))
+ call erract (EA_FATAL)
+end
diff --git a/noao/imred/dtoi/hdshift.par b/noao/imred/dtoi/hdshift.par
new file mode 100644
index 00000000..c1fdf5c9
--- /dev/null
+++ b/noao/imred/dtoi/hdshift.par
@@ -0,0 +1,2 @@
+# Cl parameters for task hdshift are:
+database,s,a,"",,,List of database files
diff --git a/noao/imred/dtoi/hdshift.x b/noao/imred/dtoi/hdshift.x
new file mode 100644
index 00000000..cde846ad
--- /dev/null
+++ b/noao/imred/dtoi/hdshift.x
@@ -0,0 +1,184 @@
+include <math/curfit.h>
+
+# T_HDSHIFT -- Task hdshift in the dtoi package. This task is provided
+# to support Kormendy's method of combining related characteristic curves.
+# A zero point shift in log exp unique to each set of spots is calculated and
+# subtracted. A single curve is fit to the combined, shifted data in
+# a separate task (hdfit).
+
+procedure t_hdshift ()
+
+pointer sp, de, fun, save, db, cv, ps_coeff, den, exp, cvf
+int fd, rec, nsave, ncoeff, nvalues, i, nfile, junk
+real a0, ref_a0
+
+pointer ddb_map()
+int clpopni(), ddb_locate(), ddb_geti(), cvstati(), strncmp(), clgfil()
+
+begin
+ # Allocate space on stack for string buffers
+ call smark (sp)
+ call salloc (de, SZ_FNAME, TY_CHAR)
+ call salloc (cvf, SZ_FNAME, TY_CHAR)
+ call salloc (fun, SZ_FNAME, TY_CHAR)
+
+ # Get list of the database names. The curfit information is retrieved
+ # from the first file in the list, the list is then rewound.
+
+ fd = clpopni ("database")
+ junk = clgfil (fd, Memc[cvf], SZ_FNAME)
+ call clprew (fd)
+
+ # Get coefficients of common fit from cv_file
+ db = ddb_map (Memc[cvf], READ_ONLY)
+ rec = ddb_locate (db, "cv")
+ nsave = ddb_geti (db, rec, "save")
+ call salloc (save, nsave, TY_REAL)
+ call ddb_gar (db, rec, "save", Memr[save], nsave, nsave)
+ call ddb_gstr (db, rec, "function", Memc[fun], SZ_LINE)
+
+ call cvrestore (cv, Memr[save])
+ ncoeff = cvstati (cv, CVNCOEFF)
+ call salloc (ps_coeff, ncoeff, TY_REAL)
+
+ if (strncmp (Memc[fun], "power", 5) == 0)
+ call cvcoeff (cv, Memr[ps_coeff], ncoeff)
+ else
+ call cvpower (cv, Memr[ps_coeff], ncoeff)
+
+ do i = 1, ncoeff {
+ call eprintf ("%d %.7g\n")
+ call pargi (i)
+ call pargr (Memr[ps_coeff+i-1])
+ }
+
+ call ddb_unmap (db)
+
+ nfile = 0
+ while (clgfil (fd, Memc[de], SZ_FNAME) != EOF) {
+ db = ddb_map (Memc[de], READ_ONLY)
+ call hds_read (db, den, exp, nvalues)
+ call hds_calc (den, exp, nvalues, Memr[ps_coeff], ncoeff, a0)
+ nfile = nfile + 1
+ if (nfile == 1)
+ ref_a0 = a0
+ a0 = a0 - ref_a0
+
+ call printf ("file %s: subtracting zero point a0 = %.7g\n")
+ call pargstr (Memc[de])
+ call pargr (a0)
+
+ # Write new log exposure information to database
+	    call ddb_unmap (db)
+	    db = ddb_map (Memc[de], APPEND)
+ call hds_wdb (db, exp, nvalues, a0)
+ call mfree (den, TY_REAL)
+ call mfree (exp, TY_REAL)
+ call ddb_unmap (db)
+ }
+
+ call clpcls (fd)
+ call sfree (sp)
+end
+
+
+# HDS_READ -- Read the density and exposure values from the database file.
+# The density above fog and log exposure values are returned, as well as
+# the number of data pairs read.
+
+procedure hds_read (db, den, exp, nvalues)
+
+pointer db # Pointer to input database file
+pointer den # Pointer to density array - returned
+pointer exp # Pointer to exposure array - returned
+int nvalues # Number of data pairs read - returned
+
+real fog
+int nden, nexp, rec
+int ddb_locate(), ddb_geti()
+real ddb_getr()
+
+begin
+ # Get fog value to be subtracted from density
+ rec = ddb_locate (db, "fog")
+ fog = ddb_getr (db, rec, "density")
+
+ # Get density array
+ rec = ddb_locate (db, "density")
+ nden = ddb_geti (db, rec, "den_val")
+ call malloc (den, nden, TY_REAL)
+ call ddb_gar (db, rec, "den_val", Memr[den], nden, nden)
+ call asubkr (Memr[den], fog, Memr[den], nden)
+
+ # Get exposure array
+ rec = ddb_locate (db, "exposure")
+ nexp = ddb_geti (db, rec, "log_exp")
+ call malloc (exp, nexp, TY_REAL)
+ call ddb_gar (db, rec, "log_exp", Memr[exp], nexp, nexp)
+
+ nvalues = min (nden, nexp)
+end
+
+
+# HDS_CALC -- Calculate the individual shift, a0.
+
+procedure hds_calc (den, exp, nvalues, ps_coeff, ncoeff, a0)
+
+pointer den			# Pointer to density above fog array
+pointer exp			# Pointer to log exposure array
+int nvalues			# Number of data pairs in sample
+real ps_coeff[ARB]		# Power series coefficients of common fit
+int ncoeff			# Number of coefficients
+real a0			# Zero point shift - returned
+
+int i
+real yavg, ycalc, xavg
+
+begin
+ # Calculate average density and log exposure values
+ xavg = 0.0
+ yavg = 0.0
+
+ do i = 1, nvalues {
+ xavg = xavg + Memr[den+i-1]
+ yavg = yavg + Memr[exp+i-1]
+ }
+
+ xavg = xavg / real (nvalues)
+ yavg = yavg / real (nvalues)
+
+ ycalc = 0.0
+ do i = 2, ncoeff
+ ycalc = ycalc + ps_coeff[i] * (xavg ** real (i-1))
+
+ # Subtraction yields the zero point shift in question
+ a0 = yavg - ycalc
+end
+
+
+# HDS_WDB -- Write shifted log exposure values to database.
+
+procedure hds_wdb (db, exp, nvalues, a0)
+
+pointer db # Pointer to database
+pointer exp # Pointer to array of exposure values
+int nvalues # Number of exposure values in sample
+real a0 # Shift to be subtracted
+
+pointer sp, expsub
+
+begin
+ call smark (sp)
+ call salloc (expsub, nvalues, TY_REAL)
+
+ call ddb_ptime (db)
+ call ddb_prec (db, "exposure")
+
+ call eprintf ("a0 = %g\n")
+ call pargr (a0)
+
+ call asubkr (Memr[exp], a0, Memr[expsub], nvalues)
+ call ddb_par (db, "log_exp", Memr[expsub], nvalues)
+
+ call ddb_putr (db, "A0 shift", a0)
+ call sfree (sp)
+end
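
For reference, the zero-point arithmetic in HDS_CALC and T_HDSHIFT reduces to a few lines: form the mean density and mean log exposure of each spot set, evaluate the common fit (without its constant term) at the mean density, and take the difference as that set's a0; the first database in the list supplies the reference a0 that is subtracted from the others before the shifted log exposures are written back. A minimal NumPy sketch of the same arithmetic, with invented coefficients and data (none of these names belong to the package):

    import numpy as np

    def zero_point(density, log_exp, coeff):
        # a0 for one spot set; coeff[0] is the constant term and is ignored,
        # matching the i = 2, ncoeff loop in hds_calc.
        xavg = np.mean(density)
        yavg = np.mean(log_exp)
        ycalc = sum(c * xavg**i for i, c in enumerate(coeff) if i >= 1)
        return yavg - ycalc

    coeff = [-1.75, 0.73, -0.24, 0.035]          # example power-series fit
    sets = [(np.array([0.5, 1.0, 1.5]), np.array([-1.2, -0.7, -0.2])),
            (np.array([0.6, 1.1, 1.6]), np.array([-0.9, -0.4,  0.1]))]

    a0_ref = zero_point(sets[0][0], sets[0][1], coeff)
    for dens, lexp in sets:
        shift = zero_point(dens, lexp, coeff) - a0_ref
        shifted_log_exp = lexp - shift           # what hds_wdb records
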
diff --git a/noao/imred/dtoi/hdtoi.par b/noao/imred/dtoi/hdtoi.par
new file mode 100644
index 00000000..e26cde83
--- /dev/null
+++ b/noao/imred/dtoi/hdtoi.par
@@ -0,0 +1,11 @@
+# Cl parameters are:
+input,f,a,,,,List of images to be transformed
+output,f,a,,,,List of output image names
+database,f,a,,,,Database containing fit parameters
+fog,s,h,"",,,Value of fog - read from database if unspecified
+sigma,r,h,3.0,,,Rejection criteria for determining mean fog
+floor,r,h,0.0,,,Value assigned to levels below fog
+ceiling,r,h,30000.,,,Scale highest density to this intensity value
+datatype,s,h,r,,,Pixel type of output image
+option,s,h,mean,"mean|median",,Choice of fog algorithm (mean or median)
+verbose,b,h,yes,,,Print log of processing to STDOUT
diff --git a/noao/imred/dtoi/hdtoi.x b/noao/imred/dtoi/hdtoi.x
new file mode 100644
index 00000000..5b550ea8
--- /dev/null
+++ b/noao/imred/dtoi/hdtoi.x
@@ -0,0 +1,407 @@
+include <imhdr.h>
+include <mach.h>
+include <math/curfit.h>
+include <error.h>
+include "hdicfit/hdicfit.h"
+
+# T_HDTOI -- transform an image from density to intensity, according
+# to an hd curve described in an input database. A look up table of
+# all possible values is generated with the curfit package, and
+# then the image is transformed line by line. A fog value is subtracted
+# from the image prior to transformation, and it can be entered as either
+# a number or a list of fog images from which the fog value is calculated.
+# If a fog value has not been entered by the user, it is read from the database.
+
+procedure t_hdtoi ()
+
+pointer sp, cv, fog, db, im_in, lut, im_out, imageout, imagein, option
+bool verbose
+int minval, maxval, in_list, rec, ip, out_list, fog_list, ngpix
+int datatype, nluv, updatedb, nfpix
+real sigma, floor, scale, fog_val, sdev
+
+char clgetc()
+bool streq(), clgetb()
+pointer ddb_map(), immap()
+int imtopenp(), ddb_locate(), ctor(), imtlen(), imtgetim()
+int get_data_type(), imtopen()
+real clgetr(), ddb_getr()
+
+begin
+ call smark (sp)
+ call salloc (cv, SZ_FNAME, TY_CHAR)
+ call salloc (fog, SZ_LINE, TY_CHAR)
+ call salloc (imageout, SZ_FNAME, TY_CHAR)
+ call salloc (imagein, SZ_FNAME, TY_CHAR)
+
+ # Get cl parameters
+ in_list = imtopenp ("input")
+ out_list = imtopenp ("output")
+ call clgstr ("database", Memc[cv], SZ_FNAME)
+ call clgstr ("fog", Memc[fog], SZ_LINE)
+ sigma = clgetr ("sigma")
+ floor = clgetr ("floor")
+ verbose = clgetb ("verbose")
+ updatedb = NO
+
+ datatype = get_data_type (clgetc ("datatype"))
+ if (datatype == ERR)
+ call eprintf ("Using input pixel datatype for output\n")
+
+ db = ddb_map (Memc[cv], READ_ONLY)
+ rec = ddb_locate (db, "common")
+ scale = ddb_getr (db, rec, "scale")
+
+ # If not specified by user, get fog value from database. User can
+ # specify fog as a real number or a list of fog file names.
+
+ if (streq (Memc[fog], "")) {
+ rec = ddb_locate (db, "fog")
+ fog_val = ddb_getr (db, rec, "density")
+ } else {
+ ip = 1
+ if (ctor (Memc[fog], ip, fog_val) == 0) {
+ if (verbose)
+ call eprintf ("Calculating fog value ...\n")
+ fog_list = imtopen (Memc[fog])
+ call salloc (option, SZ_FNAME, TY_CHAR)
+ call clgstr ("option", Memc[option], SZ_FNAME)
+ call hd_fogcalc (fog_list, fog_val, sdev, ngpix, scale, sigma,
+ Memc[option], nfpix)
+
+ call eprintf ("Fog density = %f, sdev = %f, ngpix = %d\n")
+ call pargr (fog_val)
+ call pargr (sdev)
+ call pargi (ngpix)
+
+ updatedb = YES
+ }
+ }
+
+ # Generate look up table. First, the range of input values to
+ # calculate output values for must be determined. Arguments
+ # minval and maxval are integers because we assume all input
+ # images are short integers.
+
+ call hd_glimits (in_list, minval, maxval)
+ nluv = (maxval - minval) + 1
+ call salloc (lut, nluv, TY_REAL)
+
+ if (verbose)
+ call eprintf ("Generating look up table ...\n")
+ call hd_wlut (db, Memr[lut], minval, maxval, fog_val, floor)
+
+ # Loop through input images, applying transform
+ if (imtlen (in_list) != imtlen (out_list)) {
+ call imtclose (in_list)
+ call imtclose (out_list)
+ call error (0, "Number of input and output images not the same")
+ }
+
+ while ((imtgetim (in_list, Memc[imagein], SZ_FNAME) != EOF) &&
+ (imtgetim (out_list, Memc[imageout], SZ_FNAME) != EOF)) {
+
+ iferr (im_in = immap (Memc[imagein], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ iferr (im_out = immap (Memc[imageout], NEW_COPY, im_in)) {
+ call imunmap (im_in)
+ call erract (EA_WARN)
+ next
+ }
+
+ if (verbose) {
+ call eprintf ("Density to Intensity Transform: %s ===> %s\n")
+ call pargstr (Memc[imagein])
+ call pargstr (Memc[imageout])
+ }
+
+ call hd_transform (im_in, im_out, Memr[lut], nluv, minval, datatype)
+
+ call imunmap (im_in)
+ call imunmap (im_out)
+ }
+
+ call ddb_unmap (db)
+ call imtclose (in_list)
+ call imtclose (out_list)
+
+ if (updatedb == YES) {
+ db = ddb_map (Memc[cv], APPEND)
+ # Write fog information to database as single record
+ call ddb_prec (db, "fog")
+ call ddb_putr (db, "density", fog_val)
+ call ddb_putr (db, "sdev", sdev)
+ call ddb_puti (db, "ngpix", ngpix)
+ call ddb_pstr (db, "option", Memc[option])
+ call ddb_unmap (db)
+ }
+
+ call sfree (sp)
+end
+
+
+# HD_TRANSFORM -- Apply transformation to image.
+
+procedure hd_transform (im, im_out, lu_table, nvals, minval, datatype)
+
+pointer im # Input image header pointer
+pointer im_out # Transformed image header pointer
+real lu_table[ARB] # Array of intensity values
+int nvals # Number of values in the lut
+int minval # Offset to first value in look up table
+int datatype # Pixel type on output
+
+int j, ncols
+pointer ptr_in, ptr_out, sp, luti
+pointer impl2r(), imgl2i(), impl2i()
+
+begin
+ if (datatype == ERR)
+ IM_PIXTYPE(im_out) = IM_PIXTYPE(im)
+ else
+ IM_PIXTYPE(im_out) = datatype
+
+ ncols = IM_LEN(im,1)
+
+ switch (datatype) {
+ case TY_REAL, TY_DOUBLE:
+ # Loop over input image rows. The look up table is left as
+ # a real array and a floating point image is written out.
+
+ do j = 1, IM_LEN(im,2) {
+ ptr_in = imgl2i (im, j)
+ ptr_out = impl2r (im_out, j)
+ call asubki (Memi[ptr_in], minval, Memi[ptr_in], ncols)
+ call alutr (Memi[ptr_in], Memr[ptr_out], ncols, lu_table)
+ }
+
+ default:
+ # Loop over input image rows. The look up table is truncated
+ # to type integer.
+
+ call smark (sp)
+ call salloc (luti, nvals, TY_INT)
+ call achtri (lu_table, Memi[luti], nvals)
+
+ do j = 1, IM_LEN(im,2) {
+ ptr_in = imgl2i (im, j)
+ ptr_out = impl2i (im_out, j)
+ call asubki (Memi[ptr_in], minval, Memi[ptr_in], ncols)
+ call aluti (Memi[ptr_in], Memi[ptr_out], ncols, Memi[luti])
+ }
+
+ call sfree (sp)
+ }
+end
+
+
+# HD_WLUT -- Write look up table, such that intensity = lut [a/d output].
+# An entry is made in the look up table for every possible input value,
+# from minval to maxval.
+
+procedure hd_wlut (db, lut, minval, maxval, fog_val, floor)
+
+pointer db # Pointer to database file
+real lut[ARB] # Pointer to look up table, which gets filled here
+int minval # Minimum value to transform
+int maxval # Maximum value to transform
+real fog_val # Fog value to be subtracted from densities
+real floor # Value assigned to densities below fog
+
+bool zerofloor
+pointer sp, trans, fcn, save, cv, dens, ind_var, value
+int rec, nsave, i, function, nneg, npos, nvalues
+real scale, maxcvval, factor, maxexp, maxden, maxdenaf
+
+bool fp_equalr()
+int strncmp(), ddb_locate(), ddb_geti(), cvstati()
+real ddb_getr(), clgetr(), cveval()
+extern hd_powerr()
+
+begin
+ call smark (sp)
+ call salloc (trans, SZ_FNAME, TY_CHAR)
+ call salloc (fcn, SZ_FNAME, TY_CHAR)
+
+ nvalues = (maxval - minval) + 1
+ call salloc (ind_var, nvalues, TY_REAL)
+ call salloc (dens, nvalues, TY_REAL)
+ call salloc (value, nvalues, TY_REAL)
+
+ rec = ddb_locate (db, "common")
+ scale = ddb_getr (db, rec, "scale")
+ maxden = ddb_getr (db, rec, "maxden")
+
+ rec = ddb_locate (db, "cv")
+ nsave = ddb_geti (db, rec, "save")
+ call salloc (save, nsave, TY_REAL)
+ call ddb_gar (db, rec, "save", Memr[save], nsave, nsave)
+ call ddb_gstr (db, rec, "transformation", Memc[trans], SZ_LINE)
+
+ call cvrestore (cv, Memr[save])
+ function = cvstati (cv, CVTYPE)
+
+ if (function == USERFNC) {
+	# Need to reestablish choice of user function
+ call ddb_gstr (db, rec, "function", Memc[fcn], SZ_FNAME)
+ if (strncmp (Memc[fcn], "power", 1) == 0)
+ call cvuserfnc (cv, hd_powerr)
+ else
+ call error (0, "Unknown user function in database")
+ }
+
+ maxdenaf = maxden - fog_val
+ call hd_aptrans (maxdenaf, maxcvval, 1, Memc[trans])
+ maxcvval = 10.0 ** (cveval (cv, maxcvval))
+ factor = clgetr ("ceiling") / maxcvval
+ maxexp = real (MAX_EXPONENT) - (log10 (factor) + 1.0)
+
+ zerofloor = false
+ if (fp_equalr (0.0, floor))
+ zerofloor = true
+
+ do i = 1, nvalues
+ Memr[value+i-1] = real (minval + i - 1)
+
+	# Scale all possible voltage values to density above fog
+ call altmr (Memr[value], Memr[dens], nvalues, scale, -fog_val)
+
+ # Find index of first value greater than MIN_DEN. Values less than
+ # this must be handled as the user specified with the floor parameter.
+
+ for (nneg=0; Memr[dens+nneg] < MIN_DEN; nneg=nneg+1)
+ ;
+ npos = nvalues - nneg
+
+ # Generate independent variable vector and then lut values. The
+ # logic is different if there are values below fog. Evaluating
+ # the polynomial fit with cvvector yields the log exposure. This
+ # is then converted to intensity and scaled by a user supplied factor.
+
+ if (nneg > 0) {
+ if (zerofloor) {
+ call amovkr (0.0, lut, nneg)
+ call hd_aptrans (Memr[dens+nneg],Memr[ind_var],npos,Memc[trans])
+ call cvvector (cv, Memr[ind_var], lut[nneg+1], npos)
+ call argtr (lut[nneg+1], npos, maxexp, maxexp)
+ do i = nneg+1, nvalues
+ lut[i] = (10. ** lut[i]) * factor
+
+ } else {
+ call amulkr (Memr[dens], -1.0, Memr[dens], nneg)
+
+ # Care must be taken so that no density of value 0.0 is
+ # passed to hd_aptrans. This would cause an overflow.
+
+	    do i = 1, nneg {
+		if (fp_equalr (Memr[dens+i-1], 0.0))
+		    Memr[dens+i-1] = MIN_DEN
+	    }
+
+ call hd_aptrans (Memr[dens],Memr[ind_var], nvalues, Memc[trans])
+ call cvvector (cv, Memr[ind_var], lut, nvalues)
+ call argtr (lut, nvalues, maxexp, maxexp)
+ do i = 1, nvalues
+ lut[i] = (10.0 ** lut[i]) * factor
+ call amulkr (lut, -1.0, lut, nneg)
+ }
+
+ } else {
+ call hd_aptrans (Memr[dens], Memr[ind_var], nvalues, Memc[trans])
+ call cvvector (cv, Memr[ind_var], lut, nvalues)
+ call argtr (lut, nvalues, maxexp, maxexp)
+ do i = 1, nvalues
+ lut[i] = (10.0 ** lut[i]) * factor
+ }
+
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# HD_APTRANS -- Apply transformation, generating a vector of independent
+# variables from a density vector. It is assumed all values in the
+# input density vector are valid and will not cause arithmetic errors.
+# No checking for out of bounds values is performed.
+
+procedure hd_aptrans (density, ind_var, nvalues, transform)
+
+real density[nvalues] # Density vector - input
+real ind_var[nvalues] # Ind variable vector - filled on output
+int nvalues # Length of vectors
+char transform[ARB] # String containing transformation type
+
+int i
+int strncmp()
+
+begin
+ if (strncmp (transform, "logopacitance", 1) == 0) {
+ do i = 1, nvalues
+ ind_var[i] = log10 ((10. ** density[i]) - 1.0)
+
+ } else if (strncmp (transform, "k75", 2) == 0) {
+ do i = 1, nvalues
+ ind_var[i] = density[i] + .75 * log10(1. - 10. ** (-density[i]))
+
+ } else if (strncmp (transform, "k50", 2) == 0) {
+ do i = 1, nvalues
+ ind_var[i] = density[i] + .50 * log10(1. - 10. ** (-density[i]))
+
+ } else if (strncmp (transform, "none", 1) == 0) {
+ do i = 1, nvalues
+ ind_var[i] = density[i]
+
+ } else
+ call error (0, "Unrecognized transformation in database file")
+end
+
+
+# HD_GLIMITS -- Determine the range of minimum and maximum values for a list
+# of images.
+
+procedure hd_glimits (in_list, minval, maxval)
+
+int in_list # File descriptor for list of images
+int minval # Smallest pixel value - returned
+int maxval # Largest pixel value - returned
+
+pointer im
+char image[SZ_FNAME]
+real current_min, current_max, min, max
+pointer immap()
+int imtgetim()
+errchk imtgetim, im_minmax, imtrew
+
+begin
+ current_min = MAX_REAL
+ current_max = EPSILONR
+
+ while (imtgetim (in_list, image, SZ_FNAME) != EOF) {
+ iferr (im = immap (image, READ_ONLY, 0))
+ # Just ignore it, warning will be printed by t_hdtoi
+ next
+
+ # Update min max values if necessary
+ if (IM_LIMTIME(im) < IM_MTIME(im))
+ call im_minmax (im, IM_MIN(im), IM_MAX(im))
+
+ min = IM_MIN(im)
+ max = IM_MAX(im)
+
+ if (min < current_min)
+ current_min = min
+
+ if (max > current_max)
+ current_max = max
+
+ call imunmap (im)
+ }
+
+ minval = int (current_min)
+ maxval = int (current_max)
+
+ call imtrew (in_list)
+end
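
Stripped of the IRAF image and database machinery, the look-up table logic in HD_WLUT, HD_APTRANS and HD_TRANSFORM amounts to: convert each possible raw value to density above fog, apply the chosen transform (log opacitance here), evaluate the restored curve to get log exposure, scale 10**logE so the maximum density maps to the requested ceiling, and then transform pixels by a single indexed lookup. A rough NumPy sketch under those assumptions; the cubic coefficients, scale and fog values are invented for illustration, and the curfit evaluation is replaced by an explicit polynomial:

    import numpy as np

    A = [-1.75, 0.73, -0.24, 0.035]              # stand-in for the restored fit

    def log_exposure(logo):
        return A[0] + A[1]*logo + A[2]*logo**2 + A[3]*logo**3

    def build_lut(minval, maxval, scale, fog, ceiling, floor=0.0, min_den=1e-4):
        raw = np.arange(minval, maxval + 1, dtype=float)
        dens = raw * scale - fog                 # density above fog
        logo = np.log10(10.0**np.clip(dens, min_den, None) - 1.0)
        inten = 10.0**log_exposure(logo)
        inten *= ceiling / inten[-1]             # densest value -> ceiling
        inten[dens < min_den] = floor            # below-fog values get 'floor'
        return inten

    def transform(pixels, lut, minval):
        # intensity = lut[raw value - minval], as in hd_transform
        return lut[np.asarray(pixels, dtype=int) - minval]

    lut = build_lut(1, 3072, scale=0.00151, fog=0.10, ceiling=30000.0)
    print(transform([50, 500, 3000], lut, minval=1))
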
diff --git a/noao/imred/dtoi/minmax.x b/noao/imred/dtoi/minmax.x
new file mode 100644
index 00000000..97d113f0
--- /dev/null
+++ b/noao/imred/dtoi/minmax.x
@@ -0,0 +1,73 @@
+include <imhdr.h>
+
+# IM_MINMAX -- Compute the minimum and maximum pixel values of an image.
+# Works for images of any dimensionality, size, or datatype, although
+# the min and max values can currently only be stored in the image header
+# as real values.
+
+procedure im_minmax (im, min_value, max_value)
+
+pointer im # image descriptor
+real min_value # minimum pixel value in image (out)
+real max_value # maximum pixel value in image (out)
+
+pointer buf
+bool first_line
+long v[IM_MAXDIM]
+short minval_s, maxval_s
+long minval_l, maxval_l
+real minval_r, maxval_r
+int imgnls(), imgnll(), imgnlr()
+errchk amovkl, imgnls, imgnll, imgnlr, alims, aliml, alimr
+
+begin
+ call amovkl (long(1), v, IM_MAXDIM) # start vector
+ first_line = true
+ min_value = INDEF
+ max_value = INDEF
+
+ switch (IM_PIXTYPE(im)) {
+ case TY_SHORT:
+ while (imgnls (im, buf, v) != EOF) {
+ call alims (Mems[buf], IM_LEN(im,1), minval_s, maxval_s)
+ if (first_line) {
+ min_value = minval_s
+ max_value = maxval_s
+ first_line = false
+ } else {
+ if (minval_s < min_value)
+ min_value = minval_s
+ if (maxval_s > max_value)
+ max_value = maxval_s
+ }
+ }
+ case TY_USHORT, TY_INT, TY_LONG:
+ while (imgnll (im, buf, v) != EOF) {
+ call aliml (Meml[buf], IM_LEN(im,1), minval_l, maxval_l)
+ if (first_line) {
+		min_value = minval_l
+		max_value = maxval_l
+ first_line = false
+ } else {
+ if (minval_l < min_value)
+ min_value = minval_l
+ if (maxval_l > max_value)
+ max_value = maxval_l
+ }
+ }
+ default:
+ while (imgnlr (im, buf, v) != EOF) {
+ call alimr (Memr[buf], IM_LEN(im,1), minval_r, maxval_r)
+ if (first_line) {
+ min_value = minval_r
+ max_value = maxval_r
+ first_line = false
+ } else {
+ if (minval_r < min_value)
+ min_value = minval_r
+ if (maxval_r > max_value)
+ max_value = maxval_r
+ }
+ }
+ }
+end
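
The per-line accumulation used here is the usual streaming min/max pattern: initialize from the first line, then fold each subsequent line in. In array terms it is equivalent to something like the following sketch (illustrative, not IRAF code):

    import numpy as np

    def stream_minmax(lines):
        # Running min/max over an iterable of 1-D pixel buffers.
        lo = hi = None
        for buf in lines:
            buf = np.asarray(buf)
            if lo is None:                       # first line initializes both
                lo, hi = buf.min(), buf.max()
            else:
                lo, hi = min(lo, buf.min()), max(hi, buf.max())
        return float(lo), float(hi)

    print(stream_minmax([[3, 7, 2], [9, 1, 4]]))   # (1.0, 9.0)
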
diff --git a/noao/imred/dtoi/mkpkg b/noao/imred/dtoi/mkpkg
new file mode 100644
index 00000000..80d5b737
--- /dev/null
+++ b/noao/imred/dtoi/mkpkg
@@ -0,0 +1,40 @@
+# Make the DTOI package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $call update@hdicfit
+ $update libpkg.a
+ $call dtoi
+ ;
+
+install:
+ $move xx_dtoi.e noaobin$x_dtoi.e
+ ;
+
+dtoi:
+ $omake x_dtoi.x
+ $link x_dtoi.o libpkg.a hdicfit/libhdic.a -lxtools -lcurfit\
+ -o xx_dtoi.e
+ ;
+
+libpkg.a:
+ database.x <ctotok.h> <ctype.h> <finfo.h> <time.h>
+ dematch.x <error.h>
+ hd_aravr.x <mach.h>
+ hdfit.x hdicfit/hdicfit.h <ctype.h> <error.h> <fset.h>\
+ <imhdr.h> <mach.h> <math/curfit.h> <pkg/gtools.h>\
+ <pkg/xtanswer.h>
+ hdshift.x <math/curfit.h>
+ hdtoi.x hdicfit/hdicfit.h <error.h> <imhdr.h> <mach.h>\
+ <math/curfit.h>
+ minmax.x <imhdr.h>
+ selftest.x <gio.h> <gset.h> <mach.h>
+ spotlist.x <error.h> <fset.h> <imhdr.h> <mach.h>
+ ;
diff --git a/noao/imred/dtoi/selftest.par b/noao/imred/dtoi/selftest.par
new file mode 100644
index 00000000..75445cf7
--- /dev/null
+++ b/noao/imred/dtoi/selftest.par
@@ -0,0 +1,8 @@
+# Parameters for task selftest
+nbits,i,q,12,,,Test data range
+device,s,h,"stdgraph",,,Output device
+verbose,b,h,no,,,"Print density, intensity values?"
+ceiling,r,h,30000.,,,Scale densest point to this intensity
+max_raw,i,q,0,,,Max raw data value
+scale,r,q,,0.0,,Raw value to density scale factor
+mode,s,h,ql
diff --git a/noao/imred/dtoi/selftest.x b/noao/imred/dtoi/selftest.x
new file mode 100644
index 00000000..3a7a4a4a
--- /dev/null
+++ b/noao/imred/dtoi/selftest.x
@@ -0,0 +1,290 @@
+include <gio.h>
+include <gset.h>
+include <mach.h>
+
+define A0 (-1.74743)
+define A1 (0.73)
+define A2 (-0.24)
+define A3 (0.035)
+define MAXDEN 6.0
+
+# T_SELFTEST -- a test procedure for the DTOI package. Two intensity vectors
+# are calculated in different ways and compared. A plot of the residuals is
+# shown. A plot showing the extent of truncation errors is also drawn. Two
+# standard ranges of data values are available: 12 bit, representing PDS
+# format data and 15 bit, representing the FITS data format available on the
+# PDS. Any other choice results in a small test, ranging from 1 - 144.
+
+procedure t_selftest
+
+bool verbose
+char device[SZ_FNAME]
+pointer sp, intk, intc, raw, den, gp
+int min_raw, max_raw, nvalues, i, nbits
+real scale, factor, ceiling
+
+bool clgetb()
+pointer gopen()
+int clgeti()
+real clgetr()
+
+begin
+ call smark (sp)
+
+ nbits = clgeti ("nbits")
+
+ switch (nbits) {
+ case 12:
+ min_raw = 1
+ max_raw = 3072
+ scale = 0.00151
+ case 15:
+ min_raw = 1
+ max_raw = 24576
+ scale = 4.65 / 24575.
+ case 0:
+ call eprintf ("Using test data range from 1 - 144\n")
+ min_raw = 1
+ max_raw = 144
+ scale = 0.0325
+ default:
+ call eprintf ("Unknown case: nbits = '%d', Please supply values:\n")
+ call pargi (nbits)
+ min_raw = 1
+ max_raw = clgeti ("max_raw")
+ # max density = 6.0. Density = raw value * scale.
+ call clputr ("scale.p_maximum", real (MAXDEN / max_raw))
+ call clputr ("scale.p_default", real (4.65 / (max_raw - 1)))
+ scale = clgetr ("scale")
+ }
+
+ call clgstr ("device", device, SZ_FNAME)
+ verbose = clgetb ("verbose")
+ ceiling = clgetr ("ceiling")
+
+ gp = gopen (device, NEW_FILE, STDGRAPH)
+
+ nvalues = max_raw - min_raw + 1
+ call salloc (intk, nvalues, TY_REAL)
+ call salloc (intc, nvalues, TY_REAL)
+ call salloc (den, nvalues, TY_REAL)
+ call salloc (raw, nvalues, TY_REAL)
+
+ do i = 1, nvalues
+ Memr[raw+i-1] = min_raw + i - 1
+
+ call amulkr (Memr[raw], scale, Memr[den], nvalues)
+
+ call hd_known (min_raw, max_raw, scale, Memr[intk], nvalues)
+ call hd_calc (min_raw, max_raw, scale, Memr[intc], nvalues)
+
+ if (verbose) {
+ factor = ceiling / Memr[intc+nvalues-1]
+ call printf ("# %20tRaw Value %40tDensity %60tIntensity\n\n")
+ do i = 1, nvalues {
+ call printf ("%20t%d %40t%g %60t%g\n")
+ call pargi (i)
+ call pargr (Memr[den+i-1])
+ call pargr (Memr[intc+i-1] * factor)
+ }
+ }
+
+ call hd_plotit (gp, Memr[den], Memr[intk], Memr[intc], nvalues)
+
+ call hd_trunc (gp, Memr[den], nvalues, ceiling, Memr[intc])
+
+ call gclose (gp)
+ call sfree (sp)
+end
+
+
+# HD_KNOWN -- Calculate vector of known intensity values.
+
+procedure hd_known (min_raw, max_raw, scale, intk, nvalues)
+
+int min_raw # Minimum raw data value
+int max_raw # Maximum raw data value
+real scale # Density = raw_value * scale
+real intk[nvalues] # Known intensities - filled on return
+int nvalues # Number of intensity values requested
+
+int i
+real density, logo
+real exp
+
+begin
+ do i = min_raw, max_raw {
+ density = max (EPSILONR, i * scale)
+ logo = log10 ((10. ** density) - 1.0)
+ exp = A0 + A1 * logo + A2 * logo ** 2 + A3 * logo ** 3
+ intk[i] = 10 ** (exp)
+ }
+end
+
+
+# HD_CALC -- Calculate vector of intensity values as in HDTOI.
+
+procedure hd_calc (min, max, scale, intc, nvalues)
+
+int min # Minimum raw data value
+int max # Maximum raw data value
+real scale # Density = raw_value * scale
+real intc[nvalues] # Calculated intensity values - filled on return
+int nvalues # Number of intensity values requested
+
+real cfit[9]
+pointer sp, lut
+
+begin
+ call smark (sp)
+ call salloc (lut, nvalues, TY_REAL)
+
+ cfit[1] = 5.0
+ cfit[2] = 4.0
+ cfit[3] = -10.0
+ cfit[4] = MAXDEN
+ cfit[5] = 1.
+ cfit[6] = A0
+ cfit[7] = A1
+ cfit[8] = A2
+ cfit[9] = A3
+ call st_wlut (Memr[lut], min, max, scale, cfit)
+ call st_transform (min, max, Memr[lut], nvalues, intc)
+
+ call sfree (sp)
+end
+
+
+# HD_TRUNC -- Investigate truncation errors for real versus int output image.
+
+procedure hd_trunc (gp, density, nvalues, ceiling, intc)
+
+pointer gp # Pointer to graphics stream
+real density[nvalues] # Density array
+int nvalues # Number of density, intensity values
+real ceiling # Max intensity to output
+real intc[nvalues] # Calculated intensity values
+
+pointer sp, yint, yreal
+int npvals
+real factor
+
+begin
+ call smark (sp)
+
+ # Only the lowest 5% of the data values are plotted
+ npvals = nvalues * 0.05
+
+ call salloc (yint, npvals, TY_INT)
+ call salloc (yreal, npvals, TY_REAL)
+
+ # Scale intensity vector to ceiling
+ factor = ceiling / intc[nvalues]
+
+ call amulkr (intc, factor, intc, npvals)
+ call achtri (intc, Memi[yint], npvals)
+ call achtir (Memi[yint], Memr[yreal], npvals)
+
+ call gascale (gp, density, npvals, 1)
+ call gascale (gp, Memr[yreal], npvals, 2)
+ call gsview (gp, 0.2, 0.9, 0.1, 0.4)
+ call gseti (gp, G_ROUND, YES)
+ call glabax (gp,
+ "Expand to see Truncation Errors\n (real=SOLID, integer=DASHED)",
+ "Density (Lowest 5% only)", "Intensity")
+
+ call gseti (gp, G_PLTYPE, GL_SOLID)
+ call gpline (gp, density, intc, npvals)
+
+ call gseti (gp, G_PLTYPE, GL_DASHED)
+ call gpline (gp, density, Memr[yreal], npvals)
+
+ call sfree (sp)
+end
+
+
+# HD_PLOTIT -- Plot residuals of calculated versus known intensity.
+
+procedure hd_plotit (gp, density, intk, intc, nvalues)
+
+pointer gp # Pointer to graphics stream
+real density[nvalues] # Density array
+real intk[nvalues] # Array of known intensities
+real intc[nvalues] # Array of calculated intensities
+int nvalues # Number of density, intensity values
+
+pointer sp, resid
+
+begin
+ call smark (sp)
+ call salloc (resid, nvalues, TY_REAL)
+
+ call asubr (intk, intc, Memr[resid], nvalues)
+
+ call gascale (gp, density, nvalues, 1)
+ call gascale (gp, Memr[resid], nvalues, 2)
+ call gsview (gp, 0.2, 0.9, 0.6, 0.9)
+ call gseti (gp, G_ROUND, YES)
+
+ call glabax (gp, "Residual Intensity\n (Known - Calculated)",
+ "Density", "")
+ call gpline (gp, density, Memr[resid], nvalues)
+
+ call sfree (sp)
+end
+
+
+# ST_WLUT -- Generate look up table, using technique of HDTOI.
+
+procedure st_wlut (lut, min, max, scale, cfit)
+
+real lut[ARB] # Look up table of intensities
+int min # Minimum raw data value
+int max # Maximum raw data value
+real scale # Density = raw_value * scale
+real cfit[ARB] # Coefficient array for restoring curfit
+
+pointer cv, sp, den, indv, kv
+int nvalues, i
+extern hd_powerr()
+
+begin
+ call smark (sp)
+ nvalues = max - min + 1
+ call salloc (den, nvalues, TY_REAL)
+ call salloc (indv, nvalues, TY_REAL)
+ call salloc (kv, nvalues, TY_REAL)
+ do i = 1, nvalues
+ Memr[kv+i-1] = real (i)
+
+ call amulkr (Memr[kv], scale, Memr[den], nvalues)
+
+ call cvrestore (cv, cfit)
+ call cvuserfnc (cv, hd_powerr)
+
+ call hd_aptrans (Memr[den], Memr[indv], nvalues, "logo")
+ call cvvector (cv, Memr[indv], lut, nvalues)
+ do i = 1, nvalues
+ lut[i] = 10.0 ** lut[i]
+
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# ST_TRANSFORM -- Apply transformation from look up table to input vector.
+
+procedure st_transform (min, max, lut, nvalues, intc)
+
+int min # Minimum raw data value
+int max # Maximum raw data value
+real lut[ARB] # Array of intensity values
+int nvalues # Number of density, intensity values
+real intc[ARB] # Calculated intensities - returned
+
+int i
+
+begin
+ do i = 1, nvalues
+ intc[i] = lut[i]
+end
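
The truncation check in HD_TRUNC comes down to scaling the intensity vector so its densest point hits the output ceiling and then differencing the real-valued result against its integer conversion, which is what an integer output image would store. A compact sketch of that comparison using the same cubic as the "known" curve (values are illustrative):

    import numpy as np

    A0, A1, A2, A3 = -1.74743, 0.73, -0.24, 0.035
    scale, nvals, ceiling = 0.0325, 144, 30000.0   # the small 1-144 test case

    den = np.arange(1, nvals + 1) * scale
    logo = np.log10(10.0**den - 1.0)               # log opacitance
    inten = 10.0**(A0 + A1*logo + A2*logo**2 + A3*logo**3)

    inten *= ceiling / inten[-1]                   # densest point -> ceiling
    as_int = inten.astype(int)                     # integer-image version
    trunc_err = inten - as_int                     # worst at low intensities

    low5 = slice(0, int(0.05 * nvals))             # hd_trunc plots lowest 5%
    print(trunc_err[low5].max())
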
diff --git a/noao/imred/dtoi/spotlist.par b/noao/imred/dtoi/spotlist.par
new file mode 100644
index 00000000..8134c8de
--- /dev/null
+++ b/noao/imred/dtoi/spotlist.par
@@ -0,0 +1,8 @@
+# The cl parameters for task spotlist in the dtoi package
+spots,f,a,,,,List of image files containing spot data
+fogs,f,a,,,,List of images containing fog spots
+database,f,a,,,,Name of output database file
+scale,r,h,0.00151,,,Input value to density scale factor
+maxad,i,h,3071,,,Integer A/D value of saturated pixel
+sigma,r,h,3.0,,,Rejection criteria when determining mean density
+option,s,h,mean,"mean|median",,Choice of algorithm (mean or median)
diff --git a/noao/imred/dtoi/spotlist.x b/noao/imred/dtoi/spotlist.x
new file mode 100644
index 00000000..8f4ffeee
--- /dev/null
+++ b/noao/imred/dtoi/spotlist.x
@@ -0,0 +1,395 @@
+include <error.h>
+include <imhdr.h>
+include <mach.h>
+include <fset.h>
+
+# T_SPOTLIST -- calculates the densities and standard deviations of calibration
+# spots read in as IRAF images. Entries for density, sdev, spot number
+# and number of pixels used in the average are made in the output database.
+# These values are entered for all spots and the fog level. The
+# fog level is calculated but not subtracted in this procedure.
+
+procedure t_spotlist ()
+
+pointer db, sp, den, ngpix, sdev, dloge, option, npix
+int spot_fd, fog_fd, fngpix, nspots, maxad, fnpix
+real scale, sigma, fsdev, fog
+double maxden
+
+pointer ddb_map()
+int imtopenp(), imtlen(), clgeti(), strncmp()
+real clgetr()
+
+begin
+ call smark (sp)
+ call salloc (dloge, SZ_FNAME, TY_CHAR)
+ call salloc (option, SZ_FNAME, TY_CHAR)
+
+ # Get parameters and open file name templates
+ spot_fd = imtopenp ("spots")
+ fog_fd = imtopenp ("fogs")
+ call clgstr ("database", Memc[dloge], SZ_FNAME)
+ scale = clgetr ("scale")
+ maxad = clgeti ("maxad")
+ sigma = clgetr ("sigma")
+ call clgstr ("option", Memc[option], SZ_FNAME)
+
+ maxden = maxad * double (scale)
+
+ # Allocate space for density, standard deviation and ngpix arrays
+ nspots = imtlen (spot_fd)
+ call salloc ( den, nspots, TY_REAL)
+ call salloc (sdev, nspots, TY_REAL)
+ call salloc (ngpix, nspots, TY_INT)
+ call salloc ( npix, nspots, TY_INT)
+
+ # Calculate densities depending on algorithm option. The
+ # number of saturated pixels per spot is also calculated now.
+
+ if (strncmp (Memc[option], "median", 3) == 0)
+ call hd_median (spot_fd, Memr[den], Memr[sdev], Memi[ngpix],
+ nspots, scale, Memi[npix])
+ else
+ call hd_mean (spot_fd, Memr[den], Memr[sdev], Memi[ngpix],
+ nspots, scale, sigma, Memi[npix])
+
+ # Calculate fog level and count saturated pixels
+ call hd_fogcalc (fog_fd, fog, fsdev, fngpix, scale, sigma,
+ Memc[option], fnpix)
+
+ # Now print results to stdout
+ call hd_printit (Memr[den], Memr[sdev], Memi[npix], Memi[ngpix],
+ fog, fsdev, fnpix, fngpix, nspots)
+
+ # Open output database file and write spot information
+ db = ddb_map (Memc[dloge], APPEND)
+ call hd_wspotdb (db, Memr[den], Memr[sdev], Memi[ngpix], nspots)
+
+ # Write fog information to database as single record
+ call ddb_prec (db, "fog")
+ call ddb_putr (db, "density", fog)
+ call ddb_putr (db, "sdev", fsdev)
+ call ddb_puti (db, "ngpix", fngpix)
+
+ # Scale info gets written to database also (very precisely!)
+ call ddb_prec (db, "common")
+ call ddb_putr (db, "scale", scale)
+ call ddb_putd (db, "maxden", maxden)
+ call ddb_pstr (db, "option", Memc[option])
+
+ call ddb_unmap (db)
+
+ call imtclose (spot_fd)
+ call imtclose (fog_fd)
+ call sfree (sp)
+end
+
+
+# HD_MEAN -- Calculate mean density of calibration spots.
+
+procedure hd_mean (spot_fd, den, sdev, ngpix, nspots, scale, sigma, npix)
+
+int spot_fd # File descriptor for list of spots
+real den[ARB] # Mean density values - filled on return
+real sdev[ARB] # Standard deviation array - filled on return
+int ngpix[ARB] # Number of unrejected pixels - filled on return
+int nspots # Number of spots in list
+real scale # Scale for voltage to density conversion
+real sigma # Rejection criteria set by user
+int npix[ARB] # Number of pixels per spot
+
+pointer im, spot, sp, pix
+int i, junk, ncols, nlines
+pointer immap(), imgs2r()
+int imtgetim(), hd_aravr()
+errchk imgs2r, amulkr, hd_aravr
+
+begin
+ call smark (sp)
+ call salloc (spot, SZ_FNAME, TY_CHAR)
+
+ # Loop over spot rasters. Calculate density and standard deviation.
+ for (i = 1; i <= nspots; i = i + 1) {
+ junk = imtgetim (spot_fd, Memc[spot], SZ_FNAME)
+ iferr (im = immap (Memc[spot], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ npix[i] = ncols * nlines
+
+ # For all pixels in the image, scale the a/d value to density and
+	# calculate the mean value, using a sigma-rejection algorithm.
+
+ pix = imgs2r (im, 1, ncols, 1, nlines)
+ call amulkr (Memr[pix], scale, Memr[pix], npix[i])
+ ngpix[i] = hd_aravr (Memr[pix], npix[i], den[i], sdev[i], sigma)
+ call imunmap (im)
+ }
+
+ call sfree (sp)
+end
+
+
+# HD_MEDIAN -- Calculate median density of calibration spots.
+
+procedure hd_median (spot_fd, den, sdev, ngpix, nspots, scale, npix)
+
+int spot_fd # File descriptor for list of spots
+real den[ARB] # Mean density values - filled on return
+real sdev[ARB] # Standard deviation of input spots
+int ngpix[ARB] # Number of pixels not rejected
+int nspots # Number of spots in list
+real scale # Scale for voltage to density conversion
+int npix[ARB] # Number of pixels per spot
+
+pointer im, spot, sp, pix
+int i, junk, ncols, nlines
+real junk_mean
+pointer immap(), imgs2r()
+int imtgetim()
+real amedr()
+errchk imgs2r, amulkr, amedr
+
+begin
+ call smark (sp)
+ call salloc (spot, SZ_FNAME, TY_CHAR)
+
+ # Loop over spot rasters. Calculate density and standard deviation.
+ for (i = 1; i <= nspots; i = i + 1) {
+ junk = imtgetim (spot_fd, Memc[spot], SZ_FNAME)
+ iferr (im = immap (Memc[spot], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ npix[i] = ncols * nlines
+
+ # For all pixels in the image, scale the a/d value to density and
+ # calculate the median value. For the user's information, the
+ # sigma is also calculated in a call to aavgr.
+
+ pix = imgs2r (im, 1, ncols, 1, nlines)
+ call amulkr (Memr[pix], scale, Memr[pix], npix[i])
+ den[i] = amedr (Memr[pix], npix[i])
+ ngpix[i] = npix[i]
+ call aavgr (Memr[pix], npix[i], junk_mean, sdev[i])
+ call imunmap (im)
+ }
+
+ call sfree (sp)
+end
+
+
+# HD_FOGCALC -- Calculate fog density.
+
+procedure hd_fogcalc (fog_fd, fog, fsdev, fngpix, scale, sigma, option, nfpix)
+
+int fog_fd # File descriptor for list of fog spots
+real fog # Mean fog value - returned
+real fsdev # Standard deviation - returned
+int fngpix # Number of pixels used - returned
+real scale # Voltage to density scaling factor
+real sigma # Rejection criteria - set by user
+char option[ARB] # User's choice of mean/median algorithm
+int nfpix # Total number of fog pixels
+
+pointer pix, im, sp, fogfile, ptr
+int nfog, maxfog, i, junk, ncols, nlines, npix, total_pix, op
+real junk_mean
+pointer immap(), imgs2r()
+int imtlen(), imtgetim(), hd_aravr(), strncmp()
+real amedr()
+errchk calloc, imgs2r, aaddr, amulkr, hd_aravr
+
+begin
+ call smark (sp)
+ call salloc (fogfile, SZ_FNAME, TY_CHAR)
+
+ pix = NULL
+ total_pix = 0
+ op = 0
+ nfog = imtlen (fog_fd)
+ maxfog = nfog
+ do i = 1, maxfog {
+ junk = imtgetim (fog_fd, Memc[fogfile], SZ_FNAME)
+ iferr (im = immap (Memc[fogfile], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ nfog = nfog - 1
+ next
+ }
+
+ ncols = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+ npix = ncols * nlines
+ total_pix = total_pix + npix
+
+ if (pix == NULL)
+ # Initialize space for accumulating pixels
+ call calloc (pix, npix, TY_REAL)
+ else
+ # Increase space for accumulating pixels
+ call realloc (pix, total_pix, TY_REAL)
+
+ # Build up vector of fog pixels
+ ptr = imgs2r (im, 1, ncols, 1, nlines)
+ call amovr (Memr[ptr], Memr[pix+op], npix)
+ op = op + npix
+
+ call imunmap (im)
+ }
+
+ # Scale values to density and calculate fog and std deviation
+
+ if (nfog > 0) {
+# 7 Sept 1989, S. Rooke: in Suzanne's absence, made following bugfix after
+# bug reported by Steve Majewski that fog values are off by 1/n images where
+# multiple fog images are used in a single run. The total_pix already contains
+# the sum of all pixel values, so the fog pixel values should not be divided
+# by nfog. This should be verified by Suzanne on her return, and these comments
+# removed.
+# call amulkr (Memr[pix], scale / real (nfog), Memr[pix], total_pix)
+ call amulkr (Memr[pix], scale, Memr[pix], total_pix)
+ if (strncmp (option, "median", 3) == 0) {
+ fog = amedr (Memr[pix], total_pix)
+ fngpix = total_pix
+ call aavgr (Memr[pix], total_pix, junk_mean, fsdev)
+ } else
+ fngpix = hd_aravr (Memr[pix], total_pix, fog, fsdev, sigma)
+ } else {
+ fog = 0.0
+ fsdev = 0.0
+ fngpix = 0
+ }
+ nfpix = total_pix
+
+ call mfree (pix, TY_REAL)
+ call sfree (sp)
+end
+
+
+# HD_WSPOTDB -- Write spot information to database file. Values are first
+# sorted in order of increasing density.
+
+procedure hd_wspotdb (db, density, sdev, ngpix, nspots)
+
+pointer db # Pointer to database
+real density[ARB] # Array of densities
+real sdev [ARB] # Array of standard deviations
+int ngpix [ARB] # Array of npix used in calculations
+int nspots # Number of density spots
+
+begin
+ if (density[1] > density[nspots]) {
+ # Need to reorder arrays
+ call hd_reorderr (density, nspots)
+ call hd_reorderr ( sdev, nspots)
+ call hd_reorderi ( ngpix, nspots)
+ }
+
+ call ddb_ptime (db)
+
+ # Density record
+ call ddb_prec (db, "density")
+ call ddb_par (db, "den_val", density, nspots)
+
+ # Standard deviation of density is written to a record
+ call ddb_prec (db, "standard deviation")
+ call ddb_par (db, "sdev_val", sdev, nspots)
+
+ # Record for npix_used
+ call ddb_prec (db, "ngpix")
+ call ddb_pai (db, "npix_val", ngpix, nspots)
+end
+
+
+# HD_REORDERR - Flip order of real array in place.
+
+procedure hd_reorderr (array, nvals)
+
+real array[ARB] # Real array to be reordered
+int nvals # Number of elements in array
+
+pointer sp, tmp
+int i
+
+begin
+ call smark (sp)
+ call salloc (tmp, nvals, TY_REAL)
+
+ call amovr (array, Memr[tmp], nvals)
+ do i = 1, nvals
+ array[i] = Memr[tmp+nvals-i]
+
+ call sfree (sp)
+end
+
+
+# HD_REORDERI -- Flip order of integer array in place.
+
+procedure hd_reorderi (array, nvals)
+
+int array[ARB] # Integer array to be ordered
+int nvals # Number of elements in array
+
+pointer sp, tmp
+int i
+
+begin
+ call smark (sp)
+ call salloc (tmp, nvals, TY_INT)
+
+ call amovi (array, Memi[tmp], nvals)
+ do i = 1, nvals
+ array[i] = Memi[tmp+nvals-i]
+
+ call sfree (sp)
+end
+
+
+# HD_PRINTIT -- Neatly print out all the accumulated information.
+
+procedure hd_printit (den, sdev, npix, ngpix, fog, fsdev, fnpix, fngpix, nspots)
+
+real den[ARB] # density array
+real sdev[ARB] # std deviation array
+int npix[ARB] # npix array
+int ngpix[ARB] # ngoodpix array
+real fog # fog value
+real fsdev # std deviation of fog
+int fnpix # npix in fog
+int fngpix # ngoodpix in fog
+int nspots # number of spots
+int i
+
+begin
+
+ call fseti (STDOUT, F_FLUSHNL, YES)
+
+ call printf ("\n # Number of P")
+ call printf ("ixels\n")
+ call printf ("# Spot Number Density Std Deviation Total Used Rej")
+ call printf ("ected \n")
+
+ do i = 1, nspots {
+ call printf (" %d %17t%.4f %27t%.4f %43t%d %5d %6d \n")
+ call pargi (i)
+ call pargr (den[i])
+ call pargr (sdev[i])
+ call pargi (npix[i])
+ call pargi (ngpix[i])
+ call pargi (npix[i] - ngpix[i])
+ }
+
+ call printf (" FOG %17t%.4f %27t%.4f %43t%d %5d %6d \n")
+ call pargr (fog)
+ call pargr (fsdev)
+ call pargi (fnpix)
+ call pargi (fngpix)
+ call pargi (fnpix - fngpix)
+
+end
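
The spot statistics themselves are ordinary clipped means and medians: the raster is scaled from a/d units to density, and either the median is taken (with the standard deviation reported for information only) or a mean with iterated sigma rejection is formed, which is roughly what hd_aravr is assumed to do here. A hedged NumPy sketch for one spot (the function name and numbers are illustrative):

    import numpy as np

    def spot_density(pixels, scale, sigma=3.0, option="mean", maxiter=10):
        # Return (density, sdev, ngood) for one spot raster.
        d = np.asarray(pixels, dtype=float) * scale    # a/d value -> density
        if option == "median":
            return float(np.median(d)), float(d.std()), d.size
        good = np.ones(d.size, dtype=bool)
        for _ in range(maxiter):                       # iterated sigma clipping
            mean, sdev = d[good].mean(), d[good].std()
            keep = np.abs(d - mean) <= sigma * sdev
            if keep.sum() == good.sum():
                break
            good = keep
        return float(mean), float(sdev), int(good.sum())

    print(spot_density([611, 605, 618, 950], scale=0.00151))
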
diff --git a/noao/imred/dtoi/x_dtoi.x b/noao/imred/dtoi/x_dtoi.x
new file mode 100644
index 00000000..5fd0cc72
--- /dev/null
+++ b/noao/imred/dtoi/x_dtoi.x
@@ -0,0 +1,6 @@
+task spotlist = t_spotlist,
+ hdfit = t_hdfit,
+ hdshift = t_hdshift,
+ hdtoi = t_hdtoi,
+ dematch = t_dematch,
+ selftest = t_selftest
diff --git a/noao/imred/echelle/Revisions b/noao/imred/echelle/Revisions
new file mode 100644
index 00000000..1ba3c451
--- /dev/null
+++ b/noao/imred/echelle/Revisions
@@ -0,0 +1,247 @@
+.help revisions Jun88 noao.imred.echelle
+.nf
+=======
+V2.12.3
+=======
+
+imred$echelle/echelle.cl
+ With the change of SCOMBINE to a separate executable the package
+ file needed to be updated. (1/19/05)
+
+=======
+V2.12.2
+=======
+
+imred$echelle/doc/ecidentify.hlp
+ Fixed some minor typos in the function description section.
+ (8/26/03, Valdes)
+
+=======
+V2.12.1
+=======
+
+=====
+V2.12
+=====
+
+imred$echelle/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+imred$echelle/doc/ecidentify.hlp
+ Added a description of how to evaluate the dispersion functions.
+ (4/20/01, Valdes)
+
+imred$echelle/doc/dofoe.hlp
+ Modified to explain that this task may be used with a single fiber.
+ (11/21/00, Valdes)
+
+========
+V2.11.3a
+========
+
+imred$echelle/doc/ecidentify.hlp
+	Fixed minor formatting problem. (4/22/99, Valdes)
+
+=====
+V2.11
+=====
+
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+	Added sapertures to the package. (12/4/97, Valdes)
+
+=====
+V2.11
+=====
+
+imred$echelle/demos/mkdoecslit.cl
+imred$echelle/demos/mkdofoe.cl
+ Made the ARTDATA package parameters explicit (4/15/97, Valdes)
+
+imred$echelle/demos/xgdoecslit.dat
+ The test now includes batch processing. (10/3/96, Valdes)
+
+imred$echelle/echelle.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$echelle/demos/mkdoecslit.cl
+ Added a flat field. (11/6/94, Valdes)
+
+imred$echelle/*
+ Further updates for WCS version of ONEDSPEC. (7/24/91, Valdes)
+
+imred$echelle/echelle.cl
+imred$echelle/echelle.par
+imred$echelle/echelle.men
+imred$echelle/echelle.hd
+ The package was updated for V3 of APEXTRACT and ONEDSPEC. All parameter
+ files are now part of those packages and now local copies are used.
+ Some tasks have disappeared such as APIO and ECSELECT and a number of
+ others have been added.
+
+imred$echelle/ecbplot.cl -
+imred$echelle/doc/ecbplot.hlp -
+imred$echelle/echelle.cl
+imred$echelle/echelle.hd
+imred$echelle/echelle.men
+ The generic task BPLOT replaces ECBPLOT. (8/24/90, Valdes)
+
+============================
+V3 of APEXTRACT and ONEDSPEC
+============================
+
+imred$echelle/doc/ecreidentify.hlp
+ The refit option now maintains the order offset of the reference.
+ (6/12/90, Valdes)
+
+====
+V2.9
+====
+
+imred$echelle/ecbplot.cl
+ Added a missing parameter declaration for "cur". (10/27/89, Valdes)
+
+====
+V2.8
+====
+
+imred$echelle/eccontinuum.par +
+imred$echelle/doc/eccontinuum.hlp +
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+imred$echelle/echelle.hd
+ Added the hooks for the new eccontinuum. The executable is in
+ onedspec$t_ecctm.x. (6/2/89, Seaman)
+
+imred$echelle/ecbplot.cl +
+imred$echelle/doc/ecbplot.hlp +
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+imred$echelle/echelle.hd
+ Added the new script and removed bplot. (6/2/89, Seaman)
+
+imred$echelle/standard.par
+	Removed enumerated list. (4/10/89, Valdes)
+
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+imred$echelle/specplot.par+
+ Task SPECPLOT added. (4/3/89 ShJ)
+
+imred$echelle/ecselect.par +
+imred$echelle/doc/ecselect.hlp +
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+imred$echelle/echelle.hd
+ Added a new task ECSELECT to select and extract apertures from
+ echelle format spectra. (3/8/89, Valdes)
+
+imred$echelle/apscatter.par +
+imred$echelle/apscat1.par +
+imred$echelle/apscat2.par +
+imred$echelle/echelle.cl
+imred$echelle/echelle.men
+ Added a new task, APSCATTER, to fit and subtract scattered light.
+ It includes two hidden psets, APSCAT1 and APSCAT2. (3/3/89, Valdes)
+
+imred$echelle/doc/ecidentify.hlp
+ Add a little discussion concerning the speed advantage of the 'o'
+ fit compared to the 'f' fit. (2/2/89, Valdes)
+
+noao$imred/echelle/
+ Davis, Oct 31, 1988
+ The tasks ecbplot and eccontinuum and the hidden task _enranges were
+ added to the echelle package. These tasks were written by Rob
+ Seaman.
+
+imred$echelle/echelle.cl
+imred$echelle/echelle.par
+imred$echelle/echelle.men
+imred$echelle/ecdispcor.par +
+imred$echelle/ecidentify.par +
+imred$echelle/ecreidentify.par +
+imred$echelle/refspectra.par +
+imred$echelle/dispcor.par -
+imred$echelle/identify.par -
+imred$echelle/reidentify.par -
+ New version of this package using new echelle tools instead of ONEDSPEC
+ tools. (4/7/88 Valdes)
+
+================================================================================
+
+noao$imred/echelle/reidentify.par
+ Valdes, Jan 4, 1988
+ Updated parameter file for new REIDENTIFY parameter.
+
+noao$imred/echelle/echelle.cl
+noao$imred/echelle/echelle.men
+noao$imred/echelle/apnormalize.par +
+ Added the APNORMALIZE task to the package. (fv 7/1/87)
+
+noao$imred/echelle/dispcor.par
+ Valdes, March 5, 1987
+ 1. The DISPCOR default parameter file has been updated because of
+ changes to the task; most notable being that wstart and wpc are
+ list structured.
+
+noao$imred/echelle/echelle.cl
+noao$imred/echelle/echelle.men
+noao$imred/echelle/shedit.par +
+ Valdes, October 6, 1986
+ 1. Added new task SHEDIT.
+
+noao$imred/echelle/identify.par
+ Valdes, October 3, 1986
+ 1. Added new IDENTIFY parameter "threshold".
+
+noao$imred/echelle/echelle.cl
+noao$imred/echelle/echelle.men
+noao$imred/echelle/apdefine.par -
+noao$imred/echelle/extract.par -
+noao$imred/echelle/trace.par -
+ Valdes, September 16, 1986
+ 1. Package modified to use the new APEXTRACT package.
+ 2. Deleted obsolete files
+
+noao$imred/echelle/echelle.hd
+noao$imred/echelle/echelle.men
+noao$imred/echelle/doc/Tutorial.hlp +
+ Valdes, August 18, 1986:
+ 1. Added prototype tutorial document to the ECHELLE package.
+
+====================================
+Version 2.3 Release, August 18, 1986
+====================================
+
+echelle: Valdes, July 3, 1986:
+ 1. IDENTIFY parameter file updated to reflect new name for line list.
+
+=====================================
+STScI Pre-release and SUN 2.3 Release
+=====================================
+
+echelle: Valdes, June 2, 1986:
+ 1. New APEXTRACT tasks defined in package.
+
+echelle: Valdes, May 12, 1986:
+ 1. SPLOT updated. New parameters XMIN, XMAX, YMIN, YMAX.
+
+echelle: Valdes, April 7, 1986:
+ 1. Package parameter file changed to delete latitude.
+ 2. DISPCOR latitude parameter now obtained from OBSERVATORY.
+
+echelle: Valdes, March 27, 1986:
+ 1. New task SETDISP added.
+ 2. APEXTRACT tasks defined in ECHELLE package instead of loaded from
+ TWODSPEC$APEXTRACT.
+
+===========
+Release 2.2
+===========
+From Valdes December 31, 1985:
+
+1. Tasks FLATFIT, FLATDIV, and EXTRACT have been removed from the ECHELLE
+package. The TWODSPEC package containing the task EXTRACT is loaded with
+the ECHELLE package.
+.endhelp
diff --git a/noao/imred/echelle/calibrate.par b/noao/imred/echelle/calibrate.par
new file mode 100644
index 00000000..532ca60e
--- /dev/null
+++ b/noao/imred/echelle/calibrate.par
@@ -0,0 +1,13 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,no,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/echelle/demos/demoarc.dat b/noao/imred/echelle/demos/demoarc.dat
new file mode 100644
index 00000000..fa0a179d
--- /dev/null
+++ b/noao/imred/echelle/demos/demoarc.dat
@@ -0,0 +1,38 @@
+ OBJECT = 'First comp ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 60. / actual integration time
+ DARKTIME= 60. / total elapsed time
+ IMAGETYP= 'comp ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:11:30.00 ' / universal time
+ ST = '09:04:54.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:09:03.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '48.760 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDMEAN = 179.398
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/echelle/demos/demoobj.dat b/noao/imred/echelle/demos/demoobj.dat
new file mode 100644
index 00000000..78f3b9ad
--- /dev/null
+++ b/noao/imred/echelle/demos/demoobj.dat
@@ -0,0 +1,37 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/echelle/demos/demos.cl b/noao/imred/echelle/demos/demos.cl
new file mode 100644
index 00000000..00033829
--- /dev/null
+++ b/noao/imred/echelle/demos/demos.cl
@@ -0,0 +1,20 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile)) {
+ task $demo=(demofile)
+ demo
+# cl (< demofile)
+ } else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/echelle/demos/demos.men b/noao/imred/echelle/demos/demos.men
new file mode 100644
index 00000000..09d41022
--- /dev/null
+++ b/noao/imred/echelle/demos/demos.men
@@ -0,0 +1,7 @@
+ MENU of ECHELLE Demonstrations
+
+ mkdoecslit - Make test echelle slit data (3 orders, 100x256)
+ doecslit - Quick test of DOECSLIT (small images, no comments, no delays)
+
+ mkdofoe - Make test FOE data (3 orders, 100x256)
+ dofoe - Quick test of DOFOE (small images, no comments, no delays)
diff --git a/noao/imred/echelle/demos/demos.par b/noao/imred/echelle/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/echelle/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/echelle/demos/demostd.dat b/noao/imred/echelle/demos/demostd.dat
new file mode 100644
index 00000000..7588f3fa
--- /dev/null
+++ b/noao/imred/echelle/demos/demostd.dat
@@ -0,0 +1,36 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/echelle/demos/doecslit.cl b/noao/imred/echelle/demos/doecslit.cl
new file mode 100644
index 00000000..17483089
--- /dev/null
+++ b/noao/imred/echelle/demos/doecslit.cl
@@ -0,0 +1,21 @@
+# Create demo data if needed.
+
+task $mkdoecslit=demos$mkdoecslit.cl
+mkdoecslit
+imdel ("demo*.??h,sens*", verify=no, >& "dev$null")
+imcopy ("Bdemoobj1", "demoobj1", verbose=no)
+imcopy ("Bdemoobj2", "demoobj2", verbose=no)
+imcopy ("Bdemoarc", "demoarc", verbose=no)
+imcopy ("Bdemostd", "demostd", verbose=no)
+
+unlearn doecslit apscat1 apscat2
+sparams.extras = no
+sparams.bandwidth = 3
+sparams.bandsep = 3
+delete ("demologfile,demoplotfile,std", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdoecslit.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/echelle/demos/dofoe.cl b/noao/imred/echelle/demos/dofoe.cl
new file mode 100644
index 00000000..9a7d0da3
--- /dev/null
+++ b/noao/imred/echelle/demos/dofoe.cl
@@ -0,0 +1,13 @@
+# Create demo data if needed.
+
+task $mkdofoe=demos$mkdofoe.cl
+mkdofoe
+
+unlearn dofoe params
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdofoe.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/echelle/demos/ecdofoe.dat b/noao/imred/echelle/demos/ecdofoe.dat
new file mode 100644
index 00000000..edaa050b
--- /dev/null
+++ b/noao/imred/echelle/demos/ecdofoe.dat
@@ -0,0 +1,33 @@
+# Tue 10:20:50 16-Nov-93
+begin ecidentify demoarc.ec
+ id demoarc.ec
+ task ecidentify
+ image demoarc.ec
+ features 8
+ 1 116 78.35 4965.0792 4965.0795 4.0 1 1
+ 2 115 77.21 5009.335 5009.3344 4.0 1 1
+ 2 115 227.04 5019.8062 5019.8062 4.0 1 1
+ 3 114 11.56 5049.8052 5049.796 4.0 1 1
+ 3 114 25.44 5050.7874 5050.7842 4.0 1 1
+ 3 114 89.66 5055.3289 5055.3473 4.0 1 1
+ 3 114 184.46 5062.0332 5062.0371 4.0 1 1
+ 3 114 225.77 5064.9549 5064.9454 4.0 1 1
+ offset 117
+ slope -1
+ niterate 3
+ lowreject 3.
+ highreject 3.
+ coefficients 12
+ 1.
+ 2.
+ 2.
+ 1.
+ 1.
+ 256.
+ 114.
+ 116.
+ 576485.7847819133
+ 1024.71926036047
+ -134.8425017381852
+ -3.224100491592999
+
diff --git a/noao/imred/echelle/demos/mkdoecslit.cl b/noao/imred/echelle/demos/mkdoecslit.cl
new file mode 100644
index 00000000..1d5fa14b
--- /dev/null
+++ b/noao/imred/echelle/demos/mkdoecslit.cl
@@ -0,0 +1,137 @@
+# Create test data if needed.
+
+procedure mkdoecslit ()
+begin
+
+ artdata
+ artdata.nxc = 5
+ artdata.nyc = 5
+ artdata.nxsub = 10
+ artdata.nysub = 10
+ artdata.nxgsub = 5
+ artdata.nygsub = 5
+ artdata.dynrange = 100000.
+ artdata.psfrange = 10.
+ artdata.ranbuf = 0
+
+ if (!access ("Bdemoflat." // envget ("imtype"))) {
+ print ("Creating example demoflat ...")
+ mkechelle ("Bdemoflat", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="slit", width=20., scattered=10., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=20000., temperature=5700., lines="",
+ nrandom=0, peak=5.0, sigma=0.1, seed=2, >& "dev$null")
+ mknoise ("Bdemoflat", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=no,
+ seed=5, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("Bdemoobj1." // envget ("imtype"))) {
+ print ("Creating example demoobj1 ...")
+ mkechelle ("Bdemoobj1", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=500., temperature=7700., lines="",
+ nrandom=100, peak=-0.2, sigma=0.3, seed=1, >& "dev$null")
+ mkechelle ("Bdemoobj1", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="slit", width=20., scattered=10., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=200., temperature=5700., lines="",
+ nrandom=20, peak=5.0, sigma=0.1, seed=2, >& "dev$null")
+ mknoise ("Bdemoobj1", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=1, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("Bdemoobj2." // envget ("imtype"))) {
+ print ("Creating example demoobj2 ...")
+ mkechelle ("Bdemoobj2", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=10., z=no, continuum=500., temperature=7700., lines="",
+ nrandom=100, peak=-0.2, sigma=0.3, seed=1, >& "dev$null")
+ mkechelle ("Bdemoobj2", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="slit", width=20., scattered=10., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=200., temperature=5700., lines="",
+ nrandom=20, peak=5.0, sigma=0.1, seed=2, >& "dev$null")
+ mknoise ("Bdemoobj2", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=4, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("Bdemostd." // envget ("imtype"))) {
+ print ("Creating example demostd ...")
+ mkechelle ("Bdemostd", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demostd.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=500., temperature=10000., lines="",
+ nrandom=0, peak=-0.5, sigma=0.5, seed=3, >& "dev$null")
+ mkechelle ("Bdemostd", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demostd.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="slit", width=20., scattered=10., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=200., temperature=5700., lines="",
+ nrandom=20, peak=5.0, sigma=0.1, seed=2, >& "dev$null")
+ mknoise ("Bdemostd", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=2, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("Bdemoarc." // envget ("imtype"))) {
+ print ("Creating example demoarc ...")
+ mkechelle ("Bdemoarc", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoarc.dat", list=no, make=yes,
+ comments=no, xc=INDEF, yc=INDEF, pixsize=0.027,
+ profile="slit", width=20., scattered=10., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=20., temperature=0.,
+ lines="mkexamples$ecthorium.dat", nrandom=0, peak=-0.5,
+ sigma=0.05, seed=1, >& "dev$null")
+ mknoise ("Bdemoarc", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=3, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+end
diff --git a/noao/imred/echelle/demos/mkdofoe.cl b/noao/imred/echelle/demos/mkdofoe.cl
new file mode 100644
index 00000000..6a18aaea
--- /dev/null
+++ b/noao/imred/echelle/demos/mkdofoe.cl
@@ -0,0 +1,103 @@
+# Create test data if needed.
+
+procedure mkdofoe ()
+begin
+
+ artdata
+ artdata.nxc = 5
+ artdata.nyc = 5
+ artdata.nxsub = 10
+ artdata.nysub = 10
+ artdata.nxgsub = 5
+ artdata.nygsub = 5
+ artdata.dynrange = 100000.
+ artdata.psfrange = 10.
+ artdata.ranbuf = 0
+
+ if (!access ("demoobj." // envget ("imtype"))) {
+ print ("Creating example demoobj ...")
+ mkechelle ("demoobj", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=50, yc=50.1, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=500., temperature=7700., lines="",
+ nrandom=100, peak=-0.2, sigma=0.3, seed=1, >& "dev$null")
+ mkechelle ("demoobj", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=60, yc=51.6, pixsize=0.027,
+ profile="gaussian", width=4., scattered=0., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=4.95, temperature=0.,
+ lines="mkexamples$ecthorium.dat", nrandom=0, peak=-0.5,
+ sigma=0.05, seed=1, >& "dev$null")
+ mknoise ("demoobj", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=1, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("demoflat." // envget ("imtype"))) {
+ print ("Creating example demoflat ...")
+ mkechelle ("demoflat", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=50, yc=50.2, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=1000., temperature=5700., lines="",
+ nrandom=0, peak=-0.2, sigma=0.3, seed=1, >& "dev$null")
+ mkechelle ("demoflat", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoobj.dat", list=no, make=yes,
+ comments=no, xc=60, yc=51.7, pixsize=0.027,
+ profile="gaussian", width=4., scattered=25., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=990., temperature=7700., lines="",
+ nrandom=0, peak=-0.2, sigma=0.3, seed=1, >& "dev$null")
+ mknoise ("demoflat", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=2, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+
+ if (!access ("demoarc." // envget ("imtype"))) {
+ print ("Creating example demoarc ...")
+ mkechelle ("demoarc", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoarc.dat", list=no, make=yes,
+ comments=no, xc=50, yc=50, pixsize=0.027,
+ profile="gaussian", width=4., scattered=0., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=10., temperature=0.,
+ lines="mkexamples$ecthorium.dat", nrandom=0, peak=-0.5,
+ sigma=0.05, seed=1, >& "dev$null")
+ mkechelle ("demoarc", yes, ncols=100, nlines=256, norders=21,
+ title="Artificial Echelle Spectrum",
+ header="demos$demoarc.dat", list=no, make=yes,
+ comments=no, xc=60, yc=51.5, pixsize=0.027,
+ profile="gaussian", width=4., scattered=0., f=590., gmm=31.6,
+ blaze=63., theta=69., order=112, wavelength=5007.49,
+ dispersion=2.61, cf=590., cgmm=226., cblaze=4.53,
+ ctheta=-11.97, corder=1, cwavelength=6700., cdispersion=70.,
+ rv=0., z=no, continuum=9.9, temperature=0.,
+ lines="mkexamples$ecthorium.dat", nrandom=0, peak=-0.5,
+ sigma=0.05, seed=1, >& "dev$null")
+ mknoise ("demoarc", output="", ncols=512, nlines=512, title="",
+ header="", background=0., gain=1., rdnoise=10., poisson=yes,
+ seed=3, cosrays="", ncosrays=0, energy=30000., radius=0.5,
+ ar=1., pa=0., comments=no)
+ }
+end
diff --git a/noao/imred/echelle/demos/xgdoecslit.dat b/noao/imred/echelle/demos/xgdoecslit.dat
new file mode 100644
index 00000000..3c59563a
--- /dev/null
+++ b/noao/imred/echelle/demos/xgdoecslit.dat
@@ -0,0 +1,105 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\sechel\n
+\r
+onedstds$spechayescal/\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdoecslit\n
+demoobj1,demoobj2\r
+demostd\r
+demoarc\r
+\r
+demostd\r
+rdnoise\r
+gain\r
+\r
+3\r
+\r
+\r
+y\r
+y\r
+y\r
+\r
+\r
+scat\r
+y\r
+^Z
+doecslit\sredo+\n
+\n
+\n
+b/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+N\r
+n\n
+n\n
+\n
+\n
+\n
+\n
+:/<-5\s\s\s\s/=(.\s=\r sample\s9:92\r
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+i/<-5\s\s\s\s/=(.\s=\r
+m200.\s\s\s\s20=*,.\r 4965\r
+m3+0*\s\s\s\s3,&*1)\r 4966\r
+k/<-5\s\s\s\s/=(.\s=\r
+m(?3"\s\s\s\s(?+&<:\r 5002\r
+m+\s3$\s\s\s\s+\s:&:-\r 5003.6\r
+m2*3$\s\s\s\s2+.&:-\r 5009.3\r
+k/<-5\s\s\s\s/=(.\s=\r
+m%937\s\s\s\s%90&"$\r 5044.7\r
+m,,3&\s\s\s\s,,/&7?\r 5049.8\r
+m9,4'\s\s\s\s9,?%,)\r 5059.8\r
+m3-4"\s\s\s\s3.\s%32\r 5055.3\r
+f/<-5\s\s\s\s/=(.\s=\r
+:/<-5\s\s\s\s/=(.\s=\r xo\s4\r
+o/<-5\s\s\s\s/=(.\s=\r 114\r
+q/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+o/<-5\s\s\s\s/=(.\s=\r 114\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+hz14\n
+y\n
+q/<-5\s\s\s\s/=(.\s=\r
+NO!\r
+y\n
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+1\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+2\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+2\n
+q/<-5\s\s\s\s/=(.\s=\r
+imdel\sdemoobj1.ec\n
+del\sdatabase/apdemoobj1\n
+doecslit\squick+\n
+\n
+imdel\sdemoobj1.ec\n
+doecslit\sbatch+\ssplot-\n
+\n
diff --git a/noao/imred/echelle/demos/xgdofoe.dat b/noao/imred/echelle/demos/xgdofoe.dat
new file mode 100644
index 00000000..bd436909
--- /dev/null
+++ b/noao/imred/echelle/demos/xgdofoe.dat
@@ -0,0 +1,50 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\sechelle\n
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdofoe\n
+demoobj\r
+demoflat\r
+demoflat\r
+demoarc\r
+\r
+rdnoise\r
+gain\r
+\r
+3\r
+5\r
+^Z
+dofoe\sredo+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+i/<-5\s\s\s\s/=(.\s=\r
+m++4'\s\s\s\s++8%,)\r 4965\r
+k/<-5\s\s\s\s/=(.\s=\r
+m+)4'\s\s\s\s+)=%,)\r 5009\r
+m9!4'\s\s\s\s9""%,)\r 5020\r
+k/<-5\s\s\s\s/=(.\s=\r
+m%'5'\s\s\s\s%'*$#!\r 5049.8\r
+m&15(\s\s\s\s&1%$\s4\r 5050.8\r
+m,.55\s\s\s\s,.)#/4\r 5055.3\r
+m5#7\s\s\s\s\s5$2!7:\r 5062\r
+m8=78\s\s\s\s8>.\s8)\r 5064.9\r
+f/<-5\s\s\s\s/=(.\s=\r
+o/<-5\s\s\s\s/=(.\s=\r 114\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/echelle/doc/Tutorial.hlp b/noao/imred/echelle/doc/Tutorial.hlp
new file mode 100644
index 00000000..655ca307
--- /dev/null
+++ b/noao/imred/echelle/doc/Tutorial.hlp
@@ -0,0 +1,184 @@
+.help Tutorial Aug86 "Echelle Tutorial"
+.ih
+TOPICS
+The echelle tutorial consists of a number of different topics. To obtain help
+on a particular topic type "tutor topic" where the topic is one of the
+following:
+
+.nf
+ TOPICS
+
+ topics - List of topics
+ overview - An overview of the echelle reduction process
+ dataio - Reading and writing data tapes
+ tracing - Tracing the positions of the orders
+ extraction - Extracting one or two dimensional spectra
+ references - Additional references
+ all - Print all of the tutorial
+.fi
+
+The topics are kept brief and describe the simplest operations. More
+sophisticated discussions are available for all the tasks in the printed
+documentation and through the on-line \fBhelp\fR facility; i.e. "help taskname".
+.ih
+OVERVIEW
+The \fBechelle\fR package provides for the extraction of the orders from
+two dimensional echelle images into one dimensional spectra. After extraction
+the one dimensional spectra are wavelength and flux calibrated. The usual
+flow of the reductions is
+.ls (1)
+Read data from tape.
+.le
+.ls (2)
+Set the dispersion axis in the image headers using the task \fBsetdisp\fR.
+This is required by many of the tasks which follow.
+.le
+.ls (3)
+Trace one or more images to define the positions of the orders within the
+two dimensional format.
+.le
+.ls (4)
+Extract the orders into one dimensional spectra.
+.le
+.ls (5)
+Use arc calibration spectra to determine wavelength solutions.
+.le
+.ls (6)
+Apply the wavelength solutions to the other spectra and rebin the spectra
+into linear or logarithmic wavelength intervals.
+.le
+.ls (7)
+Determine flux calibrations using standard star observations.
+.le
+.ls (8)
+Apply the flux calibrations to the other object spectra.
+.le
+.ls (9)
+Save the reductions as FITS images and make plots of the spectra.
+.le
+
+Many variations on these steps are possible given the great flexibility
+of the reduction tools at your disposal. The most important one to mention
+is that the orders may be extracted as two dimensional strips in order to
+apply more complex geometric distortion corrections using the \fBlongslit\fR
+package.
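+
+As a minimal sketch of the first few steps (the image names are only
+placeholders and the interactive queries from each task are not shown):
+
+.nf
+    ec> setdisp spec*           # step (2): set the dispersion axis
+    ec> trace spec001           # step (3): trace a reference image
+    ec> sumextract spec*        # step (4): extract the orders
+.fi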
+.ih
+DATAIO
+To read CCD Camera format tapes use \fBrcamera\fR from the \fBmtlocal\fR
+package. FITS format tapes are read and written with \fBrfits\fR and
+\fBwfits\fR from the \fBdataio\fR package. Remember you need to
+\fBallocate\fR the tape drive before you can read or write tapes and
+you should \fBdeallocate\fR the tapes when you are through with the
+tape drive.
+
+.nf
+ ec> allocate mta
+ ec> deallocate mta
+ ec> rcamera mta 1-99 ech datatype=r >rcam.log &
+ ec> rfits mta 1-99 ech datatype=r >rfits.log &
+ ec> wfits mta spectra*
+.fi
+.ih
+TRACING
+The positions of the orders across the image dispersion axis as a function
+of position along the dispersion axis are determined by the task \fBtrace\fR.
+There are three steps in tracing an image: defining the initial positions of
+the orders at one point along the dispersion, automatically determining
+the positions at other points in steps from the starting point, and fitting
+smooth curves to the positions as a function of dispersion position. The
+first and last steps are interactive, at least initially. After the first
+image other images may be traced noninteractively.
+
+Select an image with narrow, strong profiles and run trace:
+
+ ec> trace imagename
+
+When you are asked if you want to edit the apertures respond with "yes".
+The central cut across the dispersion is graphed. Position the cursor
+over the first order to be traced and type 'm'. Adjust the width of the
+extraction aperture with the 'l', 'u', or 'y' keys or specify the lower
+and upper widths explicitly with ":lower value" or ":upper value".
+If background subtraction is to be used type 'b' and set the background
+fitting parameters (see the \fBbackground\fR tutorial).
+Now mark the remaining orders with the 'm' key. The widths of the
+previous aperture are preserved for each new aperture. When you are
+satisfied with the marked apertures type 'q'.
+
+The positions of the orders are now traced in steps from the initial point.
+Once the positions have been traced you are asked whether to fit the
+traced apertures interactively. Respond with "yes". You will now be
+asked specifically if the first aperture is to be fit. Respond with "yes"
+again. The traced positions are graphed along with a fitted curve. You now
+have many options for adjusting the fit. The most important one is the
+order which is set by typing ":order value", where value is the desired
+order, and then 'f' to refit the data. For full information on the
+options see the help for \fBicfit\fR. When you are satisfied type 'q'.
+
+You are then prompted for the next order. The previous fitting parameters
+will be used so at this point you may want to just answer "NO" to skip
+the interactive fitting of the other traced orders, though the graphs of the
+fit will still be displayed.
+
+You now have several options about how to define the positions of the
+orders in your other images.
+
+.ls (1)
+You may apply the tracing to all other observations with no
+further tracing. This is done by specifying the traced image
+as the "reference" in the extraction process.
+.le
+.ls (2)
+You may maintain the same shape of the traces and correct for
+shifts in the positions of the orders across the dispersion
+by recentering each aperture. This is done
+with the task \fBapedit\fR or the editing switch during extraction
+using the first traced image as the reference. The apertures are
+recentered using the 'c' key (see the brief example following this list).
+.le
+.ls (3)
+Finally, you may retrace other images either from scratch or
+using the first traced image as the initial reference. In the latter
+case the tracing may be done noninteractively as a batch process.
+.le
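+
+For example, option (2) amounts to running the aperture editor on each new
+image ("nextimage" below is only a placeholder) and recentering the marked
+apertures with the 'c' key; how the reference image is supplied depends on
+the \fBapextract\fR parameters ("lpar apedit" lists them):
+
+.nf
+    ec> apedit nextimage
+.fi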
+.ih
+EXTRACTION
+There are two types of extraction: to one dimensional spectra or
+to two dimensional strips. The second type of extraction is accomplished
+by the task \fBstripextract\fR in the \fBtwodspec.apextract\fR package
+and is used if further reductions using the \fBlongslit\fR package are
+desired. Normally, however, one ignores the small geometric distortion
+in which curves of constant wavelength differ slightly from the image
+dispersion axis.
+
+Extraction of the traced echelle orders is performed by the task
+\fBsumextract\fR. The pixels within each aperture at each point along
+the dispersion axis are summed to produce one dimensional spectra, one
+for each order and each extracted image. The sum may be weighted
+in two ways; "profile" or "variance" weighting. The variance weighting
+may require that you know the CCD readout noise and photon/ADU conversion.
+For a description of the weights see the help for \fBsumextract\fR
+or the paper "The APEXTRACT Package". The spectra may also be cleaned
+of cosmic rays and bad pixels at the same time and have a background
+subtracted. The background subtraction parameters must be set when
+defining the apertures or later using the apedit mode in \fBapedit\fR,
+\fBtrace\fR, or \fBsumextract\fR. See the tutorial on \fBbackground\fR
+for further information.
+
+Once the extraction parameters have been set simply type
+
+ ec> sumextract images
+
+where images is the list of images to be extracted. If each image has
+not been traced then a traced reference image should be given.
+One may correct for shifts relative to the traced image by setting the
+switch to edit the apertures and then recentering each aperture before
+extracting. If there is no aperture editing then the extractions may
+be done as a background or batch process.
+.ih
+REFERENCES
+.ls (1)
+Pilachowski, C. and J. V. Barnes, \fINotes on the IRAF for Reduction of
+Echelle/CCD Data\fR, NOAO Central Computer Services, 1986. This document
+is also available in the \fBIRAF User Handbook, Vol. 2B -- NOAO Cookbooks\fR.
+.le
+.endhelp
diff --git a/noao/imred/echelle/doc/doecslit.hlp b/noao/imred/echelle/doc/doecslit.hlp
new file mode 100644
index 00000000..7bf69f00
--- /dev/null
+++ b/noao/imred/echelle/doc/doecslit.hlp
@@ -0,0 +1,1230 @@
+.help doecslit Feb93 noao.imred.echelle
+.ih
+NAME
+doecslit -- Echelle slit spectra reduction task
+.ih
+USAGE
+doecslit objects
+.ih
+SUMMARY
+\fBDoecslit\fR subtracts background sky or scattered light, extracts,
+wavelength calibrates, and flux calibrates multiorder echelle slit spectra
+which have been processed to remove the detector characteristics; i.e. CCD
+images have been bias, dark count, and flat field corrected. The spectra
+should be oriented such that pixels of constant wavelength are aligned with
+the image columns or lines. Small departures from this alignment are not
+critical resulting in only a small loss of resolution. Single order
+observations should be reduced with \fBdoslit\fR.
+.ih
+PARAMETERS
+.ls objects
+List of object images to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set
+and dependent calibration data has changed. If the images contain the
+keyword IMAGETYP then only those with a value of "object" or "OBJECT"
+are used and those with a value of "comp" or "COMPARISON" are added
+to the list of arcs. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a bright star spectrum.
+.le
+.ls arcs = "" (at least one if dispersion correcting)
+List of arc calibration spectra. These spectra are used to define
+the dispersion functions. The first spectrum is used to mark lines
+and set the dispersion function interactively and dispersion functions
+for all other arc spectra are derived from it. If the images contain
+the keyword IMAGETYP then only those with a value of "comp" or
+"COMPARISON" are used. All others are ignored as are extracted spectra.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining which arc spectra are to be assigned to which object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIsparams.sort\fR, such as the Julian date
+is made.
+.le
+.ls standards = "" (at least one if flux calibrating)
+List of standard star spectra. The standard stars must have entries in
+the calibration database (package parameter \fIechelle.caldir\fR).
+.le
+
+.ls readnoise = 0., gain = 1. (apsum)
+Read out noise in photons and detector gain in photons per data value.
+These parameters define the minimum noise sigma and the conversion between
+photon Poisson statistics and the data number statistics. Image header
+keywords (case insensitive) may be specified to obtain the values from the
+image header.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the standard star or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls norders = 10 (apfind)
+Number of orders to be found automatically.
+.le
+.ls width = 5. (apedit)
+Approximate full width of the spectrum profiles. This parameter is used
+to define a width and error radius for the profile centering algorithm,
+and defaults for the aperture limits and background regions.
+.le
+
+.ls dispcor = yes
+Dispersion correct spectra? This may involve either defining a nonlinear
+dispersion coordinate system in the image header or resampling the
+spectra to uniform linear wavelength coordinates as selected by
+the parameter \fIsparams.linearize\fR.
+.le
+.ls extcor = no
+Extinction correct the spectra?
+.le
+.ls fluxcal = no
+Flux calibrate the spectra using standard star observations?
+.le
+.ls resize = no (apresize)
+Resize the default apertures for each object based on the spectrum profile?
+.le
+.ls clean = no (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction. In addition the datamax parameter
+can be useful.
+.le
+.ls trace = yes (non-quicklook mode only) (aptrace)
+Allow tracing each object spectrum separately? If not set then the trace
+from the aperture reference is used, with recentering to allow for shifts
+across the dispersion. If set then each object and standard star
+image is retraced. Retracing is NOT done in quicklook mode.
+.le
+.ls background = "none" (apsum, apscatter)
+Type of background light subtraction. The choices are "none" for no
+background subtraction, "scattered" for a global scattered light
+subtraction, "average" to average the background within background regions,
+"median" to use the median in background regions, "minimum" to use the
+minimum in background regions, or "fit" to fit across the dispersion using
+the background within background regions. The scattered light option fits
+and subtracts a smooth global background and modifies the input images.
+This is a slow operation and so is NOT performed in quicklook mode. The
+other background options are local to each aperture. The "fit" option uses
+additional fitting parameters from \fBsparams\fR and the "scattered" option
+uses parameters from \fBapscat1\fR and \fBapscat2\fR.
+.le
+.ls splot = no
+Plot the final spectra? In quicklook mode a noninteractive, stacked plot
+is automatically produced using the task \fBspecplot\fR while in
+non-quicklook mode a query is given and the task \fBsplot\fR is used for
+interactive plotting.
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed unless required by the
+update option.
+.le
+.ls update = no
+Update processing of previously processed spectra if the aperture
+reference image, the dispersion reference image, or standard star
+calibration data are changed?
+.le
+.ls quicklook = no
+Extract and calibrate spectra with minimal interaction? In quicklook mode
+only aperture reference definitions, the initial dispersion function
+solution, and the standard star setup are done interactively. Scattered
+light subtraction and individual object tracing are not performed.
+Normally the \fIsplot\fR option is set in this mode to produce an automatic
+final spectrum plot for each object. It is recommended that this mode not be
+used for final reductions.
+.le
+.ls batch = no
+Process spectra as a background or batch job provided there are no interactive
+steps remaining.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls sparams = "" (pset)
+Name of parameter set containing additional processing parameters. This
+parameter is only for indicating the link to the parameter set
+\fBsparams\fR and should not be given a value. The parameter set may be
+examined and modified in the usual ways (typically with "epar
+sparams" or ":e sparams" from the parameter editor). The parameters are
+described below.
+.le
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls extras = no (apsum)
+Include raw unweighted and uncleaned spectra, the background spectra, and
+the estimated sigma spectra in a three dimensional output image format.
+See the discussion in the \fBapextract\fR package for further information.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Fraction of the peak to set aperture limits during automatic resizing.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 2 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- BACKGROUND AND SCATTERED LIGHT PARAMETERS --
+.ls b_function = "legendre", b_order = 1 (apsum)
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls b_naverage = -100 (apsum)
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.le
+.ls b_niterate = 0 (apsum)
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.le
+.ls b_low_reject = 3., b_high_reject = 3. (apsum)
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.le
+.ls buffer = 1. (apscatter)
+Buffer distance from the edge of any aperture for data to be included
+in the scattered light determination. This parameter may be modified
+interactively.
+.le
+.ls apscat1 = "", apscat2 = "" (apscatter)
+Parameter sets for the fitting functions across and along the dispersion.
+These parameters are those used by \fBicfit\fR. These parameters are
+usually set interactively.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum) (none|variance)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum and approfile) (fit1d|fit2d)
+Type of profile fitting algorithm to use. The "fit1d" algorithm is
+preferred except in cases of extreme tilt.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelist$thar.dat" (ecidentify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelist$".
+.le
+.ls match = 1. (ecidentify)
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (ecidentify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 10. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "legendre", i_xorder = 3, i_yorder = 3 (ecidentify)
+The default function, function order for the pixel position dependence, and
+function order for the aperture number dependence to be fit to the arc
+wavelengths. The function choices are "chebyshev" or "legendre".
+.le
+.ls i_niterate = 3, i_low = 3.0, i_high = 3.0 (ecidentify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (ecreidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If both a preceding and a following
+spectrum are not available then the nearest spectrum is used. The
+interpolation is weighted by the relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd" (setjd and refspectra)
+Image header keyword to be used as the sorting parameter for selection
+based on order. The header parameter must be numeric but otherwise may
+be anything. Common sorting parameters are times or positions.
+.le
+.ls group = "ljd" (setjd and refspectra)
+Image header keyword to be used to group spectra. For those selection
+methods which use the group parameter the reference and object
+spectra must have identical values for this keyword. This can
+be anything but it must be constant within a group. Common grouping
+parameters are the date of observation "date-obs" (provided it does not
+change over a night) or the local Julian day number.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling using
+the linear dispersion parameters specified by other parameters. If
+no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data is not interpolated. Note the interpolation
+function type is set by the package parameter \fIinterp\fR.
+.le
+.ls log = no (ecdispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (ecdispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+
+.ce
+-- SENSITIVITY CALIBRATION PARAMETERS --
+.ls bandwidth = 10., bandsep = 10. (standard)
+Interpolated bandpass grid. If INDEF then the same bandpasses as in the
+calibration files are used; otherwise the calibration data is interpolated
+to the specified set of bandpasses.
+.le
+.ls s_interact = yes (standard)
+Display the bandpasses on the standard star data and allow interactive
+addition and deletion of bandpasses.
+.le
+.ls s_function = "spline3", s_order = 1 (sensfunc)
+Function and order used to fit the sensitivity data. The function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline3" cubic spline,
+and "spline1" linear spline.
+The order of the fit corresponds to the
+number of polynomial terms or the number of spline pieces. The default
+values may be changed interactively.
+.le
+.ls fnu = no (calibrate)
+The default calibration is into units of F-lambda. If \fIfnu\fR = yes then
+the calibrated spectrum will be in units of F-nu.
+.le
+
+.ce
+PACKAGE PARAMETERS
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.le
+.ls extinction = "onedstds$kpnoextinct.dat" (standard, sensfunc, calibrate)
+Extinction file for a site. There are two extinction files in the
+NOAO standards library, onedstds$, for KPNO and CTIO. These extinction
+files are used for extinction and flux calibration.
+.le
+.ls caldir (standard)
+Standard star calibration directory. A directory containing standard
+star data files. Note that the directory name must end with '/'.
+There are a number of standard star calibrations directories in the NOAO
+standards library, onedstds$.
+.le
+.ls observatory = "observatory" (observatory)
+The default observatory to use for latitude dependent computations.
+If the OBSERVAT keyword is present in the image header it takes
+precedence over this parameter.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc) (dispcor)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls database = "database"
+Database name used by various tasks. This is a directory which is created
+if necessary.
+.le
+.ls verbose = no
+Verbose output? If set then almost all the information written to the
+logfile is also written to the terminal except when the task is a
+background or batch process.
+.le
+.ls logfile = "logfile"
+If specified detailed text log information is written to this file.
+.le
+.ls plotfile = ""
+If specified metacode plots are recorded in this file for later review.
+Since plot information can become large this should be used only if
+really desired.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
+.ih
+DESCRIPTION
+\fBDoecslit\fR subtracts background sky or scattered light, extracts,
+wavelength calibrates, and flux calibrates multiorder echelle slit spectra
+which have been processed to remove the detector characteristics; i.e. CCD
+images have been bias, dark count, and flat field corrected. The spectra
+should be oriented such that pixels of constant wavelength are aligned with
+the image columns or lines. Small departures from this alignment are not
+critical resulting in only a small loss of resolution. Single order
+observations should be reduced with \fBdoslit\fR.
+
+The task is a command language script which collects and combines the
+functions and parameters of many general purpose tasks to provide a single,
+complete data reduction path and a degree of guidance, automation, and
+record keeping. In the following description and in the parameter section
+the various general tasks used are identified. Further
+information about those tasks and their parameters may be found in their
+documentation. \fBDoecslit\fR also simplifies and consolidates parameters
+from those tasks and keeps track of previous processing to avoid
+duplications.
+
+The general organization of the task is to do the interactive setup steps,
+such as the aperture definitions and reference dispersion function
+determination, first using representative calibration data and then perform
+the majority of the reductions automatically, possibly as a background
+process, with reference to the setup data. In addition, the task
+determines which setup and processing operations have been completed in
+previous executions of the task and, contingent on the \fIredo\fR and
+\fIupdate\fR options, skips or repeats some or all of the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage
+since there are many variations possible.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+zero level, dark count, and flat field corrections.
+.le
+.ls [2]
+Set the \fBdoecslit\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, an aperture reference image (usually a bright
+star spectrum) to use in finding the orders and defining the
+aperture parameters, one or more arc images, and one or more standard
+star images. If there are many object, arc, or standard star images
+you might prepare "@ files". Set the detector and data
+specific parameters. Select the processing options desired.
+Finally you might wish to review the \fBsparams\fR algorithm parameters
+though the defaults are probably adequate.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the current execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The specified number of orders (ranked by peak strength) in the aperture
+reference image are located and default fixed width apertures are
+assigned. If the resize option is set the apertures are resized by finding
+the level which is 5% (the default) of the peak above local background.
+You then have the option of entering the aperture editing loop to check the
+aperture positions, sizes, and background fitting parameters. This is
+highly recommended. Note that it is important that the aperture numbers be
+sequential with the orders and if any orders are skipped the aperture
+numbers should also skip. It is also important to verify the background
+regions with the 'b' key. Usually you want any changes made to the
+background definitions to apply to all apertures so use the 'a' key to
+select all apertures before modifying the background parameters. To exit
+the background mode and then to exit the review mode use 'q'.
+.le
+.ls [5]
+The order positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively
+to examine the traced positions and adjust the fitting parameters. To exit
+the interactive fitting type 'q'. Not all orders need be examined and the
+"NO" response will quit the interactive fitting using the last defined
+fitting parameters on the remaining traces.
+.le
+.ls [6]
+Apertures are now defined for all standard and object images. This is only
+done if there are no previous aperture definitions for the image. The
+aperture references previously defined are used as the initial set of
+apertures for each image. The apertures are then recentered by an average
+shift over all orders and resized if that option is selected.
+The apertures may also be retraced and interactively examined
+for each image if the tracing option is selected and quicklook mode is not.
+.le
+.ls [7]
+If scattered light subtraction is selected the scattered light parameters
+are set using the aperture reference image and the task \fBapscatter\fR.
+The purpose of this is to interactively define the aperture buffer distance
+for the scattered light and the cross and parallel dispersion fitting
+parameters. The fitting parameters are taken from and recorded in the
+parameter sets \fBapscat1\fR and \fBapscat2\fR. All other scattered light
+subtractions are done noninteractively with these parameters. Note that
+the scattered light correction modifies the input images. Scattered light
+subtraction is not done in quicklook mode.
+.le
+.ls [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The dispersion function is defined using the task
+\fBecidentify\fR. Identify a few arc lines in a few orders with 'm' and
+'o' and use the 'l' line list identification command to automatically add
+additional lines and fit the dispersion function. Check the quality of the
+dispersion function fit with 'f'. When satisfied exit with 'q'.
+.le
+.ls [9]
+If the flux calibration option is selected the standard star spectra are
+processed (if not done previously). The images are background subtracted,
+extracted, and wavelength calibrated. The appropriate arc
+calibration spectra are extracted and the dispersion function refit
+using the arc reference spectrum as a starting point. The standard star
+fluxes through the calibration bandpasses are compiled. You are queried
+for the name of the standard star calibration data file. Because echelle
+spectra are often at much higher dispersion than the calibration data,
+interpolated bandpasses may be defined with the bandpass parameters in
+\fBsparams\fR and checked or modified interactively.
+
+After all the standard stars are processed a sensitivity function is
+determined using the interactive task \fBsensfunc\fR. Finally, the
+standard star spectra are extinction corrected and flux calibrated
+using the derived sensitivity function.
+.le
+.ls [10]
+The object spectra are now automatically background subtracted
+(an alternative to scattered light subtraction),
+extracted, wavelength calibrated, and flux calibrated.
+.le
+.ls [11]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'. In quicklook mode the spectra are plotted
+noninteractively with \fBspecplot\fR.
+.le
+.ls [12]
+The final spectra will have the same name as the original 2D images
+with a ".ec" extension added.
+.le
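+
+As an illustration of the outline above, a typical non-quicklook run on a
+small set of images might look like the following.  The image names are
+only examples (assuming the "imh" image type), the "@ files" are plain text
+lists of image names (one per line), and any of the options may equally
+well be set with \fBeparam\fR:
+
+.nf
+    cl> files obj*.imh > objs.lis	# optional "@ file" of objects
+    cl> doecslit @objs.lis apref=refstar arcs=arc1 standards=std1 fluxcal+
+.fi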
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of echelle slit object, standard star, and arc
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must be
+processed to remove overscan, bias, dark count, and flat field effects.
+This is generally done using the \fBccdred\fR package. Flat fields which
+are not contaminated by low counts between the apertures may be prepared
+with the task \fBapflatten\fR (recommended) or \fBapnormalize\fR. Lines of
+constant wavelength across the orders should be closely aligned with one of
+the image axes. Sometimes the orders are aligned rather than the spectral
+features. This will result in a small amount of resolution loss but is
+often acceptable. In some cases one may correct for misalignment with the
+\fBrotate\fR task. More complex geometric problems and observations of
+extended objects should be handled by the \fBlongslit\fR package and single
+order observations should be processed by \fBdoslit\fR.
+
+The aperture reference spectrum is generally a bright star. The arc
+spectra are comparison arc lamp observations (they must all be of the same
+type). The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in task \fBrefspectra\fR.
+
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ec" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, order, and wavelength
+information. When the \fIextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images. The task \fBscombine\fR is used to combine or merge orders into
+a single spectrum.
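+
+For example, once an observation has been reduced to "image.ec" it might be
+examined and repackaged with commands such as the following (the aperture
+number and output names are arbitrary):
+
+.nf
+    cl> splot image.ec				# plot individual orders
+    cl> scopy image.ec order20 apertures=20	# copy out one order
+    cl> scombine image.ec image.merged		# merge the orders
+.fi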
+
+\fBPackage Parameters\fR
+
+The \fBechelle\fR package parameters set parameters which change
+infrequently and define the standard I/O functions. The extinction file
+is used for making extinction corrections and the standard star
+calibration directory is used for determining flux calibrations from
+standard star observations. The calibration directories contain data files
+with standard star fluxes and band passes. The available extinction
+files and flux calibration directories may be listed using the command:
+.nf
+
+ cl> page onedstds$README
+
+.fi
+The extinction correction requires computation of an air mass using the
+task \fBsetairmass\fR. The air mass computation needs information
+about the observation and, in particular, the latitude of the observatory.
+This is determined using the OBSERVAT image header keyword. If this
+keyword is not present the observatory parameter is used. See the
+task \fBobservatory\fR for more on defining the observatory parameters.
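+
+If needed, the air mass may also be computed by hand before processing with
+a command of the form (the image list is only an illustration):
+
+.nf
+    cl> setairmass obj*.imh
+.fi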
+
+The spectrum interpolation type is used whenever a spectrum needs to be
+resampled for linearization or performing operations between spectra
+with different sampling. The "sinc" interpolation may be of interest
+as an alternative but see the cautions given in \fBonedspec.package\fR.
+
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdoecslit\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of the apertures, traces, and extracted
+spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+
+\fBProcessing Parameters\fR
+
+The input images are specified by image lists. The lists may be
+a list of explicit comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+To allow wildcard image lists to be used safely and conveniently the
+image lists are checked to remove extracted images (the .ec images)
+and to automatically identify object and arc spectra. Object and arc
+images are identified by the keyword IMAGETYP with values of "object",
+"OBJECT", "comp", or "COMPARISON" (the current practice at NOAO).
+If arc images are found in the object list they are transferred to the
+arc list while if object images are found in the arc list they are ignored.
+All other image types, such as biases, darks, or flat fields, are
+ignored. This behavior allows simply specifying all images with a wildcard
+in the object list with automatic selections of arc spectra or a
+wildcard in the arc list to automatically find the arc spectra.
+If the data lack the identifying information it is up to the user
+to explicitly set the proper lists.
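+
+In practice this means a wildcard can usually be given for both lists; for
+example (assuming the "imh" image type and IMAGETYP keywords in the headers):
+
+.nf
+    cl> doecslit *.imh apref=refstar arcs=*.imh standards=std1
+.fi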
+
+As mentioned earlier, all the arc images must be of the same type;
+that is, taken with the same arc lamp. The aperture reference parameter
+is a single image name which is usually a bright star.
+
+The next set of parameters describe the noise characteristics and the
+general layout of the orders. The read out noise and gain are used when
+"cleaning" cosmic rays and when using variance or optimal weighting. These
+parameters must be fairly accurate. Note that these are the effective
+parameters and must be adjusted if previous processing has modified the
+pixel values, such as with an unnormalized flat field.
+
+The general direction in which the orders run is specified by the
+dispersion axis parameter. Recall that ideally it is the direction
+of constant wavelength which should be aligned with an image axis and
+the dispersion direction will not be aligned because of the cross-dispersion.
+The \fInorders\fR parameter is used to automatically find the orders. The
+specified number of the brightest peaks are found. Generally after finding the
+orders the aperture definitions are reviewed and adjusted interactively.
+The profile width should be approximately the full width at the profile
+base. The default aperture limits and background regions are all
+derived from this width parameter.
+
+The next set of parameters select the processing steps and options. The
+various calibration steps may be done simultaneously, that is at the same
+time as the basic extractions, or in separate executions of the task.
+Typically, all the desired operations are done at the same time.
+Dispersion correction requires at least one arc spectrum and flux
+calibration requires dispersion correction and at least one standard star
+observation.
+
+The \fIresize\fR option resets the edges of the extraction apertures based
+on the profile for each object and standard star order. The default
+resizing is to the 5% point relative to the peak measured above the
+background. This allows following changes in the seeing. However, one
+should consider the consequences of this if attempting to flux calibrate
+the observations. Except in quicklook mode, the apertures for each object
+and standard star observation may be reviewed graphically and further
+adjustments made to the aperture width and background regions.
+
+The apertures for each observation are adjusted for small shifts relative
+to the reference aperture definitions. If you think this is not sufficient,
+say to account for rotation of the detector or for differing atmospheric
+dispersion, the \fItrace\fR option allows redefining the aperture trace
+functions for each observation. Note this is only allowed in non-quicklook
+mode.
+
+The \fIclean\fR option invokes a profile
+fitting and deviant point rejection algorithm as well as a variance weighting
+of points in the aperture. See the next section for more about
+requirements to use this option.
+
+The \fIbackground\fR option selects a type of correction for background
+or scattered light. If the type is "scattered" a global scattered light
+is fit to the data between the apertures and subtracted from the images.
+\fINote that the input images are modified by this operation\fR.
+This option is slow and is not allowed in quicklook
+mode. Alternatively, a local background may be subtracted using
+background regions defined for each aperture. The background may be
+within the slit for a sky subtraction or outside of the slit for a
+local scattered light subtraction. The data in the regions
+may be averaged, medianed, or the minimum value used. Another choice
+is to fit the data in the background regions by a function and interpolate
+to the object aperture.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+reference image, adding the scattered light subtraction option, a new arc
+reference image, and new standard stars. If all input spectra are to be
+processed regardless of previous processing the \fIredo\fR flag may be
+used. Note that reprocessing clobbers the previously processed output
+spectra.
+
+The final step is to plot the spectra if the \fIsplot\fR option is
+selected. In non-quicklook mode there is a query which may be
+answered either in lower or upper case. The plotting uses the interactive
+task \fBsplot\fR. In quicklook mode the plot appears noninteractively
+using the task \fBspecplot\fR.
+
+The \fIquicklook\fR option provides a simpler, less interactive, mode.
+The quicklook mode automatically assigns the reference apertures to
+the object and standard star observations without interactive review
+or tracing, does not do the time consuming scattered light correction,
+and the \fIsplot\fR option selects a noninteractive plot to be
+shown at the end of processing of each object and standard star
+spectrum. While the algorithms used in quicklook mode are nearly the same
+as in non-quicklook mode and the final results may be the same, it is
+recommended that the greater degree of monitoring and review in
+non-quicklook mode be used for careful final reductions.
+
+The batch processing option allows object spectra to be processed as a
+background or batch job. This will occur only if the interactive
+\fIsplot\fR option is not active; either not set, turned off during
+processing with "NO", or in quicklook mode. In batch processing the
+terminal output is suppressed.
+
+The \fIlistonly\fR option prints a summary of the processing steps
+which will be performed on the input spectra without actually doing
+anything. This is useful for verifying which spectra will be affected
+if the input list contains previously processed spectra. The listing
+does not include any arc spectra which may be extracted to dispersion
+calibrate an object spectrum.
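+
+For example (again with a hypothetical image template), the planned
+processing steps could be previewed without doing anything by typing:
+.nf
+
+# nothing is processed; only the planned steps are listed
+ec> doecslit obj* listonly+
+
+.fi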
+
+The last parameter (excluding the task mode parameter) points to
+another parameter set for the algorithm parameters. The default
+parameter set is called \fBsparams\fR. The algorithm parameters are
+discussed further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the
+\fBdoecslit\fR task and the parameters which control and modify the
+algorithms. The algorithm parameters available to you are
+collected in the parameter set \fBsparams\fR. These parameters are
+taken from the various general purpose tasks used by the \fBdoecslit\fR
+processing task. Additional information about these parameters and
+algorithms may be found in the help for the actual
+task executed. These tasks are identified below. The aim of this
+parameter set organization is to collect all the algorithm parameters
+in one place separate from the processing parameters and include only
+those which are relevant for echelle slit data. The parameter values
+can be changed from the defaults by using the parameter editor,
+.nf
+
+cl> epar sparams
+
+.fi
+or by simply typing \fIsparams\fR.
+The parameter editor can also be entered when editing the \fBdoecslit\fR
+parameters by typing \fI:e\fR when positioned at the \fIsparams\fR
+parameter.
+
+\fBAperture Definitions\fR
+
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the input echelle slit spectra and, if flux calibration is
+selected, the standard star spectra. This is done only for spectra which
+do not have previously defined apertures unless the \fIredo\fR option is
+set to force all definitions to be redone. Thus, apertures may be
+defined separately using the \fBapextract\fR tasks. This is particularly
+useful if one needs to use reference images to define apertures for very
+weak spectra which are not well centered or traced by themselves.
+
+Initially apertures are defined for a specified \fIaperture reference\fR
+image. The selected number of orders are found automatically by selecting
+the highest peaks in a cut across the dispersion. Apertures are assigned
+with a width given by the \fIwidth\fR parameter and numbered sequentially.
+The background regions are also defined in terms of the width parameter
+starting at one width distance from the profile center and extending to two
+widths on both sides of the profile. As an example, if the width parameter
+is 5 pixels the default aperture limits are +/- 2.5 pixels and the
+background sample regions will be "-10:-5,5:10". If the \fIresize\fR
+parameter is set the aperture limits are adjusted to a specified point on
+the spectrum profile (see \fBapresize\fR).
+
+A query is then given allowing the aperture definitions to be reviewed and
+modified. Queries made by \fBdoecslit\fR generally may be answered with either
+lower case "yes" or "no" or with upper case "YES" or "NO". The upper
+case responses apply to all further queries and so are used to eliminate
+further queries of that kind.
+
+Reviewing the aperture definitions is highly recommended to check the
+aperture numbering, aperture limits, and background regions. The aperture
+numbers must be linearly related, with a slope of +/- 1, to the true order
+numbers though absolute order numbers need not be known. The key point is
+that if an order is skipped the aperture numbers must also skip. The
+background regions are checked with the 'b' key. Typically one adjusts all
+the background regions at the same time by selecting all apertures with
+the 'a' key first. To exit the background and aperture editing steps type
+'q'.
+
+Next the positions of the orders at various points along the dispersion
+are measured and "trace functions" are fit. The user is asked whether
+to fit each trace function interactively. This is selected to adjust
+the fitting parameters such as function type and order. When
+interactively fitting a query is given for each aperture. After the
+first aperture one may skip reviewing the other traces.
+
+After the aperture reference image is done all the object and standard star
+images are checked for aperture definitions and those without definitions
+are assigned apertures. The assignment consists of inheriting the aperture
+from the reference aperture image, recentering the apertures based on an
+average shift that best centers all the apertures, resizing the apertures
+if the resize option is selected, and retracing the spectral orders if the
+retracing option is selected. Retracing is only allowed in non-quicklook
+mode (set by the \fIquicklook\fR parameter). Also interactive review of
+the aperture definitions is only done in
+non-quicklook mode. In quicklook mode the aperture definitions are all set
+noninteractively without retracing. It is recommended that quicklook only
+be used for initial quick extractions and calibration and that for final
+reductions one at least review the aperture definitions and possibly
+retrace each observation.
+
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBsparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the object position on the slit and the number
+of image lines or columns to sum are set by the \fIline\fR and \fInsum\fR
+parameters. A line of INDEF (the default) selects the middle of the
+image. The automatic finding algorithm is described for the task
+\fBapfind\fR and basically finds the strongest peaks. The resizing is
+described in the task \fBapresize\fR and the parameters used are also
+described there. The tracing is
+done as described in \fBaptrace\fR and consists of stepping along the image
+using the specified \fIt_step\fR parameter. The function fitting uses the
+\fBicfit\fR commands with the other parameters from the tracing section.
+
+\fBBackground or Scattered Light Subtraction\fR
+
+In addition to not subtracting any sky or scattered light there are two
+approaches to subtracting background light. The first is to determine
+a smooth global scattered light component. The second is to subtract
+a locally determined background at each point along the dispersion and
+for each aperture. This can be either for a sky subtraction if the
+background regions are within the slit or scattered light if the
+background regions are outside of the slit. Note that background
+subtraction is only done for object and standard star images and not
+for arc spectra. Also, the global scattered light option is not done
+in quicklook mode.
+
+The global scattered light fitting and subtraction is done with the task
+\fBapscatter\fR. The function fitting parameters are set interactively
+using the aperture reference spectrum. All other subtractions are done
+noninteractively with the same set of parameters. The scattered light is
+subtracted from the input images, thus modifying them, and one might wish
+to first make backups of the original images.
+
+The scattered light is measured between the apertures using a specified
+buffer distance from the aperture edges. The scattered light pixels are
+fit by a series of one dimensional functions across the dispersion. The
+independent fits are then smoothed along the dispersion by again fitting
+low order functions. These fits then define the smooth scattered light
+surface to be subtracted from the image. The fitting parameters are
+defined and recorded in the two parameter sets \fIapscat1\fR and
+\fIapscat2\fR. The scattered light algorithm is described more fully in
+\fBapscatter\fR. This algorithm is relatively slow.
+
+Local background subtraction is done during extraction based on background
+regions and parameters defined by the default background parameters or
+changed during interactive review of the apertures. The background
+subtraction options are to subtract the average, median, or minimum of the
+pixels in the background regions, or to fit a function and subtract the
+function from under the extracted object pixels. The background regions
+are specified in pixels from the aperture center and follow changes in
+center of the spectrum along the dispersion. The syntax is colon separated
+ranges with multiple ranges separated by a comma or space. The background
+fitting uses the \fBicfit\fR routines which include medians, iterative
+rejection of deviant points, and a choice of function types and orders.
+Note that it is important to use a method which rejects cosmic rays such as
+using either medians over all the background regions (\fIbackground\fR =
+"median") or median samples during fitting (\fIb_naverage\fR < -1). The
+background subtraction algorithm and options are described in greater
+detail in \fBapsum\fR and \fBapbackground\fR.
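+
+As a sketch with hypothetical image names, a median local background
+subtraction, which is robust against cosmic rays in the background
+regions, might be requested with:
+.nf
+
+# "obj*" and "refstar" are hypothetical image names
+ec> doecslit obj* apref=refstar background=median
+
+.fi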
+
+\fBExtraction\fR
+
+The actual extraction of the spectra is done by summing across the
+fixed width apertures at each point along the dispersion.
+The default is to simply sum the pixels using
+partial pixels at the ends. There is an option to weight the
+sum based on a Poisson variance model using the \fIreadnoise\fR and
+\fIgain\fR detector parameters. Note that if the \fIclean\fR
+option is selected the variance weighted extraction is used regardless
+of the \fIweights\fR parameter. The sigma thresholds for cleaning
+are also set in the \fBsparams\fR parameters.
+
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain.
+These numbers need to be adjusted if the image has been processed
+such that the intensity scale has a different origin (such as
+a scattered light subtraction) or scaling (such as caused by unnormalized
+flat fielding). These options also require using background subtraction
+if the profile does not go to zero. For optimal extraction and
+cleaning to work it is recommended that any flat fielding be done
+using flat fields produced by \fBapflatten\fR, no scattered light
+correction, and using background subtraction if there is any
+appreciable sky or to compensate for scattered light.
+For further discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
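+
+For example (the detector values below are purely illustrative and must
+be replaced by the true effective values), cleaning with variance
+weighting might be enabled by:
+.nf
+
+# illustrative read noise and gain; use the actual detector values
+ec> doecslit obj* apref=refstar readnoise=4. gain=2.5 clean+
+
+.fi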
+
+\fBDispersion Correction\fR
+
+If dispersion correction is not selected, \fIdispcor\fR=no, then the object
+spectra are simply extracted. The extracted spectra may be plotted
+by setting the \fIsplot\fR option. This produces a query and uses
+the interactive \fBsplot\fR task in non-quicklook mode and uses
+\fBspecplot\fR noninteractively in quicklook mode.
+
+Dispersion corrections are applied to the extracted spectra if the
+\fIdispcor\fR processing parameter is set. There
+are three basic steps involved: determining the dispersion functions
+relating pixel position to wavelength, assigning the appropriate
+dispersion function to a particular observation, and either storing
+the nonlinear dispersion function in the image headers or resampling the
+spectra to evenly spaced pixels in wavelength.
+
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture definition.
+Note extractions of arc spectra are not background or scattered light
+subtracted. The interactive task \fBecidentify\fR is used to define the
+dispersion function. The idea is to mark some lines in a few orders whose
+wavelengths are known (with the line list used to supply additional lines after
+the first few identifications define the approximate wavelengths) and to fit a
+function giving the wavelength from the aperture number and pixel position.
+
+The arc dispersion function parameters are for \fBecidentify\fR and its
+related partner \fBecreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and defining whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBecidentify\fR.
+
+Once the reference dispersion function is defined other arc spectra are
+extracted as required by the object spectra. The assignment of arcs is
+done either explicitly with an arc assignment table (parameter
+\fIarctable\fR) or based on a header parameter such as a time.
+These assignments are made by the task
+\fBrefspectra\fR. When two arcs are assigned to an object spectrum an
+interpolation is done between the two dispersion functions. This makes an
+approximate correction for steady drifts in the dispersion.
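+
+A minimal sketch of an arc assignment table (hypothetical image names;
+see \fBrefspectra\fR for the authoritative format) lists each object
+followed by the arc exposure or exposures to be assigned to it:
+.nf
+
+# hypothetical contents of the file named by the arctable parameter
+obj001  arc001
+obj002  arc001 arc002
+
+.fi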
+
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. These tasks compute and add the header keywords for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
+
+In non-quicklook mode the arc spectra assigned to each object are
+extracted using the same apertures as the object. This accounts for
+changes in the recentering, aperture sizes, and tracing functions.
+In quicklook mode the arc spectra are extracted using the reference
+apertures. When the same arc is used for several object images this
+allows the arc spectrum to only be extracted once.
+
+Defining the dispersion function for a new arc extraction is done with
+the task \fBecreidentify\fR. This is done noninteractively with log
+information recorded about the line reidentifications and the fit.
+
+The last step of dispersion correction is setting the dispersion
+of the object image from the arc images. There are two choices here.
+If the \fIlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+
+If the \fIlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. For echelle spectra each order is linearized independently so
+that the wavelength interval per pixel is different in different orders.
+This preserves most of the resolution and avoids over or under sampling of
+the highest or lowest dispersion orders. The wavelength limits are
+taken from the limits determined from the arc reference spectrum and
+the number of pixels is the same as the original images. The dispersion
+per pixel is then derived from these constraints.
+
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
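+
+As an illustration only, the linearization defaults might be changed in
+the algorithm parameter set, for instance to use logarithmic wavelength
+bins for radial velocity work:
+.nf
+
+# example settings; sparams is the default algorithm parameter set
+ec> sparams.linearize=yes
+ec> sparams.log=yes
+ec> sparams.flux=yes
+
+.fi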
+
+\fBFlux Calibration\fR
+
+Flux calibration consists of an extinction correction and an instrumental
+sensitivity calibration. The extinction correction only depends on the
+extinction function defined by the package parameter \fIextinct\fR and
+determination of the airmass from the header parameters (the air mass is
+computed by \fBsetairmass\fR as mentioned earlier). The sensitivity
+calibration depends on a sensitivity calibration spectrum determined from
+standard star observations for which there are tabulated absolute fluxes.
+The task that applies both the extinction correction and sensitivity
+calibration to each extracted object spectrum is \fBcalibrate\fR. Consult
+the manual page for this task for more information.
+
+Generation of the sensitivity calibration spectrum is done before
+processing any object spectra since it has two interactive steps and
+requires all the standard star observations. The first step is tabulating
+the observed fluxes over the same bandpasses as the calibrated absolute
+fluxes. For very high resolution it may be the case that the measured
+calibration bandpasses are too large or sparse. In this case one must
+interpolate the calibration data to bandpasses appropriate for the data.
+If the bandpass widths and separations are given as INDEF then the same
+bandpasses as in the calibration file are used. Otherwise a uniform grid
+of bandpasses is interpolated. Using interpolated bandpasses is not
+rigorous but is sometimes the only choice for echelle spectra.
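+
+For instance (a sketch only, with example values), a uniform grid of
+interpolated bandpasses might be requested through the bandpass
+parameters in \fBsparams\fR, while leaving them INDEF keeps the
+bandpasses of the calibration file:
+.nf
+
+# example values only; INDEF uses the calibration file bandpasses
+ec> sparams.bandwidth=20.
+ec> sparams.bandsep=20.
+
+.fi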
+
+The standard star tabulations are done after each standard star is
+extracted and dispersion corrected. You are asked for the name of the
+standard star as tabulated in the absolute flux data files in the directory
+\fIcaldir\fR defined by the package parameters. If the \fIinteract\fR
+parameter is yes the bandpasses can be displayed on the data and you can
+interactively add or delete bandpasses. The tabulation of the standard star
+observations over the standard bandpasses is done by the task
+\fBstandard\fR. The tabulated data is stored in the file \fIstd\fR. Note
+that if the \fIredo\fR flag is not set, any new standard stars specified in
+subsequent executions of \fBdoecslit\fR are added to the previous data in
+the data file; otherwise the file is first deleted. Modification of the
+tabulated standard star data, such as by adding new stars, will cause any
+spectra in the input list which have been previously calibrated to be
+reprocessed if the \fIupdate\fR flag is set.
+
+After the standard star calibration bandpass fluxes are tabulated the
+information from all the standard stars is combined to produce a
+sensitivity function for use by \fBcalibrate\fR. The sensitivity function
+determination is interactive and uses the task \fBsensfunc\fR. This task
+allows fitting a smooth sensitivity function to the ratio of the observed
+to calibrated fluxes versus wavelength. The types of manipulations one
+needs to do include deleting bad observations, possibly removing variable
+extinction (for poor data), and possibly deriving a revised extinction
+function. This is a complex operation and one should consult the manual
+page for \fBsensfunc\fR. The sensitivity function is saved as one
+dimensional spectra (one per order) with the root name \fIsens\fR.
+Deletion of these images will also cause reprocessing to occur if the
+\fIupdate\fR flag is set.
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is similar to the sequence
+performed by the test procedure "demos doecslit".
+
+.nf
+ec> demos mkecslit
+Creating example longslit in image demoobj ...
+Creating example longslit in image demostd ...
+Creating example longslit in image demoarc ...
+ec> echelle.verbose=no
+ec> echelle.caldir=onedstds$spechayescal/
+ec> doecslit Bdemoobj apref=Bdemostd arcs=Bdemoarc stand=Bdemostd \
+>>> norders=3 extcor+ fluxcal+ resize+ splot+
+Set reference aperture for Bdemostd
+Edit apertures for Bdemostd? (yes):
+<Check background with 'b', exit background and review with 'q'>
+Fit traced positions for Bdemostd interactively? (yes):
+Fit curve to aperture 1 of Bdemostd interactively (yes):
+<Exit with 'q'>
+Fit curve to aperture 2 of Bdemostd interactively (yes): N
+Edit apertures for Bdemoobj? (yes):
+<Check background with 'b', exit background and review with 'q'>
+Fit traced positions for Bdemoobj interactively? (yes): N
+Extract arc reference image Bdemoarc
+Determine dispersion solution for Bdemoarc
+<Type 'm' at first strong line (pixel 156) and identify it as 4965>
+<Type 'k' to go to next order>
+<Mark 52->5002, 74->5003.6, 155->5009.3>
+<Type 'k' to go to next order and mark 18->5044.7, 231->5059.8>
+<Type 'f' to see the fit residuals>
+<Type 'q' to quit fit and then 'q' to exit>
+Extract standard star spectrum Bdemostd
+Assign arc spectra for Bdemostd
+Extract and reidentify arc spectrum Bdemoarc
+Dispersion correct Bdemostd
+B...ec.imh: ap = 1, w1 = 4953.9, w2 = 4972.2, dw = 0.071, nw = 256
+B...ec.imh: ap = 2, w1 = 4998.3, w2 = 5016.5, dw = 0.071, nw = 256
+B...ec.imh: ap = 3, w1 = 5043.5, w2 = 5061.6, dw = 0.070, nw = 256
+Compile standard star fluxes for Bdemostd
+Bdemostd.ec.imh[1]: Artificial Echelle Spectrum
+Star name in calibration list: hz14
+Bdemostd.ec.imh[1]: Edit bandpasses? (no|yes|NO|YES|NO!|YES!) (no): y
+<Exit with 'q'>
+Bdemostd.ec.imh[2]: Artificial Echelle Spectrum
+Bdemostd.ec.imh[2]: Edit bandpasses? (no|yes|NO|YES|NO!|YES!) (y): N
+Bdemostd.ec.imh[3]: Artificial Echelle Spectrum
+Bdemostd.ec.imh[3]: Edit bandpasses? (no|yes|NO|YES|NO!|YES!) (N):
+Compute sensitivity function
+Fit aperture 1 interactively? (no|yes|NO|YES) (no|yes|NO|YES) (yes):
+<Exit with 'q'>
+Sensitivity function for aperture 1 --> sens.0001
+Fit aperture 2 interactively? (no|yes|NO|YES) (no|yes|NO|YES) (yes): N
+Sensitivity function for aperture 2 --> sens.0002
+Sensitivity function for aperture 3 --> sens.0003
+Flux and/or extinction calibrate standard stars
+Standard stars:
+Splot spectrum? (no|yes|NO|YES) (yes):
+Image line/aperture to plot (0:) (1):
+<Exit with 'q'>
+Extract object spectrum Bdemoobj
+Assign arc spectra for Bdemoobj
+Extract and reidentify arc spectrum Bdemoarc
+Dispersion correct Bdemoobj
+B...ec.imh: ap = 1, w1 = 4953.9, w2 = 4972.2, dw = 0.071, nw = 256
+B...ec.imh: ap = 2, w1 = 4998.3, w2 = 5016.5, dw = 0.071, nw = 256
+B...ec.imh: ap = 3, w1 = 5043.5, w2 = 5061.6, dw = 0.070, nw = 256
+Extinction correct Bdemoobj
+Flux calibrate Bdemoobj
+Bdemoobj.ec.imh:
+Splot spectrum? (no|yes|NO|YES) (yes):
+Image line/aperture to plot (0:) (1):
+<Exit with 'q'>
+.fi
+.ih
+REVISIONS
+.ls DOECSLIT V2.10.3
+The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A bug which
+alphabetized the arc spectra was fixed.
+.le
+.ih
+SEE ALSO
+apbackground, apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace,
+apvariance, calibrate, ccdred, center1d, ctioslit, dispcor,
+echelle.doecslit, ecidentify, ecreidentify, icfit, kpnocoude, kpnoslit,
+msred, observatory, onedspec.package, refspectra, sensfunc, setairmass, setjd,
+splot, standard
+.endhelp
diff --git a/noao/imred/echelle/doc/doecslit.ms b/noao/imred/echelle/doc/doecslit.ms
new file mode 100644
index 00000000..a93f3e8b
--- /dev/null
+++ b/noao/imred/echelle/doc/doecslit.ms
@@ -0,0 +1,1479 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND February 1993
+.TL
+Guide to the Slit Spectra Reduction Task DOECSLIT
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+\fBDoecslit\fR subtracts background sky or scattered light, extracts,
+wavelength calibrates, and flux calibrates multiorder echelle slit spectra
+which have been processed to remove the detector characteristics; i.e. CCD
+images have been bias, dark count, and flat field corrected. The spectra
+should be oriented such that pixels of constant wavelength are aligned with
+the image columns or lines. Small departures from this alignment are not
+critical, resulting in only a small loss of resolution. Single order
+observations should be reduced with \fBdoslit\fR.
+.AE
+.NH
+Introduction
+.LP
+\fBDoecslit\fR subtracts background sky or scattered light, extracts,
+wavelength calibrates, and flux calibrates multiorder echelle slit spectra
+which have been processed to remove the detector characteristics; i.e. CCD
+images have been bias, dark count, and flat field corrected. The spectra
+should be oriented such that pixels of constant wavelength are aligned with
+the image columns or lines. Small departures from this alignment are not
+critical, resulting in only a small loss of resolution. Single order
+observations should be reduced with \fBdoslit\fR.
+.LP
+The task is a command language script which collects and combines the
+functions and parameters of many general purpose tasks to provide a single,
+complete data reduction path and a degree of guidance, automation, and
+record keeping. In the following description and in the parameter section
+the various general tasks used are identified. Further
+information about those tasks and their parameters may be found in their
+documentation. \fBDoecslit\fR also simplifies and consolidates parameters
+from those tasks and keeps track of previous processing to avoid
+duplications.
+.LP
+The general organization of the task is to do the interactive setup steps,
+such as the aperture definitions and reference dispersion function
+determination, first using representative calibration data and then perform
+the majority of the reductions automatically, possibly as a background
+process, with reference to the setup data. In addition, the task
+determines which setup and processing operations have been completed in
+previous executions of the task and, contingent on the \f(CWredo\fR and
+\f(CWupdate\fR options, skips or repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage
+since there are many variations possible.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+zero level, dark count, and flat field corrections.
+.IP [2]
+Set the \fBdoecslit\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, an aperture reference image (usually a bright
+star spectrum) to use in finding the orders and defining the
+aperture parameters, one or more arc images, and one or more standard
+star images. If there are many object, arc, or standard star images
+you might prepare "@ files". Set the detector and data
+specific parameters. Select the processing options desired.
+Finally you might wish to review the \fBsparams\fR algorithm parameters
+though the defaults are probably adequate.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the current execution and no further queries of that
+type will be made.
+.IP [4]
+The specified number of orders (ranked by peak strength) in the aperture
+reference image are located and default fixed width apertures are
+assigned. If the resize option is set the apertures are resized by finding
+the level which is 5% (the default) of the peak above local background.
+You then have the option of entering the aperture editing loop to check the
+aperture positions, sizes, and background fitting parameters. This is
+highly recommended. Note that it is important that the aperture numbers be
+sequential with the orders and if any orders are skipped the aperture
+numbers should also skip. It is also important to verify the background
+regions with the 'b' key. Usually you want any changes made to the
+background definitions to apply to all apertures so use the 'a' key to
+select all apertures before modifying the background parameters. To exit
+the background mode and then to exit the review mode use 'q'.
+.IP [5]
+The order positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively
+to examine the traced positions and adjust the fitting parameters. To exit
+the interactive fitting type 'q'. Not all orders need be examined and the
+"NO" response will quit the interactive fitting using the last defined
+fitting parameters on the remaining traces.
+.IP [6]
+Apertures are now defined for all standard and object images. This is only
+done if there are no previous aperture definitions for the image. The
+aperture references previously defined are used as the initial set of
+apertures for each image. The apertures are then recentered by an average
+shift over all orders and resized if that option is selected.
+The apertures may also be retraced and interactively examined
+for each image if the tracing option is selected and quicklook mode is not.
+.IP [7]
+If scattered light subtraction is selected the scattered light parameters
+are set using the aperture reference image and the task \fBapscatter\fR.
+The purpose of this is to interactively define the aperture buffer distance
+for the scattered light and the cross and parallel dispersion fitting
+parameters. The fitting parameters are taken from and recorded in the
+parameter sets \fBapscat1\fR and \fBapscat2\fR. All other scattered light
+subtractions are done noninteractively with these parameters. Note that
+the scattered light correction modifies the input images. Scattered light
+subtraction is not done in quicklook mode.
+.IP [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The dispersion function is defined using the task
+\fBecidentify\fR. Identify a few arc lines in a few orders with 'm' and 'o'
+and use the 'l' line list identification command to automatically add
+additional lines and fit the dispersion function. Check the quality of the
+dispersion function fit with 'f'. When satisfied exit with 'q'.
+.IP [9]
+If the flux calibration option is selected the standard star spectra are
+processed (if not done previously). The images are background subtracted,
+extracted, and wavelength calibrated. The appropriate arc
+calibration spectra are extracted and the dispersion function refit
+using the arc reference spectrum as a starting point. The standard star
+fluxes through the calibration bandpasses are compiled. You are queried
+for the name of the standard star calibration data file. Because echelle
+spectra are often at much higher dispersion than the calibration data
+interpolated bandpasses may be defined with the bandpass parameters in
+\fBsparams\fR and checked or modified interactively.
+.IP
+After all the standard stars are processed a sensitivity function is
+determined using the interactive task \fBsensfunc\fR. Finally, the
+standard star spectra are extinction corrected and flux calibrated
+using the derived sensitivity function.
+.IP [10]
+The object spectra are now automatically background subtracted
+(an alternative to scattered light subtraction),
+extracted, wavelength calibrated, and flux calibrated.
+.IP [11]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'. In quicklook mode the spectra are plotted
+noninteractively with \fBspecplot\fR.
+.IP [12]
+The final spectra will have the same name as the original 2D images
+with a ".ec" extension added.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of echelle slit object, standard star, and arc
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must be
+processed to remove overscan, bias, dark count, and flat field effects.
+This is generally done using the \fBccdred\fR package. Flat fields which
+are not contaminated by low counts between the apertures may be prepared
+with the task \fBapflatten\fR (recommended) or \fBapnormalize\fR. Lines of
+constant wavelength across the orders should be closely aligned with one of
+the image axes. Sometimes the orders are aligned rather than the spectral
+features. This will result in a small amount of resolution loss but is
+often acceptable. In some cases one may correct for misalignment with the
+\fBrotate\fR task. More complex geometric problems and observations of
+extended objects should be handled by the \fBlongslit\fR package and single
+order observations should be processed by \fBdoslit\fR.
+.LP
+The aperture reference spectrum is generally a bright star. The arc
+spectra are comparison arc lamp observations (they must all be of the same
+type). The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in task \fBrefspectra\fR.
+.LP
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ec" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, order, and wavelength
+information. When the \f(CWextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images. The task \fBscombine\fR is used to combine or merge orders into
+a single spectrum.
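+.LP
+For example (the image name is hypothetical), a reduced spectrum might be
+examined interactively or a single order copied to its own image with:
+.V1
+
+# "demoobj.ec" is a hypothetical reduced spectrum
+cl> splot demoobj.ec
+cl> scopy demoobj.ec order2 apertures=2
+
+.V2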
+.NH
+Package Parameters
+.LP
+The \fBechelle\fR package parameters, shown in Figure 1, set parameters
+which change infrequently and define the standard I/O functions.
+.KS
+
+.ce
+Figure 1: Package Parameter Set for the ECHELLE Package
+
+.V1
+cl> epar echelle
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = echelle
+
+(extinct= onedstds$kpnoextinct.dat) Extinction file
+(caldir = onedstds$spechayescal/) Standard star calibration directory
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+(dispaxi= 2) Image axis for 2D images
+(nsum = 1) Number of lines/columns to sum for 2D images
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Text log file
+(plotfil= ) Plot file
+
+(records= ) Record number extensions
+(version= ECHELLE V3: July 1991)
+
+.KE
+.V2
+The extinction file
+is used for making extinction corrections and the standard star
+calibration directory is used for determining flux calibrations from
+standard star observations. The calibration directories contain data files
+with standard star fluxes and band passes. The available extinction
+files and flux calibration directories may be listed using the command:
+.V1
+
+ cl> page onedstds$README
+
+.V2
+The extinction correction requires computation of an air mass using the
+task \fBsetairmass\fR. The air mass computation needs information
+about the observation and, in particular, the latitude of the observatory.
+This is determined using the OBSERVAT image header keyword. If this
+keyword is not present the observatory parameter is used. See the
+task \fBobservatory\fR for more on defining the observatory parameters.
+.LP
+The spectrum interpolation type is used whenever a spectrum needs to be
+resampled for linearization or performing operations between spectra
+with different sampling. The "sinc" interpolation may be of interest
+as an alternative but see the cautions given in \fBonedspec.package\fR.
+.LP
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+everything that the task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of the apertures, traces, and extracted
+spectra but can become quite large.
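+.LP
+For example, the logging and verbosity may be adjusted directly from the
+command line (the log file name here is arbitrary):
+.V1
+
+# "ecslit.log" is an arbitrary log file name
+cl> echelle.verbose=yes
+cl> echelle.logfile="ecslit.log"
+
+.V2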
+.NH
+Processing Parameters
+.LP
+The \fBdoecslit\fR parameters are shown in Figure 2.
+.KS
+
+.ce
+Figure 2: Parameter Set for DOECSLIT
+
+.V1
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = echelle
+ TASK = doecslit
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(arcs = ) List of arc spectra
+(arctabl= ) Arc assignment table (optional)
+(standar= ) List of standard star spectra
+.KE
+.V1
+
+(readnoi= 0.) Read out noise sigma (photons)
+(gain = 1.) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(norders= 10) Number of orders
+(width = 5.) Width of profiles (pixels)
+
+(dispcor= yes) Dispersion correct spectra?
+(extcor = no) Extinction correct spectra?
+(fluxcal= no) Flux calibrate spectra?
+(resize = no) Resize object apertures?
+(clean = no) Detect and replace bad pixels?
+(trace = yes) Trace object spectra?
+(backgro= none) Background to subtract
+(splot = no) Plot the final spectra?
+(redo = no) Redo operations if previously done?
+(update = no) Update spectra if cal data changes?
+(quicklo= no) Approximate quicklook reductions?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(sparams= ) Algorithm parameters
+
+.V2
+The input images are specified by image lists. The lists may be
+a list of explicit comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+To allow wildcard image lists to be used safely and conveniently the
+image lists are checked to remove extracted images (the .ec images)
+and to automatically identify object and arc spectra. Object and arc
+images are identified by the keyword IMAGETYP with values of "object",
+"OBJECT", "comp", or "COMPARISON" (the current practice at NOAO).
+If arc images are found in the object list they are transferred to the
+arc list, while object images found in the arc list are ignored.
+All other image types, such as biases, darks, or flat fields, are
+ignored. This behavior allows simply specifying all images with a wildcard
+in the object list, relying on automatic selection of the arc spectra, or a
+wildcard in the arc list to automatically find the arc spectra.
+If the data lack the identifying information it is up to the user
+to explicitly set the proper lists.
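+.LP
+As a sketch with hypothetical file and image names, the input lists might
+be given either as wildcard templates or as "@ files":
+.V1
+
+# hypothetical image templates and list files
+cl> doecslit obj*.imh arcs=arc*.imh stand=std*.imh
+cl> doecslit @objects.lis arcs=@arcs.lis stand=@stds.lis
+
+.V2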
+.LP
+As mentioned earlier, all the arc images must be of the same type;
+that is taken with the same arc lamp. The aperture reference parameter
+is a single image name which is usually a bright star.
+.LP
+The next set of parameters describe the noise characteristics and the
+general layout of the orders. The read out noise and gain are used when
+"cleaning" cosmic rays and when using variance or optimal weighting. These
+parameters must be fairly accurate. Note that these are the effective
+parameters and must be adjusted if previous processing has modified the
+pixel values, such as with an unnormalized flat field.
+.LP
+The general direction in which the orders run is specified by the
+dispersion axis parameter. Recall that ideally it is the direction
+of constant wavelength which should be aligned with an image axis and
+the dispersion direction will not be aligned because of the cross-dispersion.
+The \f(CWnorders\fR parameter is used to automatically find the orders. The
+specified number of the brightest peaks are found. Generally after finding the
+orders the aperture definitions are reviewed and adjusted interactively.
+The profile width should be approximately the full width at the profile
+base. The default aperture limits and background regions are all
+derived from this width parameter.
+.LP
+The next set of parameters select the processing steps and options. The
+various calibration steps may be done simultaneously, that is at the same
+time as the basic extractions, or in separate executions of the task.
+Typically, all the desired operations are done at the same time.
+Dispersion correction requires at least one arc spectrum and flux
+calibration requires dispersion correction and at least one standard star
+observation.
+.LP
+The \f(CWresize\fR option resets the edges of the extraction apertures based
+on the profile for each object and standard star order. The default
+resizing is to the 5% point relative to the peak measured above the
+background. This allows following changes in the seeing. However, one
+should consider the consequences of this if attempting to flux calibrate
+the observations. Except in quicklook mode, the apertures for each object
+and standard star observation may be reviewed graphically and further
+adjustments made to the aperture width and background regions.
+.LP
+The apertures for each observation are adjusted for small shifts relative
+to the reference aperture definitions. If you think this is not sufficient,
+say to account for rotation of the detector or for differing atmospheric
+dispersion, the \f(CWtrace\fR option allows redefining the aperture trace
+functions for each observation. Note this is only allowed in non-quicklook
+mode.
+.LP
+The \f(CWclean\fR option invokes a profile
+fitting and deviant point rejection algorithm as well as a variance weighting
+of points in the aperture. See the next section for more about
+requirements to use this option.
+.LP
+The \f(CWbackground\fR option selects a type of correction for background
+or scattered light. If the type is "scattered" a global scattered light
+is fit to the data between the apertures and subtracted from the images.
+\fINote that the input images are modified by this operation\fR.
+This option is slow and is not allowed in quicklook
+mode. Alternatively, a local background may be subtracted using
+background regions defined for each aperture. The background may be
+within the slit for a sky subtraction or outside of the slit for a
+local scattered light subtraction. The data in the regions
+may be averaged, medianed, or the minimum value used. Another choice
+is to fit the data in the background regions by a function and interpolate
+to the object aperture.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+reference image, adding the scattered light subtraction option, a new arc
+reference image, and new standard stars. If all input spectra are to be
+processed regardless of previous processing the \f(CWredo\fR flag may be
+used. Note that reprocessing clobbers the previously processed output
+spectra.
+.LP
+The final step is to plot the spectra if the \f(CWsplot\fR option is
+selected. In non-quicklook mode there is a query which may be
+answered either in lower or upper case. The plotting uses the interactive
+task \fBsplot\fR. In quicklook mode the plot appears noninteractively
+using the task \fBspecplot\fR.
+.LP
+The \f(CWquicklook\fR option provides a simpler, less interactive, mode.
+The quicklook mode automatically assigns the reference apertures to
+the object and standard star observations without interactive review
+or tracing, does not do the time consuming scattered light correction,
+and the \f(CWsplot\fR option selects a noninteractive plot to be
+shown at the end of processing of each object and standard star
+spectrum. While the algorithms used in quicklook mode are nearly the same
+as in non-quicklook mode and the final results may be the same, it is
+recommended that the greater degree of monitoring and review in
+non-quicklook mode be used for careful final reductions.
+.LP
+The batch processing option allows object spectra to be processed as a
+background or batch job. This will occur only if the interactive
+\f(CWsplot\fR option is not active; either not set, turned off during
+processing with "NO", or in quicklook mode. In batch processing the
+terminal output is suppressed.
+.LP
+The \f(CWlistonly\fR option prints a summary of the processing steps
+which will be performed on the input spectra without actually doing
+anything. This is useful for verifying which spectra will be affected
+if the input list contains previously processed spectra. The listing
+does not include any arc spectra which may be extracted to dispersion
+calibrate an object spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to
+another parameter set for the algorithm parameters. The default
+parameter set is called \fBsparams\fR. The algorithm parameters are
+discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the
+\fBdoecslit\fR task and the parameters which control and modify the
+algorithms. The algorithm parameters available to you are
+collected in the parameter set \fBsparams\fR. These parameters are
+taken from the various general purpose tasks used by the \fBdoecslit\fR
+processing task. Additional information about these parameters and
+algorithms may be found in the help for the actual
+task executed. These tasks are identified below. The aim of this
+parameter set organization is to collect all the algorithm parameters
+in one place separate from the processing parameters and include only
+those which are relevant for echelle slit data. The parameter values
+can be changed from the defaults by using the parameter editor,
+.V1
+
+cl> epar sparams
+
+.V2
+or by simply typing \f(CWsparams\fR.
+The parameter editor can also be entered when editing the \fBdoecslit\fR
+parameters by typing \f(CW:e\fR when positioned at the \f(CWsparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+.V1
+cl> epar sparams
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = echelle
+ TASK = sparams
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(extras = no) Extract sky, sigma, etc.?
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+.KE
+.V1
+
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 2) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+ -- BACKGROUND AND SCATTERED LIGHT PARAMETERS --
+(b_funct= legendre) Background function
+(b_order= 1) Background function order
+(b_naver= -100) Background average or median
+(b_niter= 0) Background rejection iterations
+(b_low = 3.) Background lower rejection sigma
+(b_high = 3.) Background upper rejection sigma
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli= linelist$thar.dat) Line list
+(match = 1.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 10.) Centering radius in pixels
+(i_funct= legendre) Echelle coordinate function
+(i_xorde= 3) Order of coordinate function along dispersion
+(i_yorde= 3) Order of coordinate function across dispersion
+(i_niter= 3) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying
+
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+ -- SENSITIVITY CALIBRATION PARAMETERS --
+(bandwid= 10.) Bandpass widths
+(bandsep= 10.) Bandpass separation
+(s_inter= yes) Graphic interaction to examine/define bandpasses
+(s_funct= spline3) Fitting function
+(s_order= 1) Order of sensitivity function
+(fnu = no) Create spectra having units of FNU?
+
+.V2
+.NH 2
+Aperture Definitions
+.LP
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the input echelle slit spectra and, if flux calibration is
+selected, the standard star spectra. This is done only for spectra which
+do not have previously defined apertures unless the \f(CWredo\fR option is
+set to force all definitions to be redone. Thus, apertures may be
+defined separately using the \fBapextract\fR tasks. This is particularly
+useful if one needs to use reference images to define apertures for very
+weak spectra which are not well centered or traced by themselves.
+.LP
+Initially apertures are defined for a specified \fIaperture reference\fR
+image. The selected number of orders are found automatically by selecting
+the highest peaks in a cut across the dispersion. Apertures are assigned
+with a width given by the \f(CWwidth\fR parameter and numbered sequentially.
+The background regions are also defined in terms of the width parameter
+starting at one width distance from the profile center and extending to two
+widths on both sides of the profile. As an example, if the width parameter
+is 5 pixels the default aperture limits are +/- 2.5 pixels and the
+background sample regions will be "-10:-5,5:10". If the \f(CWresize\fR
+parameter is set the aperture limits are adjusted to a specified point on
+the spectrum profile (see \fBapresize\fR).
+.LP
+A query is then given allowing the aperture definitions to be reviewed and
+modified. Queries made by \fBdoecslit\fR generally may be answered with either
+lower case "yes" or "no" or with upper case "YES" or "NO". The upper
+case responses apply to all further queries and so are used to eliminate
+further queries of that kind.
+.LP
+Reviewing the aperture definitions is highly recommended to check the
+aperture numbering, aperture limits, and background regions. The aperture
+numbers must be linearly related, with a slope of +/- 1, to the true order
+numbers though absolute order numbers need not be known. The key point is
+that if an order is skipped the aperture numbers must also skip. The
+background regions are checked with the 'b' key. Typically one adjusts all
+the background regions at the same time by selecting all apertures with
+the 'a' key first. To exit the background and aperture editing steps type
+'q'.
+.LP
+Next the positions of the orders at various points along the dispersion
+are measured and "trace functions" are fit. The user is asked whether
+to fit each trace function interactively. This is selected to adjust
+the fitting parameters such as function type and order. When
+interactively fitting a query is given for each aperture. After the
+first aperture one may skip reviewing the other traces.
+.LP
+After the aperture reference image is done all the object and standard star
+images are checked for aperture definitions and those without definitions
+are assigned apertures. The assignment consists of inheriting the aperture
+from the reference aperture image, recentering the apertures based on an
+average shift that best centers all the apertures, resizing the apertures
+if the resize option is selected, and retracing the spectral orders if the
+retracing option is selected. Retracing is only allowed in non-quicklook
+mode (set by the \f(CWquicklook\fR parameter). Also interactive review of
+the aperture definitions is only done in
+non-quicklook mode. In quicklook mode the aperture definitions are all set
+noninteractively without retracing. It is recommended that quicklook only
+be used for initial quick extractions and calibration and that for final
+reductions one at least review the aperture definitions and possibly
+retrace each observation.
+.LP
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBsparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the object position on the slit and the number
+of image lines or columns to sum are set by the \f(CWline\fR and \f(CWnsum\fR
+parameters. A line of INDEF (the default) selects the middle of the
+image. The automatic finding algorithm is described for the task
+\fBapfind\fR and basically finds the strongest peaks. The resizing is
+described in the task \fBapresize\fR and the parameters used are also
+described there. The tracing is
+done as described in \fBaptrace\fR and consists of stepping along the image
+using the specified \f(CWt_step\fR parameter. The function fitting uses the
+\fBicfit\fR commands with the other parameters from the tracing section.
+.NH 2
+Background or Scattered Light Subtraction
+.LP
+In addition to not subtracting any sky or scattered light there are two
+approaches to subtracting background light. The first is to determine
+a smooth global scattered light component. The second is to subtract
+a locally determined background at each point along the dispersion and
+for each aperture. This can be either for a sky subtraction if the
+background regions are within the slit or scattered light if the
+background regions are outside of the slit. Note that background
+subtraction is only done for object and standard star images and not
+for arc spectra. Also, the global scattered light option is not done
+in quicklook mode.
+.LP
+The global scattered light fitting and subtraction is done with the task
+\fBapscatter\fR. The function fitting parameters are set interactively
+using the aperture reference spectrum. All other subtractions are done
+noninteractively with the same set of parameters. The scattered light is
+subtracted from the input images, thus modifying them, and one might wish
+to first make backups of the original images.
+.LP
+The scattered light is measured between the apertures using a specified
+buffer distance from the aperture edges. The scattered light pixels are
+fit by a series of one dimensional functions across the dispersion. The
+independent fits are then smoothed along the dispersion by again fitting
+low order functions. These fits then define the smooth scattered light
+surface to be subtracted from the image. The fitting parameters are
+defined and recorded in the two parameter sets \f(CWapscat1\fR and
+\f(CWapscat2\fR. The scattered light algorithm is described more fully in
+\fBapscatter\fR. This algorithm is relatively slow.
+.LP
+Local background subtraction is done during extraction based on background
+regions and parameters defined by the default background parameters or
+changed during interactive review of the apertures. The background
+subtraction options are to subtract the average, median, or minimum of the
+pixels in the background regions, or to fit a function and subtract the
+function from under the extracted object pixels. The background regions
+are specified in pixels from the aperture center and follow changes in the
+center of the spectrum along the dispersion. The syntax is colon separated
+ranges with multiple ranges separated by a comma or space. The background
+fitting uses the \fBicfit\fR routines which include medians, iterative
+rejection of deviant points, and a choice of function types and orders.
+Note that it is important to use a method which rejects cosmic rays such as
+using either medians over all the background regions (\f(CWbackground\fR =
+"median") or median samples during fitting (\f(CWb_naverage\fR < -1). The
+background subtraction algorithm and options are described in greater
+detail in \fBapsum\fR and \fBapbackground\fR.
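+.LP
+As a minimal sketch (the choices shown are just one reasonable combination),
+local background fitting with cosmic ray resistant medians may be selected
+with:
+.V1
+
+cl> doecslit.background = "fit"      # local background fit for each aperture
+cl> sparams.b_function = "legendre"  # background fitting function
+cl> sparams.b_naverage = -100        # median samples to reject cosmic rays
+
+.V2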
+.NH 2
+Extraction
+.LP
+The actual extraction of the spectra is done by summing across the
+fixed width apertures at each point along the dispersion.
+The default is to simply sum the pixels using
+partial pixels at the ends. There is an option to weight the
+sum based on a Poisson variance model using the \f(CWreadnoise\fR and
+\f(CWgain\fR detector parameters. Note that if the \f(CWclean\fR
+option is selected the variance weighted extraction is used regardless
+of the \f(CWweights\fR parameter. The sigma thresholds for cleaning
+are also set in the \fBsparams\fR parameters.
+.LP
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain.
+These numbers need to be adjusted if the image has been processed
+such that the intensity scale has a different origin (such as
+a scattered light subtraction) or scaling (such as caused by unnormalized
+flat fielding). These options also require using background subtraction
+if the profile does not go to zero. For optimal extraction and
+cleaning to work it is recommended that any flat fielding be done
+using flat fields produced by \fBapflatten\fR, no scattered light
+correction, and using background subtraction if there is any
+appreciable sky or to compensate for scattered light.
+For further discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
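+.LP
+For example (a sketch; the header keyword names used here are hypothetical
+and depend on the instrument), cleaning and variance weighting might be
+enabled with:
+.V1
+
+cl> doecslit.readnoise = "RDNOISE"   # header keyword or a numeric value
+cl> doecslit.gain = "GAIN"           # header keyword or a numeric value
+cl> doecslit.clean = yes             # also implies variance weighting
+
+.V2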
+.NH 2
+Dispersion Correction
+.LP
+If dispersion correction is not selected, \f(CWdispcor\fR=no, then the object
+spectra are simply extracted. The extracted spectra may be plotted
+by setting the \f(CWsplot\fR option. This produces a query and uses
+the interactive \fBsplot\fR task in non-quicklook mode and uses
+\fBspecplot\fR noninteractively in quicklook mode.
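+.LP
+As a sketch using only the parameters described in the appendix, a quick
+extraction with automatic plots and no dispersion correction could be set up
+with:
+.V1
+
+cl> doecslit.dispcor = no       # extract only, no wavelength calibration
+cl> doecslit.splot = yes        # plot each extracted spectrum
+cl> doecslit.quicklook = yes    # noninteractive stacked plots via specplot
+
+.V2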
+.LP
+Dispersion corrections are applied to the extracted spectra if the
+\f(CWdispcor\fR processing parameter is set. There
+are three basic steps involved; determining the dispersion functions
+relating pixel position to wavelength, assigning the appropriate
+dispersion function to a particular observation, and either storing
+the nonlinear dispersion function in the image headers or resampling the
+spectra to evenly spaced pixels in wavelength.
+.LP
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture definition.
+Note extractions of arc spectra are not background or scattered light
+subtracted. The interactive task \fBecidentify\fR is used to define the
+dispersion function. The idea is to mark some lines in a few orders whose
+wavelengths are known (with the line list used to supply additional lines after
+the first few identifications define the approximate wavelengths) and to fit a
+function giving the wavelength from the aperture number and pixel position.
+.LP
+The arc dispersion function parameters are for \fBecidentify\fR and its
+related partner \fBecreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBecidentify\fR.
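+.LP
+The on-line help for the two tasks is the best source for these details:
+.V1
+
+cl> help ecidentify
+cl> help ecreidentify
+
+.V2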
+.LP
+Once the reference dispersion function is defined other arc spectra are
+extracted as required by the object spectra. The assignment of arcs is
+done either explicitly with an arc assignment table (parameter
+\f(CWarctable\fR) or based on a header parameter such as a time.
+These assignments are made by the task
+\fBrefspectra\fR. When two arcs are assigned to an object spectrum an
+interpolation is done between the two dispersion functions. This makes an
+approximate correction for steady drifts in the dispersion.
+.LP
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
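+.LP
+The keywords added by these tasks may be checked afterwards, for example
+(a sketch; the image names and extension are hypothetical):
+.V1
+
+cl> hselect obj*.imh "$I,JD,LJD,UTMIDDLE,AIRMASS" yes
+
+.V2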
+.LP
+In non-quicklook mode the arc spectra assigned to each object are
+extracted using the same apertures as the object. This accounts for
+changes in the recentering, aperture sizes, and tracing functions.
+In quicklook mode the arc spectra are extracted using the reference
+apertures. When the same arc is used for several object images this
+allows the arc spectrum to only be extracted once.
+.LP
+Defining the dispersion function for a new arc extraction is done with
+the task \fBecreidentify\fR. This is done noninteractively with log
+information recorded about the line reidentifications and the fit.
+.LP
+The last step of dispersion correction is setting the dispersion
+of the object image from the arc images. There are two choices here.
+If the \f(CWlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+.LP
+If the \f(CWlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. For echelle spectra each order is linearized independently so
+that the wavelength interval per pixel is different in different orders.
+This preserves most of the resolution and avoids over or under sampling of
+the highest or lowest dispersion orders. The wavelength limits are
+taken from the limits determined from the arc reference spectrum and
+the number of pixels is the same as the original images. The dispersion
+per pixel is then derived from these constraints.
+.LP
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
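+.LP
+These choices correspond to \fBsparams\fR entries described in the appendix.
+As a sketch, a radial velocity program might choose logarithmic bins:
+.V1
+
+cl> sparams.linearize = yes   # resample to a linear dispersion
+cl> sparams.log = yes         # constant intervals in the log of the wavelength
+cl> sparams.flux = yes        # conserve flux by integrating over output pixels
+
+.V2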
+.NH 2
+Flux Calibration
+.LP
+Flux calibration consists of an extinction correction and an instrumental
+sensitivity calibration. The extinction correction only depends on the
+extinction function defined by the package parameter \f(CWextinct\fR and
+determination of the airmass from the header parameters (the air mass is
+computed by \fBsetairmass\fR as mentioned earlier). The sensitivity
+calibration depends on a sensitivity calibration spectrum determined from
+standard star observations for which there are tabulated absolute fluxes.
+The task that applies both the extinction correction and sensitivity
+calibration to each extracted object spectrum is \fBcalibrate\fR. Consult
+the manual page for this task for more information.
+.LP
+Generation of the sensitivity calibration spectrum is done before
+processing any object spectra since it has two interactive steps and
+requires all the standard star observations. The first step is tabulating
+the observed fluxes over the same bandpasses as the calibrated absolute
+fluxes. For very high resolution spectra it may be the case that the measured
+calibration bandpasses are too large or sparse. In this case one must
+interpolate the calibration data to bandpasses appropriate for the data.
+If the bandpass widths and separations are given as INDEF then the same
+bandpasses as in the calibration file are used. Otherwise a uniform grid
+of bandpasses is interpolated. Using interpolated bandpasses is not
+rigorous but is sometimes the only choice for echelle spectra.
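+.LP
+As a sketch, the bandpasses are controlled by the \f(CWbandwidth\fR and
+\f(CWbandsep\fR entries in \fBsparams\fR; setting them to INDEF uses the
+calibration file bandpasses directly:
+.V1
+
+cl> sparams.bandwidth = INDEF   # use the calibration file bandpasses
+cl> sparams.bandsep = INDEF
+
+.V2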
+.LP
+The standard star tabulations are done after each standard star is
+extracted and dispersion corrected. You are asked for the name of the
+standard star as tabulated in the absolute flux data files in the directory
+\f(CWcaldir\fR defined by the package parameters. If the \f(CWinteract\fR
+parameter is yes the bandpasses can be displayed on the data and you can
+interactively add or delete bandpasses. The tabulation of the standard star
+observations over the standard bandpasses is done by the task
+\fBstandard\fR. The tabulated data is stored in the file \f(CWstd\fR. Note
+that if the \f(CWredo\fR flag is not set any new standard stars specified in
+subsequent executions of \fBdoecslit\fR are added to the previous data in
+the data file; otherwise the file is first deleted. Modification of the
+tabulated standard star data, such as by adding new stars, will cause any
+spectra in the input list which have been previously calibrated to be
+reprocessed if the \f(CWupdate\fR flag is set.
+.LP
+After the standard star calibration bandpass fluxes are tabulated the
+information from all the standard stars is combined to produce a
+sensitivity function for use by \fBcalibrate\fR. The sensitivity function
+determination is interactive and uses the task \fBsensfunc\fR. This task
+allows fitting a smooth sensitivity function to the ratio of the observed
+to calibrated fluxes versus wavelength. The types of manipulations one
+needs to do include deleting bad observations, possibly removing variable
+extinction (for poor data), and possibly deriving a revised extinction
+function. This is a complex operation and one should consult the manual
+page for \fBsensfunc\fR. The sensitivity function is saved as one
+dimensional spectra (one per order) with the root name \f(CWsens\fR.
+Deletion of these images will also cause reprocessing to occur if the
+\f(CWupdate\fR flag is set.
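+.LP
+As a sketch of the setup (the calibration directory shown is only one of the
+directories in the NOAO standards library and the image names are
+hypothetical), flux calibration might be enabled with:
+.V1
+
+cl> echelle.caldir = "onedstds$spec50cal/"
+cl> doecslit.standards = "std001,std002"
+cl> doecslit.fluxcal = yes
+
+.V2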
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+Each task in the \fBechelle\fR package and the tasks used by \fBdoecslit\fR have
+help pages describing the parameters and task in some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+     apmask - Create an IRAF pixel list mask of the apertures
+apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ ecidentify - Identify features in spectrum for dispersion solution
+ecreidentify - Automatically identify features in spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+ standard - Identify standard stars to be used in sensitivity calc
+
+ doecslit - Process Echelle slit spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+
+.V2
+.SH
+Appendix A: DOECSLIT Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object images to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set
+and dependent calibration data has changed. If the images contain the
+keyword IMAGETYP then only those with a value of "object" or "OBJECT"
+are used and those with a value of "comp" or "COMPARISON" are added
+to the list of arcs. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a bright star spectrum.
+.LE
+arcs = "" (at least one if dispersion correcting)
+.LS
+List of arc calibration spectra. These spectra are used to define
+the dispersion functions. The first spectrum is used to mark lines
+and set the dispersion function interactively and dispersion functions
+for all other arc spectra are derived from it. If the images contain
+the keyword IMAGETYP then only those with a value of "comp" or
+"COMPARISON" are used. All others are ignored as are extracted spectra.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining which arc spectra are to be assigned to which object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \f(CWsparams.sort\fR, such as the Julian date
+is made.
+.LE
+standards = "" (at least one if flux calibrating)
+.LS
+List of standard star spectra. The standard stars must have entries in
+the calibration database (package parameter \f(CWechelle.caldir\fR).
+.LE
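+.LP
+A minimal invocation using these inputs might look like the following sketch
+(the image names are hypothetical):
+.V1
+
+cl> doecslit obj001,obj002 apref=star001 arcs=arc001 standards=std001
+
+.V2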
+
+readnoise = 0., gain = 1. (apsum)
+.LS
+Read out noise in photons and detector gain in photons per data value.
+This parameter defines the minimum noise sigma and the conversion between
+photon Poisson statistics and the data number statistics. Image header
+keywords (case insensitive) may be specified to obtain the values from the
+image header.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the standard star or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+norders = 10 (apfind)
+.LS
+Number of orders to be found automatically.
+.LE
+width = 5. (apedit)
+.LS
+Approximate full width of the spectrum profiles. This parameter is used
+to define a width and error radius for the profile centering algorithm,
+and defaults for the aperture limits and background regions.
+.LE
+
+dispcor = yes
+.LS
+Dispersion correct spectra? This may involve either defining a nonlinear
+dispersion coordinate system in the image header or resampling the
+spectra to uniform linear wavelength coordinates as selected by
+the parameter \f(CWsparams.linearize\fR.
+.LE
+extcor = no
+.LS
+Extinction correct the spectra?
+.LE
+fluxcal = no
+.LS
+Flux calibrate the spectra using standard star observations?
+.LE
+resize = no (apresize)
+.LS
+Resize the default apertures for each object based on the spectrum profile?
+.LE
+clean = no (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction. In addition the datamax parameter
+can be useful.
+.LE
+trace = yes (non-quicklook mode only) (aptrace)
+.LS
+Allow tracing each object spectrum separately? If not set then the trace
+from the aperture reference is used, with recentering to allow for shifts
+across the dispersion. If set then each object and standard star
+image is retraced. Retracing is NOT done in quicklook mode.
+.LE
+background = "none" (apsum, apscatter)
+.LS
+Type of background light subtraction. The choices are "none" for no
+background subtraction, "scattered" for a global scattered light
+subtraction, "average" to average the background within background regions,
+"median" to use the median in background regions, "minimum" to use the
+minimum in background regions, or "fit" to fit across the dispersion using
+the background within background regions. The scattered light option fits
+and subtracts a smooth global background and modifies the input images.
+This is a slow operation and so is NOT performed in quicklook mode. The
+other background options are local to each aperture. The "fit" option uses
+additional fitting parameters from \fBsparams\fR and the "scattered" option
+uses parameters from \fBapscat1\fR and \fBapscat2\fR.
+.LE
+splot = no
+.LS
+Plot the final spectra? In quicklook mode a noninteractive, stacked plot
+is automatically produced using the task \fBspecplot\fR while in
+non-quicklook mode a query is given and the task \fBsplot\fR is used for
+interactive plotting.
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed unless required by the
+update option.
+.LE
+update = no
+.LS
+Update processing of previously processed spectra if the aperture
+reference image, the dispersion reference image, or standard star
+calibration data are changed?
+.LE
+quicklook = no
+.LS
+Extract and calibrate spectra with minimal interaction? In quicklook mode
+only aperture reference definitions, the initial dispersion function
+solution, and the standard star setup are done interactively. Scattered
+light subtraction and individual object tracing are not performed.
+Normally the \f(CWsplot\fR option is set in this mode to produce an automatic
+final spectrum plot for each object. It is recommended that this mode not be
+used for final reductions.
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job provided there are no interactive
+steps remaining.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+sparams = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. This
+parameter is only for indicating the link to the parameter set
+\fBsparams\fR and should not be given a value. The parameter set may be
+examined and modified in the usual ways (typically with "epar sparams"
+or ":e sparams" from the parameter editor). The parameters are
+described below.
+.LE
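+.LP
+For example, the parameter set may be listed or edited directly:
+.V1
+
+cl> lpar sparams    # list the current values
+cl> epar sparams    # edit the parameter set
+
+.V2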
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+extras = no (apsum)
+.LS
+Include raw unweighted and uncleaned spectra, the background spectra, and
+the estimated sigma spectra in a three dimensional output image format?
+See the discussion in the \fBapextract\fR package for further information.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Fraction of the peak to set aperture limits during automatic resizing.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 2 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- BACKGROUND AND SCATTERED LIGHT PARAMETERS --
+
+b_function = "legendre", b_order = 1 (apsum)
+.LS
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+b_naverage = -100 (apsum)
+.LS
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.LE
+b_niterate = 0 (apsum)
+.LS
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.LE
+b_low_reject = 3., b_high_reject = 3. (apsum)
+.LS
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.LE
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the edge of any aperture for data to be included
+in the scattered light determination. This parameter may be modified
+interactively.
+.LE
+apscat1 = "", apscat2 = "" (apscatter)
+.LS
+Parameter sets for the fitting functions across and along the dispersion.
+These parameters are those used by \fBicfit\fR. These parameters are
+usually set interactively.
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum) (none|variance)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum and approfile) (fit1d|fit2d)
+.LS
+Type of profile fitting algorithm to use. The "fit1d" algorithm is
+preferred except in cases of extreme tilt.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelist$thar.dat" (ecidentify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelist$".
+.LE
+match = 1. (ecidentify)
+.LS
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (ecidentify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 10. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "legendre", i_xorder = 3, i_yorder = 3 (ecidentify)
+.LS
+The default function, function order for the pixel position dependence, and
+function order for the aperture number dependence to be fit to the arc
+wavelengths. The function choices are "chebyshev" or "legendre".
+.LE
+i_niterate = 3, i_low = 3.0, i_high = 3.0 (ecidentify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (ecreidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd" (setjd and refspectra)
+.LS
+Image header keyword to be used as the sorting parameter for selection
+based on order. The header parameter must be numeric but otherwise may
+be anything. Common sorting parameters are times or positions.
+.LE
+group = "ljd" (setjd and refspectra)
+.LS
+Image header keyword to be used to group spectra. For those selection
+methods which use the group parameter the reference and object
+spectra must have identical values for this keyword. This can
+be anything but it must be constant within a group. Common grouping
+parameters are the date of observation "date-obs" (provided it does not
+change over a night) or the local Julian day number.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling using
+the linear dispersion parameters specified by other parameters. If
+no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data is not interpolated. Note the interpolation
+function type is set by the package parameter \f(CWinterp\fR.
+.LE
+log = no (ecdispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (ecdispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+-- SENSITIVITY CALIBRATION PARAMETERS --
+
+bandwidth = 10., bandsep = 10. (standard)
+.LS
+Interpolated bandpass grid. If INDEF then the same bandpasses as in the
+calibration files are used; otherwise the calibration data is interpolated
+to the specified set of bandpasses.
+.LE
+s_interact = yes (standard)
+.LS
+Display the bandpasses on the standard star data and allow interactive
+addition and deletion of bandpasses.
+.LE
+s_function = "spline3", s_order = 1 (sensfunc)
+.LS
+Function and order used to fit the sensitivity data. The function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline3" cubic spline,
+and "spline1" linear spline. The order corresponds to the number of
+polynomial terms or the number of spline pieces. The default
+values may be changed interactively.
+.LE
+fnu = no (calibrate)
+.LS
+The default calibration is into units of F-lambda. If \f(CWfnu\fR = yes then
+the calibrated spectrum will be in units of F-nu.
+.LE
+
+.ce
+PACKAGE PARAMETERS
+
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.LE
+extinction = "onedstds$kpnoextinct.dat" (standard, sensfunc, calibrate)
+.LS
+Extinction file for a site. There are two extinction files in the
+NOAO standards library, onedstds$, for KPNO and CTIO. These extinction
+files are used for extinction and flux calibration.
+.LE
+caldir (standard)
+.LS
+Standard star calibration directory. A directory containing standard
+star data files. Note that the directory name must end with '/'.
+There are a number of standard star calibration directories in the NOAO
+standards library, onedstds$.
+.LE
+observatory = "observatory" (observatory)
+.LS
+The default observatory to use for latitude dependent computations.
+If the OBSERVAT keyword is defined in the image header it takes precedence over
+this parameter.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc) (dispcor)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
+.LE
+database = "database"
+.LS
+Database name used by various tasks. This is a directory which is created
+if necessary.
+.LE
+verbose = no
+.LS
+Verbose output? If set then almost all the information written to the
+logfile is also written to the terminal except when the task is a
+background or batch process.
+.LE
+logfile = "logfile"
+.LS
+If specified detailed text log information is written to this file.
+.LE
+plotfile = ""
+.LS
+If specified metacode plots are recorded in this file for later review.
+Since plot information can become large this should be used only if
+really desired.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
diff --git a/noao/imred/echelle/doc/dofoe.hlp b/noao/imred/echelle/doc/dofoe.hlp
new file mode 100644
index 00000000..6dfab76a
--- /dev/null
+++ b/noao/imred/echelle/doc/dofoe.hlp
@@ -0,0 +1,1155 @@
+.help dofoe Feb93 noao.imred.echelle
+.ih
+NAME
+dofoe -- Fiber Optic Echelle (FOE) data reduction task
+.ih
+USAGE
+dofoe objects
+.ih
+SUMMARY
+The \fBdofoe\fR reduction task is specialized for scattered light
+subtraction, extraction, flat fielding, and wavelength calibration of Fiber
+Optic Echelle (FOE) spectra. There may be one fiber or two fibers where
+the second fiber is illuminated by an arc calibration during arc and object
+exposures and a flat field during flat field exposures. It is a command
+language script which collects and combines the functions and parameters of
+many general purpose tasks to provide a single complete data reduction
+path. The task provides a degree of guidance, automation, and record
+keeping necessary when dealing with the complexities of reducing this type
+of data.
+.ih
+PARAMETERS
+.ls objects
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.le
+.ls flat = "" (optional)
+Flat field spectrum. If specified the one dimensional flat field spectrum
+is extracted and used to make flat field calibrations.
+.le
+.ls arcs = "" (at least one if dispersion correcting)
+List of arc spectra. The first arc in the list is used to create a
+dispersion solution interactively. All other arc spectra will be
+automatically reidentified.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining arc spectra to be assigned to object spectra (see
+\fBrefspectra\fR). If not specified an assignment based on a header
+parameter, \fIparams.sort\fR, such as the observation time is made.
+.le
+
+.ls readnoise = "0." (apsum)
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.le
+.ls gain = "1." (apsum)
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls norders = 12 (apfind)
+Number of orders to be found. This number is used during the automatic
+definition of the apertures from the aperture reference spectrum. Note
+that when there is a second fiber for simultaneous arcs the specified
+number will be automatically doubled for finding both sets of orders.
+So in either case specify only the number of orders from a single fiber.
+The interactive review of the aperture assignments allows verification
+and adjustments to the automatic aperture definitions.
+.le
+.ls width = 4. (apedit)
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.le
+.ls arcaps = "2x2"
+When there is only a single fiber set this parameter to "". When there is
+a second fiber used to create simultaneous arcs during the object exposures
+this parameter specifies a list of aperture numbers for the arc fibers.
+Since the object and arc fiber orders are paired the default setting
+expects the even numbered apertures to be the arc apertures. This should be
+checked interactively.
+.le
+
+.ls fitflat = yes (flat1d)
+Fit and divide the extracted flat field orders by a smooth function
+in order to normalize the wavelength response? If not done the flat field
+spectral shape (which includes the blaze function) will be divided
+out of the object spectra, thus altering the object data values.
+If done only the small scale response variations are included in the
+flat field and the object spectra will retain their observed flux
+levels and blaze function.
+.le
+.ls background = "none" (apsum, apscatter)
+Type of background light subtraction. The choices are "none" for no
+background subtraction, "scattered" for a global scattered light
+subtraction, "average" to average the background within background regions,
+"median" to use the median in background regions, "minimum" to use the
+minimum in background regions, or "fit" to fit across the dispersion using
+the background within background regions. The scattered light option fits
+and subtracts a smooth global background and modifies the input images.
+This is a slow operation. The
+other background options are local to each aperture at each point along the
+dispersion. The "fit" option uses additional fitting parameters from
+\fBparams\fR and the "scattered" option uses parameters from \fBapscat1\fR
+and \fBapscat2\fR.
+.le
+.ls clean = yes (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.le
+.ls dispcor = yes
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.le
+.ls update = no
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.le
+.ls batch = no
+Process spectra as a background or batch job.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls params = "" (pset)
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.le
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdofoe\fR.
+.ls observatory = "observatory"
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For FOE data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter.
+.le
+.ls database = "database"
+Database (directory) used for storing aperture and dispersion information.
+.le
+.ls verbose = no
+Print verbose information available with various tasks.
+.le
+.ls logfile = "logfile", plotfile = ""
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.le
+.ls records = ""
+Dummy parameter to be ignored.
+.le
+.ls version = "ECHELLE: ..."
+Version of the package.
+.le
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdofoe\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls extras = no (apsum)
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -3., upper = 3. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 2 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- DEFAULT BACKGROUND PARAMETERS --
+.ls buffer = 1. (apscatter)
+Buffer distance from the edge of any aperture for data to be included
+in the scattered light determination. This parameter may be modified
+interactively.
+.le
+.ls apscat1 = "", apscat2 = "" (apscatter)
+Parameter sets for the fitting functions across and along the dispersion.
+These parameters are those used by \fBicfit\fR. These parameters are
+usually set interactively.
+.le
+.ls b_function = "legendre", b_order = 1 (apsum)
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls b_naverage = -100 (apsum)
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.le
+.ls b_niterate = 0 (apsum)
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.le
+.ls b_low_reject = 3., b_high_reject = 3. (apsum)
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.le
+.ls b_smooth = 10 (apsum)
+Box car smoothing length for background when using background
+subtraction. Since the background noise is often the limiting factor
+for good extraction one may box car smooth the background to improve the
+statistics.
+.le
+
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum) (fit1d|fit2d)
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for FOE data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+.ls f_interactive = no (fit1d)
+Fit the one dimensional flat field order spectra interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.le
+.ls f_function = "spline3", f_order = 20 (fit1d)
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelist$thar.dat" (ecidentify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelist$".
+.le
+.ls match = 1. (ecidentify)
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (ecidentify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 4. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "chebyshev", i_xorder = 3, i_yorder = 3 (ecidentify)
+The default function, function order for the pixel position dependence, and
+function order for the aperture number dependence to be fit to the arc
+wavelengths. The function choices are "chebyshev" or "legendre".
+.le
+.ls i_niterate = 3, i_low = 3.0, i_high = 3.0 (ecidentify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (ecreidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd", group = "ljd" (refspectra)
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdofoe\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
+.ih
+DESCRIPTION
+The \fBdofoe\fR reduction task is specialized for scattered light
+subtraction, extraction, flat fielding, and wavelength calibration of Fiber
+Optic Echelle (FOE) spectra. There may be one fiber or two fibers where
+the second fiber is illuminated by an arc calibration during arc and object
+exposures and a flat field during flat field exposures. When there is
+just one fiber the parameter \fIarcaps\fR is set to "" and when there are
+two fibers the parameter is used to select which of the defined
+apertures are the orders from the simultaneous arc fiber.
+
+This task is a command language script which collects and combines the
+functions and parameters of many general purpose tasks to provide a single
+complete data reduction path. The task provides a degree of guidance,
+automation, and record keeping necessary when dealing with the complexities
+of reducing this type of data.
+
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \fIredo\fR and \fIupdate\fR options, skip or
+repeat some or all the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdofoe\fR combines many separate, general purpose tasks the description
+given here refers to these tasks and leaves some of the details to their
+help documentation.
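+
+As a sketch of a typical invocation (the image names are hypothetical and
+"@ files" may be used for long lists), the task might be run as:
+
+.nf
+    cl> dofoe obj001,obj002 apref=flat001 flat=flat001 arcs=arc001,arc002
+.fi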
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images must first be processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+.le
+.ls [2]
+Set the \fBdofoe\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Verify and set the format parameters, particularly the number of orders to be
+extracted and processed. The processing parameters are set
+for simple extraction and dispersion correction but dispersion correction
+can be turned off for quicklook or background subtraction and cleaning
+may be added.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The apertures are defined using the specified aperture reference image
+which is usually a flat field in which both the object and arc fibers are
+illuminated. The specified number of orders are found automatically and
+sequential apertures assigned. The resize option sets the aperture size to
+the widths of the profiles at a fixed fraction of the peak height.
+.le
+.ls [5]
+The automatic order identification and aperture assignment is based on peak
+height and may be incorrect. The interactive aperture editor is entered
+with a plot of the apertures. When there is a second simultaneous arc
+fiber it is essential that the object and arc
+fiber orders are properly paired with the arc fibers having even aperture
+numbers and the object fibers having odd aperture numbers. It is also
+required that no orders be skipped in the region of interest. Missing
+orders are added with the 'm' key. Once all orders have been marked the
+aperture numbers are resequenced with 'o'. If local background subtraction
+is selected the background regions should be checked with the 'b' key.
+Preceding this with the 'a' key allows any changes to the background
+regions to be applied to all orders. To exit type 'q'.
+.le
+.ls [6]
+The order positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all orders need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.le
+.ls [7]
+If flat fielding is to be done the flat field spectra are extracted. A
+smooth function is fit to each flat field spectrum to remove the large
+scale spectral signature. The final response spectra are normalized to a
+unit mean over all fibers.
+.le
+.ls [8]
+If scattered light subtraction is selected the scattered light parameters
+are set using the aperture reference image and the task \fBapscatter\fR.
+The purpose of this is to interactively define the aperture buffer distance
+for the scattered light and the cross and parallel dispersion fitting
+parameters. The fitting parameters are taken from and recorded in the
+parameter sets \fBapscat1\fR and \fBapscat2\fR. All other scattered light
+subtractions are done noninteractively with these parameters. Note that
+the scattered light correction modifies the input images.
+.le
+.ls [9]
+If dispersion correction is selected the first arc in the arc list is
+extracted. One fiber is used to identify the arc lines and define the
+dispersion function using the task \fBecidentify\fR. Identify a few arc
+lines in a few orders with 'm' and 'k' or 'o', use the 'l' line list
+identification command to automatically add additional lines and fit the
+dispersion function. Check the quality of the dispersion function fit
+with 'f'. When satisfied exit with 'q'.
+.le
+.ls [10]
+If there is a second fiber the dispersion function is automatically
+determined using the task \fBecreidentify\fR.
+.le
+.ls [11]
+The arc reference spectrum is dispersion corrected.
+If the spectra are resampled to a linear dispersion system
+(which will be the same for all spectra) the dispersion parameters
+determined from the dispersion solution are printed.
+.le
+.ls [12]
+The object spectra are now automatically background subtracted (an
+alternative to scattered light subtraction), extracted, flat fielded,
+and dispersion corrected. Any new dispersion function reference arcs
+assigned to the object images are automatically extracted and
+dispersion functions determined. A zero point wavelength correction
+is computed from the simultaneous arc fiber spectrum and applied to
+the object spectrum if orders from the second fiber have been identified
+with the \fIarcaps\fR parameter.
+.le
+.ls [13]
+The final spectra will have the same name as the original 2D images
+with a ".ec" extension added.
+.le
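+
+As a sketch of step [2], an "@ file" of object images may be prepared with
+the \fBfiles\fR task and then given to \fBdofoe\fR (the image names used
+here are hypothetical):
+.nf
+
+	cl> files obj*.imh > objs.lis
+	cl> dofoe @objs.lis apref=flat001 flat=flat001 arcs=arc001
+
+.fi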
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of single or dual fiber FOE object and calibration
+spectra stored as IRAF images. The \fIarcaps\fR parameter is used to
+discriminate between the two cases. The type of image format is defined by
+the environment parameter \fIimtype\fR. Only images with that extension
+will be processed and created. The raw CCD images must be processed to
+remove overscan, bias, and dark count effects. This is generally done
+using the \fBccdred\fR package. Flat fielding is generally not done at
+this stage but as part of \fBdofoe\fR. The calibration spectra are flat
+field observations in all fibers, comparison arc lamp spectra in all
+fibers, and, for the dual fiber mode, arc spectra in one fiber while the
+second fiber observes the object. If for some reason the flat field or
+calibration arc spectra have separate exposures for the two fibers the
+separate exposures may simply be added.
+
+The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in the task \fBrefspectra\fR.
+
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ec" extension. Each line in the reduced image is a one dimensional
+spectrum (an echelle order) with associated aperture and wavelength
+information. When the \fIextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images. The task \fBscombine\fR is used to combine or merge orders into
+a single spectrum.
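+
+For example, the reduced orders might be examined and repackaged with
+commands such as the following (the "obj001.ec" and "order5" names are
+hypothetical):
+.nf
+
+	cl> splot obj001.ec
+	cl> scopy obj001.ec order5 apertures=5
+
+.fi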
+
+\fBPackage Parameters\fR
+
+The \fBechelle\fR package parameters set parameters affecting all the tasks
+in the package. Some of the parameters are not applicable to the
+\fBdofoe\fR task. The observatory parameter is only required for data
+without an OBSERVAT header parameter (currently included in NOAO data).
+The spectrum interpolation type might be changed to "sinc" but with the
+cautions given in \fBonedspec.package\fR. The dispersion axis parameter is
+only needed if a DISPAXIS image header parameter is not defined. The other
+parameters define the standard I/O functions. The verbose parameter
+selects whether to print everything which goes into the log file on the
+terminal. It is useful for monitoring what the \fBdofoe\fR task does. The
+log and plot files are useful for keeping a record of the processing. A
+log file is highly recommended. A plot file provides a record of
+apertures, traces, and extracted spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
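+
+For example, verbose output and a named log file might be selected by
+setting the package parameters directly (the log file name is arbitrary):
+.nf
+
+	ec> echelle.verbose = yes
+	ec> echelle.logfile = "night1.log"
+
+.fi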
+
+\fBProcessing Parameters\fR
+
+The input images are specified by image lists. The lists may be
+a list of explicit, comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+The aperture reference spectrum is used to find the orders and trace
+them. Thus, this requires an image with good signal in both fibers
+which usually means a flat field spectrum. It is recommended that
+flat field correction be done using one dimensional extracted spectra
+rather than as two dimensional images. This is done if a flat field
+spectrum is specified. The arc assignment table is used to specifically
+assign arc spectra to particular object spectra and the format
+of the file is described in \fBrefspectra\fR.
+
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. The dispersion axis defines the wavelength direction
+of spectra in the image if not defined in the image header by the keyword
+DISPAXIS. The width parameter (in pixels) is used for the profile
+centering algorithm (\fBcenter1d\fR).
+
+The number of orders selects the number of orders for a single
+fiber and "pairs" of object and arc
+fiber profiles for dual fibers. The number specified will be
+automatically found based on the strongest peaks.
+In the dual fiber case it is important that both elements of a pair be
+found, that no orders be skipped, and that the aperture numbers be
+sequential with arc profiles having even aperture numbers and object
+profiles having odd numbers in the region of interest. Because of this
+the automatic identification is just a starting point for the interactive
+review. The even/odd
+relationship between object and arc profiles is set by the \fIarcaps\fR
+parameter and so may be reversed if desired.
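+
+As a sketch, a dual fiber format with 12 order pairs and the arc orders in
+the even numbered apertures (the defaults) corresponds to the settings:
+.nf
+
+	cl> dofoe.norders = 12
+	cl> dofoe.arcaps = "2x2"
+
+.fi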
+
+The next set of parameters select the processing steps and options. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels, including the blaze function, and not introducing the reciprocal of
+the flat field spectrum into the object spectra. If not selected the flat
+field will remove the blaze function from the observations and introduce
+some wavelength dependence from the flat field lamp spectrum.
+
+The \fIbackground\fR option selects the type of correction for background or
+scattered light. If the type is "scattered" a global scattered light is
+fit to the data between the apertures and subtracted from the images.
+\fINote that the input images are modified by this operation\fR. This
+option is slow. Alternatively, a local background may be subtracted using
+background regions defined for each aperture. The data in the regions may
+be averaged, medianed, or the minimum value used. Another choice is to fit
+the data in the background regions by a function and interpolate to the
+object aperture.
+
+The \fIclean\fR option invokes a profile fitting and deviant point rejection
+algorithm as well as a variance weighting of points in the aperture. These
+options require knowing the effective (i.e. accounting for any image
+combining) read out noise and gain. For a discussion of cleaning and
+variance weighted extraction see \fBapvariance\fR and \fBapprofiles\fR.
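+
+For example, cleaning might be enabled along with the effective detector
+characteristics as follows (the noise and gain values are illustrative
+only):
+.nf
+
+	cl> dofoe.readnoise = 5.
+	cl> dofoe.gain = 2.5
+	cl> dofoe.clean = yes
+
+.fi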
+
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+reference image, new flat field, adding the scattered light option, and a
+new arc reference image. If all input spectra are to be processed
+regardless of previous processing the \fIredo\fR flag may be used. Note
+that reprocessing clobbers the previously processed output spectra.
+
+The \fIbatch\fR processing option allows object spectra to be processed as
+a background or batch job. The \fIlistonly\fR option prints a summary of
+the processing steps which will be performed on the input spectra without
+actually doing anything. This is useful for verifying which spectra will
+be affected if the input list contains previously processed spectra. The
+listing does not include any arc spectra which may be extracted to
+dispersion calibrate an object spectrum.
+
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdofoe\fR
+works this parameter need not be given a value; the parameter set \fBparams\fR is always
+used. The algorithm parameters are discussed further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the \fBdofoe\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdofoe\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parenthesis. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+FOE data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.nf
+
+ cl> epar params
+
+.fi
+or simply typing \fIparams\fR. The parameter editor can also be
+entered when editing the \fBdofoe\fR parameters by typing \fI:e
+params\fR or simply \fI:e\fR if positioned at the \fIparams\fR
+parameter.
+
+\fBAperture Definitions\fR
+
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the object and arc orders of interest. This is done
+on a reference spectrum which is usually a flat field taken through
+all fibers. Other spectra will inherit the reference apertures and
+apply a correction for any shift of the orders across the dispersion.
+The reference apertures are defined only once unless the \fIredo\fR
+option is set.
+
+The selected number of orders are found automatically by selecting the
+highest peaks in a cut across the dispersion. Note that the specified
+number of orders is multiplied by two in defining the apertures when
+there is a second fiber. Apertures
+are assigned with limits set by the \fIlower\fR and
+\fIupper\fR parameters and numbered sequentially. A query is then
+given allowing the aperture limits to be "resized" based on the profile
+itself (see \fBapresize\fR).
+
+A cut across the orders is then shown with the apertures marked and
+an interactive aperture editing mode is entered (see \fBapedit\fR).
+For \fBdofoe\fR the aperture identifications and numbering are particularly
+critical. When there is a single fiber the aperture numbers must
+be sequential with the order numbers. If an order is skipped then the
+aperture number must also be skipped.
+
+For dual fibers all "pairs" of object and arc orders in the region of
+interest must be defined without skipping any orders. The orders must
+also be numbered sequentially (though the direction does not matter)
+so that the arc apertures are either all even or all odd as defined
+by the \fIarcaps\fR parameter (the default is even numbers for the
+arc apertures). The 'o' key will provide the necessary reordering.
+
+If local background subtraction is used the background regions should
+also be checked with the 'b' key. Typically one adjusts all
+the background regions at the same time by selecting all apertures with
+the 'a' key first. To exit the background and aperture editing steps type
+'q'.
+
+Next the positions of the orders at various points along the dispersion are
+measured and "trace functions" are fit. The user is asked whether to fit
+each trace function interactively. This is selected to adjust the fitting
+parameters such as function type and order. When interactively fitting a
+query is given for each aperture. After the first aperture one may skip
+reviewing the other traces by responding with "NO". Queries made by
+\fBdofoe\fR generally may be answered with either lower case "yes" or "no"
+or with upper case "YES" or "NO". The upper case responses apply to all
+further queries and so are used to eliminate further queries of that kind.
+
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The
+default line or column for finding the orders and the number of image lines
+or columns to sum are set by the \fIline\fR and \fInsum\fR parameters. A
+line of INDEF (the default) selects the middle of the image. The automatic
+finding algorithm is described for the task \fBapfind\fR and basically
+finds the strongest peaks. The resizing is described in the task
+\fBapresize\fR and the parameters used are also described there and
+identified in the PARAMETERS section. The tracing is done as described in
+\fBaptrace\fR and consists of stepping along the image using the specified
+\fIt_step\fR parameter. The function fitting uses the \fBicfit\fR commands
+with the other parameters from the tracing section.
+
+\fBBackground or Scattered Light Subtraction\fR
+
+In addition to not subtracting any background scattered light there are two
+approaches to subtracting this light. The first is to determine a smooth
+global scattered light component. The second is to subtract a locally
+determined background at each point along the dispersion and for each
+aperture. Note that background subtraction is only done for object images
+and not for arc images.
+
+The global scattered light fitting and subtraction is done with the task
+\fBapscatter\fR. The function fitting parameters are set interactively
+using the aperture reference spectrum. All other subtractions are done
+noninteractively with the same set of parameters. The scattered light is
+subtracted from the input images, thus modifying them, and one might wish
+to first make backups of the original images.
+
+The scattered light is measured between the apertures using a specified
+buffer distance from the aperture edges. The scattered light pixels are
+fit by a series of one dimensional functions across the dispersion. The
+independent fits are then smoothed along the dispersion by again fitting
+low order functions. These fits then define the smooth scattered light
+surface to be subtracted from the image. The fitting parameters are
+defined and recorded in the two parameter sets \fIapscat1\fR and
+\fIapscat2\fR. The scattered light algorithm is described more fully in
+\fBapscatter\fR. This algorithm is relatively slow.
+
+Local background subtraction is done during extraction based on background
+regions and parameters defined by the default background parameters or
+changed during interactive review of the apertures. The background
+subtraction options are to subtract the average, median, or minimum of the
+pixels in the background regions, or to fit a function and subtract the
+function from under the extracted object pixels. The background regions
+are specified in pixels from the aperture center and follow changes in
+center of the spectrum along the dispersion. The syntax is colon separated
+ranges with multiple ranges separated by a comma or space. The background
+fitting uses the \fBicfit\fR routines which include medians, iterative
+rejection of deviant points, and a choice of function types and orders.
+Note that it is important to use a method which rejects cosmic rays such as
+using either medians over all the background regions (\fIbackground\fR =
+"median") or median samples during fitting (\fIb_naverage\fR < -1).
+The background smoothing parameter \fIb_smooth\fR may be used
+to provide some additional local smoothing of the background light.
+The background subtraction algorithm and options are described in greater
+detail in \fBapsum\fR and \fBapbackground\fR.
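+
+For example, a cosmic ray resistant local background may be obtained either
+by taking the median of the background regions or by using median samples
+when fitting; as a sketch, these choices correspond to the settings:
+.nf
+
+	cl> dofoe.background = "median"
+	cl> params.b_naverage = -3
+
+.fi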
+
+\fBExtraction\fR
+
+The actual extraction of the spectra is done by summing across the fixed
+width apertures at each point along the dispersion. The default is to
+simply sum the pixels using partial pixels at the ends. There is an
+option to weight the sum based on a Poisson noise model using the
+\fIreadnoise\fR and \fIgain\fR detector parameters. Note that if the
+\fIclean\fR option is selected the variance weighted extraction is used
+regardless of the \fIweights\fR parameter. The sigma thresholds for
+cleaning are also set in the \fBparams\fR parameters.
+
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as a scattered light
+subtraction) or scaling (such as caused by unnormalized flat fielding).
+These options also require using background subtraction if the profile does
+not go to zero. For optimal extraction and cleaning to work it is
+recommended that any scattered light be accounted for by local background
+subtraction rather than with the scattered light subtraction and the
+\fIfitflat\fR option be used. The \fIb_smooth\fR parameter is also
+appropriate in this application and improves the optimal extraction results
+by reducing noise in the background signal. For further discussion of
+cleaning and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR as well as \fBapsum\fR.
+
+\fBFlat Field Correction\fR
+
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdofoe\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdofoe\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+
+The first step is extraction of the flat field spectrum, if one is specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups. When
+the \fIfitflat\fR option is selected (the default) the extracted flat field
+spectra are fit by smooth functions and the ratio of the flat field spectra
+to the smooth functions define the response spectra. The default fitting
+function and order are given by the parameters \fIf_function\fR and
+\fIf_order\fR. If the parameter \fIf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
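+
+For example, interactive fitting of the flat field shape with the default
+function and order corresponds to the following \fBparams\fR settings (a
+sketch; the function and order shown are the defaults):
+.nf
+
+	cl> params.f_interactive = yes
+	cl> params.f_function = "spline3"
+	cl> params.f_order = 20
+
+.fi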
+
+If the \fIfitflat\fR option is not selected the extracted and globally
+normalized flat field spectra are directly divided into the object spectra.
+This removes the blaze function, thus altering the data counts, and
+introduces the reciprocal of the flat field spectrum in the object
+spectra.
+
+The final step is to normalize the flat field spectra by the mean counts over
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra.
+
+\fBDispersion Correction\fR
+
+If dispersion correction is not selected, \fIdispcor\fR=no, then the object
+spectra are simply extracted. If it is selected the arc spectra are used
+to dispersion calibrate the object spectra. There are three steps involved;
+determining the dispersion functions relating pixel position to wavelength,
+assigning the appropriate dispersion function to a particular observation,
+and either storing the nonlinear
+dispersion function in the image headers or resampling the spectra to
+evenly spaced pixels in wavelength. When there are two fibers there is
+also a step of applying a zero point correction to the object fiber based
+on the arc fiber.
+
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture
+definitions. Note that extractions of arc spectra are not background or
+scattered light subtracted. The interactive task \fBecidentify\fR is used
+to define the dispersion function in one fiber. The idea is to mark some
+lines in a few orders whose wavelengths are known (with the line list used
+to supply additional lines after the first few identifications define the
+approximate wavelengths) and to fit a function giving the wavelength from
+the aperture number and pixel position. The dispersion function for the
+second fiber, if one is present, is then determined automatically by
+reference to the first fiber using the task \fBecreidentify\fR.
+
+The arc dispersion function parameters are for \fBecidentify\fR and its
+related partner \fBecreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and defining whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBecidentify\fR.
+
+Once the reference dispersion functions are defined other arc spectra are
+extracted as they are assigned to the object spectra. The assignment of
+arcs is done either explicitly with an arc assignment table (parameter
+\fIarctable\fR) or based on a header parameter such as a time.
+The assignments are made by the task \fBrefspectra\fR. When two arcs are
+assigned to an object spectrum an interpolation is done between the two
+dispersion functions. This makes an approximate correction for steady
+drifts in the dispersion.
+
+When a second arc fiber monitors any zero point shifts in the dispersion
+functions it is probably only necessary to have one or two arc spectra, one
+at the beginning and/or one at the end of the night.
+
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
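+
+As a sketch, the default time based arc assignment corresponds to the
+following \fBparams\fR settings (these are the defaults and normally need
+not be changed):
+.nf
+
+	cl> params.select = "interp"
+	cl> params.sort = "jd"
+	cl> params.group = "ljd"
+
+.fi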
+
+Defining the dispersion function for a new arc extraction is done with
+the task \fBecreidentify\fR. This is done noninteractively with log
+information recorded about the line reidentifications and the fit.
+
+When there are two fibers there are two full dispersion functions from the
+single or pair of arc spectra, one for the object fiber and one for the arc
+fiber. When an object spectrum is extracted so is the simultaneous arc
+spectrum. A zero point shift of the arc spectrum relative to the
+dispersion solution of the dual arc observation is computed using
+\fBecreidentify\fR (\fIrefit\fR=no). This zero point shift is assumed to
+be the same for the object fiber and it is added to the dispersion function
+of the dual arc observation for the object fiber. Note that this does not
+assume that the object and arc fiber dispersion functions are the same or
+have the same wavelength origin, but only that the same shift in wavelength
+zero point applies to both fibers. Once the dispersion function correction
+is determined from the extracted arc fiber spectrum it is deleted leaving
+only the object spectrum.
+
+The last step of dispersion correction is setting the dispersion
+of the object spectrum. There are two choices here.
+If the \fIlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+
+If the \fIlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. For echelle spectra each order is linearized independently so
+that the wavelength interval per pixel is different in different orders.
+This preserves most of the resolution and avoids over or under sampling of
+the highest or lowest dispersion orders. The wavelength limits are
+taken from the limits determined from the arc reference spectrum and
+the number of pixels is the same as the original images. The dispersion
+per pixel is then derived from these constraints.
+
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
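+
+For example, resampling to logarithmic wavelength bins, as might be desired
+for radial velocity work, could be selected with the settings below (the
+\fIlinearize\fR and \fIflux\fR values shown are the defaults):
+.nf
+
+	cl> params.linearize = yes
+	cl> params.log = yes
+	cl> params.flux = yes
+
+.fi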
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is also the sequence performed
+by the test procedure "demos dofoe". Because the images are small the
+dispersion solution is somewhat simplistic.
+
+.nf
+ec> demos mkdofoe
+Creating image demoobj ...
+Creating image demoflat ...
+Creating image demoarc ...
+ec> echelle.verbose = yes
+ec> dofoe demoobj apref=demoflat flat=demoflat arcs=demoarc \
+>>> norders=3 width=5.
+Set reference apertures for demoflat
+Searching aperture database ...
+Finding apertures ...
+Mar 4 9:39: FIND - 6 apertures found for demoflat
+Resize apertures for demoflat? (yes):
+Resizing apertures ...
+Mar 4 9:39: RESIZE - 6 apertures resized for demoflat
+<Review aperture assignments. Exit with 'q'>
+Fit traced positions for demoflat interactively? (yes):
+Tracing apertures ...
+Fit curve to aperture 1 of demoflat interactively (yes):
+<Review trace and fit. Exit with 'q'>
+Fit curve to aperture 2 of demoflat interactively (yes): N
+Mar 4 9:39: TRACE - 6 apertures traced in demoflat.
+Mar 4 9:39: DATABASE - 6 apertures for demoflat written to database
+Create response function demoflatnorm.ec
+Extract flat field demoflat
+Searching aperture database ...
+Mar 4 9:39: DATABASE - 6 apertures read for demoflat from database
+Extracting apertures ...
+Mar 4 9:39: EXTRACT - Aperture 1 from demoflat --> demoflat.ec
+Mar 4 9:39: EXTRACT - Aperture 2 from demoflat --> demoflat.ec
+Mar 4 9:39: EXTRACT - Aperture 3 from demoflat --> demoflat.ec
+Mar 4 9:39: EXTRACT - Aperture 4 from demoflat --> demoflat.ec
+Mar 4 9:39: EXTRACT - Aperture 5 from demoflat --> demoflat.ec
+Mar 4 9:40: EXTRACT - Aperture 6 from demoflat --> demoflat.ec
+Fit and ratio flat field demoflat
+Create the normalized response demoflatnorm.ec
+demoflatnorm.ec -> demoflatnorm.ec using bzero: 0. and bscale: 1.
+ mean: 1. median: 0.9990048 mode: 0.9876572
+ upper: INDEF lower: INDEF
+Extract arc reference image demoarc
+Mar 4 9:40: DATABASE - 6 apertures read for demoflat from database
+Mar 4 9:40: DATABASE - 6 apertures for demoarc written to database
+Mar 4 9:40: EXTRACT - Aperture 1 from demoarc --> demoarc.ec
+Mar 4 9:40: EXTRACT - Aperture 2 from demoarc --> demoarc.ec
+Mar 4 9:40: EXTRACT - Aperture 3 from demoarc --> demoarc.ec
+Mar 4 9:40: EXTRACT - Aperture 4 from demoarc --> demoarc.ec
+Mar 4 9:40: EXTRACT - Aperture 5 from demoarc --> demoarc.ec
+Mar 4 9:40: EXTRACT - Aperture 6 from demoarc --> demoarc.ec
+Determine dispersion solution for demoarc
+<Mark lines with 'm' and change orders with 'k'
+<'m' line at pixel 78 and assign 4965.
+<'k' to order 2
+<'m' line at pixel 78 and assign 5009
+<'m' line at pixel 78 and assign 5020
+<'k' to order 3
+<'m' line at pixel 78 and assign 5049.8
+<'m' line at pixel 78 and assign 5050.8
+<'m' line at pixel 78 and assign 5055.3
+<'m' line at pixel 78 and assign 5062
+<'m' line at pixel 78 and assign 5064.9
+<'f' to fit
+<'q' to quit fit and 'q' to quit ECIDENTIFY
+
+ECREIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Wed 09:54:16 04-Mar-92
+ Reference image = demoarc.ec, Refit = yes
+ Image Found Fit Pix Shift User Shift Z Shift RMS
+ d...ec 8/8 8/8 1.48 7.06 2.11E-5 0.00879
+d...ec: ap = 1, w1 = 4959.1, w2 = 4978.5, dw = 0.076, nw = 256
+d...ec: ap = 2, w1 = 5003.4, w2 = 5022.1, dw = 0.073, nw = 256
+d...ec: ap = 3, w1 = 5049.0, w2 = 5067.0, dw = 0.070, nw = 256
+Extract object spectrum demoobj
+Searching aperture database ...
+Mar 4 9:54: DATABASE - 6 apertures read for demoflat from database
+Recentering apertures ...
+Mar 4 9:54: RECENTER - 6 apertures shifted by -0.03 for demoobj.
+Mar 4 9:54: DATABASE - 6 apertures for demoobj written to database
+Extracting apertures ...
+Mar 4 9:54: EXTRACT - Aperture 1 from demoobj --> demoobj.ec
+Mar 4 9:54: EXTRACT - Aperture 2 from demoobj --> demoobj.ec
+Mar 4 9:54: EXTRACT - Aperture 3 from demoobj --> demoobj.ec
+Mar 4 9:54: EXTRACT - Aperture 4 from demoobj --> demoobj.ec
+Mar 4 9:54: EXTRACT - Aperture 5 from demoobj --> demoobj.ec
+Mar 4 9:54: EXTRACT - Aperture 6 from demoobj --> demoobj.ec
+Assign arc spectra for demoobj
+[demoobj] refspec1='demoarc'
+Reidentify arc fibers in demoobj with respect to demoarc
+
+ECREIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Wed 09:54:28 04-Mar-92
+ Reference image = demoarcarc.ec, Refit = no
+ Image Found Fit Pix Shift User Shift Z Shift RMS
+ d...ec 8/8 8/8 0.119 0.566 1.69E-6 0.00834
+Dispersion correct demoobj
+d...ec.imh: ap = 1, w1 = 4959.1, w2 = 4978.5, dw = 0.076, nw = 256
+d...ec.imh: ap = 2, w1 = 5003.4, w2 = 5022.1, dw = 0.073, nw = 256
+d...ec.imh: ap = 3, w1 = 5049.0, w2 = 5067.0, dw = 0.070, nw = 256
+.fi
+.ih
+REVISIONS
+.ls DOFOE V2.10.3
+The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A scattered
+light subtraction processing option has been added.
+.le
+.ih
+SEE ALSO
+apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace, apvariance,
+ccdred, center1d, dispcor, fit1d, icfit, ecidentify, observatory,
+onedspec.package, refspectra, ecreidentify, setairmass, setjd
+.endhelp
diff --git a/noao/imred/echelle/doc/dofoe.ms b/noao/imred/echelle/doc/dofoe.ms
new file mode 100644
index 00000000..1f283f46
--- /dev/null
+++ b/noao/imred/echelle/doc/dofoe.ms
@@ -0,0 +1,1371 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND February 1993
+.TL
+Guide to the Fiber Optic Echelle Reduction Task DOFOE
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+The \fBdofoe\fR reduction task is specialized for scattered light
+subtraction, extraction, flat fielding, and wavelength calibration of Fiber
+Optic Echelle (FOE) spectra. It is a command language script which
+collects and combines the functions and parameters of many general purpose
+tasks to provide a single complete data reduction path. The task provides
+a degree of guidance, automation, and record keeping necessary when dealing
+with the complexities of reducing this type of data.
+.AE
+.NH
+Introduction
+.LP
+The \fBdofoe\fR reduction task is specialized for scattered light
+subtraction, extraction, flat fielding, and wavelength calibration of Fiber
+Optic Echelle (FOE) spectra. It is a command language script which
+collects and combines the functions and parameters of many general purpose
+tasks to provide a single complete data reduction path. The task provides
+a degree of guidance, automation, and record keeping necessary when dealing
+with the complexities of reducing this type of data.
+.LP
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \f(CWredo\fR and \f(CWupdate\fR options, skips or
+repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdofoe\fR combines many separate, general purpose tasks the description
+given here refers to these tasks and leaves some of the details to their
+help documentation.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images must first be processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+.IP [2]
+Set the \fBdofoe\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Verify and set the format parameters, particularly the number of orders to be
+extracted and processed. The processing parameters are set
+for simple extraction and dispersion correction but dispersion correction
+can be turned off for quicklook or background subtraction and cleaning
+may be added.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.IP [4]
+The apertures are defined using the specified aperture reference image
+which is usually a flat field in which both the object and arc fibers are
+illuminated. The specified number of orders are found automatically and
+sequential apertures assigned. The resize option sets the aperture size to
+the widths of the profiles at a fixed fraction of the peak height.
+.IP [5]
+The automatic order identification and aperture assignment is based on peak
+height and may be incorrect. The interactive aperture editor is entered
+with a plot of the apertures. It is essential that the object and arc
+fiber orders are properly paired with the arc fibers having even aperture
+numbers and the object fibers having odd aperture numbers. It is also
+required that no orders be skipped in the region of interest. Missing
+orders are added with the 'm' key. Once all orders have been marked the
+aperture numbers are resequenced with 'o'. If local background subtraction
+is selected the background regions should be checked with the 'b' key.
+Preceding this with the 'a' key allows any changes to the background
+regions to be applied to all orders. To exit type 'q'.
+.IP [6]
+The order positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all orders need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.IP [7]
+If flat fielding is to be done the flat field spectra are extracted. A
+smooth function is fit to each flat field spectrum to remove the large
+scale spectral signature. The final response spectra are normalized to a
+unit mean over all fibers.
+.IP [8]
+If scattered light subtraction is selected the scattered light parameters
+are set using the aperture reference image and the task \fBapscatter\fR.
+The purpose of this is to interactively define the aperture buffer distance
+for the scattered light and the cross and parallel dispersion fitting
+parameters. The fitting parameters are taken from and recorded in the
+parameter sets \fBapscat1\fR and \fBapscat2\fR. All other scattered light
+subtractions are done noninteractively with these parameters. Note that
+the scattered light correction modifies the input images.
+.IP [9]
+If dispersion correction is selected the first arc in the arc list is
+extracted. One fiber is used to identify the arc lines and define the
+dispersion function using the task \fBecidentify\fR. Identify a few arc
+lines in a few orders with 'm' and 'k' or 'o', use the 'l' line list
+identification command to automatically add additional lines and fit the
+dispersion function. Check the quality of the dispersion function fit
+with 'f'. When satisfied exit with 'q'.
+.IP [10]
+The other fiber dispersion function is automatically determined using
+the task \fBecreidentify\fR.
+.IP [11]
+The arc reference spectrum is dispersion corrected.
+If the spectra are resampled to a linear dispersion system
+(which will be the same for all spectra) the dispersion parameters
+determined from the dispersion solution are printed.
+.IP [12]
+The object spectra are now automatically background subtracted (an
+alternative to scattered light subtraction), extracted, flat fielded,
+and dispersion corrected. Any new dispersion function reference arcs
+assigned to the object images are automatically extracted and
+dispersion functions determined. A zero point wavelength correction
+is computed from the simultaneous arc fiber spectrum and applied to
+the object spectrum.
+.IP [13]
+The final spectra will have the same name as the original 2D images
+with a ".ec" extension added.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of dual fiber FOE object and calibration spectra
+stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must be processed to remove
+overscan, bias, and dark count effects. This is generally done using the
+\fBccdred\fR package. Flat fielding is generally not done at this stage
+but as part of \fBdofoe\fR. The calibration spectra are
+flat field observations in both fibers, comparison arc lamp spectra
+in both fibers, and arc spectra in one fiber while the second
+fiber observes the object. If for some reason the flat field or
+calibration arc spectra have separate exposures for the two fibers
+the separate exposures may simply be added.
+.LP
+The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in the task \fBrefspectra\fR.
+.LP
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ec" extension. Each line in the reduced image is a one dimensional
+spectrum (an echelle order) with associated aperture and wavelength
+information. When the \f(CWextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images. The task \fBscombine\fR is used to combine or merge orders into
+a single spectrum.
+.NH
+Package Parameters
+.LP
+The \fBechelle\fR package parameters, shown in Figure 1, set parameters
+affecting all the tasks in the package. Some of the parameters are not
+applicable to the \fBdofoe\fR task.
+.KS
+.V1
+
+.ce
+Figure 1: Package Parameter Set for the ECHELLE Package
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = echelle
+
+(extinct= onedstds$kpnoextinct.dat) Extinction file
+(caldir = onedstds$spechayescal/) Standard star calibration directory
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+(dispaxi= 2) Image axis for 2D images
+(nsum = 1) Number of lines/columns to sum for 2D images
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Text log file
+(plotfil= ) Plot file
+
+(records= ) Record number extensions
+(version= ECHELLE V3: July 1991)
+
+.KE
+.V2
+The observatory parameter is only required for data
+without an OBSERVAT header parameter (currently included in NOAO data).
+The spectrum interpolation type might be changed to "sinc" but with the
+cautions given in \fBonedspec.package\fR. The dispersion axis parameter is
+only needed if a DISPAXIS image header parameter is not defined. The other
+parameters define the standard I/O functions. The verbose parameter
+selects whether to print everything which goes into the log file on the
+terminal. It is useful for monitoring what the \fBdofoe\fR task does. The
+log and plot files are useful for keeping a record of the processing. A
+log file is highly recommended. A plot file provides a record of
+apertures, traces, and extracted spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+.NH
+Processing Parameters
+.LP
+The \fBdofoe\fR parameters are shown in Figure 2.
+.KS
+.V1
+
+.ce
+Figure 2: Parameters Set for DOFOE
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = echelle
+ TASK = dofoe
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(flat = ) Flat field spectrum
+(arcs = ) List of arc spectra
+(arctabl= ) Arc assignment table (optional)
+
+.KE
+.V1
+(readnoi= 0.) Read out noise sigma (photons)
+(gain = 1.) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(norders= 12) Number of orders
+(width = 4.) Width of profiles (pixels)
+(arcaps = 2x2) Arc apertures
+
+(fitflat= yes) Fit and ratio flat field spectrum?
+(backgro= none) Background to subtract
+(clean = no) Detect and replace bad pixels?
+(dispcor= yes) Dispersion correct spectra?
+(redo = no) Redo operations if previously done?
+(update = no) Update spectra if cal data changes?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(params = ) Algorithm parameters
+
+.V2
+The input images are specified by image lists. The lists may be
+a list of explicit, comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+The aperture reference spectrum is used to find the orders and trace
+them. Thus, this requires an image with good signal in both fibers
+which usually means a flat field spectrum. It is recommended that
+flat field correction be done using one dimensional extracted spectra
+rather than as two dimensional images. This is done if a flat field
+spectrum is specified. The arc assignment table is used to specifically
+assign arc spectra to particular object spectra and the format
+of the file is described in \fBrefspectra\fR.
+.LP
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. The dispersion axis defines the wavelength direction
+of spectra in the image if not defined in the image header by the keyword
+DISPAXIS. The width parameter (in pixels) is used for the profile
+centering algorithm (\fBcenter1d\fR).
+.LP
+The number of orders selects the number of "pairs" of object and arc
+fiber profiles to be automatically found based on the strongest peaks.
+Because it is important that both elements of a pair be found,
+no orders be skipped, and the aperture numbers be sequential with
+arc profiles having even aperture numbers and object profiles having
+odd numbers in the region of interest, the automatic identification is
+just a starting point for the interactive review. The even/odd
+relationship between object and arc profiles is set by the \f(CWarcaps\fR
+parameter and so may be reversed if desired.
+.LP
+The next set of parameters select the processing steps and options. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels, including the blaze function, and not introducing the reciprocal of
+the flat field spectrum into the object spectra. If not selected the flat
+field will remove the blaze function from the observations and introduce
+some wavelength dependence from the flat field lamp spectrum.
+.LP
+The \f(CWbackground\fR option selects the type of correction for background or
+scattered light. If the type is "scattered" a global scattered light is
+fit to the data between the apertures and subtracted from the images.
+\fINote that the input images are modified by this operation\fR. This
+option is slow. Alternatively, a local background may be subtracted using
+background regions defined for each aperture. The data in the regions may
+be averaged, medianed, or the minimum value used. Another choice is to fit
+the data in the background regions by a function and interpolate to the
+object aperture.
+.LP
+The \f(CWclean\fR option invokes a profile fitting and deviant point rejection
+algorithm as well as a variance weighting of points in the aperture. These
+options require knowing the effective (i.e. accounting for any image
+combining) read out noise and gain. For a discussion of cleaning and
+variance weighted extraction see \fBapvariance\fR and \fBapprofiles\fR.
+.LP
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+reference image, new flat field, adding the scattered light option, and a
+new arc reference image. If all input spectra are to be processed
+regardless of previous processing the \f(CWredo\fR flag may be used. Note
+that reprocessing clobbers the previously processed output spectra.
+.LP
+The \f(CWbatch\fR processing option allows object spectra to be processed as
+a background or batch job. The \f(CWlistonly\fR option prints a summary of
+the processing steps which will be performed on the input spectra without
+actually doing anything. This is useful for verifying which spectra will
+be affected if the input list contains previously processed spectra. The
+listing does not include any arc spectra which may be extracted to
+dispersion calibrate an object spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdofoe\fR
+works this parameter need not be given a value; the parameter set \fBparams\fR is always
+used. The algorithm parameters are discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the \fBdofoe\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdofoe\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parenthesis. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+FOE data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.V1
+
+ cl> epar params
+
+.V2
+or simply typing \f(CWparams\fR. The parameter editor can also be
+entered when editing the \fBdofoe\fR parameters by typing \f(CW:e
+params\fR or simply \f(CW:e\fR if positioned at the \f(CWparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = echelle
+ TASK = params
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -3.) Lower aperture limit relative to center
+(upper = 3.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 2) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- DEFAULT BACKGROUND PARAMETERS --
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+(b_funct= legendre) Background function
+(b_order= 2) Background function order
+(b_sampl= -10:-6,6:10) Background sample regions
+(b_naver= -3) Background average or median
+(b_niter= 0) Background rejection iterations
+(b_low = 3.) Background lower rejection sigma
+(b_high = 3.) Background upper rejection sigma
+(b_grow = 0.) Background rejection growing radius
+(b_smoot= 10) Background smoothing length
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+
+.KE
+.KS
+.V1
+ -- FLAT FIELD FUNCTION FITTING PARAMETERS --
+(f_inter= no) Fit flat field interactively?
+(f_funct= spline3) Fitting function
+(f_order= 20) Fitting function order
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli= linelist$thar.dat) Line list
+(match = 1.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 4.) Centering radius in pixels
+(i_funct= chebyshev) Echelle coordinate function
+(i_xorde= 3) Order of coordinate function along dispersion
+(i_yorde= 3) Order of coordinate function across dispersion
+(i_niter= 3) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.V2
+.NH 2
+Aperture Definitions
+.LP
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the object and arc orders of interest. This is done
+on a reference spectrum which is usually a flat field taken through
+both fibers. Other spectra will inherit the reference apertures and
+apply a correction for any shift of the orders across the dispersion.
+The reference apertures are defined only once unless the \f(CWredo\fR
+option is set.
+.LP
+The selected number of orders are found automatically by selecting the
+highest peaks in a cut across the dispersion. Note that the specified
+number of orders is multiplied by two in defining the apertures. Apertures
+are assigned with limits set by the \f(CWlower\fR and
+\f(CWupper\fR parameters and numbered sequentially. A query is then
+given allowing the aperture limits to be "resized" based on the profile
+itself (see \fBapresize\fR).
+.LP
+A cut across the orders is then shown with the apertures marked and
+an interactive aperture editing mode is entered (see \fBapedit\fR).
+For \fBdofoe\fR the aperture identifications and numbering are particularly
+critical. All "pairs" of object and arc orders in the region of
+interest must be defined without skipping any orders. The orders must
+also be numbered sequentially (though the direction does not matter)
+so that the arc apertures are either all even or all odd as defined
+by the \f(CWarcaps\fR parameter (the default is even numbers for the
+arc apertures). The 'o' key will provide the necessary reordering.
+If local background subtraction is used the background regions should
+also be checked with the 'b' key. Typically one adjusts all
+the background regions at the same time by selecting all apertures with
+the 'a' key first. To exit the background and aperture editing steps type
+'q'.
+.LP
+Next the positions of the orders at various points along the dispersion are
+measured and "trace functions" are fit. The user is asked whether to fit
+each trace function interactively. This is selected to adjust the fitting
+parameters such as function type and order. When fitting interactively, a
+query is given for each aperture. After the first aperture one may skip
+reviewing the other traces by responding with "NO". Queries made by
+\fBdofoe\fR generally may be answered with either lower case "yes" or "no"
+or with upper case "YES" or "NO". The upper case responses apply to all
+further queries and so are used to eliminate further queries of that kind.
+.LP
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBparams\fR parameter set. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the orders and the number of image lines or
+columns to sum are set by the \f(CWline\fR and \f(CWnsum\fR parameters. A line
+of INDEF (the default) selects the middle of the image. The automatic
+finding algorithm is described for the task \fBapfind\fR and basically
+finds the strongest peaks. The resizing is described in the task
+\fBapresize\fR and the parameters used are also described there and
+identified in the PARAMETERS section. The tracing is done as described in
+\fBaptrace\fR and consists of stepping along the image using the specified
+\f(CWt_step\fR parameter. The function fitting uses the \fBicfit\fR commands
+with the other parameters from the tracing section.
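+.LP
+As a minimal sketch (the image names here are hypothetical), the
+\fBparams\fR parameter set may be reviewed and the reduction started
+from the command language with
+.V1
+
+cl> lpar params
+cl> epar params
+cl> dofoe objects="obj*.imh" apref="flat001" flat="flat001" arcs="arc001"
+
+.V2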
+.NH 2
+Background or Scattered Light Subtraction
+.LP
+In addition to not subtracting any background scattered light there are two
+approaches to subtracting this light. The first is to determine a smooth
+global scattered light component. The second is to subtract a locally
+determined background at each point along the dispersion and for each
+aperture. Note that background subtraction is only done for object images
+and not for arc images.
+.LP
+The global scattered light fitting and subtraction is done with the task
+\fBapscatter\fR. The function fitting parameters are set interactively
+using the aperture reference spectrum. All other subtractions are done
+noninteractively with the same set of parameters. The scattered light is
+subtracted from the input images, thus modifying them, and one might wish
+to first make backups of the original images.
+.LP
+The scattered light is measured between the apertures using a specified
+buffer distance from the aperture edges. The scattered light pixels are
+fit by a series of one dimensional functions across the dispersion. The
+independent fits are then smoothed along the dispersion by again fitting
+low order functions. These fits then define the smooth scattered light
+surface to be subtracted from the image. The fitting parameters are
+defined and recorded in the two parameter sets \f(CWapscat1\fR and
+\f(CWapscat2\fR. The scattered light algorithm is described more fully in
+\fBapscatter\fR. This algorithm is relatively slow.
+.LP
+Local background subtraction is done during extraction based on background
+regions and parameters defined by the default background parameters or
+changed during interactive review of the apertures. The background
+subtraction options are to subtract the average, median, or minimum of the
+pixels in the background regions, or to fit a function and subtract the
+function from under the extracted object pixels. The background regions
+are specified in pixels from the aperture center and follow changes in
+center of the spectrum along the dispersion. The syntax is colon separated
+ranges with multiple ranges separated by a comma or space. The background
+fitting uses the \fBicfit\fR routines which include medians, iterative
+rejection of deviant points, and a choice of function types and orders.
+Note that it is important to use a method which rejects cosmic rays such as
+using either medians over all the background regions (\f(CWbackground\fR =
+"median") or median samples during fitting (\f(CWb_naverage\fR < -1).
+The background smoothing parameter \f(CWb_smooth\fR may be used
+to provide some additional local smoothing of the background light.
+The background subtraction algorithm and options are described in greater
+detail in \fBapsum\fR and \fBapbackground\fR.
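+.LP
+As an illustrative sketch, local background fitting with cosmic ray
+resistant median samples might be selected by setting (the values shown
+are examples only)
+.V1
+
+cl> dofoe.background = "fit"
+cl> params.b_naverage = -100
+
+.V2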
+.NH 2
+Extraction
+.LP
+The actual extraction of the spectra is done by summing across the fixed
+width apertures at each point along the dispersion. The default is to
+simply sum the pixels using partial pixels at the ends. There is an
+option to weight the sum based on a Poisson noise model using the
+\f(CWreadnoise\fR and \f(CWgain\fR detector parameters. Note that if the
+\f(CWclean\fR option is selected the variance weighted extraction is used
+regardless of the \f(CWweights\fR parameter. The sigma thresholds for
+cleaning are also set in the \fBparams\fR parameter set.
+.LP
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as a scattered light
+subtraction) or scaling (such as caused by unnormalized flat fielding).
+These options also require using background subtraction if the profile does
+not go to zero. For optimal extraction and cleaning to work it is
+recommended that any scattered light be accounted for by local background
+subtraction rather than with the scattered light subtraction and the
+\f(CWfitflat\fR option be used. The \f(CWb_smooth\fR parameter is also
+appropriate in this application and improves the optimal extraction results
+by reducing noise in the background signal. For further discussion of
+cleaning and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR as well as \fBapsum\fR.
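+.LP
+For example, cleaning with the noise parameters taken from hypothetical
+image header keywords (the keyword names depend on the data) might be
+enabled with
+.V1
+
+cl> dofoe.readnoise = "RDNOISE"
+cl> dofoe.gain = "GAIN"
+cl> dofoe.clean = yes
+
+.V2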
+.NH 2
+Flat Field Correction
+.LP
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdofoe\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdofoe\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+.LP
+The first step is extraction of the flat field spectrum, if one is specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups. When
+the \f(CWfitflat\fR option is selected (the default) the extracted flat field
+spectra are fit by smooth functions and the ratio of the flat field spectra
+to the smooth functions define the response spectra. The default fitting
+function and order are given by the parameters \f(CWf_function\fR and
+\f(CWf_order\fR. If the parameter \f(CWf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+.LP
+If the \f(CWfitflat\fR option is not selected the extracted and globally
+normalized flat field spectra are divided directly into the object spectra.
+This removes the blaze function, thus altering the data counts, and
+introduces the reciprocal of the flat field spectrum in the object
+spectra.
+.LP
+The final step is to normalize the flat field spectra by the mean counts over
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra.
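+.LP
+A typical flat field setup, shown only as a sketch, selects the response
+fitting and, if desired, adjusts the fitting function order in the
+\fBparams\fR parameter set:
+.V1
+
+cl> dofoe.fitflat = yes
+cl> params.f_order = 20
+
+.V2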
+.NH 2
+Dispersion Correction
+.LP
+If dispersion correction is not selected, \f(CWdispcor\fR=no, then the object
+spectra are simply extracted. If it is selected the arc spectra are used
+to dispersion calibrate the object spectra. There are four steps involved:
+determining the dispersion functions relating pixel position to wavelength,
+assigning the appropriate dispersion function to a particular observation,
+determining a zero point wavelength shift from the arc fiber to be applied
+to the object fiber dispersion function, and either storing the nonlinear
+dispersion function in the image headers or resampling the spectra to
+evenly spaced pixels in wavelength.
+.LP
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture
+definitions. Note extractions of arc spectra are not background or
+scattered light subtracted. The interactive task \fBecidentify\fR is used
+to define the dispersion function in one fiber. The idea is to mark some
+lines in a few orders whose wavelengths are known (with the line list used
+to supply additional lines after the first few identifications define the
+approximate wavelengths) and to fit a function giving the wavelength from
+the aperture number and pixel position. The dispersion function for
+the second fiber is then determined automatically by reference to the first
+fiber using the task \fBecreidentify\fR.
+.LP
+The arc dispersion function parameters are for \fBecidentify\fR and its
+related partner \fBecreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and defining whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBecidentify\fR.
+.LP
+Once the reference dispersion functions are defined, other arc spectra are
+extracted as they are assigned to the object spectra. The assignment of
+arcs is done either explicitly with an arc assignment table (parameter
+\f(CWarctable\fR) or based on a header parameter such as a time.
+The assignments are made by the task \fBrefspectra\fR. When two arcs are
+assigned to an object spectrum an interpolation is done between the two
+dispersion functions. This makes an approximate correction for steady
+drifts in the dispersion. Because the arc fiber monitors any zero point
+shifts in the dispersion functions it is probably only necessary to have
+one or two arc spectra, one at the beginning and/or one at the end of the
+night.
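+.LP
+If explicit assignments are preferred, a hypothetical arc assignment table
+(see \fBrefspectra\fR for the exact format expected) might pair each object
+with one or two arcs and be selected as follows:
+.V1
+
+cl> type arcassign
+obj001  arc001
+obj002  arc001  arc002
+cl> dofoe.arctable = "arcassign"
+
+.V2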
+.LP
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
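+.LP
+The computed keywords may be examined, for instance, with \fBhselect\fR
+(the image list is hypothetical):
+.V1
+
+cl> hselect obj*.imh "$I,JD,LJD,UTMIDDLE" yes
+
+.V2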
+.LP
+Defining the dispersion function for a new arc extraction is done with
+the task \fBecreidentify\fR. This is done noninteractively with log
+information recorded about the line reidentifications and the fit.
+.LP
+From the one or two arc spectra come two full dispersion functions,
+one for the object fiber and one for the arc fiber. When an object
+spectrum is extracted so is the simultaneous arc spectrum. A zero point
+shift of the arc spectrum relative to the dispersion solution of the
+dual arc observation is computed using \fBecreidentify\fR
+(\f(CWrefit\fR=no). This zero point shift is assumed to be the same for the
+object fiber and it is added to the dispersion function of the dual arc
+observation for the object fiber. Note that this does not assume that the
+object and arc fiber dispersion functions are the same or have the same
+wavelength origin, but only that the same shift in wavelength zero point
+applies to both fibers. Once the dispersion function correction is
+determined from the extracted arc fiber spectrum it is deleted leaving only
+the object spectrum.
+.LP
+The last step of dispersion correction is setting the dispersion
+of the object spectrum. There are two choices here.
+If the \f(CWlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+.LP
+If the \f(CWlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. For echelle spectra each order is linearized independently so
+that the wavelength interval per pixel is different in different orders.
+This preserves most of the resolution and avoids over or under sampling of
+the highest or lowest dispersion orders. The wavelength limits are
+taken from the limits determined from the arc reference spectrum and
+the number of pixels is the same as the original images. The dispersion
+per pixel is then derived from these constraints.
+.LP
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
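+.LP
+As an illustrative example, resampling onto logarithmic wavelength bins for
+radial velocity work might be selected with
+.V1
+
+cl> params.linearize = yes
+cl> params.log = yes
+
+.V2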
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+The tasks in the \fBechelle\fR package and the tasks used by \fBdofoe\fR have
+help pages describing the parameters and the task in some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
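+.LP
+For example, a printed copy of the \fBdispcor\fR help page might be made
+with
+.V1
+
+cl> help dispcor | lprint
+
+.V2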
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+     apmask - Create an IRAF pixel list mask of the apertures
+apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ ecidentify - Identify features in spectrum for dispersion solution
+ecreidentify - Automatically identify features in spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+ standard - Identify standard stars to be used in sensitivity calc
+
+ dofoe - Process Fiber Optic Echelle spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+
+.V2
+.SH
+Appendix A: DOFOE Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.LE
+flat = "" (optional)
+.LS
+Flat field spectrum. If specified the one dimensional flat field spectrum
+is extracted and used to make flat field calibrations.
+.LE
+arcs = "" (at least one if dispersion correcting)
+.LS
+List of arc spectra in which both fibers have arc spectra. These spectra
+are used to define the dispersion functions for each fiber apart from a
+zero point correction made with the arc fiber during an observation. One
+fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for the other fiber and arc
+spectra are derived from it.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining arc spectra to be assigned to object spectra (see
+\fBrefspectra\fR). If not specified an assignment based on a header
+parameter, \f(CWparams.sort\fR, such as the observation time is made.
+.LE
+
+readnoise = "0." (apsum)
+.LS
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.LE
+gain = "1." (apsum)
+.LS
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+norders = 12 (apfind)
+.LS
+Number of orders to be found. This number is used during the automatic
+definition of the apertures from the aperture reference spectrum. Note
+that the number of apertures defined is twice this number, one set for
+the object fiber orders and one set for the arc fiber orders.
+The interactive review of the aperture assignments allows verification
+and adjustments to the automatic aperture definitions.
+.LE
+width = 4. (apedit)
+.LS
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.LE
+arcaps = "2x2"
+.LS
+List of arc fiber aperture numbers.
+Since the object and arc fiber orders are paired the default setting
+expects the even numbered apertures to be the arc apertures. This should
+be checked interactively.
+.LE
+
+fitflat = yes (flat1d)
+.LS
+Fit and divide the extracted flat field orders by a smooth function
+in order to normalize the wavelength response? If not done the flat field
+spectral shape (which includes the blaze function) will be divided
+out of the object spectra, thus altering the object data values.
+If done only the small scale response variations are included in the
+flat field and the object spectra will retain their observed flux
+levels and blaze function.
+.LE
+background = "none" (apsum, apscatter)
+.LS
+Type of background light subtraction. The choices are "none" for no
+background subtraction, "scattered" for a global scattered light
+subtraction, "average" to average the background within background regions,
+"median" to use the median in background regions, "minimum" to use the
+minimum in background regions, or "fit" to fit across the dispersion using
+the background within background regions. The scattered light option fits
+and subtracts a smooth global background and modifies the input images.
+This is a slow operation. The
+other background options are local to each aperture at each point along the
+dispersion. The "fit" option uses additional fitting parameters from
+\fBparams\fR and the "scattered" option uses parameters from \fBapscat1\fR
+and \fBapscat2\fR.
+.LE
+clean = yes (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.LE
+dispcor = yes
+.LS
+Dispersion correct spectra? Depending on the \f(CWparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.LE
+update = no
+.LS
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+params = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.LE
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdofoe\fR.
+
+observatory = "observatory"
+.LS
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For FOE data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
+.LE
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter.
+.LE
+database = "database"
+.LS
+Database (directory) used for storing aperture and dispersion information.
+.LE
+verbose = no
+.LS
+Print verbose information available with various tasks.
+.LE
+logfile = "logfile", plotfile = ""
+.LS
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.LE
+records = ""
+.LS
+Dummy parameter to be ignored.
+.LE
+version = "ECHELLE: ..."
+.LS
+Version of the package.
+.LE
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdofoe\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+extras = no (apsum)
+.LS
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -3., upper = 3. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 2 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- DEFAULT BACKGROUND PARAMETERS --
+
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the edge of any aperture for data to be included
+in the scattered light determination. This parameter may be modified
+interactively.
+.LE
+apscat1 = "", apscat2 = "" (apscatter)
+.LS
+Parameter sets for the fitting functions across and along the dispersion.
+These parameters are those used by \fBicfit\fR. These parameters are
+usually set interactively.
+.LE
+b_function = "legendre", b_order = 1 (apsum)
+.LS
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+b_naverage = -100 (apsum)
+.LS
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.LE
+b_niterate = 0 (apsum)
+.LS
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.LE
+b_low_reject = 3., b_high_reject = 3. (apsum)
+.LS
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.LE
+b_smooth = 10 (apsum)
+.LS
+Box car smoothing length for background when using background
+subtraction. Since the background noise is often the limiting factor
+for good extraction one may box car smooth the background to improve the
+statistics.
+.LE
+
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum) (fit1d|fit2d)
+.LS
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for FOE data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+
+f_interactive = no (fit1d)
+.LS
+Fit the one dimensional flat field order spectra interactively?
+This is used if \f(CWfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.LE
+f_function = "spline3", f_order = 20 (fit1d)
+.LS
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelist$thar.dat" (ecidentify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelist$".
+.LE
+match = 1. (ecidentify)
+.LS
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (ecidentify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 4. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "chebyshev", i_xorder = 3, i_yorder = 3 (ecidentify)
+.LS
+The default function, function order for the pixel position dependence, and
+function order for the aperture number dependence to be fit to the arc
+wavelengths. The function choices are "chebyshev" or "legendre".
+.LE
+i_niterate = 3, i_low = 3.0, i_high = 3.0 (ecidentify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (ecreidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding or following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd", group = "ljd" (refspectra)
+.LS
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdofoe\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no, the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
diff --git a/noao/imred/echelle/doc/ecidentify.hlp b/noao/imred/echelle/doc/ecidentify.hlp
new file mode 100644
index 00000000..61187a43
--- /dev/null
+++ b/noao/imred/echelle/doc/ecidentify.hlp
@@ -0,0 +1,770 @@
+.help ecidentify May88 noao.imred.echelle
+.ih
+NAME
+ecidentify -- Determine the dispersion relation in echelle spectra
+.ih
+USAGE
+ecidentify images
+.ih
+PARAMETERS
+.ls images
+List of echelle format spectra in which to identify lines and fit
+dispersion functions.
+.le
+.ls database = "database"
+Database in which the feature data and dispersion functions are recorded.
+.le
+.ls coordlist = "linelists$idhenear.dat"
+User coordinate list consisting of an ordered list of line coordinates. A
+comment line of the form "# units <units>", where <units> is one of the
+understood units names, defines the units of the line list. If no units
+are specified then Angstroms are assumed. Some standard line lists are
+available in the directory "linelists$". The standard line lists are
+described under the topic \fIlinelists\fR.
+.le
+.ls units = ""
+The units to use if no database entry exists. The units are specified as
+described in
+
+.nf
+ cl> help onedspec.package section=units
+.fi
+
+If no units are specified and a coordinate list is used then the units of
+the coordinate list are selected. If a database entry exists then the
+units defined there override both this parameter and the coordinate list.
+.le
+.ls match = 1.
+The maximum difference for a match between the feature coordinate function
+value and a coordinate in the coordinate list. The unit of this parameter
+is that of the user coordinates.
+.le
+.ls maxfeatures = 100
+Maximum number of the strongest features to be selected automatically from
+the coordinate list (function 'l') or from the image data (function 'y').
+.le
+.ls zwidth = 10.
+Width of graphs, in user coordinates, when in zoom mode (function 'z').
+.le
+
+The following parameters are used in determining feature positions.
+.ls ftype = "emission"
+Type of features to be identified. The possibly abbreviated choices are
+"emission" and "absorption".
+.le
+.ls fwidth = 4.
+Width in pixels of features to be identified.
+.le
+.ls cradius = 5.
+The maximum distance, in pixels, allowed between a feature position
+and the initial estimate when defining a new feature.
+.le
+.ls threshold = 10.
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls minsep = 2.
+The minimum separation, in pixels, allowed between feature positions
+when defining a new feature.
+.le
+
+The following default parameters are used when fitting a function to
+the user coordinates. If a previous solution is read from the database
+then the parameters from that solution override the defaults below.
+.ls function = "chebyshev"
+The function to be fit to the user coordinates as a function of the pixel
+coordinate and aperture number. The choices are bi-dimensional
+"chebyshev" and "legendre" polynomials.
+.le
+.ls xorder = 2
+Order of the fitting function along each echelle order.
+The order is the number of polynomial terms; i.e. xorder = 2 is a linear
+function.
+.le
+.ls yorder = 2
+Order of the fitting function with respect to the aperture number.
+The order is the number of polynomial terms; i.e. yorder = 2 is a linear
+function.
+.le
+.ls niterate = 0, lowreject = 3, highreject = 3.
+Default number of rejection iterations and the sigma clipping thresholds. If
+\fIniterate\fR is zero then no rejection is done.
+.le
+
+The following parameters control the graphics input and output.
+.ls graphics = "stdgraph"
+Graphics device. The default is the standard graphics device which is
+generally a graphics terminal.
+.le
+.ls cursor = ""
+Cursor input file. If a cursor file is not given then the standard graphics
+cursor is read.
+.le
+.ih
+CURSOR KEYS
+
+.nf
+ ECIDENTIFY CURSOR KEY AND COLON COMMAND SUMMARY
+
+? Help a Affect all features c Center feature(s)
+d Delete feature(s) f Fit dispersion g Fit zero point shift
+i Initialize j Go to previous order k Go to next order
+l Match coordinate list m Mark feature n Next feature
+o Go to specified order p Pan graph q Quit
+r Redraw graph s Shift feature t Reset position
+u Enter user coordinate w Window graph x Crosscorrelate peaks
+y Find peaks z Zoom graph . Nearest feature
++ Next feature - Previous feature I Interrupt
+
+:show [file] :features [file] :coordlist [file]
+:cradius [value] :threshold [value] :database [file]
+:ftype [type] :fwidth [value] :image [image]
+:labels [type] :match [value] :maxfeatures [value]
+:minsep [value] :read [image] :write [image]
+:zwidth [value]
+
+
+ ECHELLE DISPERSION FUNCTION FITTING COMMAND SUMMARY
+
+? Help c Print coordinates d Delete point
+f Fit dispersion o Fit with fixed order offset q Quit
+r Redraw graph u Undelete point w Window graph
+x Set ordinate y Set abscissa I Interrupt
+
+:show :function [value] :highreject [value] :lowreject [value]
+:niterate [value] :xorder [value] :yorder [value]
+
+.fi
+
+ ECIDENTIFY CURSOR KEYS AND COLON COMMANDS
+.ls ?
+Clear the screen and print a menu of cursor and colon commands.
+.le
+.ls a
+Apply next (c)enter or (d)elete operation to (a)ll features
+.le
+.ls c
+(C)enter the feature nearest the cursor. Used when changing the position
+finding parameters or when features are defined from a previous feature list.
+May be used in combination with the (a)ll key.
+.le
+.ls d
+(D)elete the feature nearest the cursor. (D)elete all features when preceded
+by the (a)ll key. This does not affect the dispersion function.
+.le
+.ls f
+(F)it a function of the pixel coordinates and aperture numbers to the user
+coordinates. This enters an interactive function fitting package.
+.le
+.ls g
+Fit a zero point shift to the user coordinates by minimizing the difference
+between the user and fitted coordinates. The coordinate dispersion function
+is not changed.
+.le
+.ls i
+(I)nitialize (delete features and dispersion function fit).
+.le
+.ls j
+Go to the next aperture in decreasing line number in the echelle format image.
+Wrap around to the last line from the first line.
+.le
+.ls k
+Go to the next aperture in increasing line number in the echelle format image.
+Wrap around to the first line from the last line.
+.le
+.ls l
+(L)ocate features in the coordinate list. A coordinate function must be
+defined or at least four features in more than one aperture must have user
+coordinates from which a coordinate function can be determined by an
+initial automatic function fit.
+.le
+.ls m
+(M)ark a new feature using the cursor position as the initial position
+estimate.
+.le
+.ls n
+Move the cursor or zoom to the (n)ext feature (same as +).
+.le
+.ls o
+Go to a specific aperture (related to an echelle (o)rder). The user
+is queried for the aperture number.
+.le
+.ls p
+(P)an to the original window after (z)ooming on a feature.
+.le
+.ls q
+(Q)uit and continue with next image.
+.le
+.ls r
+(R)edraw the graph.
+.le
+.ls s
+(S)hift the fit coordinates relative to the pixel coordinates. The
+user specifies the desired coordinate at the position of the cursor
+and a zero point shift to the fit coordinates is applied. If features
+are defined then they are recentered and the shift is the average shift.
+The shift in pixels, user coordinates, and z (fractional shift) is printed.
+The user shift is for the fundamental order and the shift for each order
+is then given by this shift divided by the order number.
+.le
+.ls t
+Reset the current feature to the position of the cursor. The feature
+is \fInot\fR recentered. This is used to mark an arbitrary position.
+.le
+.ls u
+Enter a new (u)ser coordinate for the current feature.
+When (m)arking a new feature the user coordinate is also requested.
+.le
+.ls w
+(W)indow the graph. A window prompt is given and a number of windowing
+options may be given. For more help type '?' to the window prompt or
+see help under \fIgtools\fR.
+.le
+.ls x
+Crosscorrelate features with the data peaks and reregister. This is
+generally used with a feature list from a different image.
+The mean shift in user coordinates, mean shift in pixels, and the fractional
+shift in user coordinates are printed. The user shift is scaled to the
+fundamental order.
+.le
+.ls y
+Up to \fImaxfeatures\fR emission peaks are found automatically (in order of
+peak intensity) and, if a dispersion solution is defined, the peaks are
+identified from the coordinate list.
+.le
+.ls z
+(Z)oom on the feature nearest the cursor. The width of the zoom window
+is determined by the parameter \fIzwidth\fR.
+.le
+.ls .
+Move the cursor or zoom window to the feature nearest the cursor.
+.le
+.ls 4 +
+Move the cursor or zoom window to the (n)ext feature.
+This does not automatically move to the next aperture.
+.le
+.ls 4 -
+Move the cursor or zoom window to the previous feature.
+This does not automatically move to the next aperture.
+.le
+.ls I
+Interrupt the task immediately. The database is not updated.
+.le
+
+Parameters are shown or set with the following "colon commands", which may be
+abbreviated. To show the value of a parameter type the parameter name alone
+and to set a new value follow the parameter name by the value.
+.ls :show file
+Show the values of all the parameters. If a file name is given then the
+output is appended to that file. If no file is given then the terminal
+is cleared and the output is sent to the terminal.
+.le
+.ls :features file
+Print the feature list and the fit rms. If a file name is given then the
+output is appended to that file. If no file is given then the terminal
+is cleared and the output is sent to the terminal.
+.le
+.ls :coordlist file
+Set or show the coordinate list file.
+.le
+.ls :cradius value
+Set or show the centering radius in pixels.
+.le
+.ls :threshold value
+Set or show the detection threshold for centering.
+.le
+.ls :database name
+Set or show the database for recording feature records.
+.le
+.ls :ftype value
+Set or show the feature type (emission or absorption).
+.le
+.ls :fwidth value
+Set or show the feature width in pixels.
+.le
+.ls :image imagename
+Set a new image or show the current image.
+.le
+.ls :labels value
+Set or show the feature label type (none, index, pixel, or user).
+.le
+.ls :match value
+Set or show the coordinate list matching distance.
+.le
+.ls :maxfeatures value
+Set or show the maximum number of features automatically found.
+.le
+.ls :minsep value
+Set or show the minimum separation allowed between features.
+.le
+.ls :read name
+Read a record from the database. The record name defaults to the image name.
+.le
+.ls :write name
+Write a record to the database. The record name defaults to the image name.
+.le
+.ls :zwidth value
+Set or show the zoom width in user units.
+.le
+
+
+ DISPERSION FUNCTION FITTING COMMANDS
+.ls ?
+Page help information.
+.le
+.ls c
+Print input and fitted coordinates of point nearest the cursor.
+.le
+.ls d
+Delete the nearest undeleted point to the cursor.
+.le
+.ls f
+Fit a dispersion function including determining the order offset.
+.le
+.ls o
+Fit a dispersion function with the order offset fixed. The user is queried
+for the order offset. This is faster than the full fit which also
+determines the order offset.
+.le
+.ls q
+Quit and return to the spectrum display.
+.le
+.ls r
+Redraw the graph.
+.le
+.ls u
+Undelete the nearest deleted point to the cursor (which may be outside the
+graph window).
+.le
+.ls w
+Window the graph (type ? to the window prompt for more help).
+.le
+.ls x
+Set the quantity plotted along the abscissa (x axis).
+.le
+.ls y
+Set the quantity plotted along the ordinate (y axis).
+.le
+.ls I
+Interrupt the task immediately. No information is saved in the database.
+.le
+
+.ls :function [value]
+Print or set the function type (chebyshev|legendre).
+.le
+.ls :show
+Print current function and orders.
+.le
+.ls :niterate [value], :lowreject [value], :highreject [value]
+Print or set the iterative rejection parameters.
+.le
+.ls :xorder [value]
+Print or set the order for the dispersion dependence.
+.le
+.ls :yorder [value]
+Print or set the order for the echelle order dependence.
+.le
+.ih
+DESCRIPTION
+Emission and absorption features in echelle format spectra (see \fIapsum\fR)
+are identified interactively and from a line list and a dispersion
+function is determined. The results of the line identifications and
+dispersion function are stored in a database for further reference and
+for use with the tasks \fBecreidentify\fR and \fBecdispcor\fR. Also
+the reference spectrum keyword REFSPEC is added to the image header.
+This is used by \fBrefspectra\fR and \fBecdispcor\fR.
+
+Each spectrum in the input list is identified in turn. Initially the
+order in the first image line is graphed. The user may change the
+displayed order with the 'j', 'k', and 'o' keys. The initial feature
+list and dispersion function are read from the database if an entry
+exists. The features are marked on the graph. The image coordinates
+are in pixels unless a dispersion function is defined, in which case
+they are in user coordinate units (usually wavelength in Angstroms).
+The aperture number, pixel coordinate, coordinate function value, and
+user coordinate for the current feature are displayed on the status
+line.
+
+For consistency the orders are always identified by their aperture
+numbers in this task and all other tasks. These are the
+identifications assigned when extracting the orders using the task
+\fIapsum\fR. If the user has assigned true order numbers as the
+aperture numbers then there is no distinction between aperture and
+order number. However, it is often the case that the aperture numbers
+are simply assigned sequentially and the true order numbers may not
+even be known. Initially the orders are the same as the aperture
+numbers but after fitting a dispersion function the true order numbers
+will be determined. This information is also recorded in the database
+and indicated in the graph titles but selecting an order to be graphed
+with 'o' and the status line information is always in terms of the
+aperture number.
+
+The graphics cursor is used to select features and perform various
+functions. A menu of the keystroke options and functions is printed
+with the key '?'. The cursor keys and their functions are defined in
+the CURSOR KEYS sections and described further below. The standard
+cursor mode keys are also available to window and redraw the graph and
+to produce hardcopy "snaps".
+
+There are two types of feature selection functions: defining new
+features and selecting previously defined features. The key 'm' marks
+a new feature nearest the cursor position. The feature position is
+determined by the feature centering algorithm (see help for
+\fBcenter1d\fR). The type of feature, emission or absorption, is set
+by the \fIftype\fR parameter. If the new position is within a distance
+given by the parameter \fIminsep\fR of a previous feature it is
+considered to be the same feature and replaces the old feature
+(normally the position of the new feature will be exactly the same as
+the original feature). The coordinate list is searched for a match
+between the coordinate function value (when defined) and a user
+coordinate in the list. If a match is found it becomes the default
+user coordinate which the user may override. The new feature is marked
+on the graph and it becomes the current feature. The redefinition of a
+feature which is within the minimum separation may be used to set the
+user coordinate from the coordinate list. The key 't' allows setting
+the position of a feature to other than that found by the centering
+algorithm.
+
+The 'y' key applies a peak finding algorithm and up to the maximum
+number of features (\fImaxfeatures\fR) are found. If there are more
+peaks only the strongest are kept. The peaks are then matched against
+the coordinate list to find user coordinate values.
+
+To select a different feature as the current feature the keys '.', 'n',
+'+', and '-' are used. The '.' selects the feature nearest the cursor,
+the 'n' and '+' select the next feature, and the '-' selects the
+previous feature relative to the current feature in the feature list as
+ordered by pixel coordinate. These keys are useful when redefining the
+user coordinate with the 'u' key and when examining features in zoom
+mode. To change apertures (orders) the 'j', 'k', and 'o' keys are
+used.
+
+If four or more features are identified spanning the range of the data
+(in pixel coordinates and in order number) or if a coordinate function
+is defined then the 'l' key may be used to identify additional features
+from a coordinate list. If a coordinate function is not defined the
+default function is fit to the user coordinates of the currently
+defined features. Then for each coordinate value in the coordinate
+list the pixel coordinate is determined and a search for a feature at
+that point is made. If a feature is found (based on the parameters
+\fIftype, fwidth\fR, \fIcradius\fR, and \fIthreshold\fR) its user
+coordinate value based on the coordinate function is determined. If
+the coordinate function value matches the user coordinate from the
+coordinate list within the error limit set by the parameter \fImatch\fR
+then the new feature is entered in the feature list. Up to a maximum
+number of features, set by the parameter \fImaxfeatures\fR, may be
+defined in this way. A new user coordinate function is fit to all the
+located features. Finally, the graph is redrawn in user coordinates
+with the additional features found from the coordinate list marked.
+
+The 'f' key fits a two dimensional function of the pixel coordinates
+and aperture number to the user coordinates. The type of function and
+the orders are initially set with the parameters \fIfunction\fR,
+\fIxorder\fR, and \fIyorder\fR. The value of the function for a
+particular pixel coordinate is called the function coordinate and each
+feature in the feature list has a function coordinate value. The
+fitted function also is used to convert pixel coordinates to user
+coordinates in the graph. Depending on the orders of the function
+four or more features are required covering at least two orders.
+A description of the dispersion function fitting is given in the section
+ECHELLE DISPERSION FUNCTION FITTING.
+
+If a zero point shift is desired without changing the coordinate function
+the user may specify the coordinate of a point in the spectrum with
+the 's' key from which a shift is determined. The 'g' key also
+determines a shift by minimizing the difference between the user
+coordinates and the fitted coordinates. This is used when a previously
+determined coordinate function is applied to a new spectrum having
+fewer or poorer lines and only a zero point shift can reasonably be
+determined. Note that the zero point shift is in user coordinates
+for the fundamental order. The shift for any particular order is then
+the zero point shift divided by the order number.
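+
+As a purely illustrative example, a fitted zero point shift of 0.90
+Angstroms referred to the fundamental order corresponds to
+
+.nf
+    shift in order m = 0.90 / m
+    order 45:  0.90 / 45 = 0.02 Angstroms
+.fi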
+
+Features may be deleted with the key 'd'. All features are deleted when
+the 'a' key immediately precedes the delete key. Deleting the features
+does not delete the coordinate function. To delete both the features
+and the dispersion function the initialize key 'i' is used. Note
+features deleted during dispersion function fitting also are removed
+from the feature list upon exiting the fitting package.
+
+It is common to transfer the feature identifications and coordinate
+function from one image to another. When a new image without a
+database entry is examined, such as when going to the next image in the
+input list or selecting a new image with the ":image" command, the
+current feature list and coordinate function are kept. Alternatively,
+a database record from a different image may be read with the ":read"
+command. When transferring feature identifications between images the
+feature coordinates will not agree exactly with the new image feature
+positions and several options are available to reregister the feature
+positions. The key 'c' centers the feature nearest the cursor using
+the current position as the starting point. When preceded with the 'a'
+key all the features are recentered (the user must refit the coordinate
+function if desired). As an aside, the recentering function is also
+useful when the parameters governing the feature centering algorithm
+are changed.
+
+The (c)entering function is applicable when the shift between the
+current and true feature positions is small. Larger shifts may be
+determined automatically with the 'x' function which correlates
+features in the image with the feature list. The features are then
+recentered. A zero point shift may also be given interactively with
+the 's' key by using the cursor to indicate the coordinate of a point
+in the spectrum. If there are no features then the shift is exactly as
+marked by the cursor but if there are features the approximate shift is
+applied and then the features are recentered. The shift is then the
+mean shift of the features after recentering. The shift is used as a
+zero point offset added to the dispersion function. The shift is
+computed in user coordinates for the fundamental order. The shift for
+each order is obtained by dividing this shift by the order number.
+
+In addition to the single keystroke commands there are commands
+initiated by the key ':' (colon commands). As with the keystroke
+commands there are a number of standard graphics features available
+beginning with ":." (type ":.help" for these commands). The colon
+commands allow the task parameter values to be listed and to be reset
+within the task. A parameter is listed by typing its name. The colon
+command ":show" lists all the parameters. A parameter value is reset
+by typing the parameter name followed by the new value; for example
+":match 10". Other colon commands display the feature list
+(:features), control reading and writing records to the database (:read
+and :write), and set the graph display format.
+
+The feature identification process for an image is completed by typing
+'q' to quit. Attempting to quit an image without explicitly recording
+changes in the feature database produces a warning message and an
+opportunity to record the information in the database. As an immediate
+exit the 'I' interrupt key may be used. This does not save the feature
+information.
+.ih
+ECHELLE DISPERSION FUNCTION FITTING
+If a minimum of four features over at least two orders, depending on
+the default function orders, have been identified a dispersion function
+relating the user coordinates to the extracted pixel coordinate and
+aperture number may be fit. However, more features are preferable to
+determine changes in the dispersion as a function of position and
+order.
+
+The form of the function fit explicitly includes the basic order number
+dependence of echelle spectra; namely the wavelength of a particular
+point along the dispersion direction in different orders varies as the
+reciprocal of the order number. Because of distortions, the differing
+extraction paths through the two dimensional image, and rotations of
+the spectra relative to the axis of constant dispersion (i.e. aligning
+the orders with the image columns or lines instead of aligning the
+emission and absorption features) there will be residual dependencies on
+the extracted pixel positions and orders. These residual dependencies
+are fit by a two dimensional polynomial of arbitrary order including
+cross terms. Because the basic order number dependence has been
+removed the orders should be relatively low. Currently the functions
+are two dimensional chebyshev and legendre polynomials though other
+functions may be added in the future.
+
+Since the true order number may not be known initially a linear
+relation between the aperture numbers and the order numbers is also
+determined which minimizes the residuals. This relation allows an
+unknown offset and possibly a reversed direction of increasing order.
+The fitted function is then represented as:
+
+.nf
+ y = offset +/- aperture
+
+ wavelength = f (x, y) / y
+.fi
+
+where y is the order number and x is the extracted pixel coordinate along the
+dispersion.
+
+If the order offset is known initially or as a result of a previous fit,
+the 'o' key may be used. The dispersion minimization for the order offset is
+then not done. This will, therefore, be faster than using the full
+fit, key 'f', to also determine the order offset.
+
+The fitting is done interactively as a submode of \fBecidentify\fR with its
+own set of cursor commands. It is entered using the 'f' key and exited using
+the 'q' key. The list of commands is given in the CURSOR KEY section and is
+available from the fitting mode with '?'. The functionality of this fitting
+is fairly simple; the function and orders may be changed, points may be deleted
+and undeleted, and the results of the fit may be displayed in various formats
+by selecting quantities to be plotted along either axis. Generally one
+plots the pixel coordinate, order number, or wavelength along the x axis
+and residuals or radial velocity errors along the y axis.
+One switches between increasing the x order and the y order while switching
+between plotting versus x position and order number until the residuals
+have been reduced to remove all systematic trends.
+.ih
+DATABASE RECORDS
+The database specified by the parameter \fIdatabase\fR is a directory of
+simple text files. The text files have names beginning with 'ec' followed
+by the entry name, usually the name of the image. The database text files
+consist of a number of records. A record begins with a line starting with the
+keyword "begin". The rest of the line is the record identifier. Records
+read and written by \fBecidentify\fR have "ecidentify" as the first word of the
+identifier. Following this is a name which may be specified following the
+":read" or ":write" commands. If no name is specified then the image name
+is used. The lines following the record identifier contain
+the feature information and dispersion function coefficients.
+.ih
+ECHELLE DISPERSION FUNCTIONS
+The fitted echelle dispersion functions are evaluated as described in
+this section. The basic equations are
+
+.nf
+ (1) w = (f(x,o) + shift) / o
+ (2) o = ap * slope + offset
+.fi
+
+where w is the wavelength, x is the pixel coordinate along the order, o is
+the order, and ap is the aperture number. The database parameter "shift"
+provides a wavelength zero point shift and the parameters "slope" and
+"offset" provide the transformation between aperture number and order.
+Note that the function f(x,o) and the shift are in terms of first order
+wavelengths.
+
+The database entries contain "parameter value" pairs. This includes the
+parameters "shift", "offset", and "slope" defined above. The default
+values for these if they are absent are 0, 0, and 1 respectively. The
+"coefficients" parameter specifies the number of coefficients that follow
+and define the first order wavelength dispersion function. The
+coefficients and functions are described below.
+
+The numerical values following the "coefficients" parameter, shown in
+the order in which they appear, have the following meaning.
+
+.nf
+ type Function type: 1=chebyshev, 2=legendre
+ xpow Highest power of x
+ opow Highest power of o
+ xterms Type of cross terms: Always 1 for echelle functions
+ xmin Minimum x for normalization
+ xmax Maximum x for normalization
+ omin Minimum o for normalization
+ omax Maximum o for normalization
+ Cmn Coefficients: m=0-xpow, n=0-opow, m varies first
+.fi
+
+The functions are evaluated by a sum over m and n up to the specified
+highest powers.
+
+.nf
+ (3) f(x,o) = sum {Cmn * Pm * Pn} m=0-xpow, n=0-opow
+.fi
+
+The Cmn are the coefficients of the polynomial terms Pm and Pn which
+are defined as follows.
+
+.nf
+ Chebyshev:
+ xnorm = (2 * x - (xmax + xmin)) / (xmax - xmin)
+ P0 = 1.0
+ P1 = xnorm
+ Pm+1 = 2.0 * xnorm * Pm - Pm-1
+
+ onorm = (2 * o - (omax + omin)) / (omax - omin)
+ P0 = 1.0
+ P1 = onorm
+ Pn+1 = 2.0 * onorm * Pn - Pn-1
+
+ Legendre:
+ xnorm = (2 * x - (xmax + xmin)) / (xmax - xmin)
+ P0 = 1.0
+ P1 = xnorm
+ Pm+1 = ((2m + 1) * xnorm * Pm - m * Pm-1)/ (m + 1)
+
+ onorm = (2 * o - (omax + omin)) / (omax - omin)
+ P0 = 1.0
+ P1 = onorm
+ Pn+1 = ((2n + 1) * onorm * Pn - n * Pn-1)/ (n + 1)
+.fi
+
+Note that the polynomial terms are obtained by first normalizing the x and
+o values to the range -1 to 1 and then iteratively evaluating them.
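+
+To make the evaluation concrete, the following Python sketch applies
+equations (1) through (3) to a coefficient list stored in the order given
+above. This is only an illustration of the formulas, not code from the
+package, and the argument names are placeholders.
+
+.nf
+def poly_terms(t, tmin, tmax, npow, legendre):
+    # Normalize to [-1,1] and evaluate P0..Pnpow by the recurrences above.
+    tn = (2.0 * t - (tmax + tmin)) / (tmax - tmin)
+    p = [1.0, tn]
+    for m in range(1, npow):
+        if legendre:
+            p.append(((2*m + 1) * tn * p[m] - m * p[m-1]) / (m + 1))
+        else:                                 # Chebyshev
+            p.append(2.0 * tn * p[m] - p[m-1])
+    return p[:npow + 1]
+
+def wavelength(x, ap, coeffs, shift=0.0, offset=0.0, slope=1.0):
+    ftype, xpow, opow = int(coeffs[0]), int(coeffs[1]), int(coeffs[2])
+    xmin, xmax, omin, omax = coeffs[4:8]      # coeffs[3] is xterms (always 1)
+    c = coeffs[8:]                            # Cmn, m varies first
+    o = ap * slope + offset                   # equation (2)
+    px = poly_terms(x, xmin, xmax, xpow, ftype == 2)
+    po = poly_terms(o, omin, omax, opow, ftype == 2)
+    f = sum(c[n * (xpow + 1) + m] * px[m] * po[n]       # equation (3)
+            for n in range(opow + 1) for m in range(xpow + 1))
+    return (f + shift) / o                    # equation (1)
+.fi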
+.ih
+EXAMPLES
+Because this task is interactive it is difficult to provide an actual
+example. The following describes a typical usage on arc spectra.
+
+ cl> ecidentify arc1.ec,arc2.ec
+
+.ls (1)
+The database is searched for an entry for arc1.ec. None is found and
+the first order is plotted as a function of pixel coordinate.
+.le
+.ls (2)
+Using a line identification chart or vast experience one of the
+emission lines is identified and marked with the 'm' key. Using the
+cursor position a center is found by the centering algorithm. The
+aperture number, pixel position, wavelength (which is currently the
+same as the pixel position), and a prompt for the true value with the
+default value INDEF are printed. The true wavelength is typed in and the
+status line is redrawn with the information for the feature.
+.le
+.ls (3)
+The orders are changed with the 'j', 'k', or 'o' key and further lines are
+identified with the 'm' key.
+.le
+.ls (4)
+After a number of lines have been marked spanning the full range of the orders
+and pixel coordinates the key 'l' is typed. The program now fits a preliminary
+dispersion solution using the current function and function orders. Using this
+function it examines each line in the line list and checks to see if there is
+an emission line at that point. With many orders and lots of lines this may
+take some time. After additional lines have been identified (up to
+\fImaxfeatures\fR lines) the function is refit. Finally the current order
+is regraphed in user coordinates.
+.le
+.ls (5)
+Again we look at some orders and see if the automatic line identifications
+make sense.
+.le
+.ls (6)
+We next enter the dispersion function fitting mode with 'f'. A plot of the
+residuals vs. pixel position is drawn. Some obvious misidentifications may
+be deleted with the 'd' key. One way to proceed with determining the
+function orders is to start at the lowest orders (xorder = 2 for linear
+and yorder = 1 for no order dependence beyond the basic dependence). We then
+increase each order one at a time. The x axis is changed between order
+number and pixel position using the 'x' key to see the dependence on each
+dimension. The orders are increased until there are no systematic trends
+apparent. Normally the y order (for the aperture or order number dependence)
+is low such as 2 to 4 while the x order (for the dispersion direction) is
+whatever is needed to account for distortions. Also one can prune deviant
+points with the 'd' key. Note that the order offset derived from the
+aperture number is given in the title block along with the RMS. When done
+we exit with 'q'.
+.le
+.ls (7)
+The new function fit is then evaluated for all orders and the current order
+is redrawn based on the new dispersion. Note also that the status line
+information for the current feature has both the fitted wavelength and the
+user identified wavelength. We can add or delete lines and iterate with the
+fitting until we are happy with the feature list and dispersion function.
+.le
+.ls (8)
+Typing 'q' exits the graph and prints a query about saving the information
+in the database. We answer yes to this query. Note that information can
+also be saved while still in the graphics loop using ":write".
+.le
+.ls (9)
+The next image in the list is then graphed but the last dispersion solution
+and feature list is maintained. If the shift is small for the new arc we
+type 'a' 'c' to recenter all the features. This does not refit the dispersion
+automatically so we then do 'f'. Alternatively, we could use the 's' or 'x'
+keys to determine a large shift and do the recentering.
+.le
+.ls (10)
+Finally we can exit with 'q' or examine further images with the ":image"
+command.
+.le
+.ih
+REVISIONS
+.ls ECIDENTIFY V2.11
+The dispersion units are now determined from a user parameter,
+the coordinate list, or the database entry.
+.le
+.ih
+SEE ALSO
+apsum, center1d, gtools, ecreidentify, identify
+.endhelp
diff --git a/noao/imred/echelle/doc/ecreidentify.hlp b/noao/imred/echelle/doc/ecreidentify.hlp
new file mode 100644
index 00000000..53ab0f8c
--- /dev/null
+++ b/noao/imred/echelle/doc/ecreidentify.hlp
@@ -0,0 +1,117 @@
+.help ecreidentify Jun88 noao.imred.echelle
+.ih
+NAME
+ecreidentify -- Reidentify features in echelle spectra
+.ih
+USAGE
+ecreidentify images reference
+.ih
+PARAMETERS
+.ls images
+Echelle images in which the features in the reference image are to be
+reidentified and a new dispersion function fit.
+.le
+.ls reference
+Echelle image with previously identified features and dispersion
+function.
+.le
+.ls shift = 0.
+Shift in user coordinates to be added to the reference features before
+centering. If INDEF then a shift is determined by correlating the
+reference features to features automatically identified in the image to
+be reidentified.
+.le
+.ls cradius = 5.
+Centering radius in pixels. If a reidentified feature falls further
+than this distance from the reference position (after shifting) it is
+not reidentified.
+.le
+.ls threshold = 10.
+In order for a feature center to be determined the range of pixel
+intensities around the feature must exceed this threshold.
+.le
+.ls refit = yes
+Refit the dispersion function? If yes and there are more than 4
+features in more than one order and a dispersion function was defined
+in the reference image then a new dispersion function of the same type
+and order offset
+as in the reference image is fit using the new pixel positions.
+Otherwise only a zero point shift is determined from the revised fitted
+coordinates without changing the form of the dispersion function.
+.le
+.ls database = "database"
+Database containing the feature data for the reference image and in
+which the features for the reidentified images are recorded.
+.le
+.ls logfiles = "STDOUT,logfile"
+List of files in which to keep a processing log. If a null file, "", is
+given then no log is kept. If the log file is "STDOUT" then the log is
+written to the terminal.
+.le
+.ih
+DESCRIPTION
+Emission or absorption features in a reference echelle spectrum are
+reidentified in other echelle spectra. The features for the reference
+image and those determined for reidentified images are recorded in the
+specified database.
+
+The first step in transferring identifications from the reference
+spectrum to another spectrum is to add a shift (in wavelength) to each
+feature in the reference image. The shift is specified by the
+parameter \fIshift\fR. This shift is for the fundamental order (order
+number 1) which is then applied to each order by dividing by the order
+number. If the shift is specified as INDEF then a shift is determined
+by finding the peaks in the input spectrum and correlating these peaks
+against the features in the reference spectrum. This is the 'x'
+algorithm described in \fBecidentify\fR.
+
+After the shift has been added to move the reference features to near
+the input spectrum features these positions are adjusted by centering
+on the features using the \fBcenter1d\fR algorithm. The parameters
+\fIcradius\fR and \fIthreshold\fR are used in this operation. If the
+centering fails to find the feature within the centering radius
+(\fIcradius\fR) that feature is eliminated from the feature list.
+
+If the parameter \fIrefit\fR has the value "no" then the average shift
+in the feature positions is recorded as a zero point wavelength offset
+for the fundamental order without changing the shape of the dispersion
+function. If the parameter has the value "yes" then the new feature
+positions are used to refit the dispersion function (of the same function
+type and orders). The order offset is also maintained.
+
+Log information is written to the specified log files. To log this to
+the terminal, called the standard output, use STDOUT. The log
+information includes the reference spectrum, the spectrum being reidentified,
+the number of initial features and the number actually reidentified,
+the average shift in pixels, the average shift in wavelength (in terms
+of the fundamental order), the average fractional shift in wavelength
+(which can be scaled to a radial velocity), and the RMS of the feature
+wavelengths given by the dispersion function relative to the user
+specified true wavelengths.
+.ih
+EXAMPLES
+The features in the spectrum f033.ec were identified previously
+with the task \fBecidentify\fR. The feature positions in f043.ec are
+reidentified with and without refitting the dispersion function as
+follows:
+
+.nf
+ec> ecreidentify f043.ec f033.ec
+
+ECREIDENTIFY: NOAO/IRAF V2.7 seaman@puppis Mon 09:03:51 27-Jun-88
+ Reference image = f033.ec, Refit = yes
+ Image Found Pix Shift User Shift Z Shift RMS
+ f043.ec 561/561 0.11 -1.07 -1.9E-6 0.0117
+
+
+ec> ecreidentify f043.ec f033.ec refit=no
+
+ECREIDENTIFY: NOAO/IRAF V2.7 seaman@puppis Mon 09:15:21 27-Jun-88
+ Reference image = f033.ec, Refit = no
+ Image Found Pix Shift User Shift Z Shift RMS
+ f043.ec 561/561 0.11 -1.07 -1.9E-6 0.0131
+.fi
+.ih
+SEE ALSO
+center1d, ecidentify
+.endhelp
diff --git a/noao/imred/echelle/echelle.cl b/noao/imred/echelle/echelle.cl
new file mode 100644
index 00000000..c911c090
--- /dev/null
+++ b/noao/imred/echelle/echelle.cl
@@ -0,0 +1,82 @@
+#{ ECHELLE -- Echelle Spectral Reduction Package
+
+# Load necessary packages
+proto # bscale
+
+# Increase header space for echelle format keywords
+s1 = envget ("min_lenuserarea")
+if (s1 == "")
+ reset min_lenuserarea = 100000
+else if (int (s1) < 100000)
+ reset min_lenuserarea = 100000
+
+package echelle
+
+# Ecslitproc and dofoe
+cl < doecslit$slittasks.cl
+cl < dofoe$dofoetasks.cl
+
+# Demos
+set demos = "echelle$demos/"
+task demos = "demos$demos.cl"
+
+# Onedspec tasks
+task continuum,
+ deredden,
+ dispcor,
+ dopcor,
+ ecidentify,
+ ecreidentify,
+ refspectra,
+ sapertures,
+ sarith,
+ sflip,
+ slist,
+ specplot,
+ specshift,
+ splot = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Different default parameters
+task calibrate,
+ sensfunc,
+ standard = "echelle$x_onedspec.e"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ apfit,
+ apflatten,
+ apmask,
+ apnormalize,
+ aprecenter,
+ apresize,
+ apscatter,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apfit1 = "apextract$apfit1.par"
+task apflat1 = "apextract$apflat1.par"
+task apnorm1 = "apextract$apnorm1.par"
+task apdefault = "apextract$apdefault.par"
+task apscat1 = "apextract$apscat1.par"
+task apscat2 = "apextract$apscat2.par"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apfit1, apflat1, apnorm1
+hidetask apscat1, apscat2, dispcor1
+
+# Set echelle extraction output
+apall.format = "echelle"
+apsum.format = "echelle"
+
+clbye
diff --git a/noao/imred/echelle/echelle.hd b/noao/imred/echelle/echelle.hd
new file mode 100644
index 00000000..b2f73fba
--- /dev/null
+++ b/noao/imred/echelle/echelle.hd
@@ -0,0 +1,11 @@
+# Help directory for the ECHELLE package.
+
+$doc = "./doc/"
+
+ecidentify hlp=doc$ecidentify.hlp
+ecreidentify hlp=doc$ecreidentify.hlp
+
+doecslit hlp=doc$doecslit.hlp
+dofoe hlp=doc$dofoe.hlp
+
+revisions sys=Revisions
diff --git a/noao/imred/echelle/echelle.men b/noao/imred/echelle/echelle.men
new file mode 100644
index 00000000..c6e94618
--- /dev/null
+++ b/noao/imred/echelle/echelle.men
@@ -0,0 +1,40 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+ apmask - Create an IRAF pixel list mask of the apertures
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ ecidentify - Identify features in spectrum for dispersion solution
+ ecreidentify - Automatically identify features in spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+ standard - Identify standard stars to be used in sensitivity calc
+
+ doecslit - Process Echelle slit spectra
+ dofoe - Process Fiber Optic Echelle (FOE) spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/echelle/echelle.par b/noao/imred/echelle/echelle.par
new file mode 100644
index 00000000..679bcb86
--- /dev/null
+++ b/noao/imred/echelle/echelle.par
@@ -0,0 +1,15 @@
+# ECHELLE SPECTRAL REDUCTION PACKAGE
+extinction,s,h,onedstds$kpnoextinct.dat,,,Extinction file
+caldir,s,h,,,,Standard star calibration directory
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Text log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,"",,,Record number extensions
+version,s,h,"ECHELLE V3: July 1991"
diff --git a/noao/imred/echelle/sensfunc.par b/noao/imred/echelle/sensfunc.par
new file mode 100644
index 00000000..cd80efbe
--- /dev/null
+++ b/noao/imred/echelle/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,no,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,)_.extinction,,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/echelle/standard.par b/noao/imred/echelle/standard.par
new file mode 100644
index 00000000..99b98877
--- /dev/null
+++ b/noao/imred/echelle/standard.par
@@ -0,0 +1,21 @@
+input,f,a,,,,Input image file root name
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,no,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/generic/Revisions b/noao/imred/generic/Revisions
new file mode 100644
index 00000000..7ae1bfa5
--- /dev/null
+++ b/noao/imred/generic/Revisions
@@ -0,0 +1,220 @@
+.help revisions Jun88 noao.imred.generic
+.nf
+
+generic$generic.cl
+generic$generic.men
+generic$cosmicrays.par -
+ Removed the cosmicrays task which is now in crutil. (6/12/02, Valdes)
+
+=====
+V2.12
+=====
+
+generic$mkpkg
+ Added missing <error.h> dependency for flat1d.x (12/13/01, MJF)
+
+generic$normalize.cl
+generic$normalize.par -
+ Rewrote as modern cl procedure script with possibility to extend to
+ MEF extensions. (5/13/99, Valdes)
+
+generic$flatten.cl
+generic$normalize.cl
+ Changed use of FILES for image list expansion to SECTIONS.
+ (5/13/99, Valdes)
+
+=======
+V2.11.1
+=======
+
+generic$normalize.cl
+generic$normflat.cl
+ Added CCDMEAN = 1 to final image header. (4/16/93, Valdes)
+
+generic$flat1d.x
+generic$doc/flat1d.hlp
+ CCDMEAN is set to 1. (3/15/93, Valdes)
+
+=======
+V2.10.2
+=======
+=======
+V2.10.1
+=======
+=======
+V2.10.0
+=======
+=====
+V2.10
+=====
+generic$flatten.par
+ Changed the default values of the keeplog and logfile parameters from
+ ")_.keeplog" and ")_.logfile" to ")generic.keeplog" and ")generic.logfile"
+ respectively. This change avoids a parameter redirection in the irred
+ package. (8/26/91 LED)
+
+generic$darksub.cl
+ Included all parameters in IMARITH calls due to complaint about title
+ being changed. (12/9/91)
+
+====
+V2.9
+====
+
+generic$normalize.cl
+generic$normflat.cl
+generic$normalize.par
+generic$normflat.par
+ Modified use of IMSTATISTICS for additional fields parameter. If the
+ user redefined the defaults for IMSTAT then NORMALIZE and NORMFLAT
+ will fail. (11/30/89, Valdes)
+
+generic$flatten.par
+generic$normalize.par
+generic$normflat.par
+ Change parameter reference from )generic. to )_. to allow defining
+ tasks in other packages.
+
+generic$darksub.cl
+ The last change caused the nscan to have the wrong value causing an
+ improper error (exposure time for ... not found). The value of nscan()
+ is now saved before scanning the output list which must be done before
+ any error breaks to avoid out of sync lists. (7/1/88 Valdes)
+
+generic$darksub.cl
+ The fscan on the output list must immediately follow the fscan on the
+ input list to prevent the lists getting out of sync. (2/4/88 Valdes)
+
+generic$cosmicrays.par +
+generic$generic.cl
+generic$generic.men
+ Added link to COSMICRAY program in CCDRED. (12/11/87 Valdes)
+
+====
+V2.5
+====
+
+====
+V2.3
+====
+
+generic$flat1d.x: Valdes, July 3, 1986
+ 1. FLAT1D modified to use new ICFIT package.
+
+=========================================
+STScI Pre-release and 1st SUN 2.3 Release
+=========================================
+
+generic$darksub.cl: Valdes, April 8, 1986
+ 1. DARKSUB rewritten to use exposure times from the image headers and
+ to allow input and output images.
+
+===========
+Release 2.2
+===========
+
+From Valdes Feb 12, 1986:
+
+1. The GENERIC package script was inadvertently defining CMDSTR which
+was moved to the SYSTEM package. This caused MKSCRIPT to fail if
+and only if GENERIC was loaded. This has now been fixed.
+------
+From Valdes Feb 10, 1986:
+
+1. The scripts DARKSUB, NORMFLAT, and NORMALIZE have been changed so
+that calls to IMCOPY have VERBOSE=NO when dealing with temporary images.
+Previously, output from imcopy was being printed but, of course, the
+temporary image filenames were meaningless to the user.
+------
+From Valdes Nov 7, 1985:
+
+1. The generally useful task MKSCRIPT has been moved to the system
+package.
+------
+From Valdes Nov 1, 1985:
+
+1. A general image reduction tool for creating command scripts has
+been added. The task is called MKSCRIPT. See the help page for
+this task. An improved version of this task may eventually replace
+MKSCRIPT as a system task. (As a technical detail a hidden task CMDSTR
+used by MKSCRIPT has also been added)
+------
+From Valdes Oct 17, 1985:
+
+1. Flat1d and background now allow averaging of image lines or columns
+when interactively setting the fitting parameters. The syntax is
+"Fit line = 10 30"; i.e. blank separated line or column numbers. A
+single number selects just one line or column. Be aware however, that
+the actual fitting of the image is still done on each column or line
+individually.
+
+2. The zero line in the interactive curve fitting graphs has been removed.
+This zero line interfered with fitting data near zero.
+------
+From Valdes Oct 4, 1985:
+
+1. Flat1d and background modified to allow lower and upper rejection
+limits and rejection iteration. This means the parameter file has changed.
+------
+From Valdes Oct 1, 1985:
+
+1. Task revisions renamed to revs.
+-----
+From Valdes on August 26, 1985:
+
+1. Flat1d was modified to eliminate fitting of lines or columns in which
+all the data is below minflat. Also if all the data is greater than minflat
+then the ratio is computed without checking each data point which is more
+efficient (particularly with the vector operators). Thus, flat1d should
+be somewhat faster; particularly for applications like multi-slits where
+many parts of the data are less than minflat.
+
+------
+From Valdes on August 7, 1985:
+
+1. Flat1d and background have new parameters to select the graphics
+output device and the graphics cursor input.
+
+2. Flat1d and background (fit1d) have been recompiled to use the "improved"
+icfit and gtools packages.
+
+------
+From Valdes on July 26, 1985:
+
+1. Help page available for flat1d.
+
+2. Background has been modified to use new fit1d task. It now does
+column backgrounds without transposes and allows image sections.
+
+------
+From Valdes on July 25, 1985:
+
+1. A new task called flat1d replaces lineflat and colflat. It is
+essentially the same as lineflat except for an extra parameter "axis"
+which selects the axis along which the 1D functions are to be fit.
+Axis 1 is lines and axis 2 is columns. The advantages of this change are:
+
+ a. Column fitting is done without transposing the image.
+ b. The colflat script using image transpositions would not
+ work the same as lineflat when used with sections. Now
+ it is possible to mosaic several flat fields as needed with
+ multiple slits or apertures.
+ c. Because no transpose is needed and it is not a script
+ flat1d should work faster than colflat.
+ d. The prompts for interactive fitting are now correct when
+ doing column fits.
+
+------
+From Valdes on July 23, 1985:
+
+1. The task revisions has been added to page revisions to the generic
+package. The intent is that each package will have a revisions task.
+Note that this means there may be multiple tasks named revisions loaded
+at one time. Typing revisions alone will give the revisions for the
+current package. To get the system revisions type system.revisions.
+
+2. The tasks linebckgrnd and colbckgrnd have been combined into one
+task with the extra hidden parameter "axis". With axis=1 the task
+is the same as linebckgrnd and with axis=2 the task is the same as
+colbckgrnd.
+.endhelp
diff --git a/noao/imred/generic/background.cl b/noao/imred/generic/background.cl
new file mode 100644
index 00000000..005703ed
--- /dev/null
+++ b/noao/imred/generic/background.cl
@@ -0,0 +1,6 @@
+#{ BACKGROUND -- Subtract a line or column background.
+
+fit1d (input, output, "difference", axis=axis, interactive=interactive,
+ sample=sample, naverage=naverage, function=function, order=order,
+ low_reject=low_reject, high_reject=high_reject, niterate=niterate,
+ grow=grow, graphics=graphics, cursor=cursor.p_filename)
diff --git a/noao/imred/generic/background.par b/noao/imred/generic/background.par
new file mode 100644
index 00000000..aa5c703a
--- /dev/null
+++ b/noao/imred/generic/background.par
@@ -0,0 +1,16 @@
+# BACKGROUND -- Subtract line or column background
+
+input,s,a,,,,Input images to be background subtracted
+output,s,a,,,,Output background subtracted images
+axis,i,h,1,1,2,Axis along which background is fit and subtracted
+interactive,b,h,yes,,,Set fitting parameters interactively?
+sample,s,h,"*",,,Sample of points to use in fit
+naverage,i,h,1,,,Number of points in sample averaging
+function,s,h,"chebyshev","spline3|legendre|chebyshev|spline1",,Fitting function
+order,i,h,1,1,,Order of fitting function
+low_reject,r,h,0.,0.,,Low rejection in sigma of fit
+high_reject,r,h,0.,0.,,High rejection in sigma of fit
+niterate,i,h,1,0,,Number of rejection iterations
+grow,r,h,0.,0.,,Rejection growing radius
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
diff --git a/noao/imred/generic/darksub.cl b/noao/imred/generic/darksub.cl
new file mode 100644
index 00000000..c4197c50
--- /dev/null
+++ b/noao/imred/generic/darksub.cl
@@ -0,0 +1,99 @@
+# DARKSUB -- Scale and subtract a dark count image.
+
+procedure darksub (input, output, darkimage)
+
+string input {prompt="Input images to be dark count subtracted"}
+string output {prompt="Output dark count subtracted images"}
+file darkimage {prompt="Dark count image"}
+
+string exposure {prompt="Header parameter for exposure times"}
+string pixtype="1" {prompt="Pixel type of final images"}
+bool verbose=yes {prompt="Verbose output?"}
+struct *list1
+struct *list2
+
+begin
+ file darkim
+ file dark
+ file file1
+ file file2
+ string in
+ string out
+ real exp
+ real expd
+ real expi
+ int stat
+
+ # Make temporary filenames.
+ dark = mktemp ("tmp")
+ file1 = mktemp ("tmp")
+ file2 = mktemp ("tmp")
+
+ # Determine exposure time of dark image. Quit if no exposure time.
+ darkim = darkimage
+ hselect (darkim, exposure, yes, > file1)
+ list1 = file1
+ stat = fscan (list1, expd)
+ list1 = ""
+ delete (file1, verify=no)
+
+ if (stat == EOF || nscan() < 1)
+ error (1, "Exposure time for " // darkim // " not found.")
+ if (expd == 0.)
+ error (2, "Exposure time for " // darkim // " is zero.")
+ exp = expd
+
+ # Make a temporary image for the scaled dark.
+ imcopy (darkim, dark, verbose=no)
+
+ # Expand the list of input and output images in temporary files.
+ hselect (input, "$I,"//exposure, yes, > file1)
+ sections (output, option="root", > file2)
+
+ # Loop through the input and output images.
+ list1 = file1
+ list2 = file2
+ while (fscan (list1, in, expi) != EOF) {
+
+ stat = nscan()
+
+ # Check that the output list has not been exhausted.
+ if (fscan (list2, out) == EOF) {
+ print (" Output list exhausted before input list.")
+ break
+ }
+
+ # Check that there is an exposure time for the input image.
+ if (stat < 2) {
+ print (" Exposure time for ", in, " not found.")
+ next
+ }
+ if (expi == 0.) {
+ print (" Exposure time for ", in, " is zero.")
+ next
+ }
+
+ # Print log output.
+ if (verbose) {
+ time ()
+ print (" ", out, " = ", in, " - ", expi/expd, "* ", darkim)
+ }
+
+ # Scale the dark image if necessary.
+ if (expi != exp) {
+ imarith (dark, "*", expi / exp, dark, title="", divzero=0.,
+ hparams="", pixtype="", calctype="", verbose=no, noact=no)
+ exp = expi
+ }
+
+ # Subtract the dark image from the input image.
+ imarith (in, "-", dark, out, title="", divzero=0.,
+ hparams="", pixtype=pixtype, calctype=pixtype,
+ verbose=no, noact=no)
+ }
+
+ # Finish up.
+ imdelete (dark, verify=no)
+ delete (file1, verify=no)
+ delete (file2, verify=no)
+end
diff --git a/noao/imred/generic/doc/Spelldict b/noao/imred/generic/doc/Spelldict
new file mode 100644
index 00000000..bb9573fb
--- /dev/null
+++ b/noao/imred/generic/doc/Spelldict
@@ -0,0 +1,51 @@
+Chebyshev
+INDEF
+IRAF
+Oct84
+RMS
+Sep84
+biasimage
+biassub
+ccd
+chebyshev
+chimage.section
+chimages
+chimages.section
+chimages.transpose
+colbckgrnd
+colflat
+darkimage
+darksub
+datatype
+dcbias
+dcbias.bias
+dcbias.trim
+div
+elp
+expdark
+expimage
+flatfield
+fliplines
+frame1
+frame2
+frame3
+images.imcopy
+imarith
+imcopy
+imlinefit
+imred.keeplog
+imred.logfile
+imstatistics
+imtranspose
+legendre
+linebckgrnd
+lineflat
+min
+minflat
+ndhelp
+ngrow
+normflat
+pixtype
+spline3
+trim.section
+
diff --git a/noao/imred/generic/doc/background.hlp b/noao/imred/generic/doc/background.hlp
new file mode 100644
index 00000000..7fa1fdd6
--- /dev/null
+++ b/noao/imred/generic/doc/background.hlp
@@ -0,0 +1,82 @@
+.help background Jul85 noao.imred.generic
+.ih
+NAME
+background -- Fit and subtract a line or column background
+.ih
+USAGE
+background input output
+.ih
+PARAMETERS
+.ls input
+Images to be background subtracted. The images may contain image sections.
+.le
+.ls output
+Output images to be created and modified. The number of output images must
+match the number of input images.
+.le
+.ls axis = 1
+Axis along which to fit the background and subtract. Axis 1 fits and
+subtracts the background along the lines and axis 2 fits and subtracts
+the background along the columns.
+.le
+.ls interactive = yes
+Set the fitting parameters interactively?
+.le
+.ls sample = "*"
+Lines or columns to be used in the background fits. The default "*" selects
+all lines or columns.
+.le
+.ls naverage = 1
+Number of sample points to combine to create a fitting point.
+A positive value specifies an average and a negative value specifies
+a median.
+.le
+.ls function = spline3
+Function to be fit to the image lines or columns. The functions are
+"legendre" (legendre polynomial), "chebyshev" (chebyshev polynomial),
+"spline1" (linear spline), and "spline3" (cubic spline). The functions
+may be abbreviated.
+.le
+.ls order = 1
+The order of the polynomials or the number of spline pieces.
+.le
+.ls low_reject = 0., high_reject = 0.
+Low and high rejection limits in units of the residual sigma.
+.le
+.ls niterate = 1
+Number of rejection iterations.
+.le
+.ls grow = 1.
+When a pixel is rejected, pixels within this distance of the rejected pixel
+are also rejected.
+.le
+.ls graphics = "stdgraph"
+Graphics device for interactive graphics output.
+.le
+.ls cursor = ""
+Graphics cursor input
+.le
+.ih
+DESCRIPTION
+For each line or column in the input images a function is fit to the columns
+or lines specified by the sample parameter. This function is then subtracted
+from the entire line or column to create an output line or column.
+The function fitting parameters may be set interactively.
+This task is a script using \fBfit1d\fR. For more discussion about
+the parameters see the help text for \fBicfit\fR and \fBfit1d\fR.
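+
+As a schematic illustration of the operation (not the actual \fBfit1d\fR
+code), the following Python sketch fits a function to the sample points of
+one image line and subtracts the fit from the whole line; numpy's Chebyshev
+class stands in for the \fBicfit\fR fitting functions, and the order is
+taken to count coefficients (order 1 is a constant, as in the examples
+below).
+
+.nf
+import numpy as np
+
+# Illustrative sketch: fit the sample points of one line and subtract
+# the fitted background from the entire line.
+def background_line(line, sample, order=1):
+    line = np.asarray(line, dtype=float)
+    x = np.arange(1, line.size + 1)           # pixel coordinates
+    fit = np.polynomial.Chebyshev.fit(x[sample], line[sample],
+                                      deg=order - 1)
+    return line - fit(x)                      # subtract the background
+
+# e.g. columns 10:100 and 410:500 (0-indexed here):
+# sample = np.r_[9:100, 409:500]
+.fi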
+.ih
+EXAMPLES
+A spectrum of an object runs down the center of a 500 x 500 image. To
+subtract a constant background using columns 10 to 100 and 410 to 500:
+
+ cl> background image image sample="10:100,410:500"
+
+To subtract a quadratic background from the columns of an image in which
+the spectrum lies between lines 50 and 70:
+
+ cl> background image image axis=2 sample="1:40,80:120" o=3
+
+.ih
+SEE ALSO
+fit1d, icfit
+.endhelp
diff --git a/noao/imred/generic/doc/darksub.hlp b/noao/imred/generic/doc/darksub.hlp
new file mode 100644
index 00000000..c6349e03
--- /dev/null
+++ b/noao/imred/generic/doc/darksub.hlp
@@ -0,0 +1,60 @@
+.help darksub Apr86 noao.imred.generic
+.ih
+NAME
+darksub -- Scale and subtract a dark count image
+.ih
+USAGE
+darksub input output darkimage
+.ih
+PARAMETERS
+.ls input
+List of input images from which to subtract the dark count image.
+.le
+.ls output
+List of output dark count subtracted images. The output images may
+be the same as the input images. The input and output image lists should
+contain the same number of images.
+.le
+.ls darkimage
+Dark count image to be scaled and subtracted from the input images.
+.le
+.ls exposure = ""
+Header parameter name from which to obtain the exposure times.
+.le
+.ls pixtype = "1"
+The pixel datatype of the dark subtracted images. The default ("1")
+is the pixel datatype of the original image. The other choices are
+"short", "integer", "long", "real", and "double".
+.le
+.ls verbose = yes
+Print log of operations performed.
+.le
+.ih
+DESCRIPTION
+The dark count image is scaled by the ratio of the input image exposure to the
+dark count image exposure and subtracted from each of the input images.
+The exposures are obtained from the image headers under the specified
+name. The output images may have the same names as the input images.
+A temporary image is used for the scaled dark count image and the original
+image is not modified. The pixel datatype of the output images is
+specified by the parameter \fIpixtype\fR. The default ("1") uses the
+datatype of the input image. A log of the operations performed may be
+printed on the standard output when the verbose option is specified.
+
+Note that this task can be used to subtract any type of image from a set
+of images in which the subtracted image must be scaled to a given exposure.
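+
+In array terms the operation amounts to the following minimal numpy
+sketch (an illustration only, not the script itself):
+
+.nf
+import numpy as np
+
+# The dark frame is scaled by the ratio of exposure times and subtracted.
+def dark_subtract(image, dark, exp_image, exp_dark):
+    scale = exp_image / exp_dark              # exposure time ratio
+    return np.asarray(image, float) - scale * np.asarray(dark, float)
+.fi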
+.ih
+EXAMPLES
+To subtract the dark count image 'dark' from obs1 and obs2:
+
+.nf
+ cl> darksub obs1,obs2 obs1,obs2 dark exp="exposure"
+ Tue 18:50:56 08-Apr-86
+ obs1 = obs1 - 5.0049997336067 * dark
+ Tue 18:51:05 08-Apr-86
+ obs2 = obs2 - 5.009999733075 * dark
+.fi
+.ih
+SEE ALSO
+imarith
+.endhelp
diff --git a/noao/imred/generic/doc/flat1d.hlp b/noao/imred/generic/doc/flat1d.hlp
new file mode 100644
index 00000000..0f855828
--- /dev/null
+++ b/noao/imred/generic/doc/flat1d.hlp
@@ -0,0 +1,157 @@
+.help flat1d Mar93 noao.imred.generic
+.ih
+NAME
+flat1d -- Make flat fields by fitting a 1D function to the image
+.ih
+USAGE
+flat1d input output
+.ih
+PARAMETERS
+.ls input
+Calibration images to be used to make the flat fields. The images may
+contain image sections. Only the region covered by the section will be
+modified in the output image.
+.le
+.ls output
+Flat field images to be created or modified. The number of output images
+must match the number of input images. If an output image does not exist
+it is first created and initialized to unit response.
+.le
+.ls axis = 1
+Axis along which the one dimensional fitting is done. Axis 1 corresponds
+to fitting the image lines and axis 2 corresponds to fitting the columns.
+.le
+.ls interactive = yes
+Set the fitting parameters interactively?
+.le
+.ls sample = "*"
+Lines or columns to be used in the fits.
+.le
+.ls naverage = 1
+Number of sample points to combine to create a fitting point.
+A positive value specifies an average and a negative value specifies
+a median.
+.le
+.ls function = spline3
+Function to be fit to the image lines or columns. The functions are
+"legendre" (legendre polynomial), "chebyshev" (chebyshev polynomial),
+"spline1" (linear spline), and "spline3" (cubic spline). The functions
+may be abbreviated.
+.le
+.ls order = 1
+The order of the polynomials or the number of spline pieces.
+.le
+.ls low_reject = 2.5, high_reject = 2.5
+Low and high rejection limits in units of the residual sigma.
+.le
+.ls niterate = 1
+Number of rejection iterations.
+.le
+.ls grow = 1.
+When a pixel is rejected, pixels within this distance of the rejected pixel
+are also rejected.
+.le
+.ls minflat = 0.
+When the fitted value is less than the value of this parameter the flat
+field value is set to unity.
+.le
+.ls graphics = "stdgraph"
+Graphics device for interactive graphics output.
+.le
+.ls cursor = ""
+Graphics cursor input
+.le
+.ih
+DESCRIPTION
+Flat fields are created containing only the small scale variations in the
+calibration images. The large scale variations in the images are modeled
+by fitting a function to each image line or column with deviant pixel rejection.
+The flat field values are obtained by taking the ratio of the image values
+to the function fit. However, if the fitted value is less than the
+parameter \fIminflat\fR the flat field value is set to unity.
+
+The function fitting parameters may be set interactively when the interactive
+flag is set using the interactive curve fitting package \fBicfit\fR.
+The cursor mode commands for this package are described in a separate
+help entry under "icfit". For two dimensional images the user is
+prompted for the sample line or column or a blank-separated range to be
+averaged and graphed.
+Note that the lines or columns are relative to the input image section; for
+example line 1 is the first line of the image section and not the first
+line of the image. Any number of lines or columns may be examined.
+When satisfied with the fit parameters the user
+responds with a carriage return to the line or column prompt.
+The function is then fit to all the lines or columns and the flat field
+ratios are determined.
+
+If the output image does not exist initially it is created with the same
+size as the input image \fIwithout\fR an image section and initialized
+to unit response. Subsequently the flat field data modifies the pixel
+values in the output image. Input image sections may be used to restrict
+the region in which the flat field response is determined leaving the
+rest of the output image unmodified. This ability is particularly useful
+when dealing with multi-aperture data.
+
+This task is very similar to \fBfit1d\fR with the addition of the
+parameter \fIminflat\fR and the deletion of the parameter \fItype\fR
+which is always "ratio".
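+
+The flat field computation for a single fitted line can be sketched as
+follows (a numpy illustration of the ratio and \fIminflat\fR behavior
+described above, not the task's code):
+
+.nf
+import numpy as np
+
+# Ratio of data to fit, with unit response where the fit is below minflat.
+def flat_line(data, fit, minflat=0.):
+    data = np.asarray(data, dtype=float)
+    fit = np.asarray(fit, dtype=float)
+    flat = np.ones_like(data)
+    ok = fit >= minflat
+    flat[ok] = data[ok] / fit[ok]
+    return flat
+.fi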
+.ih
+EXAMPLES
+1. Create a flat field from the calibration image "quartz" with the
+spectrum running along the lines. Exclude the first and last columns,
+use a spline fit of 25 pieces (a width of 32 pixels over 800 columns),
+and set grow to 4 pixels.
+
+.nf
+ cl> flat1d quartz flat order=25 sample="2:799" grow=4 \
+ >>> interactive=no
+
+ or
+
+ cl> flat1d quartz[2:799,*] flat order=25 grow=4 inter-
+.fi
+
+The fitting parameters may be set interactively in which case the fitting
+parameters need not be specified. The command would be
+
+.nf
+ cl> flat1d quartz flat
+ quartz: Fit column = 1 10
+ quartz: Fit column =
+.fi
+
+The user selects sample columns to be fit interactively with the interactive
+curve fitting package. When satisfied with the fit parameters
+respond with a carriage return to the prompt. The function is then fit to
+all the columns and the flat field ratios are determined.
+
+2. As an example for multi-slit spectra the locations of the slits are
+determined and a file containing the image sections is created.
+Since there must be the same number of output images another file
+containing the output images is also created. For
+example the files might contain
+
+.nf
+ File quartzs File flats
+ _______________ __________
+ quartz[23:40,*] flat
+ quartz[55:61,*] flat
+ quartz[73:84,*] flat
+.fi
+
+A flat field for the slits is then obtained with the command
+
+ cl> flat1d @quartzs flats axis=2
+.ih
+REVISIONS
+.ls FLAT1D V2.10.3
+The image header keyword "CCDMEAN = 1." is now added or updated.
+.le
+.ih
+BUGS
+The creation of multi-slit files and the need for an equal number of
+repeated output files is annoying. It will be worked on in the future.
+.ih
+SEE ALSO
+fit1d, icfit
+.endhelp
diff --git a/noao/imred/generic/doc/flatten.hlp b/noao/imred/generic/doc/flatten.hlp
new file mode 100644
index 00000000..0e35f647
--- /dev/null
+++ b/noao/imred/generic/doc/flatten.hlp
@@ -0,0 +1,42 @@
+.help flatten Sep84 noao.imred.generic
+.ih
+NAME
+flatten -- Flatten images by dividing by a flat field
+.ih
+USAGE
+flatten images flatfield
+.ih
+PARAMETERS
+.ls images
+Images to be flattened.
+.le
+.ls flatfield
+Flat field image to be divided into the images.
+.le
+.ls minflat = INDEF
+All flat field pixels less than or equal to this value are replaced by
+unit response. If INDEF all the flat field pixels are used.
+.le
+.ls pixtype = "real"
+The pixel datatype of the flattened image. The null string ("") defaults
+the pixel datatype to that of the original image before flattening.
+The other choices are "short", "integer", "long", and "real".
+.le
+.ih
+DESCRIPTION
+Each of the \fIimages\fR is flattened by dividing by the \fIflatfield\fR
+flat field image. The flattened images replace the original images.
+The pixel datatype of the flattened images is specified by the
+\fIpixtype\fR. The null string ("") leaves the datatype of the images
+unchanged. Low values in the flat field may be replaced by unit response
+by specifying a \fIminflat\fR value. All pixels in the flat field less
+than or equal to \fIminflat\fR are given unit response.
+.ih
+EXAMPLES
+To flatten a set of two dimensional images excluding pixels below
+.2 in the flat field:
+
+.nf
+ cl> flatten frame* flat minflat=0.2
+.fi
+.endhelp
diff --git a/noao/imred/generic/doc/normalize.hlp b/noao/imred/generic/doc/normalize.hlp
new file mode 100644
index 00000000..f5fb80f7
--- /dev/null
+++ b/noao/imred/generic/doc/normalize.hlp
@@ -0,0 +1,45 @@
+.help normalize Sep84 noao.imred.generic
+.ih
+NAME
+normalize -- Normalize images
+.ih
+USAGE
+normalize images
+.ih
+PARAMETERS
+.ls images
+Images to be normalized.
+.le
+.ls norm = INDEF
+Normalization factor to be used if not INDEF. If INDEF the normalization
+factor is determined by sampling the images.
+.le
+.ls sample_section = "[]"
+Section of the image to be sampled in determining the image mean.
+.le
+.ls lower = INDEF
+Lower limit of pixel values for calculating the normalization.
+INDEF corresponds to the minimum possible pixel value.
+.le
+.ls upper = INDEF
+Upper limit of pixel values for calculating the normalization.
+INDEF corresponds to the maximum possible pixel value.
+.le
+.ih
+DESCRIPTION
+Each of the images is normalized. The normalization is specified by the
+parameter \fInorm\fR. If the value of \fInorm\fR is INDEF then a normalization
+is determined by sampling the image. The normalization is then the mean
+of the pixels in the sample section with values in the range \fIlower\fR
+to \fIupper\fR. The default sample section selects all pixels in the image.
+The normalized images are of datatype "real" and replace the original images.
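+
+The normalization can be sketched in numpy as follows (illustrative only;
+the sample argument is any numpy index expression, for example
+np.s_[::5, ::5] selects every fifth pixel in each dimension):
+
+.nf
+import numpy as np
+
+# Mean of the sampled pixels within [lower, upper], then divide the image.
+def normalize(image, norm=None, sample=np.s_[:], lower=None, upper=None):
+    image = np.asarray(image, dtype=float)
+    if norm is None:
+        pix = image[sample]
+        if lower is not None:
+            pix = pix[pix >= lower]
+        if upper is not None:
+            pix = pix[pix <= upper]
+        norm = pix.mean()
+    return image / norm
+
+# e.g. normalize(frame, sample=np.s_[::5, ::5], lower=1000, upper=5000)
+.fi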
+.ih
+EXAMPLES
+To normalize a set of two dimensional images excluding deviant pixels below
+1000 and above 5000 and subsampling every fifth pixel in each dimension:
+
+ cl> normalize frame* sample=[*:5,*:5] low=1000 up=5000
+.ih
+SEE ALSO
+imstatistics, normflat
+.endhelp
diff --git a/noao/imred/generic/doc/normflat.hlp b/noao/imred/generic/doc/normflat.hlp
new file mode 100644
index 00000000..3020d296
--- /dev/null
+++ b/noao/imred/generic/doc/normflat.hlp
@@ -0,0 +1,54 @@
+.help normflat Sep84 noao.imred.generic
+.ih
+NAME
+normflat -- Create a flat field by normalizing a calibration image
+.ih
+USAGE
+normflat image flatfield
+.ih
+PARAMETERS
+.ls image
+Calibration image to be used.
+.le
+.ls flatfield
+Flat field to be created.
+.le
+.ls norm = INDEF
+Normalization factor to be used if not INDEF. If INDEF the normalization
+factor is automatically determined.
+.le
+.ls minflat = INDEF
+Minimum data value to be used in determining the normalization and in
+creating the flat field. Values less than or equal to this value are
+replaced with a flat field value of 1.
+.le
+.ls sample_section = "[]"
+Section of the image to be sampled in determining the normalization if
+norm = INDEF.
+.le
+.ih
+DESCRIPTION
+A flat field is created from a calibration image by normalizing the calibration
+image. The normalization is specified with the parameter \fInorm\fR. If the
+value of \fInorm\fR is INDEF then the normalization is determined by sampling
+the pixels in the sample section with values greater than \fIminflat\fR.
+This task differs from the task \fBnormalize\fR in that data values less
+than or equal to \fIminflat\fR are replaced with unity in the normalized
+flat field.
+.ih
+EXAMPLES
+To create a flat field from a calibration image "quartz" using pixels
+above 1000 and selecting the normalization to be 3500:
+
+ cl> normflat quartz flat norm=3500 minflat=1000
+
+To determine a normalization from the pixels above 1000 and sampling
+every fifth pixel in each dimension:
+
+.nf
+ cl> normflat quartz flat minflat=1000 sample=[*:5,*:5]
+.fi
+.ih
+SEE ALSO
+normalize
+.endhelp
diff --git a/noao/imred/generic/flat1d.par b/noao/imred/generic/flat1d.par
new file mode 100644
index 00000000..90e0ad3d
--- /dev/null
+++ b/noao/imred/generic/flat1d.par
@@ -0,0 +1,17 @@
+# FLAT1D -- Make flat fields by fitting a function to the image line or cols.
+
+input,s,a,,,,Calibration images
+output,s,a,,,,Flat field images
+axis,i,h,1,1,2,Axis to fit
+interactive,b,h,yes,,,Set fitting parameters interactively?
+sample,s,h,"*",,,Sample points to use in fit
+naverage,i,h,1,,,Number of points in sample averaging
+function,s,h,"spline3","spline3|legendre|chebyshev|spline1",,Fitting function
+order,i,h,1,1,,Order of fitting function
+low_reject,r,h,2.5,0.,,Low rejection in sigma of fit
+high_reject,r,h,2.5,0.,,High rejection in sigma of fit
+niterate,i,h,1,0,,Number of rejection iterations
+grow,r,h,1.,0.,,Rejection growing radius in pixels
+minflat,r,h,0.,,,Minimum fit value for computing a flat field value
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
diff --git a/noao/imred/generic/flat1d.x b/noao/imred/generic/flat1d.x
new file mode 100644
index 00000000..5a0797fc
--- /dev/null
+++ b/noao/imred/generic/flat1d.x
@@ -0,0 +1,478 @@
+include <error.h>
+include <imhdr.h>
+include <pkg/gtools.h>
+
+# FLAT1D -- Fit a function to image lines or columns and output an image
+# consisting of the ratio. Set a minimum value test to the denominator.
+# The fitting parameters may be set interactively using the icfit package.
+
+procedure flat1d ()
+
+int listin # Input image list
+int listout # Output image list
+int axis # Image axis to fit
+real minflat # Minimum fit value for ratio
+bool interactive # Interactive?
+
+char sample[SZ_LINE] # Sample ranges
+int naverage # Sample averaging size
+char function[SZ_LINE] # Curve fitting function
+int order # Order of curve fitting function
+real low_reject, high_reject # Rejection thresholds
+int niterate # Number of rejection iterations
+real grow # Rejection growing radius
+
+char input[SZ_LINE] # Input image
+char output[SZ_FNAME] # Output image
+pointer in, out # IMIO pointers
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+
+int imtopen(), imtgetim(), imtlen(), gt_init()
+int clgeti()
+real clgetr()
+bool clgetb()
+
+begin
+ # Get input and output lists and check that the number of images
+ # are the same.
+
+ call clgstr ("input", input, SZ_LINE)
+ listin = imtopen (input)
+ call clgstr ("output", input, SZ_LINE)
+ listout = imtopen (input)
+ if (imtlen (listin) != imtlen (listout)) {
+ call imtclose (listin)
+ call imtclose (listout)
+ call error (0, "Input and output image lists do not match")
+ }
+
+ # Get task parameters.
+
+ axis = clgeti ("axis")
+ minflat = clgetr ("minflat")
+ interactive = clgetb ("interactive")
+
+ # Initialize the ICFIT package.
+ call clgstr ("sample", sample, SZ_LINE)
+ naverage = clgeti ("naverage")
+ call clgstr ("function", function, SZ_LINE)
+ order = clgeti ("order")
+ low_reject = clgetr ("low_reject")
+ high_reject = clgetr ("high_reject")
+ niterate = clgeti ("niterate")
+ grow = clgetr ("grow")
+
+ call ic_open (ic)
+ call ic_pstr (ic, "sample", sample)
+ call ic_puti (ic, "naverage", naverage)
+ call ic_pstr (ic, "function", function)
+ call ic_puti (ic, "order", order)
+ call ic_putr (ic, "low", low_reject)
+ call ic_putr (ic, "high", high_reject)
+ call ic_puti (ic, "niterate", niterate)
+ call ic_putr (ic, "grow", grow)
+ call ic_pstr (ic, "ylabel", "")
+
+ gt = gt_init()
+ call gt_sets (gt, GTTYPE, "line")
+
+ # Fit each input image.
+
+ while ((imtgetim (listin, input, SZ_LINE) != EOF) &&
+ (imtgetim (listout, output, SZ_FNAME) != EOF)) {
+
+ iferr (call f1d_immap (input, output, in, out)) {
+ call erract (EA_WARN)
+ next
+ }
+ call f1d_flat1d (in, out, ic, gt, input, axis, minflat, interactive)
+ call imunmap (in)
+ call imunmap (out)
+ }
+
+ call ic_closer (ic)
+ call gt_free (gt)
+ call imtclose (listin)
+ call imtclose (listout)
+end
+
+
+# F1D_FLAT1D -- Given the image descriptor determine the fitting function
+# for each line or column and create an output image. If the interactive flag
+# is set then set the fitting parameters interactively.
+
+define MAXBUF 512 * 100 # Maximum number of pixels per block
+
+procedure f1d_flat1d (in, out, ic, gt, title, axis, minflat, interactive)
+
+pointer in # IMIO pointer for input image
+pointer out # IMIO pointer for output image
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+char title[ARB] # Title
+int axis # Image axis to fit
+real minflat # Minimum value for flat division
+bool interactive # Interactive?
+
+char graphics[SZ_FNAME]
+int i, nx, new
+real mindata, maxdata
+pointer cv, gp, sp, x, wts, indata, outdata
+
+int f1d_getline(), f1d_getdata(), strlen()
+pointer gopen()
+
+begin
+ # Error check.
+
+ if (IM_NDIM (in) > 2)
+ call error (0, "Image dimensions > 2 are not implemented")
+ if (axis > IM_NDIM (in))
+ call error (0, "Axis exceeds image dimension")
+
+ # Allocate memory for curve fitting.
+
+ nx = IM_LEN (in, axis)
+ call smark (sp)
+ call salloc (x, nx, TY_REAL)
+ call salloc (wts, nx, TY_REAL)
+
+ do i = 1, nx
+ Memr[x+i-1] = i
+ call amovkr (1., Memr[wts], nx)
+
+ call ic_putr (ic, "xmin", Memr[x])
+ call ic_putr (ic, "xmax", Memr[x+nx-1])
+
+ # If the interactive flag is set then use icg_fit to set the
+ # fitting parameters. Get_fitline returns EOF when the user
+ # is done. The weights are reset since the user may delete
+ # points.
+
+ if (interactive) {
+ call clgstr ("graphics", graphics, SZ_FNAME)
+ gp = gopen (graphics, NEW_FILE, STDGRAPH)
+ i = strlen (title)
+ indata = NULL
+ while (f1d_getline (ic, gt, in, axis, title, indata) != EOF) {
+ title[i + 1] = EOS
+ call icg_fit (ic, gp, "cursor", gt, cv, Memr[x], Memr[indata],
+ Memr[wts], nx)
+ call amovkr (1., Memr[wts], nx)
+ }
+ call gclose (gp)
+ }
+
+ # Loop through the input image and create an output image.
+
+ new = YES
+
+ while (f1d_getdata (in, out, axis, MAXBUF, indata, outdata) != EOF) {
+
+ call alimr (Memr[indata], nx, mindata, maxdata)
+ if (maxdata >= minflat) {
+ call ic_fit (ic, cv, Memr[x], Memr[indata], Memr[wts],
+ nx, new, YES, new, new)
+ new = NO
+
+ call cvvector (cv, Memr[x], Memr[outdata], nx)
+ }
+
+ call f1d_flat (Memr[indata], Memr[outdata], Memr[outdata], nx,
+ minflat, mindata, maxdata)
+ }
+
+ call imaddr (out, "ccdmean", 1.)
+
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# F1D_IMMAP -- Map images for flat1d.
+
+procedure f1d_immap (input, output, in, out)
+
+char input[ARB] # Input image
+char output[ARB] # Output image
+pointer in # Input IMIO pointer
+pointer out # Output IMIO pointer
+
+pointer sp, root, sect, line, data
+
+int access(), impnlr()
+pointer immap()
+errchk immap
+
+begin
+ # Get the root name and section of the input image.
+
+ call smark (sp)
+ call salloc (root, SZ_FNAME, TY_CHAR)
+ call salloc (sect, SZ_FNAME, TY_CHAR)
+
+ call get_root (input, Memc[root], SZ_FNAME)
+ call get_section (input, Memc[sect], SZ_FNAME)
+
+ # If the output image is not accessible then create it as a new copy
+ # of the full input image and initialize to unit response.
+
+ if (access (output, READ_WRITE, BINARY_FILE) == NO) {
+ in = immap (Memc[root], READ_ONLY, 0)
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+
+ call salloc (line, IM_MAXDIM, TY_LONG)
+ call amovkl (long (1), Meml[line], IM_MAXDIM)
+ while (impnlr (out, data, Meml[line]) != EOF)
+ call amovkr (1., Memr[data], IM_LEN(out, 1))
+
+ call imunmap (in)
+ call imunmap (out)
+ }
+
+ # Map the input and output images.
+
+ in = immap (input, READ_ONLY, 0)
+
+ call sprintf (Memc[root], SZ_FNAME, "%s%s")
+ call pargstr (output)
+ call pargstr (Memc[sect])
+ out = immap (Memc[root], READ_WRITE, 0)
+
+ call sfree (sp)
+end
+
+
+# F1D_GETDATA -- Get a line of image data.
+
+int procedure f1d_getdata (in, out, axis, maxbuf, indata, outdata)
+
+pointer in # Input IMIO pointer
+pointer out # Output IMIO pointer
+int axis # Image axis
+int maxbuf # Maximum buffer size for column axis
+pointer indata # Input data pointer
+pointer outdata # Output data pointer
+
+int i, index, last_index, col1, col2, nc, ncols, nlines, ncols_block
+pointer inbuf, outbuf, ptr
+
+pointer imgl1r(), impl1r(), imgl2r(), impl2r(), imgs2r(), imps2r()
+
+data index/0/
+
+begin
+ # Increment to the next image vector.
+
+ index = index + 1
+
+ # Initialize for the first vector.
+
+ if (index == 1) {
+ ncols = IM_LEN (in, 1)
+ if (IM_NDIM (in) == 1)
+ nlines = 1
+ else
+ nlines = IM_LEN (in, 2)
+
+ switch (axis) {
+ case 1:
+ last_index = nlines
+ case 2:
+ last_index = ncols
+ ncols_block = max (1, min (ncols, maxbuf / nlines))
+ col2 = 0
+
+ call malloc (indata, nlines, TY_REAL)
+ call malloc (outdata, nlines, TY_REAL)
+ }
+ }
+
+ # Finish up if the last vector has been done.
+
+ if (index > last_index) {
+ if (axis == 2) {
+ ptr = outbuf + index - 1 - col1
+ do i = 1, nlines {
+ Memr[ptr] = Memr[outdata+i-1]
+ ptr = ptr + nc
+ }
+
+ call mfree (indata, TY_REAL)
+ call mfree (outdata, TY_REAL)
+ }
+
+ index = 0
+ return (EOF)
+ }
+
+ # Get the next image vector.
+
+ switch (axis) {
+ case 1:
+ if (IM_NDIM (in) == 1) {
+ indata = imgl1r (in)
+ outdata = impl1r (out)
+ } else {
+ indata = imgl2r (in, index)
+ outdata = impl2r (out, index)
+ }
+ case 2:
+ if (index > 1) {
+ ptr = outbuf + index - 1 - col1
+ do i = 1, nlines {
+ Memr[ptr] = Memr[outdata+i-1]
+ ptr = ptr + nc
+ }
+ }
+
+ if (index > col2) {
+ col1 = col2 + 1
+ col2 = min (ncols, col1 + ncols_block - 1)
+ inbuf = imgs2r (in, col1, col2, 1, nlines)
+ outbuf = imps2r (out, col1, col2, 1, nlines)
+ nc = col2 - col1 + 1
+ }
+
+ ptr = inbuf + index - col1
+ do i = 1, nlines {
+ Memr[indata+i-1] = Memr[ptr]
+ ptr = ptr + nc
+ }
+ }
+ return (index)
+end
+
+# F1D_FLAT -- Form the flat field values by dividing the image data by the fit.
+# If the fit value is less than minflat then the ratio is set to 1.
+
+procedure f1d_flat (data, fit, flat, npts, minflat, mindata, maxdata)
+
+real data[npts] # Image data
+real fit[npts] # Fit to image data
+real flat[npts] # Ratio of image data to the fit
+int npts # Number of points
+real minflat # Minimum fit value for ratio
+real mindata # Minimum data value
+real maxdata # Maximum data value
+
+int i
+
+begin
+ if (mindata >= minflat)
+ call adivr (data, fit, flat, npts)
+
+ else if (maxdata < minflat)
+ call amovkr (1., flat, npts)
+
+ else {
+ do i = 1, npts {
+ if (fit[i] < minflat)
+ flat[i] = 1.
+ else
+ flat[i] = data[i] / fit[i]
+ }
+ }
+end
+
+
+# F1D_GETLINE -- Get image data to be fit interactively.  Return EOF
+# when the user enters EOF or a blank line.  Out of bounds line or column
+# requests are silently limited to the nearest image edge.
+
+int procedure f1d_getline (ic, gt, im, axis, title, data)
+
+pointer ic # ICFIT pointer
+pointer gt # GTOOLS pointer
+pointer im # IMIO pointer
+int axis # Image axis
+char title[ARB] # Title
+pointer data # Image data
+
+char line[SZ_LINE]
+int i, j, stat, imlen
+pointer x
+
+int getline(), nscan()
+pointer imgl1r()
+
+data stat/EOF/
+
+begin
+ # If the image is one dimensional do not prompt.
+
+ if (IM_NDIM (im) == 1) {
+ if (stat == EOF) {
+ call sprintf (title, SZ_LINE, "%s\n%s")
+ call pargstr (title)
+ call pargstr (IM_TITLE(im))
+ call gt_sets (gt, GTTITLE, title)
+ call mfree (data, TY_REAL)
+ call malloc (data, IM_LEN(im, 1), TY_REAL)
+ call amovr (Memr[imgl1r(im)], Memr[data], IM_LEN(im, 1))
+ stat = OK
+ } else
+ stat = EOF
+
+ return (stat)
+ }
+
+ # If the image is two dimensional prompt for the line or column.
+
+ switch (axis) {
+ case 1:
+ imlen = IM_LEN (im, 2)
+ call sprintf (title, SZ_LINE, "%s: Fit line =")
+ call pargstr (title)
+ case 2:
+ imlen = IM_LEN (im, 1)
+ call sprintf (title, SZ_LINE, "%s: Fit column =")
+ call pargstr (title)
+ }
+
+ call printf ("%s ")
+ call pargstr (title)
+ call flush (STDOUT)
+
+ if (getline(STDIN, line) == EOF)
+ return (EOF)
+
+ call sscan (line)
+ call gargi (i)
+ call gargi (j)
+
+ switch (nscan()) {
+ case 0:
+ stat = EOF
+ return (stat)
+ case 1:
+ i = max (1, min (imlen, i))
+ j = i
+ case 2:
+ i = max (1, min (imlen, i))
+ j = max (1, min (imlen, j))
+ }
+
+ call sprintf (title, SZ_LINE, "%s %d - %d\n%s")
+ call pargstr (title)
+ call pargi (i)
+ call pargi (j)
+ call pargstr (IM_TITLE(im))
+
+ call gt_sets (gt, GTTITLE, title)
+
+ switch (axis) {
+ case 1:
+ call ic_pstr (ic, "xlabel", "Column")
+ call xt_21imavg (im, axis, 1, IM_LEN(im, 1), i, j, x, data, imlen)
+ case 2:
+ call ic_pstr (ic, "xlabel", "Line")
+ call xt_21imavg (im, axis, i, j, 1, IM_LEN(im, 2), x, data, imlen)
+ }
+ call mfree (x, TY_REAL)
+
+ stat = OK
+ return (stat)
+end
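For readers following the SPP source above outside IRAF, the heart of FLAT1D is the thresholded ratio computed by f1d_flat: wherever the fitted value falls below minflat the output is forced to 1, so low-signal regions receive a unit (no-op) flat. The following is a minimal NumPy sketch of that logic only; the function name and the numbers are illustrative and not part of the task itself.

    import numpy as np

    def thresholded_ratio(data, fit, minflat):
        """Ratio of data to fit, forced to 1 wherever fit < minflat
        (covers the three cases handled by f1d_flat above)."""
        data = np.asarray(data, dtype=float)
        fit = np.asarray(fit, dtype=float)
        flat = np.ones_like(data)       # default: unit response
        ok = fit >= minflat             # divide only where the fit is trusted
        flat[ok] = data[ok] / fit[ok]
        return flat

    # Illustrative values: the faint end points fall below the threshold.
    data = np.array([8., 120., 131., 126., 7.])
    fit  = np.array([9., 119., 129., 125., 8.])
    print(thresholded_ratio(data, fit, minflat=50.))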
diff --git a/noao/imred/generic/flatten.cl b/noao/imred/generic/flatten.cl
new file mode 100644
index 00000000..928e09b2
--- /dev/null
+++ b/noao/imred/generic/flatten.cl
@@ -0,0 +1,64 @@
+#{ FLATTEN -- Divide images by a flat field
+
+#images,s,a,,,,Images to be flattened
+#flatfield,f,a,,,,Flat field
+#minflat,r,h,INDEF,,,Minimum flat field value
+#pixtype,s,h,"real",,,Flattened image pixel datatype
+#keeplog,b,h,@generic.keeplog,,,Keep log of processing?
+#logfile,f,h,@generic.logfile,,,Log file
+#imlist,f,h
+#imfd,*s,h
+#input,f,h
+#flat,f,h
+#flt,f,h
+
+{
+ # Startup message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" FLATTEN: Flatten images.", >> logfile)
+ }
+
+ # Set temporary files.
+ imlist = mktemp ("tmp$ims")
+
+ # Replace low flat field values if needed.
+ flat = flatfield
+ if (minflat == INDEF)
+ flt = flat
+ else {
+ if (keeplog)
+ print (" Minimum flat field value = ", minflat, >> logfile)
+ flt = mktemp ("tmp$ims")
+ imcopy (flat, flt, verbose=no)
+ imreplace (flt, 1., upper=minflat)
+ }
+
+ # Generate image list.
+ sections (images, option="fullname", >imlist)
+ imfd = imlist
+
+ while (fscan (imfd, input) != EOF) {
+
+ # Print output.
+ if (keeplog) {
+ time (>> logfile)
+ print (" Flatten ", input, " with ", flat, ".", >> logfile)
+ }
+
+ # Flatten the image with the flat field. Replace the input
+ # image by the flattened image.
+
+ imarith (input, "/", flt, input, pixtype=pixtype, calctype="real")
+ }
+
+ if (minflat != INDEF)
+ imdelete (flt, verify=no)
+ delete (imlist, verify=no)
+
+ # Ending message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" FLATTEN: Done.", >> logfile)
+ }
+}
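The FLATTEN script above applies an existing flat rather than building one: when minflat is not INDEF it copies the flat, replaces every value at or below minflat with 1 (imreplace), and then divides each input image in place (imarith). A rough NumPy equivalent of that replace-then-divide step, with hypothetical array names, might look like this.

    import numpy as np

    def apply_flat(image, flat, minflat=None):
        """Divide an image by a flat field; flat values at or below minflat
        are treated as 1 so they are not divided out (cf. imreplace + imarith)."""
        flat = np.asarray(flat, dtype=float)
        if minflat is not None:
            flat = np.where(flat <= minflat, 1.0, flat)
        return np.asarray(image, dtype=float) / flat

    image = np.array([[100., 103.], [ 99., 101.]])
    flat  = np.array([[1.02, 0.99], [1e-4, 1.00]])
    print(apply_flat(image, flat, minflat=0.01))   # the dead flat pixel passes through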
diff --git a/noao/imred/generic/flatten.par b/noao/imred/generic/flatten.par
new file mode 100644
index 00000000..7eedfe58
--- /dev/null
+++ b/noao/imred/generic/flatten.par
@@ -0,0 +1,13 @@
+# FLATTEN -- Divide images by a flat field
+
+images,s,a,,,,Images to be flattened
+flatfield,f,a,,,,Flat field
+minflat,r,h,INDEF,,,Minimum flat field value
+pixtype,s,h,"real",,,Flattened image pixel datatype
+keeplog,b,h,)generic.keeplog,,,Keep log of processing?
+logfile,f,h,)generic.logfile,,,Log file
+imlist,f,h
+imfd,*s,h
+input,f,h
+flat,f,h
+flt,f,h
diff --git a/noao/imred/generic/generic.cl b/noao/imred/generic/generic.cl
new file mode 100644
index 00000000..8a187758
--- /dev/null
+++ b/noao/imred/generic/generic.cl
@@ -0,0 +1,17 @@
+#{ GENERIC -- Generic image reduction tools
+
+# Load dependent packages:
+images
+proto # Task "imreplace"
+
+package generic
+
+task flat1d = generic$x_generic.e
+
+task background = generic$background.cl
+task darksub = generic$darksub.cl
+task flatten = generic$flatten.cl
+task normalize = generic$normalize.cl
+task normflat = generic$normflat.cl
+
+clbye()
diff --git a/noao/imred/generic/generic.hd b/noao/imred/generic/generic.hd
new file mode 100644
index 00000000..3464ed28
--- /dev/null
+++ b/noao/imred/generic/generic.hd
@@ -0,0 +1,11 @@
+# Help directory for the GENERIC package.
+
+$doc = "./doc/"
+
+background hlp=doc$background.hlp, src=background.cl
+darksub hlp=doc$darksub.hlp, src=darksub.cl
+flatten hlp=doc$flatten.hlp, src=flatten.cl
+flat1d hlp=doc$flat1d.hlp, src=flat1d.x
+normalize hlp=doc$normalize.hlp, src=normalize.cl
+normflat hlp=doc$normflat.hlp, src=normflat.cl
+revisions sys=Revisions
diff --git a/noao/imred/generic/generic.men b/noao/imred/generic/generic.men
new file mode 100644
index 00000000..9241c0d7
--- /dev/null
+++ b/noao/imred/generic/generic.men
@@ -0,0 +1,6 @@
+ background - Fit and subtract a line or column background
+ darksub - Scale and subtract a dark count image
+ flat1d - Make flat field by fitting a 1D func. to the lines or columns
+ flatten - Flatten images using a flat field
+ normalize - Normalize images
+ normflat - Create a flat field by normalizing and replacing low values
diff --git a/noao/imred/generic/generic.par b/noao/imred/generic/generic.par
new file mode 100644
index 00000000..7c5c6b6c
--- /dev/null
+++ b/noao/imred/generic/generic.par
@@ -0,0 +1,5 @@
+# GENERIC package parameter file.
+
+keeplog,b,h,)imred.keeplog,,,Keep log of processing?
+logfile,f,h,)imred.logfile,,,Log file
+version,s,h,"May 1985"
diff --git a/noao/imred/generic/mkpkg b/noao/imred/generic/mkpkg
new file mode 100644
index 00000000..2c33031b
--- /dev/null
+++ b/noao/imred/generic/mkpkg
@@ -0,0 +1,27 @@
+# Make the GENERIC package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $call generic
+ ;
+
+install:
+ $move x_generic.e noaobin$
+ ;
+
+generic:
+ $omake x_generic.x
+ $link x_generic.o libpkg.a -lxtools -lcurfit
+ ;
+
+libpkg.a:
+	flat1d.x <error.h> <imhdr.h> <pkg/gtools.h>
+ ;
diff --git a/noao/imred/generic/normalize.cl b/noao/imred/generic/normalize.cl
new file mode 100644
index 00000000..59290c1c
--- /dev/null
+++ b/noao/imred/generic/normalize.cl
@@ -0,0 +1,79 @@
+# NORMALIZE -- Compute the average of a sample region and normalize.
+
+procedure normalize (images)
+
+string images {prompt="Images to be normalized"}
+real norm = INDEF {prompt="Normalization value"}
+string sample_section = "[]" {prompt="Sample section"}
+real lower = INDEF {prompt="Lower limit of data values for sampling"}
+real upper = INDEF {prompt="Upper limit of data values for sampling"}
+bool keeplog = ")_.keeplog" {prompt="Keep log of processing?"}
+file logfile = ")_.logfile" {prompt="Log file"}
+
+struct *imfd
+
+begin
+ file imlist, input, tmp
+ real mean
+ int stat
+ bool mef
+
+ mef = no
+
+ # Query parameters.
+ input = images
+
+ # Set temporary files.
+ imlist = mktemp ("tmp$ims")
+ tmp = mktemp ("tmp")
+
+ # Startup message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" NORMALIZE: Normalize images.", >> logfile)
+ }
+
+ # Generate image list.
+ sections (input, option="fullname", >imlist)
+
+ # Process list.
+ imfd = imlist
+ while (fscan (imfd, input) != EOF) {
+
+ # Determine normalization.
+ if (norm == INDEF) {
+ # Determine the mean of the sample region.
+ imstatistics (input // sample_section, fields="mean",
+ lower=lower, upper=upper, format=no) | scan (mean)
+ } else
+ mean = norm
+
+ # Print output.
+ if (keeplog) {
+ time (>> logfile)
+ print (" Normalization for ", input, " = ", mean, >> logfile)
+ }
+
+ if (mean != 0.) {
+ # Normalize the image by the mean.
+ if (mef) {
+ imarith (input, "/", mean, tmp, pixtype="real",
+ calctype="real")
+ imcopy (tmp, input//"[]", verbose-)
+ imdelete (tmp, verify-)
+ } else
+ imarith (input, "/", mean, input, pixtype="real",
+ calctype="real")
+ hedit (input, "ccdmean", 1., add=yes, verify=no, show=no,
+ update=yes)
+ } else
+ print (" WARNING: Cannot normalize ", input, ".")
+ }
+ imfd = ""; delete (imlist, verify=no)
+
+ # Ending message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" NORMALIZE: Done.", >> logfile)
+ }
+end
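NORMALIZE takes its scale from imstatistics over a sample section, optionally restricted to data values between lower and upper, then divides the image by that mean and records CCDMEAN = 1 in the header. The NumPy sketch below shows just the clipped-mean-and-divide sequence; the sample section and limits are invented for the example.

    import numpy as np

    def normalize(image, sample=None, lower=None, upper=None):
        """Divide an image by the mean of a sample region, ignoring values
        outside [lower, upper] (roughly imstatistics followed by imarith)."""
        image = np.asarray(image, dtype=float)
        region = image if sample is None else image[sample]
        mask = np.ones(region.shape, dtype=bool)
        if lower is not None:
            mask &= region >= lower
        if upper is not None:
            mask &= region <= upper
        mean = region[mask].mean()
        if mean == 0.0:
            raise ValueError("cannot normalize: sample mean is zero")
        return image / mean, mean

    img = 1000.0 + np.arange(16, dtype=float).reshape(4, 4)
    normed, mean = normalize(img, sample=np.s_[1:3, 1:3], lower=1001.0)
    print(mean, normed[1:3, 1:3].mean())   # sample region now averages ~1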
diff --git a/noao/imred/generic/normflat.cl b/noao/imred/generic/normflat.cl
new file mode 100644
index 00000000..60b7025b
--- /dev/null
+++ b/noao/imred/generic/normflat.cl
@@ -0,0 +1,69 @@
+#{ NORMFLAT -- Make a flat field by normalizing and replacing low values.
+
+# image,f,a,,,,Calibration image
+# flatfield,f,a,,,,Flat field image
+# norm,r,h,INDEF,,,Normalization if not INDEF
+# minflat,r,h,INDEF,,,Minimum data value to use in the flat field
+# sample_section,s,h,"[]",,,Sample section for determining normalization
+# keeplog,b,h,@generic.keeplog,,,Keep log of processing?
+# logfile,f,h,@generic.logfile,,,Log file
+# img,f,h
+# flt,f,h
+# tmp,f,h
+# rlist,*s,h
+# mean,r,h
+# stat,i,h
+
+{
+ # Get query parameters and set temporary parameters.
+ img = image
+ flt = flatfield
+ tmp = mktemp ("tmp$gec")
+
+ # Startup message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" NORMFLAT: Create a flat field.\n", >> logfile)
+ print (" Calibration image: ", img, >> logfile)
+ print (" Flat field: ", flt, >> logfile)
+ if (minflat != INDEF)
+ print (" Minimum data value used in flat field = ", minflat,
+ >> logfile)
+ }
+
+ # Determine normalization.
+ if (norm == INDEF) {
+ # Determine the mean of the sample region.
+
+ imstatistics (img // sample_section, fields="mean",
+ lower=minflat, upper=INDEF, format=no, > tmp)
+ rlist = tmp
+ stat = fscan (rlist, mean)
+ rlist = ""
+ delete (tmp, verify=no)
+ } else
+ mean = norm
+
+ if (keeplog)
+ print (" Normalization = ", mean, >> logfile)
+
+ # Replace low values by the mean and normalize.
+ if (mean != 0.) {
+ if (minflat != INDEF) {
+ imcopy (img, flt, verbose=no)
+ imreplace (flt, mean, upper=minflat)
+ imarith (flt, "/", mean, flt, pixtype="real")
+ } else
+ imarith (img, "/", mean, flt, pixtype="real")
+ } else
+ print (" ERROR: Cannot normalize calibration image.")
+
+ # Set CCDMEAN to 1.
+ hedit (flt, "ccdmean", "1.", add=yes, update=yes, show=no, verify=no)
+
+ # Ending message.
+ if (keeplog) {
+ time (>> logfile)
+ print (" NORMFLAT: Done.", >> logfile)
+ }
+}
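NORMFLAT differs from NORMALIZE in one step: pixels at or below minflat are first replaced by the sample mean, and only then is the whole frame divided by that mean, so weak or dead regions end up with a unit response in the resulting flat. A hedged NumPy sketch of that sequence follows; the names and numbers are illustrative only.

    import numpy as np

    def make_normflat(calib, minflat=None):
        """Normalize a calibration image into a flat field: low pixels are
        set to the sample mean before dividing by it (cf. imreplace + imarith;
        the script above then sets CCDMEAN = 1 in the header)."""
        calib = np.asarray(calib, dtype=float)
        sample = calib if minflat is None else calib[calib >= minflat]
        mean = sample.mean()
        if mean == 0.0:
            raise ValueError("cannot normalize calibration image")
        flat = calib.copy()
        if minflat is not None:
            flat[flat <= minflat] = mean
        return flat / mean

    calib = np.array([[980., 1015.], [2., 1005.]])
    print(make_normflat(calib, minflat=100.))   # the low pixel becomes exactly 1.0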
diff --git a/noao/imred/generic/normflat.par b/noao/imred/generic/normflat.par
new file mode 100644
index 00000000..bec48aa3
--- /dev/null
+++ b/noao/imred/generic/normflat.par
@@ -0,0 +1,15 @@
+# NORMFLAT -- Make a flat field by normalizing and replacing low values.
+
+image,f,a,,,,Calibration image
+flatfield,f,a,,,,Flat field image
+norm,r,h,INDEF,,,Normalization if not INDEF
+minflat,r,h,INDEF,,,Minimum data value to use in the flat field
+sample_section,s,h,"[]",,,Sample section for determining normalization
+keeplog,b,h,)_.keeplog,,,Keep log of processing?
+logfile,f,h,)_.logfile,,,Log file
+img,f,h
+flt,f,h
+tmp,f,h
+rlist,*s,h
+mean,r,h
+stat,i,h
diff --git a/noao/imred/generic/x_generic.x b/noao/imred/generic/x_generic.x
new file mode 100644
index 00000000..f2a61e43
--- /dev/null
+++ b/noao/imred/generic/x_generic.x
@@ -0,0 +1 @@
+task flat1d
diff --git a/noao/imred/hydra/Revisions b/noao/imred/hydra/Revisions
new file mode 100644
index 00000000..b8318135
--- /dev/null
+++ b/noao/imred/hydra/Revisions
@@ -0,0 +1,61 @@
+.help revisions Jul91 noao.imred.hydra
+.nf
+imred$hydra/doc/dohydra.hlp
+    Fixed minor formatting problem. (4/22/99, Valdes)
+
+=======
+V2.11.1
+=======
+
+imred$hydra/doc/dohydra.hlp
+imred$hydra/doc/dohydra.ms
+ Updated for change where if both crval and cdelt are INDEF then the
+ automatic identification is not done. (5/2/96, Valdes)
+
+imred$hydra/doc/dohydra.hlp
+ Fixed typo. (4/22/97, Valdes)
+
+imred$hydra/demos/mkbig.cl
+imred$hydra/demos/mkdohydra.cl
+imred$hydra/demos/mkdonessie.cl
+ Made the ARTDATA package parameters explicit (4/15/97, Valdes)
+
+imred$hydra/hydra.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$hydra/dohydra.cl
+imred$hydra/dohydra.par
+imred$hydra/params.par
+imred$hydra/doc/dohydra.hlp
+imred$hydra/doc/dohydra.ms
+ Added crval/cdelt parameters used in new version with automatic arc
+ line identification. (4/5/96, Valdes)
+
+imred$hydra/doc/dohydra.hlp
+imred$hydra/doc/dohydra.ms
+ Describes the new header option for the aperture identification table.
+ (7/25/95, Valdes)
+
+imred$hydra/hydra.cl
+imred$hydra/dohydra.cl
+imred$hydra/dohydra.par
+imred$hydra/doc/dohydra.hlp
+imred$hydra/doc/dohydra.ms
+imred$hydra/demos/xgdohydra.dat
+imred$hydra/demos/xgbig.dat
+imred$hydra/demos/xgdohydra1.dat
+imred$hydra/demos/xgdohydranl.dat
+imred$hydra/demos/xgdonessie.dat
+ Added sky alignment option. (7/19/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+imred$hydra/hydra.cl
+ Renamed "response" to "fibresponse". (12/31/94, Valdes)
+
+imred/hydra/*
+ Installed (7/24/91, Valdes)
+.endhelp
diff --git a/noao/imred/hydra/demos/big.cl b/noao/imred/hydra/demos/big.cl
new file mode 100644
index 00000000..7596599f
--- /dev/null
+++ b/noao/imred/hydra/demos/big.cl
@@ -0,0 +1,13 @@
+# Create demo data if needed.
+
+cl (< "demos$mkbig.cl")
+
+unlearn dohydra params
+params.order = "increasing"
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgbig.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/demos.cl b/noao/imred/hydra/demos/demos.cl
new file mode 100644
index 00000000..5b065c51
--- /dev/null
+++ b/noao/imred/hydra/demos/demos.cl
@@ -0,0 +1,18 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile))
+ cl (< demofile)
+ else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/hydra/demos/demos.men b/noao/imred/hydra/demos/demos.men
new file mode 100644
index 00000000..8c81d0f6
--- /dev/null
+++ b/noao/imred/hydra/demos/demos.men
@@ -0,0 +1,13 @@
+ MENU of HYDRA Demonstrations
+
+    mkdohydra - Make Hydra test data (12 fibers, 100x256)
+ dohydra - Quick Hydra test with linear resampling
+ dohydra1 - Quick Hydra test with single standard star
+ dohydral - Quick Hydra test with logarithmic resampling
+ dohydranl - Quick Hydra test with nonlinear dispersion
+
+ mkdonessie - Make Nessie test data (12 fibers, 100x256)
+ donessie - Quick Nessie test (small images, no comments, no delays)
+
+ mkbig - Make large number of fiber test data (300 fibers, 1500x256)
+ big - Test with a large number of fibers
diff --git a/noao/imred/hydra/demos/demos.par b/noao/imred/hydra/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/hydra/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/hydra/demos/dohydra.cl b/noao/imred/hydra/demos/dohydra.cl
new file mode 100644
index 00000000..7a81d37e
--- /dev/null
+++ b/noao/imred/hydra/demos/dohydra.cl
@@ -0,0 +1,12 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdohydra.cl")
+
+unlearn dohydra params
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdohydra.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/dohydra1.cl b/noao/imred/hydra/demos/dohydra1.cl
new file mode 100644
index 00000000..d18ac1bb
--- /dev/null
+++ b/noao/imred/hydra/demos/dohydra1.cl
@@ -0,0 +1,12 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdohydra.cl")
+
+unlearn dohydra params
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdohydra1.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/dohydral.cl b/noao/imred/hydra/demos/dohydral.cl
new file mode 100644
index 00000000..b0b99f4f
--- /dev/null
+++ b/noao/imred/hydra/demos/dohydral.cl
@@ -0,0 +1,13 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdohydra.cl")
+
+unlearn dohydra params
+params.log = yes
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdohydra.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/dohydranl.cl b/noao/imred/hydra/demos/dohydranl.cl
new file mode 100644
index 00000000..8b90cd54
--- /dev/null
+++ b/noao/imred/hydra/demos/dohydranl.cl
@@ -0,0 +1,14 @@
+
+# Create demo data if needed.
+
+cl (< "demos$mkdohydra.cl")
+
+unlearn dohydra params
+params.linearize = no
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdohydranl.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/donessie.cl b/noao/imred/hydra/demos/donessie.cl
new file mode 100644
index 00000000..154992f2
--- /dev/null
+++ b/noao/imred/hydra/demos/donessie.cl
@@ -0,0 +1,12 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdonessie.cl")
+
+unlearn dohydra params
+delete ("demologfile,demoplotfile", verify=no, >& "dev$null")
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdonessie.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/hydra/demos/fibers.dat b/noao/imred/hydra/demos/fibers.dat
new file mode 100644
index 00000000..fcfa74b5
--- /dev/null
+++ b/noao/imred/hydra/demos/fibers.dat
@@ -0,0 +1,44 @@
+ 1 2 0.804985 gauss 2.7 0 355.825 0.002
+ 2 0 0.642824 gauss 2.7 0 349.692 0.002
+ 3 1 0.901402 gauss 2.7 0 343.900 0.002
+ 4 0 0.795503 gauss 2.7 0 337.464 0.002
+ 5 1 0.989898 gauss 2.7 0 331.099 0.002
+ 6 1 0.934496 gauss 2.7 0 324.886 0.002
+ 7 1 0.888073 gauss 2.7 0 318.907 0.002
+ 8 0 0.860567 gauss 2.7 0 312.805 0.002
+ 9 1 0.677534 gauss 2.7 0 306.601 0.002
+11 1 1.086792 gauss 2.7 0 294.340 0.002
+12 1 1.000867 gauss 2.7 0 288.223 0.002
+13 1 1.011535 gauss 2.7 0 282.295 0.002
+14 1 1.059941 gauss 2.7 0 276.397 0.002
+15 1 1.070633 gauss 2.7 0 270.036 0.002
+16 1 1.014929 gauss 2.7 0 263.795 0.002
+17 0 1.056154 gauss 2.7 0 257.857 0.002
+19 1 1.010262 gauss 2.7 0 245.340 0.002
+20 1 1.329210 gauss 2.7 0 239.071 0.002
+21 1 1.012730 gauss 2.7 0 232.936 0.002
+22 2 1.053946 gauss 2.7 0 226.763 0.002
+23 1 1.376721 gauss 2.7 0 220.742 0.002
+24 1 1.396739 gauss 2.7 0 214.579 0.002
+25 1 1.301325 gauss 2.7 0 208.787 0.002
+26 1 0.810463 gauss 2.7 0 202.392 0.002
+27 1 1.219917 gauss 2.7 0 196.406 0.002
+28 1 0.729413 gauss 2.7 0 189.815 0.002
+30 1 1.257244 gauss 2.7 0 177.277 0.002
+31 1 1.077903 gauss 2.7 0 171.462 0.002
+32 1 1.085670 gauss 2.7 0 165.604 0.002
+33 1 0.800563 gauss 2.7 0 159.134 0.002
+34 1 1.147771 gauss 2.7 0 152.901 0.002
+35 0 1.096679 gauss 2.7 0 146.801 0.002
+36 1 1.164292 gauss 2.7 0 141.093 0.002
+37 1 0.457727 gauss 2.7 0 134.824 0.002
+38 1 1.269284 gauss 2.7 0 128.719 0.002
+39 1 1.309297 gauss 2.7 0 122.536 0.002
+41 1 1.283618 gauss 2.7 0 110.218 0.002
+42 1 0.687173 gauss 2.7 0 103.963 0.002
+43 1 1.175850 gauss 2.7 0 98.0091 0.002
+44 1 0.757532 gauss 2.7 0 91.9606 0.002
+45 1 1.015546 gauss 2.7 0 79.5097 0.002
+46 1 0.372036 gauss 2.7 0 73.5889 0.002
+47 0 1.065080 gauss 2.7 0 67.4535 0.002
+48 2 0.939866 gauss 2.7 0 60.9762 0.002
diff --git a/noao/imred/hydra/demos/header.dat b/noao/imred/hydra/demos/header.dat
new file mode 100644
index 00000000..b9891d07
--- /dev/null
+++ b/noao/imred/hydra/demos/header.dat
@@ -0,0 +1,36 @@
+OBJECT = 'V640Mon 4500 ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 1200. / actual integration time
+DARKTIME= 1200. / total elapsed time
+IMAGETYP= 'object ' / object, dark, bias, etc.
+DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+UT = '12:19:55.00 ' / universal time
+ST = '09:13:15.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:08:52.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '44.580 ' / zenith distance
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'IRAF/ARTDATA ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1'
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/hydra/demos/mkbig.cl b/noao/imred/hydra/demos/mkbig.cl
new file mode 100644
index 00000000..80b88572
--- /dev/null
+++ b/noao/imred/hydra/demos/mkbig.cl
@@ -0,0 +1,29 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkfibers ("demoobj", type="object", fibers="demos$mkbig.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=1500, nlines=256, wstart=5786., wend=7362., seed=1)
+mkfibers ("demoflat", type="flat", fibers="demos$mkbig.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=1500, nlines=256, wstart=5786., wend=7362., seed=2)
+mkfibers ("demoarc", type="henear", fibers="demos$mkbig.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=1500, nlines=256, wstart=5786., wend=7362., seed=3)
+
+# Create the setup files.
+delete ("demoapid", verify=no, >& "dev$null")
+list = "demos$mkbig.dat"
+while (fscan (list, i, j) != EOF)
+ print (i, j, >> "demoapid")
+list = ""
diff --git a/noao/imred/hydra/demos/mkbig.dat b/noao/imred/hydra/demos/mkbig.dat
new file mode 100644
index 00000000..c2260523
--- /dev/null
+++ b/noao/imred/hydra/demos/mkbig.dat
@@ -0,0 +1,300 @@
+1 1 1.104 gauss 2.0 0 9.8 .002
+2 0 0.963 gauss 2.0 0 14.7 .002
+3 1 1.245 gauss 2.0 0 19.6 .002
+4 0 1.007 gauss 2.0 0 24.5 .002
+5 1 0.961 gauss 2.0 0 29.4 .002
+6 0 1.074 gauss 2.0 0 34.3 .002
+7 1 1.104 gauss 2.0 0 39.2 .002
+8 0 1.167 gauss 2.0 0 44.1 .002
+9 1 1.152 gauss 2.0 0 49.0 .002
+10 0 1.035 gauss 2.0 0 53.9 .002
+11 1 1.089 gauss 2.0 0 58.8 .002
+12 0 0.901 gauss 2.0 0 63.7 .002
+13 1 1.054 gauss 2.0 0 68.6 .002
+14 0 0.999 gauss 2.0 0 73.5 .002
+15 1 1.017 gauss 2.0 0 78.4 .002
+16 0 0.930 gauss 2.0 0 83.3 .002
+17 1 0.821 gauss 2.0 0 88.2 .002
+18 0 0.903 gauss 2.0 0 93.1 .002
+19 1 0.957 gauss 2.0 0 98.0 .002
+20 0 1.004 gauss 2.0 0 102.9 .002
+21 1 0.777 gauss 2.0 0 107.8 .002
+22 0 0.978 gauss 2.0 0 112.7 .002
+23 1 0.820 gauss 2.0 0 117.6 .002
+24 0 0.902 gauss 2.0 0 122.5 .002
+25 1 0.825 gauss 2.0 0 127.4 .002
+26 0 0.975 gauss 2.0 0 132.3 .002
+27 1 1.121 gauss 2.0 0 137.2 .002
+28 0 1.158 gauss 2.0 0 142.1 .002
+29 1 0.782 gauss 2.0 0 147.0 .002
+30 0 0.956 gauss 2.0 0 151.9 .002
+31 1 0.994 gauss 2.0 0 156.8 .002
+32 0 1.020 gauss 2.0 0 161.7 .002
+33 1 0.817 gauss 2.0 0 166.6 .002
+34 0 0.786 gauss 2.0 0 171.5 .002
+35 1 1.227 gauss 2.0 0 176.4 .002
+36 0 0.863 gauss 2.0 0 181.3 .002
+37 1 0.914 gauss 2.0 0 186.2 .002
+38 0 1.154 gauss 2.0 0 191.1 .002
+39 1 0.878 gauss 2.0 0 196.0 .002
+40 0 1.044 gauss 2.0 0 200.9 .002
+41 1 1.034 gauss 2.0 0 205.8 .002
+42 0 0.756 gauss 2.0 0 210.7 .002
+43 1 0.773 gauss 2.0 0 215.6 .002
+44 0 0.933 gauss 2.0 0 220.5 .002
+45 1 0.888 gauss 2.0 0 225.4 .002
+46 0 0.990 gauss 2.0 0 230.3 .002
+47 1 0.920 gauss 2.0 0 235.2 .002
+48 0 1.113 gauss 2.0 0 240.1 .002
+49 1 1.010 gauss 2.0 0 245.0 .002
+50 0 0.767 gauss 2.0 0 249.9 .002
+51 1 1.146 gauss 2.0 0 254.8 .002
+52 0 0.962 gauss 2.0 0 259.7 .002
+53 1 1.030 gauss 2.0 0 264.6 .002
+54 0 0.812 gauss 2.0 0 269.5 .002
+55 1 1.001 gauss 2.0 0 274.4 .002
+56 0 1.126 gauss 2.0 0 279.3 .002
+57 1 0.845 gauss 2.0 0 284.2 .002
+58 0 1.050 gauss 2.0 0 289.1 .002
+59 1 1.221 gauss 2.0 0 294.0 .002
+60 0 1.103 gauss 2.0 0 298.9 .002
+61 1 1.079 gauss 2.0 0 303.8 .002
+62 0 0.944 gauss 2.0 0 308.7 .002
+63 1 0.810 gauss 2.0 0 313.6 .002
+64 0 0.922 gauss 2.0 0 318.5 .002
+65 1 1.039 gauss 2.0 0 323.4 .002
+66 0 0.892 gauss 2.0 0 328.3 .002
+67 1 1.021 gauss 2.0 0 333.2 .002
+68 0 1.160 gauss 2.0 0 338.1 .002
+69 1 1.053 gauss 2.0 0 343.0 .002
+70 0 1.043 gauss 2.0 0 347.9 .002
+71 1 0.794 gauss 2.0 0 352.8 .002
+72 0 0.777 gauss 2.0 0 357.7 .002
+73 1 0.890 gauss 2.0 0 362.6 .002
+74 0 1.143 gauss 2.0 0 367.5 .002
+75 1 0.945 gauss 2.0 0 372.4 .002
+76 0 0.994 gauss 2.0 0 377.3 .002
+77 1 1.174 gauss 2.0 0 382.2 .002
+78 0 0.766 gauss 2.0 0 387.1 .002
+79 1 1.157 gauss 2.0 0 392.0 .002
+80 0 1.219 gauss 2.0 0 396.9 .002
+81 1 0.951 gauss 2.0 0 401.8 .002
+82 0 1.044 gauss 2.0 0 406.7 .002
+83 1 1.054 gauss 2.0 0 411.6 .002
+84 0 1.236 gauss 2.0 0 416.5 .002
+85 1 0.862 gauss 2.0 0 421.4 .002
+86 0 0.755 gauss 2.0 0 426.3 .002
+87 1 0.933 gauss 2.0 0 431.2 .002
+88 0 1.149 gauss 2.0 0 436.1 .002
+89 1 1.053 gauss 2.0 0 441.0 .002
+90 0 0.870 gauss 2.0 0 445.9 .002
+91 1 0.920 gauss 2.0 0 450.8 .002
+92 0 0.820 gauss 2.0 0 455.7 .002
+93 1 0.786 gauss 2.0 0 460.6 .002
+94 0 0.934 gauss 2.0 0 465.5 .002
+95 1 1.117 gauss 2.0 0 470.4 .002
+96 0 0.776 gauss 2.0 0 475.3 .002
+97 1 0.887 gauss 2.0 0 480.2 .002
+98 0 0.876 gauss 2.0 0 485.1 .002
+99 1 1.037 gauss 2.0 0 490.0 .002
+100 0 0.824 gauss 2.0 0 494.9 .002
+101 1 0.979 gauss 2.0 0 499.8 .002
+102 0 1.112 gauss 2.0 0 504.7 .002
+103 1 0.856 gauss 2.0 0 509.6 .002
+104 0 0.864 gauss 2.0 0 514.5 .002
+105 1 1.154 gauss 2.0 0 519.4 .002
+106 0 1.060 gauss 2.0 0 524.3 .002
+107 1 0.800 gauss 2.0 0 529.2 .002
+108 0 1.121 gauss 2.0 0 534.1 .002
+109 1 1.017 gauss 2.0 0 539.0 .002
+110 0 0.905 gauss 2.0 0 543.9 .002
+111 1 1.203 gauss 2.0 0 548.8 .002
+112 0 0.795 gauss 2.0 0 553.7 .002
+113 1 0.770 gauss 2.0 0 558.6 .002
+114 0 1.246 gauss 2.0 0 563.5 .002
+115 1 1.035 gauss 2.0 0 568.4 .002
+116 0 0.852 gauss 2.0 0 573.3 .002
+117 1 0.757 gauss 2.0 0 578.2 .002
+118 0 0.969 gauss 2.0 0 583.1 .002
+119 1 0.943 gauss 2.0 0 588.0 .002
+120 0 0.943 gauss 2.0 0 592.9 .002
+121 1 1.141 gauss 2.0 0 597.8 .002
+122 0 0.965 gauss 2.0 0 602.7 .002
+123 1 1.107 gauss 2.0 0 607.6 .002
+124 0 1.199 gauss 2.0 0 612.5 .002
+125 1 1.141 gauss 2.0 0 617.4 .002
+126 0 1.043 gauss 2.0 0 622.3 .002
+127 1 0.964 gauss 2.0 0 627.2 .002
+128 0 0.856 gauss 2.0 0 632.1 .002
+129 1 0.993 gauss 2.0 0 637.0 .002
+130 0 1.160 gauss 2.0 0 641.9 .002
+131 1 1.076 gauss 2.0 0 646.8 .002
+132 0 0.981 gauss 2.0 0 651.7 .002
+133 1 1.061 gauss 2.0 0 656.6 .002
+134 0 0.811 gauss 2.0 0 661.5 .002
+135 1 1.210 gauss 2.0 0 666.4 .002
+136 0 0.753 gauss 2.0 0 671.3 .002
+137 1 0.773 gauss 2.0 0 676.2 .002
+138 0 1.202 gauss 2.0 0 681.1 .002
+139 1 0.922 gauss 2.0 0 686.0 .002
+140 0 1.055 gauss 2.0 0 690.9 .002
+141 1 0.790 gauss 2.0 0 695.8 .002
+142 0 1.064 gauss 2.0 0 700.7 .002
+143 1 1.179 gauss 2.0 0 705.6 .002
+144 0 0.843 gauss 2.0 0 710.5 .002
+145 1 1.105 gauss 2.0 0 715.4 .002
+146 0 1.208 gauss 2.0 0 720.3 .002
+147 1 0.835 gauss 2.0 0 725.2 .002
+148 0 1.039 gauss 2.0 0 730.1 .002
+149 1 0.966 gauss 2.0 0 735.0 .002
+150 0 1.191 gauss 2.0 0 739.9 .002
+151 1 0.995 gauss 2.0 0 744.8 .002
+152 0 0.936 gauss 2.0 0 749.7 .002
+153 1 0.813 gauss 2.0 0 754.6 .002
+154 0 1.190 gauss 2.0 0 759.5 .002
+155 1 1.064 gauss 2.0 0 764.4 .002
+156 0 0.832 gauss 2.0 0 769.3 .002
+157 1 1.147 gauss 2.0 0 774.2 .002
+158 0 0.967 gauss 2.0 0 779.1 .002
+159 1 1.040 gauss 2.0 0 784.0 .002
+160 0 0.890 gauss 2.0 0 788.9 .002
+161 1 1.002 gauss 2.0 0 793.8 .002
+162 0 0.999 gauss 2.0 0 798.7 .002
+163 1 0.828 gauss 2.0 0 803.6 .002
+164 0 0.962 gauss 2.0 0 808.5 .002
+165 1 0.932 gauss 2.0 0 813.4 .002
+166 0 1.166 gauss 2.0 0 818.3 .002
+167 1 1.144 gauss 2.0 0 823.2 .002
+168 0 0.850 gauss 2.0 0 828.1 .002
+169 1 1.209 gauss 2.0 0 833.0 .002
+170 0 1.089 gauss 2.0 0 837.9 .002
+171 1 0.788 gauss 2.0 0 842.8 .002
+172 0 1.242 gauss 2.0 0 847.7 .002
+173 1 1.130 gauss 2.0 0 852.6 .002
+174 0 0.977 gauss 2.0 0 857.5 .002
+175 1 0.843 gauss 2.0 0 862.4 .002
+176 0 0.815 gauss 2.0 0 867.3 .002
+177 1 1.110 gauss 2.0 0 872.2 .002
+178 0 1.098 gauss 2.0 0 877.1 .002
+179 1 1.090 gauss 2.0 0 882.0 .002
+180 0 1.230 gauss 2.0 0 886.9 .002
+181 1 1.004 gauss 2.0 0 891.8 .002
+182 0 1.237 gauss 2.0 0 896.7 .002
+183 1 1.197 gauss 2.0 0 901.6 .002
+184 0 1.007 gauss 2.0 0 906.5 .002
+185 1 0.790 gauss 2.0 0 911.4 .002
+186 0 1.233 gauss 2.0 0 916.3 .002
+187 1 0.962 gauss 2.0 0 921.2 .002
+188 0 1.014 gauss 2.0 0 926.1 .002
+189 1 1.076 gauss 2.0 0 931.0 .002
+190 0 0.978 gauss 2.0 0 935.9 .002
+191 1 1.173 gauss 2.0 0 940.8 .002
+192 0 1.058 gauss 2.0 0 945.7 .002
+193 1 1.077 gauss 2.0 0 950.6 .002
+194 0 0.970 gauss 2.0 0 955.5 .002
+195 1 0.874 gauss 2.0 0 960.4 .002
+196 0 0.803 gauss 2.0 0 965.3 .002
+197 1 0.990 gauss 2.0 0 970.2 .002
+198 0 0.783 gauss 2.0 0 975.1 .002
+199 1 1.083 gauss 2.0 0 980.0 .002
+200 0 1.009 gauss 2.0 0 984.9 .002
+201 1 0.943 gauss 2.0 0 989.8 .002
+202 0 1.071 gauss 2.0 0 994.7 .002
+203 1 0.764 gauss 2.0 0 999.6 .002
+204 0 0.827 gauss 2.0 0 1004.5 .002
+205 1 0.938 gauss 2.0 0 1009.4 .002
+206 0 0.956 gauss 2.0 0 1014.3 .002
+207 1 1.094 gauss 2.0 0 1019.2 .002
+208 0 1.119 gauss 2.0 0 1024.1 .002
+209 1 0.957 gauss 2.0 0 1029.0 .002
+210 0 0.910 gauss 2.0 0 1033.9 .002
+211 1 0.827 gauss 2.0 0 1038.8 .002
+212 0 1.060 gauss 2.0 0 1043.7 .002
+213 1 1.154 gauss 2.0 0 1048.6 .002
+214 0 1.002 gauss 2.0 0 1053.5 .002
+215 1 0.797 gauss 2.0 0 1058.4 .002
+216 0 0.989 gauss 2.0 0 1063.3 .002
+217 1 0.810 gauss 2.0 0 1068.2 .002
+218 0 1.106 gauss 2.0 0 1073.1 .002
+219 1 0.863 gauss 2.0 0 1078.0 .002
+220 0 1.246 gauss 2.0 0 1082.9 .002
+221 1 0.963 gauss 2.0 0 1087.8 .002
+222 0 0.929 gauss 2.0 0 1092.7 .002
+223 1 0.835 gauss 2.0 0 1097.6 .002
+224 0 0.995 gauss 2.0 0 1102.5 .002
+225 1 0.897 gauss 2.0 0 1107.4 .002
+226 0 0.983 gauss 2.0 0 1112.3 .002
+227 1 1.187 gauss 2.0 0 1117.2 .002
+228 0 1.239 gauss 2.0 0 1122.1 .002
+229 1 0.900 gauss 2.0 0 1127.0 .002
+230 0 0.846 gauss 2.0 0 1131.9 .002
+231 1 1.096 gauss 2.0 0 1136.8 .002
+232 0 1.041 gauss 2.0 0 1141.7 .002
+233 1 0.968 gauss 2.0 0 1146.6 .002
+234 0 0.827 gauss 2.0 0 1151.5 .002
+235 1 1.108 gauss 2.0 0 1156.4 .002
+236 0 1.162 gauss 2.0 0 1161.3 .002
+237 1 0.884 gauss 2.0 0 1166.2 .002
+238 0 0.891 gauss 2.0 0 1171.1 .002
+239 1 0.974 gauss 2.0 0 1176.0 .002
+240 0 1.116 gauss 2.0 0 1180.9 .002
+241 1 0.830 gauss 2.0 0 1185.8 .002
+242 0 0.964 gauss 2.0 0 1190.7 .002
+243 1 0.963 gauss 2.0 0 1195.6 .002
+244 0 0.869 gauss 2.0 0 1200.5 .002
+245 1 0.962 gauss 2.0 0 1205.4 .002
+246 0 0.959 gauss 2.0 0 1210.3 .002
+247 1 1.182 gauss 2.0 0 1215.2 .002
+248 0 1.167 gauss 2.0 0 1220.1 .002
+249 1 1.124 gauss 2.0 0 1225.0 .002
+250 0 1.151 gauss 2.0 0 1229.9 .002
+251 1 1.218 gauss 2.0 0 1234.8 .002
+252 0 1.229 gauss 2.0 0 1239.7 .002
+253 1 1.108 gauss 2.0 0 1244.6 .002
+254 0 1.248 gauss 2.0 0 1249.5 .002
+255 1 1.135 gauss 2.0 0 1254.4 .002
+256 0 0.787 gauss 2.0 0 1259.3 .002
+257 1 1.156 gauss 2.0 0 1264.2 .002
+258 0 0.773 gauss 2.0 0 1269.1 .002
+259 1 1.129 gauss 2.0 0 1274.0 .002
+260 0 1.212 gauss 2.0 0 1278.9 .002
+261 1 1.092 gauss 2.0 0 1283.8 .002
+262 0 1.116 gauss 2.0 0 1288.7 .002
+263 1 0.892 gauss 2.0 0 1293.6 .002
+264 0 1.208 gauss 2.0 0 1298.5 .002
+265 1 0.795 gauss 2.0 0 1303.4 .002
+266 0 0.860 gauss 2.0 0 1308.3 .002
+267 1 0.967 gauss 2.0 0 1313.2 .002
+268 0 0.800 gauss 2.0 0 1318.1 .002
+269 1 0.902 gauss 2.0 0 1323.0 .002
+270 0 0.752 gauss 2.0 0 1327.9 .002
+271 1 1.164 gauss 2.0 0 1332.8 .002
+272 0 1.119 gauss 2.0 0 1337.7 .002
+273 1 0.932 gauss 2.0 0 1342.6 .002
+274 0 0.912 gauss 2.0 0 1347.5 .002
+275 1 0.806 gauss 2.0 0 1352.4 .002
+276 0 1.198 gauss 2.0 0 1357.3 .002
+277 1 1.242 gauss 2.0 0 1362.2 .002
+278 0 1.158 gauss 2.0 0 1367.1 .002
+279 1 0.881 gauss 2.0 0 1372.0 .002
+280 0 0.782 gauss 2.0 0 1376.9 .002
+281 1 1.000 gauss 2.0 0 1381.8 .002
+282 0 1.038 gauss 2.0 0 1386.7 .002
+283 1 0.872 gauss 2.0 0 1391.6 .002
+284 0 0.868 gauss 2.0 0 1396.5 .002
+285 1 1.079 gauss 2.0 0 1401.4 .002
+286 0 0.943 gauss 2.0 0 1406.3 .002
+287 1 0.825 gauss 2.0 0 1411.2 .002
+288 0 0.973 gauss 2.0 0 1416.1 .002
+289 1 1.061 gauss 2.0 0 1421.0 .002
+290 0 0.905 gauss 2.0 0 1425.9 .002
+291 1 0.954 gauss 2.0 0 1430.8 .002
+292 0 0.865 gauss 2.0 0 1435.7 .002
+293 1 0.761 gauss 2.0 0 1440.6 .002
+294 0 0.932 gauss 2.0 0 1445.5 .002
+295 1 0.818 gauss 2.0 0 1450.4 .002
+296 0 1.225 gauss 2.0 0 1455.3 .002
+297 1 0.949 gauss 2.0 0 1460.2 .002
+298 0 1.006 gauss 2.0 0 1465.1 .002
+299 1 0.880 gauss 2.0 0 1470.0 .002
+300 0 0.998 gauss 2.0 0 1474.9 .002
diff --git a/noao/imred/hydra/demos/mkdohydra.cl b/noao/imred/hydra/demos/mkdohydra.cl
new file mode 100644
index 00000000..543dadb5
--- /dev/null
+++ b/noao/imred/hydra/demos/mkdohydra.cl
@@ -0,0 +1,41 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkfibers ("demoobj", type="object", fibers="demos$mkdohydra1.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=1)
+mkfibers ("demoflat", type="flat", fibers="demos$mkdohydra1.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=2)
+mkfibers ("demoarc", type="henear", fibers="demos$mkdohydra1.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=3)
+mkfibers ("demostd", type="object", fibers="demos$mkdohydra2.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=1)
+
+# Create the setup files.
+delete ("demoapid1", verify=no, >& "dev$null")
+list = "demos$mkdohydra1.dat"
+while (fscan (list, i, j) != EOF) {
+ print (i, j, "Title", >> "demoapid1")
+ s1 = i // " " // j // " 01:23:45.67 +01:23:45.67 Title"
+ hedit ("demoobj,demoflat,demoarc,demostd", "slfib"//i, s1,
+ add=yes, verify=no, show=no, update=yes)
+}
+list = ""
+delete ("demoapid2", verify=no, >& "dev$null")
+list = "demos$mkdohydra2.dat"
+while (fscan (list, i, j) != EOF)
+ print (i, j, >> "demoapid2")
+list = ""
diff --git a/noao/imred/hydra/demos/mkdohydra1.dat b/noao/imred/hydra/demos/mkdohydra1.dat
new file mode 100644
index 00000000..87640612
--- /dev/null
+++ b/noao/imred/hydra/demos/mkdohydra1.dat
@@ -0,0 +1,12 @@
+36 1 1.164292 gauss 2.7 0 91.093 0.002
+37 0 0.457727 gauss 2.7 0 84.824 0.002
+38 1 1.269284 gauss 2.7 0 78.719 0.002
+39 1 1.309297 gauss 2.7 0 72.536 0.002
+41 0 1.283618 gauss 2.7 0 60.218 0.002
+42 1 0.687173 gauss 2.7 0 53.963 0.002
+43 1 1.175850 gauss 2.7 0 48.0091 0.002
+44 0 0.757532 gauss 2.7 0 41.9606 0.002
+45 1 1.015546 gauss 2.7 0 29.5097 0.002
+46 -1 0.372036 gauss 2.7 0 23.5889 0.002
+47 0 1.065080 gauss 2.7 0 17.4535 0.002
+48 1 0.939866 gauss 2.7 0 10.9762 0.002
diff --git a/noao/imred/hydra/demos/mkdohydra2.dat b/noao/imred/hydra/demos/mkdohydra2.dat
new file mode 100644
index 00000000..4b848596
--- /dev/null
+++ b/noao/imred/hydra/demos/mkdohydra2.dat
@@ -0,0 +1,12 @@
+36 0 1.164292 gauss 2.7 0 91.093 0.002
+37 0 0.457727 gauss 2.7 0 84.824 0.002
+38 0 1.269284 gauss 2.7 0 78.719 0.002
+39 0 1.309297 gauss 2.7 0 72.536 0.002
+41 0 1.283618 gauss 2.7 0 60.218 0.002
+42 0 0.687173 gauss 2.7 0 53.963 0.002
+43 1 1.175850 gauss 2.7 0 48.0091 0.002
+44 0 0.757532 gauss 2.7 0 41.9606 0.002
+45 0 1.015546 gauss 2.7 0 29.5097 0.002
+46 -1 0.372036 gauss 2.7 0 23.5889 0.002
+47 0 1.065080 gauss 2.7 0 17.4535 0.002
+48 0 0.939866 gauss 2.7 0 10.9762 0.002
diff --git a/noao/imred/hydra/demos/mkdonessie.cl b/noao/imred/hydra/demos/mkdonessie.cl
new file mode 100644
index 00000000..e67a90e1
--- /dev/null
+++ b/noao/imred/hydra/demos/mkdonessie.cl
@@ -0,0 +1,36 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkfibers ("demoobj", type="object", fibers="demos$mkdonessie.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=1)
+mkfibers ("demoflat", type="flat", fibers="demos$mkdonessie.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=2)
+mkfibers ("demoarc1", type="ehenear", fibers="demos$mkdonessie.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=3)
+mkfibers ("demoarc2", type="ohenear", fibers="demos$mkdonessie.dat",
+ title="Hydra artificial image", header="demos$header.dat",
+ ncols=100, nlines=256, wstart=5786., wend=7362., seed=4)
+#mkfibers ("demoarc3", type="mercury", fibers="demos$mkdonessie.dat",
+# title="Hydra artificial image", header="demos$header.dat",
+# ncols=100, nlines=256, wstart=5786., wend=7362., seed=4)
+
+# Create the setup files.
+delete ("demoapid,demoarcrep", verify=no, >& "dev$null")
+list = "demos$mkdonessie.dat"
+while (fscan (list, i, j) != EOF)
+ print (i, j, >> "demoapid")
+list = ""
+print ("demoarc1 demoarc2 1x2", > "demoarcrep")
diff --git a/noao/imred/hydra/demos/mkdonessie.dat b/noao/imred/hydra/demos/mkdonessie.dat
new file mode 100644
index 00000000..1113aae6
--- /dev/null
+++ b/noao/imred/hydra/demos/mkdonessie.dat
@@ -0,0 +1,12 @@
+36 2 1.164292 gauss 2.7 0 91.093 0.002
+37 0 0.457727 gauss 2.7 0 84.824 0.002
+38 1 1.269284 gauss 2.7 0 78.719 0.002
+39 1 1.309297 gauss 2.7 0 72.536 0.002
+41 0 1.283618 gauss 2.7 0 60.218 0.002
+42 1 0.687173 gauss 2.7 0 53.963 0.002
+43 1 1.175850 gauss 2.7 0 48.0091 0.002
+44 0 0.757532 gauss 2.7 0 41.9606 0.002
+45 1 1.015546 gauss 2.7 0 29.5097 0.002
+46 1 0.372036 gauss 2.7 0 23.5889 0.002
+47 0 1.065080 gauss 2.7 0 17.4535 0.002
+48 2 0.939866 gauss 2.7 0 10.9762 0.002
diff --git a/noao/imred/hydra/demos/mklist.cl b/noao/imred/hydra/demos/mklist.cl
new file mode 100644
index 00000000..b36b3a3e
--- /dev/null
+++ b/noao/imred/hydra/demos/mklist.cl
@@ -0,0 +1,27 @@
+# MKLIST - Make a fiber list.
+
+int nfibers
+real width, sep, flux
+file temp
+
+#nfibers = 300
+#width = 2.0
+#sep = 4.9
+nfibers = j
+width = x
+sep = y
+
+temp = mktemp ("tmp")
+urand (nfibers, 1, ndigits=4, seed=1, scale_factor=0.5, > temp)
+list = temp
+
+for (i=1; i<=nfibers; i+=1) {
+ if (fscan (list, flux) == EOF)
+ break
+ flux = 0.75 + flux
+ printf ("%d %d %5.3f gauss %4.1f 0 %6.1f .002\n", i, mod(i,2),
+ flux, width, sep*(i+1))
+}
+
+list = ""
+delete (temp, verify=no)
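MKLIST writes one line per fiber in the column layout used by the other demo fiber tables (mkbig.dat, mkdohydra*.dat): a fiber number, an alternating beam code, a flux scale drawn in [0.75, 1.25), the profile name and width, a zero, a center position of sep*(i+1), and the constant .002 that appears throughout the demo files. Below is a small Python rendition of the same generator, offered purely to illustrate the format; it uses Python's random module rather than IRAF's urand, so the fluxes will differ.

    import random

    def make_fiber_list(nfibers=12, width=2.0, sep=4.9, seed=1):
        """Emit fiber-list lines like mklist.cl: alternating beam codes,
        fluxes in [0.75, 1.25), and centers spaced by `sep`."""
        rng = random.Random(seed)
        lines = []
        for i in range(1, nfibers + 1):
            flux = 0.75 + 0.5 * rng.random()
            lines.append("%d %d %5.3f gauss %4.1f 0 %6.1f .002"
                         % (i, i % 2, flux, width, sep * (i + 1)))
        return lines

    for line in make_fiber_list(nfibers=5):
        print(line)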
diff --git a/noao/imred/hydra/demos/xgbig.dat b/noao/imred/hydra/demos/xgbig.dat
new file mode 100644
index 00000000..074d8db5
--- /dev/null
+++ b/noao/imred/hydra/demos/xgbig.dat
@@ -0,0 +1,81 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\shydra\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+""\r
+^Z
+epar\sdohydra\n
+demoobj\r
+demoflat\r
+demoflat\r
+\r
+demoarc\r
+\r
+\r
+\r
+rdnoise\r
+gain\r
+\r
+300\r
+3\r
+4\r
+6\r
+demoapid\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+\r
+\r
+n\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+dohydra\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+j/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+41\n
+#/<-5\s\s\s\s/=(.\s=\r 37\r
+#/<-5\s\s\s\s/=(.\s=\r 45\r
+q/<-5\s\s\s\s/=(.\s=\r
+imdelete\sdemoobj.ms\n
+dohydra\sdemoobj\sskyedit-\ssplot-\sbatch+\n
diff --git a/noao/imred/hydra/demos/xgdohydra.dat b/noao/imred/hydra/demos/xgdohydra.dat
new file mode 100644
index 00000000..be94c8b0
--- /dev/null
+++ b/noao/imred/hydra/demos/xgdohydra.dat
@@ -0,0 +1,93 @@
+\O=NOAO/IRAF V2.10EXPORT valdes@puppis Tue 14:30:46 09-Feb-93
+\T=xgterm
+\G=xgterm
+epar\shydra\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdohydra\n
+demoobj\r
+demoflat\r
+demoflat\r
+\r
+demoarc\r
+\r
+\r
+\r
+rdnoise\r
+gain\r
+\r
+12\r
+4\r
+5\r
+7\r
+demoflat\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+dohydra\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+j/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+41\n
+#/<-5\s\s\s\s/=(.\s=\r 37\r
+#/<-5\s\s\s\s/=(.\s=\r 45\r
+q/<-5\s\s\s\s/=(.\s=\r
+imdelete\sdemoobj.ms\n
+dohydra\sdemoobj\sskyedit-\ssplot-\sbatch+\n
diff --git a/noao/imred/hydra/demos/xgdohydra1.dat b/noao/imred/hydra/demos/xgdohydra1.dat
new file mode 100644
index 00000000..ce9cebb7
--- /dev/null
+++ b/noao/imred/hydra/demos/xgdohydra1.dat
@@ -0,0 +1,89 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\shydra\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdohydra\n
+demostd\r
+demoflat\r
+demoflat\r
+\r
+demoarc\r
+\r
+\r
+\r
+rdnoise\r
+gain\r
+\r
+12\r
+4\r
+5\r
+7\r
+demoapid2\r
+6600\r
+6.1\r
+\r
+\r
+\r
+1\r
+\r
+\r
+y\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+type\sdemoapid2\n
+dohydra\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+j/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/hydra/demos/xgdohydranl.dat b/noao/imred/hydra/demos/xgdohydranl.dat
new file mode 100644
index 00000000..9efdb764
--- /dev/null
+++ b/noao/imred/hydra/demos/xgdohydranl.dat
@@ -0,0 +1,91 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\shydra\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdohydra\n
+demoobj\r
+demoflat\r
+demoflat\r
+\r
+demoarc\r
+\r
+\r
+\r
+rdnoise\r
+gain\r
+\r
+12\r
+4\r
+5\r
+7\r
+demoapid1\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+type\sdemoapid1\n
+dohydra\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+j/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+41\n
+#/<-5\s\s\s\s/=(.\s=\r 37\r
+#/<-5\s\s\s\s/=(.\s=\r 45\r
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/hydra/demos/xgdonessie.dat b/noao/imred/hydra/demos/xgdonessie.dat
new file mode 100644
index 00000000..49e57ff0
--- /dev/null
+++ b/noao/imred/hydra/demos/xgdonessie.dat
@@ -0,0 +1,94 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\shydra\n
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdohydra\n
+demoobj\r
+demoflat\r
+demoflat\r
+\r
+demoarc1\r
+\r
+demoarcrep\r
+\r
+rdnoise\r
+gain\r
+\r
+12\r
+4\r
+5\r
+7\r
+demoapid\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+n\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+^Z
+type\sdemoapid,demoarcrep\n
+dohydra\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+k/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+f/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+q/<-5\s\s\s\s/=(.\s=\r
+q\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+\n
+41\n
+#/<-5\s\s\s\s/=(.\s=\r 37\r
+#/<-5\s\s\s\s/=(.\s=\r 45\r
+q/<-5\s\s\s\s/=(.\s=\r
+imdelete\sdemoobj.ms\n
+dohydra\sdemoobj\sskyedit-\ssplot-\sbatch+\n
diff --git a/noao/imred/hydra/doc/dohydra.hlp b/noao/imred/hydra/doc/dohydra.hlp
new file mode 100644
index 00000000..7c762d3b
--- /dev/null
+++ b/noao/imred/hydra/doc/dohydra.hlp
@@ -0,0 +1,1588 @@
+.help dohydra Jul95 noao.imred.hydra
+.ih
+NAME
+dohydra -- Hydra and Nessie data reduction task
+.ih
+USAGE
+dohydra objects
+.ih
+SUMMARY
+The \fBdohydra\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIHydra\fR and \fINessie\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments.
+.ih
+PARAMETERS
+.ls objects
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.le
+.ls flat = "" (optional)
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.le
+.ls throughput = "" (optional)
+Throughput file or image. If an image is specified, typically a blank
+sky observation, the total flux through
+each fiber is used to correct for fiber throughput. If a file consisting
+of lines with the aperture number and relative throughput is specified
+then the fiber throughput will be corrected by those values. If neither
+is specified but a flat field image is given it is used to compute the
+throughput.
+.le
+.ls arcs1 = "" (at least one if dispersion correcting)
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.le
+.ls arcs2 = "" (optional for Nessie)
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.le
+.ls arcreplace = "" (optional for Nessie)
+Special aperture replacement file. A characteristic of Nessie (though not
+Hydra) is that two exposures are required to illuminate all fibers
+with an arc calibration. The aperture replacement file assigns fibers from
+the second exposure to replace those in the first exposure. Only the first
+exposures are specified in the \fIarcs1\fR list. The file contains lines
+with the first exposure image name, the second exposure image name, and a
+list of apertures from the second exposure to be used instead of those in
+the first exposure.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIparams.sort\fR, such as the observation time is made.
+.le
+
+.ls readnoise = "RDNOISE" (apsum)
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.le
+.ls gain = "GAIN" (apsum)
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls fibers = 97 (apfind)
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image.
+The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.le
+.ls width = 12. (apedit)
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.le
+.ls minsep = 8. (apfind)
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.le
+.ls maxsep = 15. (apfind)
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.le
+.ls apidtable = "" (apfind)
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title. An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. For Nessie the user had to prepare the file for each plugboard; for
+Hydra at the 4meter the file was generated for the user; and for Hydra at
+the WIYN the image header contains the information. Unassigned and broken
+fibers (beam of -1) should be included in the identification information
+since they will automatically be excluded.
+.le
+.ls crval = INDEF, cdelt = INDEF (autoidentify)
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.le
+.ls objaps = "", skyaps = "", arcaps = ""
+List of object, sky, and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Typically the different spectrum types are
+identified by their beam numbers, and the default (null string) lists
+select all apertures.
+.le
+.ls objbeams = "0,1", skybeams = "0", arcbeams = 2
+List of object, sky, and arc beam numbers. The convention is that sky
+fibers are given a beam number of 0, object fibers a beam number of 1, and
+arc fibers a beam number of 2. The beam numbers are typically set in the
+\fIapidtable\fR. Unassigned or broken fibers may be given a beam number of
+-1 in the aperture identification table since apertures with negative beam
+numbers are not extracted. Note it is valid to identify sky fibers as both
+object and sky.
+.le
+
+.ls scattered = no (apscatter)
+Smooth and subtract scattered light from the object and flat field
+images. This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The fitting is done
+interactively on the flat field, or if none is given the aperture reference
+image, to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.le
+.ls fitflat = yes (flat1d)
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.le
+.ls clean = yes (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.le
+.ls dispcor = yes
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.le
+.ls savearcs = yes
+Save any simultaneous arc apertures? If no then the arc apertures will
+be deleted after use.
+.le
+.ls skyalign = no
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.le
+.ls skysubtract = yes
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.le
+.ls skyedit = yes
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.le
+.ls saveskys = yes
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.le
+.ls splot = no
+Plot the final spectra with the task \fBsplot\fR?
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.le
+.ls update = yes
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.le
+.ls batch = no
+Process spectra as a background or batch job provided there are no interactive
+options (\fIskyedit\fR and \fIsplot\fR) selected.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls params = "" (pset)
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.le
+
+.ce
+-- PACKAGE PARAMETERS --
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdohydra\fR.
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.le
+.ls observatory = "observatory"
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For Hydra data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls database = "database"
+Database (directory) used for storing aperture and dispersion information.
+.le
+.ls verbose = no
+Print verbose information available with various tasks.
+.le
+.ls logfile = "logfile", plotfile = ""
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.le
+.ls records = ""
+Dummy parameter to be ignored.
+.le
+.ls version = "HYDRA: ..."
+Version of the package.
+.le
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdohydra\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls order = "decreasing" (apfind)
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.le
+.ls extras = no (apsum)
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -5., upper = 5. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 3 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+.ls buffer = 1. (apscatter)
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.le
+.ls apscat1 = "" (apscatter)
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.le
+.ls apscat2 = "" (apscatter)
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum) (fit1d|fit2d)
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for Hydra/Nessie data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+.ls nsubaps = 1 (apsum)
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.le
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+.ls f_interactive = yes (fit1d)
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.le
+.ls f_function = "spline3", f_order = 10 (fit1d)
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (autoidentify/identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.le
+.ls match = -3. (autoidentify/identify)
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (autoidentify/identify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 10. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "spline3", i_order = 3 (autoidentify/identify)
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The functions choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.le
+.ls i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (reidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+.ls addfeatures = no (reidentify)
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd", group = "ljd" (refspectra)
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdohydra\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+.ls combine = "average" (scombine) (average|median)
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (scombine) (none|minmax|avsigclip)
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.fi
+
+.le
+.ls scale = "none" (none|mode|median|mean)
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
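+
+For example, one might check or change this environment parameter from the
+CL before loading the package (a minimal illustration, not required by the
+task):
+
+.nf
+    cl> show imtype
+    cl> reset imtype = "imh"
+.fi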
+.ih
+DESCRIPTION
+The \fBdohydra\fR reduction task is specialized for the extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIHydra\fR and \fINessie\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single, complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments.
+
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \fIredo\fR and \fIupdate\fR options, skips or
+repeats some or all of the steps.
+
+The following description is oriented specifically to Hydra data but
+applies equally well to Nessie data except for a few minor differences
+which are discussed in a separate section. Since \fBdohydra\fR combines many
+separate, general purpose tasks the description given here refers to these
+tasks and leaves some of the details to their help documentation.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage for
+Hydra since there are many variations possible.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdohydra\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data is processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+.le
+.ls [2]
+Set the \fBdohydra\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Specify the aperture identification table (a file for 4meter data or an image
+for WIYN data) which is provided for each Hydra
+configuration. You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may
+change with different detector setups. The processing parameters are set
+for complete reductions but for quicklook you might not use the clean
+option or dispersion calibration and sky subtraction.
+
+The parameters are set for a particular Hydra configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers will have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.le
+.ls [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.le
+.ls [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.le
+.ls [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.le
+.ls [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.le
+.ls [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.le
+.ls [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.le
+.ls [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.le
+.ls [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.le
+.ls [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.le
+.ls [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.le
+.ls [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra will
+also have part of the aperture identification table name added to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+.le
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of Hydra or Nessie object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects.
+This is generally done using the \fBccdred\fR package.
+The \fBdohydra\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data is processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+Flat fielding is
+generally not done at this stage but as part of \fBdohydra\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
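+
+As a minimal illustration (the image template "obj*.imh" and the keyword
+value are hypothetical), such a dummy CCDPROC keyword could be added with
+\fBhedit\fR roughly as follows:
+
+.nf
+    cl> hedit obj*.imh CCDPROC "Processed outside ccdred" add+ verify-
+.fi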
+
+The task \fBdohydra\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, auxiliary
+mercury line (from the dome lights) or sky line spectra, and simultaneous
+arc spectra taken during the object observation. The flat field,
+throughput image or file, auxiliary emission line spectra, and simultaneous
+comparison fibers are optional. If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+
+There are three types of arc calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the usual method with Hydra. Another method is to
+use only one or two all-fiber arcs to define the shape of the dispersion
+function and track zero point wavelength shifts with \fIsimultaneous arc\fR
+fibers taken during the object exposure. The simultaneous arcs may or may
+not be available at the instrument but \fBdohydra\fR can use this type of
+observation. The arc fibers are identified by their beam or aperture
+numbers. A related and mutually exclusive method is to use \fIauxiliary
+line spectra\fR such as lines in the dome lights or sky lines to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient as is the
+case with the manual Nessie plugboards.
+
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdohydra\fR.
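+
+For reference, a hypothetical arc assignment table (the image names are
+illustrative only) simply lists an object image followed by the one or two
+arc images to be assigned to it:
+
+.nf
+    obj0021  arc0020
+    obj0022  arc0020  arc0025
+.fi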
+
+The first step in the processing is identifying the spectra in the images.
+The \fIaperture identification table\fR, which may be a text file or
+an image, contains information about the fiber
+assignments. This table is created for you when using Hydra but must be
+prepared by the user when using Nessie. A description of the file format is
+given in the section concerning Nessie.
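+
+As a sketch, a text aperture identification file with the format described
+under the \fIapidtable\fR parameter (the fiber numbers, beam numbers, and
+titles below are hypothetical) might look like:
+
+.nf
+    1   1  star 1023
+    2   0  sky
+    3   2  arc
+    4  -1  broken
+.fi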
+
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \fIextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
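+
+For example, a few apertures might be copied to individual one dimensional
+images with \fBscopy\fR along the following lines (the image and aperture
+names are illustrative only):
+
+.nf
+    cl> scopy obj0012.ms obj0012 apertures="1-3" format="onedspec"
+.fi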
+
+\fBPackage Parameters\fR
+
+The \fBhydra\fR package parameters set parameters affecting all the
+tasks in the package.
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is only required
+for data taken with fiber instruments other than Hydra or Nessie.
+The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdohydra\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+
+\fBProcessing Parameters\fR
+
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs contains the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs contains the auxiliary emission line exposures mentioned
+previously and in the Nessie section.
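+
+As a sketch (the file and image names are hypothetical), "@ files" are plain
+text lists of image names which might be created and used as follows:
+
+.nf
+    cl> files obj*.imh > objects.lis
+    cl> files arc*.imh > arcs.lis
+    cl> dohydra @objects.lis apref=flat0001 flat=flat0001 arcs1=@arcs.lis
+.fi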
+
+The arc replacement file is described in the Nessie section and the arc
+assignment table was described in the data file section. Note that even if
+an arc assignment table is specified, \fIall arcs to be used must also
+appear in the arc lists\fR in order for the task to know the type of arc
+spectrum.
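+
+A hypothetical arc replacement file (the image names and aperture list are
+illustrative only) follows the format given under the \fIarcreplace\fR
+parameter; each line gives the first exposure, the second exposure, and the
+apertures to take from the second exposure:
+
+.nf
+    arc0031  arc0032  2,4,6,8
+.fi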
+
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. The default will determine the values from the image
+itself. The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra from the aperture identification table are to
+be correctly skipped. The number of fibers can be left at the default
+(for Hydra) and the task will try to account for unassigned or missing fibers.
+
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
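+
+For example (the numbers and keyword names below are purely illustrative),
+the approximate calibration may be given either as values or as image header
+keyword names:
+
+.nf
+    cl> dohydra @objects.lis crval=5800. cdelt=-1.2
+    cl> dohydra @objects.lis crval="W0" cdelt="WPC"
+.fi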
+
+The task needs to know which fibers are object, sky if sky subtraction is
+to be done, and simultaneous arcs if used. One could explicitly give the
+aperture numbers but the recommended way, provided an aperture
+identification file or image is used, is to select the apertures based on
+the beam numbers. The default values are those appropriate for the
+identification files generated for Hydra configurations. Sky subtracted
+sky spectra are useful for evaluating the sky subtraction. Since only the
+spectra identified as objects are sky subtracted one can exclude fibers
+from the sky subtraction. For example, if the \fIobjbeams\fR parameter is
+set to 1 then only those fibers with a beam of 1 will be sky subtracted.
+All other fibers will remain in the extracted spectra but will not be sky
+subtracted.
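+
+A minimal sketch of such a selection (the image list is hypothetical) which
+sky subtracts only the beam 1 fibers while still using the beam 0 fibers as
+sky:
+
+.nf
+    cl> dohydra @objects.lis objbeams=1 skybeams=0
+.fi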
+
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \fIclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR task. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference.
+previous processing the \fIredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
+
+The \fIbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
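+
+For example (the image list is hypothetical), a noninteractive batch
+reduction might be started with:
+
+.nf
+    cl> dohydra @objects.lis skyedit- splot- batch+
+.fi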
+
+The \fIlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
+
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdohydra\fR
+works this may not have any other value; the parameter set \fBparams\fR is
+always used. The algorithm parameters are discussed further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the \fBdohydra\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdohydra\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+Hydra or Nessie data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.nf
+
+ cl> epar params
+
+.fi
+or simply typing \fIparams\fR. The parameter editor can also be
+entered when editing the \fBdohydra\fR parameters by typing \fI:e
+params\fR or simply \fI:e\fR if positioned at the \fIparams\fR
+parameter.
+
+\fBExtraction\fR
+
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \fInsubaps\fR control the extractions.
+
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \fInsum\fR parameter.
+
+The \fIorder\fR parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no file is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\fIextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
+
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \fIminsep\fR
+distance, and then keeping the highest peaks up to the number specified by
+the \fIfibers\fR parameter. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \fIwidth\fR parameter.
+
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \fIlower\fR and
+\fIupper\fR parameters. The trickiest part of assigning the apertures is
+relating the aperture identification from the aperture identification table
+to automatically selected fiber profiles. The first aperture id in the
+file is assigned to the first spectrum found using the \fIorder\fR parameter to
+select the assignment direction. The numbering proceeds in this way except
+that if a gap greater than a multiple of the \fImaxsep\fR parameter is
+encountered then assignments in the file are skipped under the assumption
+that a fiber is missing (broken). In Hydra data it is expected that all
+fibers will be found in flat fields including the unassigned fibers and the
+assignment file will then identify the unassigned fibers. The unassigned
+fibers will later be excluded from extraction. For more on the finding and
+assignment algorithms see \fBapfind\fR.
+
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \fIylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended. As mentioned previously, the
+correct identification of the fibers is tricky and it is fundamentally
+important that this be done correctly; otherwise the spectrum
+identifications will not correspond to the actual objects. An important command in
+this regard is the 'o' key which allows reordering the identifications
+based on the aperture identification table. This is required if the first
+fiber is actually missing since the initial assignment begins assigning the
+first spectrum found with the first entry in the aperture file. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \fIt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
+
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \fIreadnoise\fR and \fIgain\fR detector
+parameters. Note that if the \fIclean\fR option is selected the variance
+weighted extraction is used regardless of the \fIweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
+
+The last parameter, \fInsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
+
+\fBScattered Light Subtraction\fR
+
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different from the parameters
+used to fit across the dispersion.
+
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+
+\fBFlat Field and Fiber Throughput Corrections\fR
+
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdohydra\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdohydra\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
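+
+As an illustration (the aperture numbers and values are hypothetical), such
+a throughput file simply contains an aperture number and a relative
+throughput on each line:
+
+.nf
+    1  0.98
+    2  1.03
+    3  0.87
+.fi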
+
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \fIfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \fIf_function\fR and
+\fIf_order\fR. If the parameter \fIf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the sky flat field parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
+
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+
+\fBDispersion Correction\fR
+
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdohydra\fR task tries to simplify for you. There are three basic
+steps involved; determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether
+or not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
+
+The set of arc dispersion function parameters is taken from \fBautoidentify\fR
+and \fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
+
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+Hydra spectra. However, there are some other calibration options
+which may be of interest. These options apply additional calibration data
+consisting either of auxiliary line spectra, such as from dome lights or
+night sky lines, or simultaneous arc lamp spectra taken through a few
+fibers during the object exposure. These options add complexity to the
+dispersion calibration process and were provided primarily for Nessie
+data. Therefore they are described later in the Nessie section.
+
+When only arc comparison lamp spectra are used, dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image. When two bracketing arc spectra are used the dispersion functions
+are linearly interpolated (usually based on the time of the observations).
+
+The arc assignments may be done either explicitly with an arc assignment
+table (parameter \fIarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
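+
+The values of these keywords may be examined with \fBhselect\fR; for
+example (the image name is illustrative):
+.nf
+
+    cl> hselect demoobj JD,LJD,UTMIDDLE yes
+
+.fi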
+
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \fIlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
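+
+For example, to keep the nonlinear dispersion functions without
+resampling one might set the \fIlinearize\fR parameter mentioned above
+before running the task:
+.nf
+
+    cl> params.linearize = no
+
+.fi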
+
+\fBSky Subtraction\fR
+
+Sky subtraction is selected with the \fIskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers and
+combined into a single master sky spectrum
+which is then subtracted from each object spectrum. If the \fIskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration, point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this are to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual sky spectra are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+but in that case you probably did the wrong thing in the throughput
+correction. If the sky subtraction is done interactively, i.e. with the
+\fIskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
+
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra, such as comparison
+arc spectra, are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \fIsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+
+\fBNessie Data\fR
+
+Reducing Nessie data with \fBdohydra\fR is very similar. The differences
+are that additional setup and calibration are required since this
+instrument was a precursor to the more developed Hydra instrument.
+The discussion in this section also describes some features which may
+be applicable to other fiber instruments outside of the NOAO instruments.
+
+The Nessie comparison lamp exposures suffer from vignetting resulting in
+some fibers being poorly illuminated. By rearranging the fibers in the
+calibration plugboard and taking additional exposures one can obtain good
+arc spectra through all fibers. The task will merge the well exposed
+fibers from the multiple exposures into a single final extracted
+arc calibration image. One of the exposures of a set is selected as
+the primary exposure. This is the one specified in the list of arcs,
+\fIarcs1\fR. The other exposures of the set are referenced only in
+a setup file, called an \fIarc replacement file\fR.
+
+The format of the arc replacement file is lines containing the primary
+arc image, a secondary arc image,
+and the apertures from the secondary arc to be merged into the
+final arc spectra. There can be more than one secondary
+exposure though it is unlikely. Figure 1 gives an example of this
+setup file.
+.nf
+
+ Figure 1: Example Arc Aperture Replacement File
+
+ cl> type arcreplace
+ nesjun042c nesjun049c 1,7,9,13,17,19,28,34
+
+.fi
+The primary arc exposure is "nesjun042c", the secondary arc is
+"nesjun049c", and the secondary apertures are 1, 7, etc. The syntax for
+the list of apertures also includes hyphen delimited ranges such as
+"8-10".
+
+With Hydra the aperture identification file (4meter) or image header
+keywords (WIYN) are produced for the user. With
+Nessie this is not the case, hence, the user must prepare a file
+manually. The aperture identification file is not mandatory, sequential
+numbering will be used, but it is highly recommended for keeping track of
+the objects assigned to the fibers. The aperture identification table
+contains lines consisting of an aperture number, a beam number, and an
+object identification. These must be in the same order as the fibers in
+the image. The aperture number may be any unique number but it is
+recommended that the fiber number be used. The beam number is used to flag
+object, sky, arc, or other types of spectra. The default beam numbers used
+by the task are 0 for sky, 1 for object, and 2 for arc. The object
+identifications are optional but it is good practice to include them so
+that the data will contain the object information independent of other
+records. Figure 2 shows an example for the \fIblue\fR fibers from a board
+called M33Sch2.
+.nf
+
+ Figure 2: Example Aperture Identification File
+
+ cl> type m33sch2
+ 1 1 143
+ 2 1 254
+ 3 0 sky
+ 4 1 121
+ 5 2 arc
+ .
+ .
+ .
+ 44 1 s92
+ 49 -1 Broken
+ 45 1 156
+ 46 2 arc
+ 47 0 sky
+ 48 1 phil2
+
+.fi
+Note the identification of the sky fibers with beam number 0, the object
+fibers with 1, and the arc fibers with 2. Also note that broken fiber 49
+is actually between fibers 44 and 45. The broken fiber entries, given beam
+number -1, are optional but recommended to give the automatic spectrum
+finding operation the best chance to make the correct identifications. The
+identification file will vary for each plugboard setup. Additional
+information about the aperture identification table may be found in the
+description of the task \fBapfind\fR.
+
+An alternative to using an aperture identification table is to give no
+name, the "" empty string, and to explicitly give ranges of
+aperture numbers for the sky fibers, and possibly for the fibers to be
+sky subtracted, in the parameters \fIobjaps, skyaps, arcaps, objbeams,
+skybeams,\fR and \fIarcbeams\fR.
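+
+For instance, with the plugboard of Figure 2 but no identification
+table, the sky and arc fibers could be selected explicitly by aperture
+number (a sketch only; the numbers must match the actual configuration):
+.nf
+
+    cl> dohydra.skyaps = "3,47"
+    cl> dohydra.arcaps = "5,46"
+
+.fi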
+
+Because taking comparison exposures with Nessie requires replugging the
+fibers, possibly in more than one configuration, and because the instrument
+is quite stable, there are two mutually exclusive methods, other than taking
+many arc lamp exposures, for monitoring shifts in the dispersion zero point
+from the basic arc lamp spectra. One is to use some fibers to take a
+simultaneous arc spectrum while observing the program objects. The fibers
+are identified by aperture or beam numbers. The second method is to use
+\fIauxiliary line spectra\fR, such as mercury lines from the dome lights.
+These spectra are specified with an auxiliary shift arc list, \fIarcs2\fR.
+
+When using auxiliary line spectra for monitoring zero point shifts one of
+these spectra is plotted interactively by \fBidentify\fR with the
+reference dispersion function from the reference arc spectrum. The user
+marks one or more lines which will be used to compute zero point wavelength
+shifts in the dispersion functions automatically. The actual wavelengths
+of the lines need not be known. In this case accept the wavelength based
+on the reference dispersion function. As other observations of the same
+features are made the changes in the positions of the features will be
+tracked as zero point wavelength changes such that wavelengths of the
+features remain constant.
+
+When using auxiliary line spectra the only arc lamp spectrum used is the
+initial arc reference spectrum (the first image in the \fIarcs1\fR list).
+The master dispersion functions are then shifted based on the spectra in
+the \fIarcs2\fR list (which must all be of the same type). The dispersion
+function assignments made by \fBrefspectra\fR using either the arc
+assignment file or based on header keywords is done in the same way as
+described for the arc lamp images except using the auxiliary spectra.
+
+If simultaneous arcs are used the arc lines are reidentified to determine a
+zero point shift relative to the comparison lamp spectra selected, by
+\fBrefspectra\fR, of the same fiber. A linear function of aperture
+position on the image across the dispersion versus the zero point shifts
+from the arc fibers is determined and applied to the dispersion functions
+from the assigned calibration arcs for the non-arc fibers. Note that if
+there are two comparison lamp spectra (before and after the object
+exposure) then there will be two shifts applied to two dispersion functions
+which are then combined using the weights based on the header parameters
+(usually the observation time).
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is also the sequence performed
+by the test procedure "demos dohydra".
+
+.nf
+hy> demos mkhydra
+Creating image demoobj ...
+Creating image demoflat ...
+Creating image demoarc ...
+hy> type demoapid
+===> demoapid <===
+36 1
+37 0
+38 1
+39 1
+41 0
+42 1
+43 1
+44 0
+45 1
+46 -1
+47 0
+48 1
+hy> hydra.verbose = yes
+hy> dohydra demoobj apref=demoflat flat=demoflat arcs1=demoarc \
+>>> fib=12 apid=demoapid width=4. minsep=5. maxsep=7. clean- splot+
+Set reference apertures for demoflat
+Resize apertures for demoflat? (yes):
+Edit apertures for demoflat? (yes):
+<Exit with 'q'>
+Fit curve to aperture 36 of demoflat interactively (yes):
+<Exit with 'q'>
+Fit curve to aperture 37 of demoflat interactively (yes): N
+Create response function demoflatdemoad.ms
+Extract flat field demoflat
+Fit and ratio flat field demoflat
+<Exit with 'q'>
+Create the normalized response demoflatdemoad.ms
+demoflatdemoad.ms -> demoflatdemoad.ms using bzero: 0.
+ and bscale: 1.000001
+ mean: 1.000001 median: 1.052665 mode: 1.273547
+ upper: INDEF lower: INDEF
+Average fiber response:
+1. 1.151023
+2. 0.4519709
+3. 1.250614
+4. 1.287281
+5. 1.271358
+6. 0.6815334
+7. 1.164336
+8. 0.7499605
+9. 1.008654
+10. 1.053296
+11. 0.929967
+Extract arc reference image demoarc
+Determine dispersion solution for demoarc
+<A dispersion solution is found automatically.>
+<Type 'f' to look at fit. Type 'q' to exit fit.>
+<Exit with 'q'>
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Tue 16:01:07 11-Feb-92
+ Reference image = d....ms.imh, New image = d....ms, Refit = yes
+ Image Data Found Fit Pix Shift User Shift Z Shift RMS
+d....ms - Ap 41 16/20 16/16 0.00796 0.0682 8.09E-6 3.86
+Fit dispersion function interactively? (no|yes|NO|YES) (NO): y
+<Exit with 'q'>
+d....ms - Ap 41 16/20 16/16 0.00796 0.0682 8.09E-6 3.86
+d....ms - Ap 39 19/20 19/19 0.152 1.3 1.95E-4 3.89
+Fit dispersion function interactively? (no|yes|NO|YES) (yes): N
+d....ms - Ap 39 19/20 19/19 0.152 1.3 1.95E-4 3.89
+d....ms - Ap 38 18/20 18/18 0.082 0.697 9.66E-5 3.64
+d....ms - Ap 37 19/20 19/19 0.0632 0.553 1.09E-4 6.05
+d....ms - Ap 36 18/20 18/18 0.0112 0.0954 1.35E-5 4.12
+d....ms - Ap 43 17/20 17/17 0.0259 0.221 3.00E-5 3.69
+d....ms - Ap 44 19/20 19/19 0.168 1.44 2.22E-4 4.04
+d....ms - Ap 45 20/20 20/20 0.18 1.54 2.35E-4 3.95
+d....ms - Ap 47 18/20 18/18 -2.02E-4 0.00544 9.86E-6 4.4
+d....ms - Ap 48 16/20 16/16 0.00192 0.0183 1.44E-6 3.82
+
+Dispersion correct demoarc
+d....ms.imh: w1 = 5748.07..., w2 = 7924.62..., dw = 8.50..., nw = 257
+ Change wavelength coordinate assignments? (yes|no|NO): n
+Extract object spectrum demoobj
+Assign arc spectra for demoobj
+[demoobj] refspec1='demoarc'
+Dispersion correct demoobj
+demoobj.ms.imh: w1 = 5748.078, w2 = 7924.622, dw = 8.502127, nw = 257
+Sky subtract demoobj: skybeams=0
+Edit the sky spectra? (yes):
+<Exit with 'q'>
+Sky rejection option (none|minmax|avsigclip) (avsigclip):
+demoobj.ms.imh:
+Splot spectrum? (no|yes|NO|YES) (yes):
+Image line/aperture to plot (1:) (1):
+<Look at spectra and change apertures with # key>
+<Exit with 'q'>
+.fi
+.ih
+REVISIONS
+.ls DOHYDRA V2.11
+A sky alignment option was added.
+
+The aperture identification can now be taken from image header keywords.
+
+The initial arc line identifications are now done with the automatic line
+identification algorithm.
+.le
+.ls DOHYDRA V2.10.3
+The usual output WCS format is "equispec". The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A scattered
+light subtraction processing option has been added.
+.le
+.ih
+SEE ALSO
+apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace, apvariance,
+ccdred, center1d, dispcor, fit1d, icfit, identify, msresp1d, observatory,
+onedspec.package, refspectra, reidentify, scombine, setairmass, setjd,
+specplot, splot
+.endhelp
diff --git a/noao/imred/hydra/doc/dohydra.ms b/noao/imred/hydra/doc/dohydra.ms
new file mode 100644
index 00000000..4b505ee3
--- /dev/null
+++ b/noao/imred/hydra/doc/dohydra.ms
@@ -0,0 +1,1853 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND July 1995
+.TL
+Guide to the HYDRA Reduction Task DOHYDRA
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+The \fBdohydra\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIHydra\fR and \fINessie\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments. This guide describes what
+this task does, its usage, and its parameters.
+.AE
+.NH
+Introduction
+.LP
+The \fBdohydra\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of \fIHydra\fR and \fINessie\fR fiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single, complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by these multifiber instruments.
+.LP
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \f(CWredo\fR and \f(CWupdate\fR options, skips or
+repeats some or all of the steps.
+.LP
+The following description is oriented specifically to Hydra data but
+applies equally well to Nessie data except for a few minor differences
+which are discussed in a separate section. Since \fBdohydra\fR combines many
+separate, general purpose tasks the description given here refers to these
+tasks and leaves some of the details to their help documentation.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage for
+Hydra since there are many variations possible.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdohydra\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers, say with \fBhedit\fR.
+.IP [2]
+Set the \fBdohydra\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Specify the aperture identification table (a file for 4meter data or an
+image for WIYN data) which is provided for each Hydra
+configuration. You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may
+change with different detector setups. The processing parameters are set
+for complete reductions but for quicklook you might not use the clean
+option or dispersion calibration and sky subtraction.
+.IP
+The parameters are set for a particular Hydra configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.IP [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers will have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.IP [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.IP [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.IP [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.IP [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.IP [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.IP [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.IP [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.IP [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.IP [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.IP [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.IP [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra will
+also have part of the aperture identification table name added to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of Hydra or Nessie object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This
+is generally done using the \fBccdred\fR package.
+The \fBdohydra\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers, say with \fBhedit\fR.
+Flat fielding is
+generally not done at this stage but as part of \fBdohydra\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
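+.LP
+For data processed outside of IRAF the dummy CCDPROC keyword mentioned
+above may be added with \fBhedit\fR; for example (the image list and
+keyword value are illustrative):
+.V1
+
+    cl> hedit obj*.imh CCDPROC "processed outside ccdred" add+ verify-
+
+.V2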
+.LP
+The task \fBdohydra\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, auxiliary
+mercury line (from the dome lights) or sky line spectra, and simultaneous
+arc spectra taken during the object observation. The flat field,
+throughput image or file, auxiliary emission line spectra, and simultaneous
+comparison fibers are optional. If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+.LP
+There are three types of arc calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the usual method with Hydra. Another method is to
+use only one or two all-fiber arcs to define the shape of the dispersion
+function and track zero point wavelength shifts with \fIsimultaneous arc\fR
+fibers taken during the object exposure. The simultaneous arcs may or may
+not be available at the instrument but \fBdohydra\fR can use this type of
+observation. The arc fibers are identified by their beam or aperture
+numbers. A related and mutually exclusive method is to use \fIauxiliary
+line spectra\fR such as lines in the dome lights or sky lines to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient as is the
+case with the manual Nessie plugboards.
+.LP
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdohydra\fR.
+.LP
+The first step in the processing is identifying the spectra in the images.
+The \fIaperture identification table\fR (which may be a file or an image)
+contains information about the fiber assignments. This table is created
+for you when using Hydra but must be prepared by the user when using
+Nessie. A description of this table as a text file is given in the section
+concerning Nessie.
+.LP
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \f(CWextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
+.NH
+Package Parameters
+.LP
+The \fBhydra\fR package parameters, shown in Figure 1, set parameters
+affecting all the tasks in the package.
+.KS
+.V1
+
+.ce
+Figure 1: Package Parameter Set for HYDRA
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = hydra
+
+(dispaxi= 2) Image axis for 2D images
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Log file
+(plotfil= ) Plot file
+
+(records= )
+(version= HYDRA V1: January 1992)
+
+.KE
+.V2
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is only required
+for data taken with fiber instruments other than Hydra or Nessie.
+The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdohydra\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
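+.LP
+For example, to monitor the processing on the terminal one may set
+the package verbose parameter before running \fBdohydra\fR:
+.V1
+
+    cl> hydra.verbose = yes
+
+.V2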
+.NH
+Processing Parameters
+.LP
+The \fBdohydra\fR parameters are shown in Figure 2.
+.KS
+.V1
+
+.ce
+Figure 2: Parameter Set for DOHYDRA
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = hydra
+ TASK = dohydra
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(flat = ) Flat field spectrum
+(through= ) Throughput file or image (optional)
+(arcs1 = ) List of arc spectra
+(arcs2 = ) List of shift arc spectra
+(arcrepl= ) Special aperture replacements
+(arctabl= ) Arc assignment table (optional)
+
+.KE
+.V1
+(readnoi= RDNOISE) Read out noise sigma (photons)
+(gain = GAIN) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(fibers = 97) Number of fibers
+(width = 12.) Width of profiles (pixels)
+(minsep = 8.) Minimum separation between fibers (pixels)
+(maxsep = 15.) Maximum separation between fibers (pixels)
+(apidtab= ) Aperture identifications
+(crval = INDEF) Approximate wavelength
+(cdelt = INDEF) Approximate dispersion
+(objaps = ) Object apertures
+(skyaps = ) Sky apertures
+(arcaps = ) Arc apertures
+(objbeam= 0,1) Object beam numbers
+(skybeam= 0) Sky beam numbers
+(arcbeam= ) Arc beam numbers
+
+(scatter= no) Subtract scattered light?
+(fitflat= yes) Fit and ratio flat field spectrum?
+(clean = yes) Detect and replace bad pixels?
+(dispcor= yes) Dispersion correct spectra?
+(savearc= yes) Save simultaneous arc apertures?
+(skysubt= yes) Subtract sky?
+(skyedit= yes) Edit the sky spectra?
+(savesky= yes) Save sky spectra?
+(splot = no) Plot the final spectrum?
+(redo = no) Redo operations if previously done?
+(update = yes) Update spectra if cal data changes?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(params = ) Algorithm parameters
+
+.V2
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs are the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs are the auxiliary emission line exposures mentioned
+previously and in the Nessie section.
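+.LP
+An "@ file" is simply a text file listing one image per line. It may be
+created with an editor or from a filename template; for example (the
+file and image names are illustrative):
+.V1
+
+    cl> files obj*.imh > objects.lis
+    cl> dohydra.objects = "@objects.lis"
+
+.V2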
+.LP
+The arc replacement file is described in the Nessie section and the arc
+assignment table was described in the data file section. Note that even if
+an arc assignment table is specified, \fIall arcs to be used must also
+appear in the arc lists\fR in order for the task to know the type of arc
+spectrum.
+.LP
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. The default will determine the values from the image
+itself.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays; ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
+The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra from the aperture identification table are to
+be correctly skipped. The number of fibers can be left at the default
+(for Hydra) and the task will try to account for unassigned or missing fibers.
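+.LP
+As a sketch, for a detector which saturates well below the brightest
+cosmic rays one might set an upper limit for real data (the value is
+illustrative):
+.V1
+
+    cl> dohydra.datamax = 60000
+
+.V2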
+.LP
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
+.LP
+The task needs to know which fibers are object, sky if sky subtraction is
+to be done, and simultaneous arcs if used. One could explicitly give the
+aperture numbers but the recommended way, provided an aperture
+identification table is used, is to select the apertures based on
+the beam numbers. The default values are those appropriate for the
+identification files generated for Hydra configurations. Sky subtracted
+sky spectra are useful for evaluating the sky subtraction. Since only the
+spectra identified as objects are sky subtracted one can exclude fibers
+from the sky subtraction. For example, if the \fIobjbeams\fR parameter is
+set to 1 then only those fibers with a beam of 1 will be sky subtracted.
+All other fibers will remain in the extracted spectra but will not be sky
+subtracted.
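+.LP
+As a sketch, sky subtraction may be restricted to fibers assigned beam
+number 1 in the aperture identification table by setting:
+.V1
+
+    cl> dohydra.objbeams = "1"
+
+.V2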
+.LP
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \f(CWclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+.LP
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+.LP
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+.LP
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+.LP
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+.LP
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR task. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference. If all input spectra are to be processed regardless of
+previous processing the \f(CWredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
+.LP
+The \f(CWbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
+.LP
+The \f(CWlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdohydra\fR
+works this parameter need not be given any value; the parameter set
+\fBparams\fR is always used. The algorithm parameters are discussed further
+in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the \fBdohydra\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdohydra\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+Hydra or Nessie data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.V1
+
+ cl> epar params
+
+.V2
+or simply typing \f(CWparams\fR. The parameter editor can also be
+entered when editing the \fBdohydra\fR parameters by typing \f(CW:e
+params\fR or simply \f(CW:e\fR if positioned at the \f(CWparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = hydra
+ TASK = params
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(order = decreasing) Order of apertures
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -5.) Lower aperture limit relative to center
+(upper = 5.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 3) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- SCATTERED LIGHT PARAMETERS --
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+(nsubaps= 1) Number of subapertures
+
+.KE
+.KS
+.V1
+ -- FLAT FIELD FUNCTION FITTING PARAMETERS --
+(f_inter= yes) Fit flat field interactively?
+(f_funct= spline3) Fitting function
+(f_order= 10) Fitting function order
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli=linelists$idhenear.dat) Line list
+(match = 10.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 10.) Centering radius in pixels
+(i_funct= spline3) Coordinate function
+(i_order= 3) Order of dispersion function
+(i_niter= 2) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+(addfeat= no) Add features when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.KS
+.V1
+ -- SKY SUBTRACTION PARAMETERS --
+(combine= average) Type of combine operation
+(reject = avsigclip) Sky rejection option
+(scale = none) Sky scaling option
+
+.KE
+.V2
+.NH 2
+Extraction
+.LP
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \f(CWnsubaps\fR control the extractions.
+.LP
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \f(CWnsum\fR parameter.
+.LP
+The \f(CWorder\fR parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no file is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+.LP
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\f(CWextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
+.LP
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \f(CWminsep\fR
+distance, and then keeping the specified \f(CWfibers\fR highest peaks. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \f(CWwidth\fR parameter.
+.LP
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \f(CWlower\fR and
+\f(CWupper\fR parameters. The trickiest part of assigning the apertures is
+relating the aperture identification from the aperture identification table
+to automatically selected fiber profiles. The first aperture id in the
+file is assigned to the first spectrum found using the \f(CWorder\fR
+parameter to select the assignment direction. The numbering proceeds in
+this way except that if a gap greater than a multiple of the \f(CWmaxsep\fR
+parameter is encountered then assignments in the file are skipped under the
+assumption that a fiber is missing (broken). In Hydra data it is expected
+that all fibers will be found in flat fields including the unassigned
+fibers and the assignment file will then identify the unassigned fibers.
+The unassigned fibers will later be excluded from extraction. For more on
+the finding and assignment algorithms see \fBapfind\fR.
+.LP
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \fIylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+.LP
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended. As mentioned previously, the
+correct identification of the fibers is tricky and it is fundamentally
+important that this be done correctly; otherwise the spectrum
+identifications will not correspond to the actual objects. An important command in
+this regard is the 'o' key which allows reordering the identifications
+based on the aperture identification table. This is required if the first
+fiber is actually missing since the initial assignment begins assigning the
+first spectrum found with the first entry in the aperture file. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+.LP
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \f(CWt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
+.LP
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \f(CWreadnoise\fR and \f(CWgain\fR detector
+parameters. Note that if the \f(CWclean\fR option is selected the variance
+weighted extraction is used regardless of the \f(CWweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
+.LP
+The last parameter, \f(CWnsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
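+.LP
+As a sketch, each fiber profile could be subdivided into three
+subapertures before dispersion correction (the value is illustrative):
+.V1
+
+    cl> params.nsubaps = 3
+
+.V2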
+.NH 2
+Scattered Light Subtraction
+.LP
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+.LP
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+.LP
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+.LP
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+.NH 2
+Flat Field and Fiber Throughput Corrections
+.LP
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdohydra\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdohydra\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+.LP
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
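+.LP
+Such a throughput file is a simple text file; a hypothetical example with
+arbitrary values is:
+.V1
+
+ cl> type throughput.dat
+ 1  0.97
+ 2  1.03
+ 3  1.00
+
+.V2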
+.LP
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \f(CWfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \f(CWf_function\fR and
+\f(CWf_order\fR. If the parameter \f(CWf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+.LP
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+.LP
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the sky flat field parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+.LP
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
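+.LP
+For example, to apply both a flat field correction and a separately derived
+throughput file one might type (the file and image names are hypothetical):
+.V1
+
+ cl> dohydra obj010 apref=flat005 flat=flat005 throughput=thru.dat
+
+.V2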
+.LP
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+.LP
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+.NH 2
+Dispersion Correction
+.LP
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdohydra\fR task tries to simplify for you. There are three basic
+steps involved: determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+.LP
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether
+or not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
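+.LP
+As an illustration only (the actual values depend on the spectrograph
+setup), the approximate central wavelength and dispersion used by
+\fBautoidentify\fR may be given through the task parameters:
+.V1
+
+ cl> dohydra.crval = "5800"
+ cl> dohydra.cdelt = "1.2"
+
+.V2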
+.LP
+The arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
+.LP
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+Hydra spectra. However, there are some other calibration options
+which may be of interest. These options apply additional calibration data
+consisting either of auxiliary line spectra, such as from dome lights or
+night sky lines, or simultaneous arc lamp spectra taken through a few
+fibers during the object exposure. These options add complexity to the
+dispersion calibration process and were provided primarily for Nessie
+data. Therefore they are described later in the Nessie section.
+.LP
+When only arc comparison lamp spectra are used, dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image. When two bracketing arc spectra are used the dispersion functions
+are linearly interpolated (usually based on the time of the observations).
+.LP
+The arc assignments may be done either explicitly with an arc assignment
+table (parameter \f(CWarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
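+.LP
+A hypothetical arc assignment table simply pairs each object exposure with
+one or two arc exposures (consult \fBrefspectra\fR for the exact format;
+the image names below are illustrative):
+.V1
+
+ cl> type arctable
+ obj010  arc009  arc011
+ obj012  arc011
+
+.V2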
+.LP
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+.LP
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \f(CWlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
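+.LP
+For example, to retain the nonlinear dispersion functions in the image
+headers rather than resampling the spectra, the corresponding \fBparams\fR
+parameter may be turned off:
+.V1
+
+ cl> params.linearize = no
+
+.V2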
+.NH 2
+Sky Subtraction
+.LP
+Sky subtraction is selected with the \f(CWskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers and
+combined into a single master sky spectrum
+which is then subtracted from each object spectrum. If the \f(CWskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+.LP
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this are to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual sky spectra are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+but in that case the throughput correction was probably in error.
+If the sky subtraction is done interactively, i.e. with the
+\f(CWskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
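+.LP
+As an illustration, the combining and rejection options are set in the
+\fBparams\fR parameter set; for instance, with only a few sky fibers one
+might choose:
+.V1
+
+ cl> params.combine = "median"
+ cl> params.reject = "none"
+
+.V2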
+.LP
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra, such as comparison
+arc spectra, are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \f(CWsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+.NH
+Nessie Data
+.LP
+Reducing Nessie data with \fBdohydra\fR is very similar. The differences
+are that additional setup and calibration are required since this
+instrument was a precursor to the more developed Hydra instrument.
+The discussion in this section also describes some features which may
+be applicable to fiber instruments other than those from NOAO.
+.LP
+The Nessie comparison lamp exposures suffer from vignetting resulting in
+some fibers being poorly illuminated. By rearranging the fibers in the
+calibration plugboard and taking additional exposures one can obtain good
+arc spectra through all fibers. The task will merge the well exposed
+fibers from the multiple exposures into a single final extracted
+arc calibration image. One of the exposures of a set is selected as
+the primary exposure. This is the one specified in the list of arcs,
+\f(CWarcs1\fR. The other exposures of the set are referenced only in
+a setup file, called an \fIarc replacement file\fR.
+.LP
+The format of the arc replacement file is lines containing the primary
+arc image, a secondary arc image,
+and the apertures from the secondary arc to be merged into the
+final arc spectra. There can be more than one secondary
+exposure though it is unlikely. Figure 4 gives an example of this
+setup file.
+
+.ce
+Figure 4: Example Arc Aperture Replacement File
+
+.V1
+ cl> type arcreplace
+ nesjun042c nesjun049c 1,7,9,13,17,19,28,34
+.V2
+
+.fi
+The primary arc exposure is \f(CWnesjun042c\fR, the secondary arc is
+\f(CWnesjun049c\fR, and the secondary apertures are 1, 7, etc. The syntax for
+the list of apertures also includes hyphen delimited ranges such as
+"8-10".
+.LP
+With Hydra the aperture identification table is produced for the user. With
+Nessie this is not the case; hence, the user must prepare a file
+manually. The aperture identification file is not mandatory (sequential
+numbering will be used), but it is highly recommended for keeping track of
+the objects assigned to the fibers. The aperture identification file
+contains lines consisting of an aperture number, a beam number, and an
+object identification. These must be in the same order as the fibers in
+the image. The aperture number may be any unique number but it is
+recommended that the fiber number be used. The beam number is used to flag
+object, sky, arc, or other types of spectra. The default beam numbers used
+by the task are 0 for sky, 1 for object, and 2 for arc. The object
+identifications are optional but it is good practice to include them so
+that the data will contain the object information independent of other
+records. Figure 5 shows an example for the \fIblue\fR fibers from a board
+called M33Sch2.
+
+.ce
+Figure 5: Example Aperture Identification File
+
+.V1
+ cl> type m33sch2
+ 1 1 143
+ 2 1 254
+ 3 0 sky
+ 4 1 121
+ 5 2 arc
+ .
+ .
+ .
+ 44 1 s92
+ 49 -1 Broken
+ 45 1 156
+ 46 2 arc
+ 47 0 sky
+ 48 1 phil2
+.V2
+
+Note the identification of the sky fibers with beam number 0, the object
+fibers with 1, and the arc fibers with 2. Also note that broken fiber 49
+is actually between fibers 44 and 45. The broken fiber entries, given beam
+number -1, are optional but recommended to give the automatic spectrum
+finding operation the best chance to make the correct identifications. The
+identification file will vary for each plugboard setup. Additional
+information about the aperture identification table may be found in the
+description of the task \fBapfind\fR.
+.LP
+An alternative to using an aperture identification table is to give no
+name, i.e. the "" empty string, and to explicitly give the ranges of
+aperture numbers for the sky fibers, and possibly for the sky subtraction
+object list in the parameters \f(CWobjaps, skyaps, arcaps, objbeams,
+skybeams,\fR and \f(CWarcbeams\fR.
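+.LP
+For example, using only the fibers shown in the excerpt of Figure 5, the
+sky and arc fibers could be specified explicitly (an illustration only):
+.V1
+
+ cl> dohydra.apidtable = ""
+ cl> dohydra.skyaps = "3,47"
+ cl> dohydra.arcaps = "5,46"
+
+.V2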
+.LP
+Because taking comparison exposures with Nessie requires replugging the
+fibers, possibly in more than one configuration, and because the instrument
+is quite stable, there are two mutually exclusive methods for monitoring
+shifts in the dispersion zero point from the basic arc lamp spectra other
+than taking many arc lamp exposures. One is to use some fibers to take a
+simultaneous arc spectrum while observing the program objects. The fibers
+are identified by aperture or beam numbers. The second method is to use
+\fIauxiliary line spectra\fR, such as mercury lines from the dome lights.
+These spectra are specified with an auxiliary shift arc list, \f(CWarcs2\fR.
+.LP
+When using auxiliary line spectra for monitoring zero point shifts one of
+these spectra is plotted interactively by \fBidentify\fR with the
+reference dispersion function from the reference arc spectrum. The user
+marks one or more lines which will be used to compute zero point wavelength
+shifts in the dispersion functions automatically. The actual wavelengths
+of the lines need not be known. In this case accept the wavelength based
+on the reference dispersion function. As other observations of the same
+features are made the changes in the positions of the features will be
+tracked as zero point wavelength changes such that wavelengths of the
+features remain constant.
+.LP
+When using auxiliary line spectra the only arc lamp spectrum used is the
+initial arc reference spectrum (the first image in the \f(CWarcs1\fR list).
+The master dispersion functions are then shifted based on the spectra in
+the \f(CWarcs2\fR list (which must all be of the same type). The dispersion
+function assignments made by \fBrefspectra\fR using either the arc
+assignment file or based on header keywords are made in the same way as
+described for the arc lamp images except using the auxiliary spectra.
+.LP
+If simultaneous arcs are used the arc lines are reidentified to determine a
+zero point shift relative to the comparison lamp spectra selected, by
+\fBrefspectra\fR, of the same fiber. A linear function of aperture
+position on the image across the dispersion versus the zero point shifts
+from the arc fibers is determined and applied to the dispersion functions
+from the assigned calibration arcs for the non-arc fibers. Note that if
+there are two comparison lamp spectra (before and after the object
+exposure) then there will be two shifts applied to two dispersion functions
+which are then combined using the weights based on the header parameters
+(usually the observation time).
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+Each task in the \fBhydra\fR package, as well as each task used by
+\fBdohydra\fR, has a help page describing its parameters and operation in
+some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
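+For example, to print this help page one might type:
+.V1
+
+ cl> help dohydra | lprint
+
+.V2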
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ continuum - Fit the continuum in spectra
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically identify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+
+ dohydra - Process HYDRA spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+.V2
+.SH
+Appendix A: DOHYDRA Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.LE
+flat = "" (optional)
+.LS
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.LE
+throughput = "" (optional)
+.LS
+Throughput file or image. If an image is specified, typically a blank
+sky observation, the total flux through
+each fiber is used to correct for fiber throughput. If a file consisting
+of lines with the aperture number and relative throughput is specified
+then the fiber throughput will be corrected by those values. If neither
+is specified but a flat field image is given it is used to compute the
+throughput.
+.LE
+arcs1 = "" (at least one if dispersion correcting)
+.LS
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.LE
+arcs2 = "" (optional for Nessie)
+.LS
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.LE
+arcreplace = "" (optional for Nessie)
+.LS
+Special aperture replacement file. A characteristic of Nessie (though not
+Hydra) data is that two exposures are required to illuminate all fibers
+with an arc calibration. The aperture replacement file assigns fibers from
+the second exposure to replace those in the first exposure. Only the first
+exposures are specified in the \f(CWarcs1\fR list. The file contains lines
+with the first exposure image name, the second exposure image name, and a
+list of apertures from the second exposure to be used instead of those in
+the first exposure.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \f(CWparams.sort\fR, such as the observation time is made.
+.LE
+
+readnoise = "RDNOISE" (apsum)
+.LS
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.LE
+gain = "GAIN" (apsum)
+.LS
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+fibers = 97 (apfind)
+.LS
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image. The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.LE
+width = 12. (apedit)
+.LS
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.LE
+minsep = 8. (apfind)
+.LS
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.LE
+maxsep = 15. (apfind)
+.LS
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.LE
+apidtable = "" (apfind)
+.LS
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title. An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. For Nessie the user had to prepare the file for each plugboard, for
+Hydra at the 4meter the file was generated for the user, and for Hydra at
+the WIYN the image header contains the information. Unassigned and broken
+fibers (beam of -1) should be included in the identification information
+since they will automatically be excluded.
+.LE
+crval = INDEF, cdelt = INDEF (autoidentify)
+.LS
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.LE
+objaps = "", skyaps = "", arcaps = ""
+.LS
+List of object, sky, and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Typically the different spectrum types are
+identified by their beam numbers, and the default null string
+lists select all apertures.
+.LE
+objbeams = "0,1", skybeams = "0", arcbeams = 2
+.LS
+List of object, sky, and arc beam numbers. The convention is that sky
+fibers are given a beam number of 0, object fibers a beam number of 1, and
+arc fibers a beam number of 2. The beam numbers are typically set in the
+\f(CWapidtable\fR. Unassigned or broken fibers may be given a beam number of
+-1 in the aperture identification table since apertures with negative beam
+numbers are not extracted. Note it is valid to identify sky fibers as both
+object and sky.
+.LE
+
+scattered = no (apscatter)
+.LS
+Smooth and subtract scattered light from the object and flat field
+images. This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The fit to the initial
+flat field, or to the aperture reference image if no flat field is given,
+is done interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.LE
+fitflat = yes (flat1d)
+.LS
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.LE
+clean = yes (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.LE
+dispcor = yes
+.LS
+Dispersion correct spectra? Depending on the \f(CWparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.LE
+savearcs = yes
+.LS
+Save any simultaneous arc apertures? If no then the arc apertures will
+be deleted after use.
+.LE
+skyalign = no
+.LS
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.LE
+skysubtract = yes
+.LS
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.LE
+skyedit = yes
+.LS
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.LE
+saveskys = yes
+.LS
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.LE
+splot = no
+.LS
+Plot the final spectra with the task \fBsplot\fR?
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.LE
+update = yes
+.LS
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job provided there are no interactive
+options (\f(CWskyedit\fR and \f(CWsplot\fR) selected.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+params = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.LE
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdohydra\fR.
+
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.LE
+observatory = "observatory"
+.LS
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For Hydra data the image headers
+identify the observatory as "kpno" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
+.LE
+database = "database"
+.LS
+Database (directory) used for storing aperture and dispersion information.
+.LE
+verbose = no
+.LS
+Print verbose information available with various tasks.
+.LE
+logfile = "logfile", plotfile = ""
+.LS
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.LE
+records = ""
+.LS
+Dummy parameter to be ignored.
+.LE
+version = "HYDRA: ..."
+.LS
+Version of the package.
+.LE
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdohydra\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+order = "decreasing" (apfind)
+.LS
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.LE
+extras = no (apsum)
+.LS
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -5., upper = 5. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 3 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.LE
+apscat1 = "" (apscatter)
+.LS
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.LE
+apscat2 = "" (apscatter)
+.LS
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum) (fit1d|fit2d)
+.LS
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for Hydra/Nessie data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+nsubaps = 1 (apsum)
+.LS
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.LE
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+
+f_interactive = yes (fit1d)
+.LS
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \f(CWfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.LE
+f_function = "spline3", f_order = 10 (fit1d)
+.LS
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (autoidentify/identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.LE
+match = -3. (autoidentify/identify)
+.LS
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (autoidentify/identify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 10. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "spline3", i_order = 3 (autoidentify/identify)
+.LS
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The functions choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.LE
+i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (reidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+addfeatures = no (reidentify)
+.LS
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd", group = "ljd" (refspectra)
+.LS
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdohydra\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+
+combine = "average" (scombine) (average|median)
+.LS
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.LE
+reject = "none" (scombine) (none|minmax|avsigclip)
+.LS
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.V1
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.V2
+
+.LE
+scale = "none" (none|mode|median|mean)
+.LS
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
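+.LP
+For example (a sketch using the standard CL environment commands), the
+current image type may be examined or changed with:
+.V1
+
+ cl> show imtype
+ cl> set imtype = "imh"
+
+.V2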
diff --git a/noao/imred/hydra/dohydra.cl b/noao/imred/hydra/dohydra.cl
new file mode 100644
index 00000000..74264633
--- /dev/null
+++ b/noao/imred/hydra/dohydra.cl
@@ -0,0 +1,75 @@
+# DOHYDRA -- Process HYDRA spectra from 2D to wavelength calibrated 1D.
+#
+# The task PROC does all of the interactive work and BATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
+
+procedure dohydra (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+file throughput = "" {prompt="Throughput file or image (optional)"}
+string arcs1 = "" {prompt="List of arc spectra"}
+string arcs2 = "" {prompt="List of shift arc spectra"}
+file arcreplace = "" {prompt="Special aperture replacements"}
+file arctable = "" {prompt="Arc assignment table (optional)\n"}
+
+string readnoise = "RDNOISE" {prompt="Read out noise sigma (photons)"}
+string gain = "GAIN" {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int fibers = 97 {prompt="Number of fibers"}
+real width = 12. {prompt="Width of profiles (pixels)"}
+real minsep = 8. {prompt="Minimum separation between fibers (pixels)"}
+real maxsep = 15. {prompt="Maximum separation between fibers (pixels)"}
+file apidtable = "" {prompt="Aperture identifications"}
+string crval = "INDEF" {prompt="Approximate central wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion"}
+string objaps = "" {prompt="Object apertures"}
+string skyaps = "" {prompt="Sky apertures"}
+string arcaps = "" {prompt="Arc apertures"}
+string objbeams = "0,1" {prompt="Object beam numbers"}
+string skybeams = "0" {prompt="Sky beam numbers"}
+string arcbeams = "" {prompt="Arc beam numbers\n"}
+
+bool scattered = no {prompt="Subtract scattered light?"}
+bool fitflat = yes {prompt="Fit and ratio flat field spectrum?"}
+bool clean = yes {prompt="Detect and replace bad pixels?"}
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool savearcs = yes {prompt="Save simultaneous arc apertures?"}
+bool skyalign = no {prompt="Align sky lines?"}
+bool skysubtract = yes {prompt="Subtract sky?"}
+bool skyedit = yes {prompt="Edit the sky spectra?"}
+bool saveskys = yes {prompt="Save sky spectra?"}
+bool splot = no {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = yes {prompt="Update spectra if cal data changes?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset params = "" {prompt="Algorithm parameters"}
+
+begin
+ apscript.readnoise = readnoise
+ apscript.gain = gain
+ apscript.nfind = fibers
+ apscript.width = width
+ apscript.t_width = width
+ apscript.minsep = minsep
+ apscript.maxsep = maxsep
+ apscript.radius = minsep
+ apscript.clean = clean
+ proc.datamax = datamax
+
+ proc (objects, apref, flat, throughput, arcs1, arcs2, arcreplace,
+ arctable, fibers, apidtable, crval, cdelt, objaps, skyaps,
+ arcaps, objbeams, skybeams, arcbeams, scattered, fitflat, no,
+ no, no, no, clean, dispcor, savearcs, skyalign, skysubtract,
+ skyedit, saveskys, splot, redo, update, batch, listonly)
+
+ if (proc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("batch&batch") | cl
+ }
+end
diff --git a/noao/imred/hydra/dohydra.par b/noao/imred/hydra/dohydra.par
new file mode 100644
index 00000000..7bca4821
--- /dev/null
+++ b/noao/imred/hydra/dohydra.par
@@ -0,0 +1,43 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+flat,f,h,"",,,"Flat field spectrum"
+throughput,f,h,"",,,"Throughput file or image (optional)"
+arcs1,s,h,"",,,"List of arc spectra"
+arcs2,s,h,"",,,"List of shift arc spectra"
+arcreplace,f,h,"",,,"Special aperture replacements"
+arctable,f,h,"",,,"Arc assignment table (optional)
+"
+readnoise,s,h,"RDNOISE",,,"Read out noise sigma (photons)"
+gain,s,h,"GAIN",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+fibers,i,h,97,,,"Number of fibers"
+width,r,h,12.,,,"Width of profiles (pixels)"
+minsep,r,h,8.,,,"Minimum separation between fibers (pixels)"
+maxsep,r,h,15.,,,"Maximum separation between fibers (pixels)"
+apidtable,f,h,"",,,"Aperture identifications"
+crval,s,h,INDEF,,,"Approximate central wavelength"
+cdelt,s,h,INDEF,,,"Approximate dispersion"
+objaps,s,h,"",,,"Object apertures"
+skyaps,s,h,"",,,"Sky apertures"
+arcaps,s,h,"",,,"Arc apertures"
+objbeams,s,h,"0,1",,,"Object beam numbers"
+skybeams,s,h,"0",,,"Sky beam numbers"
+arcbeams,s,h,"",,,"Arc beam numbers
+"
+scattered,b,h,no,,,"Subtract scattered light?"
+fitflat,b,h,yes,,,"Fit and ratio flat field spectrum?"
+clean,b,h,yes,,,"Detect and replace bad pixels?"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+savearcs,b,h,yes,,,"Save simultaneous arc apertures?"
+skyalign,b,h,no,,,"Align sky lines?"
+skysubtract,b,h,yes,,,"Subtract sky?"
+skyedit,b,h,yes,,,"Edit the sky spectra?"
+saveskys,b,h,yes,,,"Save sky spectra?"
+splot,b,h,no,,,"Plot the final spectrum?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,yes,,,"Update spectra if cal data changes?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+params,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/hydra/hydra.cl b/noao/imred/hydra/hydra.cl
new file mode 100644
index 00000000..4312a490
--- /dev/null
+++ b/noao/imred/hydra/hydra.cl
@@ -0,0 +1,82 @@
+#{ HYDRA package definition
+
+proto # bscale
+
+s1 = envget ("min_lenuserarea")
+if (s1 == "")
+ reset min_lenuserarea = 100000
+else if (int (s1) < 100000)
+ reset min_lenuserarea = 100000
+
+# Define HYDRA package
+package hydra
+
+# Package script tasks
+task dohydra = "hydra$dohydra.cl"
+task params = "hydra$params.par"
+
+# Fiber reduction script tasks
+task proc = "srcfibers$proc.cl"
+task fibresponse = "srcfibers$fibresponse.cl"
+task arcrefs = "srcfibers$arcrefs.cl"
+task doarcs = "srcfibers$doarcs.cl"
+task doalign = "srcfibers$doalign.cl"
+task skysub = "srcfibers$skysub.cl"
+task batch = "srcfibers$batch.cl"
+task listonly = "srcfibers$listonly.cl"
+task getspec = "srcfibers$getspec.cl"
+
+task msresp1d = "specred$msresp1d.cl"
+
+# Demos
+set demos = "hydra$demos/"
+task demos = "demos$demos.cl"
+task mkfibers = "srcfibers$mkfibers.cl"
+
+# Onedspec tasks
+task autoidentify,
+ continuum,
+ dispcor,
+ dopcor,
+ identify,
+ refspectra,
+ reidentify,
+ sapertures,
+ sarith,
+ sflip,
+ slist,
+ specplot,
+ specshift,
+ splot = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ aprecenter,
+ apresize,
+ apscatter,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apdefault = "apextract$apdefault.par"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apscat1 = "apextract$apscat1.par"
+task apscat2 = "apextract$apscat2.par"
+task apscript = "srcfibers$x_apextract.e"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apscript, apscat1, apscat2, dispcor1, mkfibers
+hidetask params, proc, batch, arcrefs, doarcs, doalign
+hidetask listonly, fibresponse, getspec
+
+clbye()
diff --git a/noao/imred/hydra/hydra.hd b/noao/imred/hydra/hydra.hd
new file mode 100644
index 00000000..c9e04d8a
--- /dev/null
+++ b/noao/imred/hydra/hydra.hd
@@ -0,0 +1,7 @@
+# Help directory for the HYDRA package.
+
+$doc = "./doc/"
+
+dohydra hlp=doc$dohydra.hlp
+
+revisions sys=Revisions
diff --git a/noao/imred/hydra/hydra.men b/noao/imred/hydra/hydra.men
new file mode 100644
index 00000000..a01bc9b5
--- /dev/null
+++ b/noao/imred/hydra/hydra.men
@@ -0,0 +1,32 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and remove scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plots of spectra
+ continuum - Fit the continuum in spectra
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically identify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum header parameters
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+
+ dohydra - Process HYDRA spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/hydra/hydra.par b/noao/imred/hydra/hydra.par
new file mode 100644
index 00000000..b560f84d
--- /dev/null
+++ b/noao/imred/hydra/hydra.par
@@ -0,0 +1,13 @@
+# HYDRA parameter file
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,""
+version,s,h,"HYDRA V1: January 1992"
diff --git a/noao/imred/hydra/params.par b/noao/imred/hydra/params.par
new file mode 100644
index 00000000..af56a1b8
--- /dev/null
+++ b/noao/imred/hydra/params.par
@@ -0,0 +1,67 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+order,s,h,"decreasing","increasing|decreasing",,"Order of apertures"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-5.,,,"Lower aperture limit relative to center"
+upper,r,h,5.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,3,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- SCATTERED LIGHT PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,Upper rejection threshold
+nsubaps,i,h,1,1,,"Number of subapertures
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,yes,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,10,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$ctiohenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,2,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SKY SUBTRACTION PARAMETERS --"
+combine,s,h,"average","average|median",,Type of combine operation
+reject,s,h,"avsigclip","none|minmax|avsigclip",,"Sky rejection option"
+scale,s,h,"none","none|mode|median|mean",,"Sky scaling option"
diff --git a/noao/imred/iids/Revisions b/noao/imred/iids/Revisions
new file mode 100644
index 00000000..7472a80c
--- /dev/null
+++ b/noao/imred/iids/Revisions
@@ -0,0 +1,131 @@
+.help revisions Jun88 noao.imred.iids
+.nf
+
+=====
+V2.12
+=====
+
+imred$iids/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+========
+V2.11.3b
+========
+
+imred$iids/identify.par
+ Added new units parameters. (3/11/97, Valdes)
+
+=========
+V2.10.4p2
+=========
+
+imred$iids/standard.par
+    Removed enumerated list.  (4/10/89, Valdes)
+
+imred$iids/iids.cl
+imred$iids/iids.men
+specplot.par+
+ Task SPECPLOT added. (4/3/89 ShJ)
+
+imred$iids/batchred.cl
+imred$iids/batchred.par
+imred$iids/iids.cl
+imred$iids/iids.men
+ 1. New BATCHRED script. (4/27/88 Valdes)
+ 2. Eliminated EXTINCT.
+
+imred$iids/sensfunc.par
+ Added aperture selection and query parameters. (4/15/88 Valdes)
+
+imred$iids/iids.cl
+imred$iids/refspectra.par +
+ Refer to new ONEDSPEC executable and tasks. (4/7/88 Valdes)
+
+noao$imred/iids/reidentify.par
+ Valdes, Jan 4, 1988
+ Updated parameter file for new REIDENTIFY parameter.
+
+noao$imred/iids/iids.hd -
+noao$imred/iids/doc/ -
+ Valdes, June 1, 1987
+ 1. The documentation for POWERCOR has been moved to ONEDSPEC. A
+ copy of POWERCOR has been installed as part of the ONEDSPEC
+ package.
+
+noao$imred/iids/dispcor.par
+ Valdes, March 5, 1987
+ 1. The DISPCOR default parameter file has been updated because of
+ changes to the task; most notable being that wstart and wpc are
+ list structured.
+
+noao$imred/iids/flatdiv.par
+noao$imred/iids/flatfit.par
+ Valdes, December, 2, 1986
+ 1. New parameter "power" added to these tasks.
+
+noao$imred/iids/coincor.par
+noao$imred/iids/powercor.cl
+ Valdes, October 20, 1986
+ 1. New parameter "checkdone" added to COINCOR to allow overriding
+ coincidence correction checking.
+ 2. POWERCOR script modified so that it will apply correction on
+ previously coincidence corrected spectra.
+
+noao$imred/iids/iids.cl
+noao$imred/iids/iids.men
+noao$imred/iids/iids.hd +
+noao$imred/iids/powercor.cl +
+noao$imred/iids/powercor.par +
+noao$imred/iids/doc/powercor.hlp +
+ Valdes, October 13, 1986
+ 1. Added a new script task POWERCOR to apply the power law correction
+ to mountain reduced IIDS spectra.
+ 2. A help page was also added.
+
+noao$imred/iids/iids.par
+noao$imred/iids/coincor.par
+noao$imred/iids/iids.men
+ Valdes, October 13, 1986
+ 1. Added new COINCOR parameter "power".
+
+noao$imred/iids/iids.cl
+noao$imred/iids/iids.men
+noao$imred/irs/shedit.par +
+ Valdes, October 6, 1986
+ 1. Added new task SHEDIT.
+
+noao$imred/iids/identify.par
+ Valdes, October 3, 1986
+ 1. Added new IDENTIFY parameter "threshold".
+
+====================================
+Version 2.3 Release, August 18, 1986
+====================================
+
+iids: Valdes, July 3, 1986:
+ 1. New coordlist name in IDENTIFY parameter file.
+ 2. New calibration file name in package parameter file.
+
+=====================================
+STScI Pre-release and SUN 2.3 Release
+=====================================
+
+iids$bswitch.par: Valdes, May 19, 1986
+ 1. The parameter "add_const" in BSWITCH is directed to the parameter
+ in SENSFUNC of the same name.
+
+iids: Valdes, May 12, 1986:
+ 1. SPLOT updated. New parameters XMIN, XMAX, YMIN, YMAX.
+
+iids: Valdes, April 7, 1986
+ 1. Package parameter file changed to delete latitude.
+ 2. DISPCOR, BSWITCH, and STANDARD latitude parameter now obtained from
+ OBSERVATORY.
+
+iids: Valdes, March 27, 1986
+ 1. New task SETDISP added.
+
+===========
+Release 2.2
+===========
+.endhelp
diff --git a/noao/imred/iids/calibrate.par b/noao/imred/iids/calibrate.par
new file mode 100644
index 00000000..795965b7
--- /dev/null
+++ b/noao/imred/iids/calibrate.par
@@ -0,0 +1,14 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+records,s,a,,,,Record number extensions
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,no,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/iids/dispcor.par b/noao/imred/iids/dispcor.par
new file mode 100644
index 00000000..186c0ca1
--- /dev/null
+++ b/noao/imred/iids/dispcor.par
@@ -0,0 +1,19 @@
+input,s,a,,,,List of input spectra
+output,s,a,,,,List of output spectra
+records,s,a,,,,Record number extensions
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+database,s,h,"database",,,Dispersion solution database
+table,s,h,"",,,Wavelength table for apertures
+w1,r,h,INDEF,,,Starting wavelength
+w2,r,h,INDEF,,,Ending wavelength
+dw,r,h,INDEF,,,Wavelength interval per pixel
+nw,i,h,1024,,,Number of output pixels
+log,b,h,no,,,Logarithmic wavelength scale?
+flux,b,h,yes,,,Conserve total flux?
+samedisp,b,h,yes,,,Same dispersion in all apertures?
+global,b,h,yes,,,Apply global defaults?
+confirm,b,h,yes,,,Confirm dispersion coordinates?
+ignoreaps,b,h,no,,,Ignore apertures?
+listonly,b,h,no,,,List the dispersion coordinates only?
+verbose,b,h,yes,,,Print linear dispersion assignments?
+logfile,s,h,"logfile",,,Log file
diff --git a/noao/imred/iids/identify.par b/noao/imred/iids/identify.par
new file mode 100644
index 00000000..92049af8
--- /dev/null
+++ b/noao/imred/iids/identify.par
@@ -0,0 +1,33 @@
+# Parameters for identify task.
+
+images,s,a,,,,Images containing features to be identified
+section,s,h,"middle line",,,Section to apply to two dimensional images
+database,f,h,database,,,Database in which to record feature data
+coordlist,f,h,linelists$henear.dat,,,User coordinate list
+units,s,h,"",,,Coordinate units
+nsum,s,h,"10",,,Number of lines/columns/bands to sum in 2D images
+match,r,h,50.,,,Coordinate list matching limit
+maxfeatures,i,h,50,,,Maximum number of features for automatic identification
+zwidth,r,h,100.,,,Zoom graph width in user units
+
+ftype,s,h,"emission","emission|absorption",,Feature type
+fwidth,r,h,4.,,,Feature width in pixels
+cradius,r,h,5.,,,Centering radius in pixels
+threshold,r,h,10.,0.,,Feature threshold for centering
+minsep,r,h,2.,0.,,Minimum pixel separation
+
+function,s,h,"chebyshev","legendre|chebyshev|spline1|spline3",,Coordinate function
+order,i,h,6,1,,Order of coordinate function
+sample,s,h,"*",,,Coordinate sample regions
+niterate,i,h,0,0,,Rejection iterations
+low_reject,r,h,3.,0.,,Lower rejection sigma
+high_reject,r,h,3.,0.,,Upper rejection sigma
+grow,r,h,0.,0.,,Rejection growing radius
+
+autowrite,b,h,no,,,"Automatically write to database"
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/iids/iids.cl b/noao/imred/iids/iids.cl
new file mode 100644
index 00000000..19b8ac79
--- /dev/null
+++ b/noao/imred/iids/iids.cl
@@ -0,0 +1,66 @@
+#{ IIDS -- KPNO IIDS Spectral Reduction Package
+
+# Load necessary packages
+
+lists # List package for table
+
+# Define necessary paths
+
+set iidscal = "onedstds$iidscal/"
+set irsiids = "onedspec$irsiids/"
+
+package iids
+
+# Standard ONEDSPEC tasks
+task autoidentify,
+ continuum,
+ deredden,
+ dopcor,
+ mkspec,
+ names,
+ sarith,
+ sflip,
+ sinterp,
+ splot,
+ specplot,
+ specshift = onedspec$x_onedspec.e
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task dispcor1 = onedspec$dispcor1.par
+task scopy = onedspec$scopy.cl
+hidetask dispcor1
+
+# Special IRS/IIDS tasks
+task addsets,
+ bswitch,
+ coefs,
+ coincor,
+ flatdiv,
+ flatfit,
+ slist1d,
+ subsets,
+ sums = irsiids$x_onedspec.e
+task batchred = irsiids$batchred.cl
+task bplot = irsiids$bplot.cl
+task extinct = irsiids$extinct.cl
+task powercor = irsiids$powercor.cl
+
+# Different default parameters
+task calibrate,
+ dispcor,
+ identify,
+ lcalib,
+ refspectra,
+ reidentify,
+ sensfunc,
+ standard = iids$x_onedspec.e
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Define a task living in the user's directory - it is created by BATCHRED
+
+task $process = process.cl
+
+clbye()
diff --git a/noao/imred/iids/iids.hd b/noao/imred/iids/iids.hd
new file mode 100644
index 00000000..5398f372
--- /dev/null
+++ b/noao/imred/iids/iids.hd
@@ -0,0 +1 @@
+# Help directory for the IIDS package.
diff --git a/noao/imred/iids/iids.men b/noao/imred/iids/iids.men
new file mode 100644
index 00000000..faa77978
--- /dev/null
+++ b/noao/imred/iids/iids.men
@@ -0,0 +1,37 @@
+ addsets - Add subsets of strings of spectra
+ batchred - Batch processing of IIDS/IRS spectra
+ bplot - Batch plots of spectra
+ bswitch - Beam-switch strings of spectra to make obj-sky pairs
+ calibrate - Apply sensitivity correction to spectra
+ coefs - Extract mtn reduced coefficients from henear scans
+ coincor - Correct spectra for detector count rates
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ extinct - Use BSWITCH for extinction correction
+ flatdiv - Divide spectra by flat field
+ flatfit - Sum and normalize flat field spectra
+ identify - Identify features in spectrum for dispersion solution
+ lcalib - List calibration file data
+ mkspec - Generate an artificial spectrum
+ names - Generate a list of image names from a string
+ powercor - Apply power law correction to mountain reduced spectra
+ process - A task generated by BATCHRED
+ refspectra - Assign reference spectra to object spectra
+ reidentify - Automatically identify features in spectra
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ sinterp - Interpolate a table of x,y pairs to create a spectrum
+ slist1d - List spectral header elements
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+ standard - Identify standard stars to be used in sensitivity calc
+ subsets - Subtract pairs in strings of spectra
+ sums - Generate sums of object and sky spectra by aperture
diff --git a/noao/imred/iids/iids.par b/noao/imred/iids/iids.par
new file mode 100644
index 00000000..9035bc1b
--- /dev/null
+++ b/noao/imred/iids/iids.par
@@ -0,0 +1,17 @@
+# PARAMETERS FOR KPNO IIDS SPECTRAL REDUCTION PACKAGE
+
+observatory,s,h,"kpno",,,Observatory for data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+extinction,s,h,"onedstds$kpnoextinct.dat",,,Extinction file
+caldir,s,h,"iidscal$",,,Directory containing calibration data
+coincor,b,h,yes,,,Apply coincidence correction to flats
+ccmode,s,h,"iids",,,Correction mode (photo|iids|power)
+deadtime,r,h,1.424e-3,0,,Deadtime in seconds
+power,r,h,0.975,,,IIDS power law coefficient
+
+dispaxis,i,h,1,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,Number of lines/columns/bands to sum for 2D/3D images
+
+next_rec,i,h,1,,,"Next output record"
+
+version,s,h,"IIDS V3: July 1991"
diff --git a/noao/imred/iids/irs.men b/noao/imred/iids/irs.men
new file mode 100644
index 00000000..6ebc9281
--- /dev/null
+++ b/noao/imred/iids/irs.men
@@ -0,0 +1,5 @@
+ addsets coefs flatfit powercor scombine splot
+ batchred coincor identify process sensfunc standard
+ bplot continuum lcalib rebin sinterp subsets
+ bswitch dispcor mkspec refspectra slist1d sums
+ calibrate flatdiv names reidentify specplot
diff --git a/noao/imred/iids/lcalib.par b/noao/imred/iids/lcalib.par
new file mode 100644
index 00000000..30436625
--- /dev/null
+++ b/noao/imred/iids/lcalib.par
@@ -0,0 +1,7 @@
+# CALIBLIST parameter file
+
+option,s,a,,,,"List option (bands, ext, mags, fnu, flam, stars)"
+star_name,s,a,,,,Star name in calibration list
+extinction,s,h,,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
diff --git a/noao/imred/iids/refspectra.par b/noao/imred/iids/refspectra.par
new file mode 100644
index 00000000..47cd54f9
--- /dev/null
+++ b/noao/imred/iids/refspectra.par
@@ -0,0 +1,17 @@
+input,s,a,,,,"List of input spectra"
+records,s,a,,,,Record number extensions
+references,s,h,"*.imh",,,"List of reference spectra"
+apertures,s,h,"",,,"Input aperture selection list"
+refaps,s,h,"",,,"Reference aperture selection list"
+ignoreaps,b,h,no,,,Ignore input and reference apertures?
+select,s,h,"interp","match|nearest|preceding|following|interp|average",,"Selection method for reference spectra"
+sort,s,h,"ut",,,"Sort key"
+group,s,h,"none",,,"Group key"
+time,b,h,yes,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting"
+override,b,h,no,,,"Override previous assignments?"
+confirm,b,h,yes,,,"Confirm reference spectrum assignments?"
+assign,b,h,yes,,,"Assign the reference spectra to the input spectrum?"
+logfiles,s,h,"STDOUT,logfile",,,"List of logfiles"
+verbose,b,h,no,,,"Verbose log output?"
+answer,s,q,,"no|yes|YES",,"Accept assignment?"
diff --git a/noao/imred/iids/reidentify.par b/noao/imred/iids/reidentify.par
new file mode 100644
index 00000000..13b21740
--- /dev/null
+++ b/noao/imred/iids/reidentify.par
@@ -0,0 +1,36 @@
+# Parameters for reidentify task.
+
+reference,s,a,,,,Reference image
+images,s,a,,,,Images to be reidentified
+interactive,s,h,"no","no|yes|NO|YES",,Interactive fitting?
+section,s,h,"middle line",,,Section to apply to two dimensional images
+newaps,b,h,yes,,,Reidentify apertures in images not in reference?
+override,b,h,no,,,Override previous solutions?
+refit,b,h,yes,,,"Refit coordinate function?
+"
+trace,b,h,no,,,Trace reference image?
+step,s,h,"10",,,Step in lines/columns/bands for tracing an image
+nsum,s,h,"10",,,Number of lines/columns/bands to sum
+shift,s,h,"0.",,,Shift to add to reference features (INDEF to search)
+search,r,h,0.,,,Search radius
+nlost,i,h,0,0,,"Maximum number of features which may be lost
+"
+cradius,r,h,5.,,,Centering radius
+threshold,r,h,10.,0.,,Feature threshold for centering
+addfeatures,b,h,no,,,Add features from a line list?
+coordlist,f,h,linelists$henear.dat,,,User coordinate list
+match,r,h,10.,,,Coordinate list matching limit
+maxfeatures,i,h,50,,,Maximum number of features for automatic identification
+minsep,r,h,2.,0.,,"Minimum pixel separation
+"
+database,f,h,database,,,Database
+logfiles,s,h,"logfile",,,List of log files
+plotfile,s,h,"",,,Plot file for residuals
+verbose,b,h,no,,,Verbose output?
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,"Graphics cursor input
+"
+answer,s,q,"yes","no|yes|NO|YES",,Fit dispersion function interactively?
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/iids/sensfunc.par b/noao/imred/iids/sensfunc.par
new file mode 100644
index 00000000..022190a4
--- /dev/null
+++ b/noao/imred/iids/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,no,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,")_.extinction",,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/iids/standard.par b/noao/imred/iids/standard.par
new file mode 100644
index 00000000..3abf645a
--- /dev/null
+++ b/noao/imred/iids/standard.par
@@ -0,0 +1,22 @@
+input,f,a,,,,Input image file root name
+records,s,a,,,,Spectral records
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,yes,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/imred.cl b/noao/imred/imred.cl
new file mode 100644
index 00000000..634b7b27
--- /dev/null
+++ b/noao/imred/imred.cl
@@ -0,0 +1,55 @@
+#{ IMRED -- Image reduction package.
+
+# Define directories.
+
+set argus = "imred$argus/"
+set biasdir = "imred$bias/"
+set ccdred = "imred$ccdred/"
+set crutil = "imred$crutil/"
+set ctioslit = "imred$ctioslit/"
+set dtoi = "imred$dtoi/"
+set echelle = "imred$echelle/"
+set generic = "imred$generic/"
+set hydra = "imred$hydra/"
+set iids = "imred$iids/"
+set irred = "imred$irred/"
+set irs = "imred$irs/"
+set kpnocoude = "imred$kpnocoude/"
+set kpnoslit = "imred$kpnoslit/"
+set quadred = "imred$quadred/"
+set specred = "imred$specred/"
+set vtel = "imred$vtel/"
+
+set apextract = "twodspec$apextract/"
+set doecslit = "imred$src/doecslit/"
+set dofoe = "imred$src/dofoe/"
+set doslit = "imred$src/doslit/"
+set srcfibers = "imred$src/fibers/"
+
+# Define the package.
+
+package imred
+
+# Tasks
+#task tutor = "imred$tutor.cl"
+
+# Packages
+task argus.pkg = "argus$argus.cl"
+task bias.pkg = "biasdir$bias.cl"
+task ccdred.pkg = "ccdred$ccdred.cl"
+task crutil.pkg = "crutil$crutil.cl"
+task ctioslit.pkg = "ctioslit$ctioslit.cl"
+task dtoi.pkg = "dtoi$dtoi.cl"
+task echelle.pkg = "echelle$echelle.cl"
+task generic.pkg = "generic$generic.cl"
+task hydra.pkg = "hydra$hydra.cl"
+task iids.pkg = "iids$iids.cl"
+task irred.pkg = "irred$irred.cl"
+task irs.pkg = "irs$irs.cl"
+task kpnocoude.pkg = "kpnocoude$kpnocoude.cl"
+task kpnoslit.pkg = "kpnoslit$kpnoslit.cl"
+task quadred.pkg = "quadred$quadred.cl"
+task specred.pkg = "specred$specred.cl"
+task vtel.pkg = "vtel$vtel.cl"
+
+clbye
diff --git a/noao/imred/imred.hd b/noao/imred/imred.hd
new file mode 100644
index 00000000..7e349d7e
--- /dev/null
+++ b/noao/imred/imred.hd
@@ -0,0 +1,119 @@
+# Help directory for the IMRED package.
+
+$doc = "./doc/"
+
+$argus = "noao$imred/argus/"
+$bias = "noao$imred/bias/"
+$ccdred = "noao$imred/ccdred/"
+$crutil = "noao$imred/crutil/"
+$ctioslit = "noao$imred/ctioslit/"
+$dtoi = "noao$imred/dtoi/"
+$echelle = "noao$imred/echelle/"
+$generic = "noao$imred/generic/"
+$hydra = "noao$imred/hydra/"
+$iids = "noao$imred/iids/"
+$irred = "noao$imred/irred/"
+$irs = "noao$imred/irs/"
+$kpnocoude = "noao$imred/kpnocoude/"
+$kpnoslit = "noao$imred/kpnoslit/"
+$quadred = "noao$imred/quadred/"
+$specred = "noao$imred/specred/"
+$vtel = "noao$imred/vtel/"
+
+demos hlp=doc$demos.hlp
+#tutor hlp=doc$tutor.hlp, src=tutor.cl
+
+argus men=argus$argus.men,
+ hlp=..,
+ pkg=argus$argus.hd,
+ src=argus$argus.cl
+
+bias men=bias$bias.men,
+ hlp=..,
+ pkg=bias$bias.hd,
+ src=bias$bias.cl
+
+ccdred men=ccdred$ccdred.men,
+ hlp=..,
+ sys=ccdred$ccdred.hlp,
+ pkg=ccdred$ccdred.hd,
+ src=ccdred$ccdred.cl
+
+crutil men=crutil$crutil.men,
+ hlp=..,
+ sys=crutil$crutil.hlp,
+ pkg=crutil$crutil.hd,
+ src=crutil$crutil.cl
+
+ctioslit men=ctioslit$ctioslit.men,
+ hlp=..,
+ pkg=ctioslit$ctioslit.hd,
+ src=ctioslit$ctioslit.cl
+
+dtoi men=dtoi$dtoi.men,
+ hlp=..,
+ pkg=dtoi$dtoi.hd,
+ src=dtoi$dtoi.cl
+
+echelle men=echelle$echelle.men,
+ hlp=..,
+ sys=echelle$echelle.hlp,
+ pkg=echelle$echelle.hd,
+ src=echelle$echelle.cl
+
+generic men=generic$generic.men,
+ hlp=..,
+ sys=generic$generic.hlp,
+ pkg=generic$generic.hd,
+ src=generic$generic.cl
+
+hydra men=hydra$hydra.men,
+ hlp=..,
+ pkg=hydra$hydra.hd,
+ src=hydra$hydra.cl
+
+iids men=iids$iids.men,
+ hlp=..,
+ sys=iids$iids.hlp,
+ pkg=iids$iids.hd,
+ src=iids$iids.cl
+
+irred men=irred$irred.men,
+ hlp=..,
+ sys=irred$irred.hlp,
+ pkg=irred$irred.hd,
+ src=irred$irred.cl
+
+irs men=irs$irs.men,
+ hlp=..,
+ sys=irs$irs.hlp,
+ pkg=irs$irs.hd,
+ src=irs$irs.cl
+
+kpnocoude men=kpnocoude$kpnocoude.men,
+ hlp=..,
+ pkg=kpnocoude$kpnocoude.hd,
+ src=kpnocoude$kpnocoude.cl
+
+kpnoslit men=kpnoslit$kpnoslit.men,
+ hlp=..,
+ pkg=kpnoslit$kpnoslit.hd,
+ src=kpnoslit$kpnoslit.cl
+
+quadred men=quadred$quadred.men,
+ hlp=..,
+ sys=quadred$quadred.hlp,
+ pkg=quadred$quadred.hd,
+ src=quadred$quadred.cl
+
+specred men=specred$specred.men,
+ hlp=..,
+ sys=specred$specred.hlp,
+ pkg=specred$specred.hd,
+ src=specred$specred.cl
+
+vtel men=vtel$vtel.men,
+ hlp=..,
+ sys=vtel$vtel.hlp,
+ pkg=vtel$vtel.hd,
+ src=vtel$vtel.cl
diff --git a/noao/imred/imred.men b/noao/imred/imred.men
new file mode 100644
index 00000000..b9ddc234
--- /dev/null
+++ b/noao/imred/imred.men
@@ -0,0 +1,17 @@
+ argus - CTIO ARGUS reduction package
+ bias - General bias subtraction tools
+ crutil - Tasks for detecting and removing cosmic rays
+ ccdred - Generic CCD reductions
+ ctioslit - CTIO spectrophotometric reduction package
+ dtoi - Density to Intensity reductions for photographic plates
+ echelle - Echelle spectral reductions (slit and FOE)
+ generic - Generic image reductions tools
+ hydra - KPNO HYDRA (and NESSIE) reduction package
+ iids - KPNO IIDS spectral reductions
+ irred - KPNO IR camera reductions
+ irs - KPNO IRS spectral reductions
+ kpnocoude - KPNO coude reduction package (slit and 3 fiber)
+ kpnoslit - KPNO low/moderate dispersion slits (Goldcam, RCspec, Whitecam)
+ quadred - CCD reductions for QUAD amplifier data
+ specred - Generic slit and fiber spectral reduction package
+ vtel - NSO Solar vacuum telescope image reductions
diff --git a/noao/imred/imred.par b/noao/imred/imred.par
new file mode 100644
index 00000000..c0e775bf
--- /dev/null
+++ b/noao/imred/imred.par
@@ -0,0 +1,5 @@
+# IMRED package parameter file
+
+keeplog,b,h,no,,,Keep log of processing?
+logfile,f,h,"imred.log",,,Log file
+version,s,h,"IMRED V3: August 2001"
diff --git a/noao/imred/irred/Revisions b/noao/imred/irred/Revisions
new file mode 100644
index 00000000..22a23670
--- /dev/null
+++ b/noao/imred/irred/Revisions
@@ -0,0 +1,61 @@
+.help revisions Jun88 noao.imred.irred
+.nf
+irred$irred.cl
+ The IRLINCOR task was mistakenly declared using 'x_irred.x' instead
+ of 'x_irred.e'. Works fine under the CL but broke Pyraf (1/18/07, MJF)
+
+irred$center.par
+irred$doc/center.hlp
+ Updated the center task parameter file to support the new cache, wcsin,
+ and wcsout parameters.
+
+ Added a copy of the center task help page so it is accessible even
+ if apphot is not loaded. Center1d gets picked up instead in that case.
+
+ Davis, April 8, 2001
+
+irred$mosproc.cl
+ Changed the outtype parameter setting in the imcombine task call from
+ "" to "real". Added default values for the rejmask, nkeep, and snoise
+ parameters.
+
+ Davis, January 18, 1999
+
+irred$irred.cl
+irred$irred.hd
+irred$irred.men
+irred$mkpkg
+irred$irlincor.par
+irred$x_irred.x
+irred$t_irlincor.x
+irred$doc/irlincor.hlp
+ Added the ctio.irlincor package task to the irred package.
+
+irred$irred.cl
+irred$mosproc.cl
+ 1. The irred cl script was modified to reference nproto instead of proto.
+ 2. The irred script was modified to load the core proto package in order to
+ pick up the bscale task.
+    3. The references to the apselect task were replaced by references to the
+ txdump task.
+ 4. The bscale.par file was removed since one cannot have private copies
+ of .par files across package boundaries.
+ 5. Replaced the obsolete version of datapars.par with the new version.
+    6. Replaced the obsolete versions of irmosaic.par, iralign.par,
+ irmatch1d.par, and irmatch2d.par with new ones.
+
+ Davis, January 25, 1992
+
+irred$mosproc.cl
+ 1. Replaced the call to imcombine with a call to the new imcombine.
+
+ Davis, January 25, 1992
+
+
+irred$
+ The IRRED package was added to the imred package menu. The current
+    tasks are bscale, center, irmosaic, iralign, irmatch1d, irmatch2d,
+    mosproc, and txdump.
+ Davis, April 1, 1989
+
+.endhelp
diff --git a/noao/imred/irred/center.par b/noao/imred/irred/center.par
new file mode 100644
index 00000000..4cc613cd
--- /dev/null
+++ b/noao/imred/irred/center.par
@@ -0,0 +1,21 @@
+# CENTER
+
+image,f,a,,,,"Input image(s)"
+coords,f,h,"",,,"Input coordinate list(s) (default: image.coo.?)"
+output,f,h,"default",,,"Output center file(s) (default: image.ctr.?)"
+plotfile,s,h,"",,,"Output plot metacode file"
+datapars,pset,h,"",,,"Data dependent parameters"
+centerpars,pset,h,"",,,"Centering parameters"
+interactive,b,h,yes,,,"Interactive mode ?"
+radplots,b,h,no,,,"Plot the radial profiles in interactive mode ?"
+icommands,*imcur,h,"",,,"Image cursor: [x y wcs] key [cmd]"
+gcommands,*gcur,h,"",,,"Graphics cursor: [x y wcs] key [cmd]"
+wcsin,s,h,logical,,,"The input coordinate system (logical,tv,physical,world)"
+wcsout,s,h,logical,,,"The output coordinate system (logical,tv,physical)"
+cache,b,h,no,,,"Cache the input image pixels in memory ?"
+verify,b,h,yes,,,"Verify critical parameters in non-interactive mode ?"
+update,b,h,no,,,"Update critical parameters in non-interactive mode ?"
+verbose,b,h,yes,,,"Print messages in non-interactive mode ?"
+graphics,s,h,"stdgraph",,,"Graphics device"
+display,s,h,"stdimage",,,"Display device"
+mode,s,h,'ql'
diff --git a/noao/imred/irred/centerpars.par b/noao/imred/irred/centerpars.par
new file mode 100644
index 00000000..1692b29e
--- /dev/null
+++ b/noao/imred/irred/centerpars.par
@@ -0,0 +1,14 @@
+# CENTERPARS
+
+calgorithm,s,h,"centroid","|centroid|gauss|none|ofilter|",,Centering algorithm
+cbox,r,h,5.0,,,Centering box width in scale units
+cthreshold,r,h,0.0,,,Centering threshold in sigma above background
+minsnratio,r,h,1.0,0.0,,Minimum signal-to-noise ratio for centering algorithm
+cmaxiter,i,h,10,,,Maximum number of iterations for centering algorithm
+maxshift,r,h,1.0,,,Maximum center shift in scale units
+clean,b,h,no,,,Symmetry clean before centering ?
+rclean,r,h,1.0,,,Cleaning radius in scale units
+rclip,r,h,2.0,,,Clipping radius in scale units
+kclean,r,h,3.0,,,Rejection limit in sigma
+mkcenter,b,h,no,,,Mark the computed center on display ?
+mode,s,h,'ql'
diff --git a/noao/imred/irred/datapars.par b/noao/imred/irred/datapars.par
new file mode 100644
index 00000000..15778b9f
--- /dev/null
+++ b/noao/imred/irred/datapars.par
@@ -0,0 +1,25 @@
+# DATAPARS
+
+scale,r,h,1.0,0.0,,Image scale in units per pixel
+fwhmpsf,r,h,2.5,0.0,,FWHM of the PSF in scale units
+emission,b,h,y,,,Features are positive ?
+sigma,r,h,INDEF,,,Standard deviation of background in counts
+datamin,r,h,INDEF,,,Minimum good data value
+datamax,r,h,INDEF,,,Maximum good data value
+
+noise,s,h,"poisson","|constant|poisson|",,Noise model
+ccdread,s,h,"",,,CCD readout noise image header keyword
+gain,s,h,"",,,CCD gain image header keyword
+readnoise,r,h,0.0,,,CCD readout noise in electrons
+epadu,r,h,1.0,,,Gain in electrons per count
+
+exposure,s,h,"",,,Exposure time image header keyword
+airmass,s,h,"",,,Airmass image header keyword
+filter,s,h,"",,,Filter image header keyword
+obstime,s,h,"",,,Time of observation image header keyword
+itime,r,h,1.0,,,Exposure time
+xairmass,r,h,INDEF,,,Airmass
+ifilter,s,h,"INDEF",,,Filter
+otime,s,h,"INDEF",,,Time of observation
+
+mode,s,h,'ql'
diff --git a/noao/imred/irred/doc/center.hlp b/noao/imred/irred/doc/center.hlp
new file mode 100644
index 00000000..1171deb4
--- /dev/null
+++ b/noao/imred/irred/doc/center.hlp
@@ -0,0 +1,637 @@
+.help center May00 irred
+.ih
+NAME
+center -- compute accurate centers for a list of objects
+.ih
+USAGE
+center image
+.ih
+PARAMETERS
+.ls image
+The list of images containing the objects to be centered.
+.le
+.ls coords = ""
+The list of text files containing initial coordinates for the objects to
+be centered. Objects are listed in coords one object per line with the
+initial coordinate values in columns one and two. The number of coordinate
+files must be zero, one, or equal to the number of images.
+If coords is "default", "dir$default", or a directory specification then an
+coords file name of the form dir$root.extension.version is constructed and
+searched for, where dir is the directory, root is the root image name,
+extension is "coo" and version is the next available version number for the
+file.
+.le
+.ls output = "default"
+The name of the results file or results directory. If output is
+"default", "dir$default", or a directory specification then an output file name
+of the form dir$root.extension.version is constructed, where dir is the
+directory, root is the root image name, extension is "ctr" and version is
+the next available version number for the file. The number of output files
+must be zero, one, or equal to the number of image files. In both interactive
+and batch mode full output is written to output. In interactive mode
+an output summary is also written to the standard output.
+.le
+.ls plotfile = ""
+The name of the file containing radial profile plots of the stars written
+to the output file. If plotfile is defined then a radial profile plot
+is written to plotfile every time a record is written to \fIoutput\fR.
+The user should be aware that this can be a time consuming operation.
+.le
+.ls datapars = ""
+The name of the file containing the data dependent parameters.
+The critical parameters \fIfwhmpsf\fR and \fIsigma\fR are located in
+datapars. If datapars is undefined then the default parameter set in
+the uparm directory is used.
+.le
+.ls centerpars = ""
+The name of the file containing the centering algorithm parameters.
+The critical parameters \fIcalgorithm\fR and \fIcbox\fR are located in
+centerpars. If centerpars is undefined then the default parameter
+set in uparm is used.
+.le
+.ls interactive = yes
+Interactive or non-interactive mode?
+.le
+.ls radplots = no
+If \fIradplots\fR is "yes" and CENTER is run in interactive mode, a radial
+profile of each star is plotted on the screen after the center is fit.
+.le
+.ls icommands = ""
+The image display cursor or image cursor command file.
+.le
+.ls gcommands = ""
+The graphics cursor or graphics cursor command file.
+.le
+.ls wcsin = "logical", wcsout = "logical"
+The coordinate system of the input coordinates read from \fIcoords\fR and
+of the output coordinates written to \fIoutput\fR respectively. The image
+header coordinate system is used to transform from the input coordinate
+system to the "logical" pixel coordinate system used internally,
+and from the internal "logical" pixel coordinate system to the output
+coordinate system. The input coordinate system options are "logical", "tv",
+"physical", and "world". The output coordinate system options are "logical",
+"tv", and "physical". The image cursor coordinate system is assumed to
+be the "tv" system.
+.ls logical
+Logical coordinates are pixel coordinates relative to the current image.
+The logical coordinate system is the coordinate system used by the image
+input/output routines to access the image data on disk. In the logical
+coordinate system the coordinates of the first pixel of a 2D image, e.g.
+dev$ypix and a 2D image section, e.g. dev$ypix[200:300,200:300] are
+always (1,1).
+.le
+.ls tv
+Tv coordinates are the pixel coordinates used by the display servers. Tv
+coordinates include the effects of any input image section, but do not
+include the effects of previous linear transformations. If the input
+image name does not include an image section, then tv coordinates are
+identical to logical coordinates. If the input image name does include a
+section, and the input image has not been linearly transformed or copied from
+a parent image, tv coordinates are identical to physical coordinates.
+In the tv coordinate system the coordinates of the first pixel of a
+2D image, e.g. dev$ypix and a 2D image section, e.g. dev$ypix[200:300,200:300]
+are (1,1) and (200,200) respectively.
+.le
+.ls physical
+Physical coordinates are pixel coordinates invariant with respect to linear
+transformations of the physical image data. For example, if the current image
+was created by extracting a section of another image, the physical
+coordinates of an object in the current image will be equal to the physical
+coordinates of the same object in the parent image, although the logical
+coordinates will be different. In the physical coordinate system the
+coordinates of the first pixel of a 2D image, e.g. dev$ypix and a 2D
+image section, e.g. dev$ypix[200:300,200:300] are (1,1) and (200,200)
+respectively.
+.le
+.ls world
+World coordinates are image coordinates in any units which are invariant
+with respect to linear transformations of the physical image data. For
+example, the ra and dec of an object will always be the same no matter
+how the image is linearly transformed. The units of input world coordinates
+must be the same as those expected by the image header wcs, e. g.
+degrees and degrees for celestial coordinate systems.
+.le
+The wcsin and wcsout parameters default to the values of the package
+parameters of the same name. The default values of the package parameters
+wcsin and wcsout are "logical" and "logical" respectively.
+.le
+.ls cache = no
+Cache the image pixels in memory. Cache may be set to "yes" or "no".
+By default caching is disabled.
+.le
+.ls verify = yes
+Verify the critical parameters in non-interactive mode ? Verify may be set
+to "yes" or "no".
+.le
+.ls update = no
+Update the critical parameters in non-interactive mode if \fIverify\fR is
+set to yes? Update may be set to "yes" or "no".
+.le
+.ls verbose = yes
+Print messages on the terminal in non-interactive mode ? Verbose may be set
+to "yes" or "no.
+.le
+.ls graphics = ")_.graphics"
+The default graphics device.
+Graphics may be set to the apphot package parameter value (the default), "yes",
+or "no.
+.le
+.ls display = ")_.display"
+The default display device. Display may be set to the apphot package
+parameter value (the default), "yes", or "no". By default graphics overlay
+is disabled. Setting display to one of "imdr", "imdg", "imdb", or "imdy"
+enables graphics overlay with the IMD graphics kernel. Setting display to
+"stdgraph" enables CENTER to work interactively from a contour plot.
+.le
+
+.ih
+DESCRIPTION
+CENTER computes accurate centers for a set of objects in the IRAF image
+\fIimage\fR, whose initial coordinates are read from the image display cursor,
+from the text file \fIcoords\fR, or from a cursor command file.
+The computed x and y coordinates, the errors, and the fitting parameters
+are written to the text file \fIoutput\fR.
+
+The coordinates read from \fIcoords\fR are assumed to be in the coordinate
+system defined by \fIwcsin\fR. The options are "logical", "tv", "physical",
+and "world" and the transformation from the input coordinate system to
+the internal "logical" system is defined by the image coordinate system.
+The simplest default is the "logical" pixel system. Users working with
+image sections but importing pixel coordinate lists generated from the parent
+image must use the "tv" or "physical" input coordinate systems.
+Users importing coordinate lists in world coordinates, e.g. ra and dec,
+must use the "world" coordinate system and may need to convert their
+equatorial coordinate units from hours and degrees to degrees and degrees first.
+
+The coordinates written to \fIoutput\fR are in the coordinate
+system defined by \fIwcsout\fR. The options are "logical", "tv",
+and "physical". The simplest default is the "logical" system. Users
+wishing to correlate the output coordinates of objects measured in
+image sections or mosaic pieces with coordinates in the parent
+image must use the "tv" or "physical" coordinate systems.
+
+If \fIcache\fR is yes and the host machine physical memory and working set size
+are large enough, the input image pixels are cached in memory. If caching
+is enabled and CENTER is run interactively, the first measurement will appear
+to take a long time as the entire image must be read in before the measurement
+is actually made. All subsequent measurements will be very fast because CENTER
+is accessing memory, not disk. The point of caching is to speed up random
+image access by making the internal image i/o buffers the same size as the
+image itself. However, if the input object lists are sorted in row order and
+sparse, caching may actually worsen rather than improve the execution time.
+Also, at present there is no point in enabling caching for images that are
+less than or equal to 524288 bytes, i.e. the size of the test image dev$ypix,
+as the default image i/o buffer is exactly that size. However, if the size of
+dev$ypix is doubled by converting it to a real image with the chpixtype task,
+the effect of caching in interactive mode can be quite noticeable if
+measurements of objects in the top and bottom halves of the image are
+alternated.
+
+CENTER can be run either interactively or in batch mode by setting the
+parameter \fIinteractive\fR. In interactive mode starting x and y positions
+can either be read directly from the image cursor or read from the text
+file \fIcoords\fR. In interactive mode the user can examine, adjust, and
+save the algorithm parameters, change objects interactively, query for
+the next or nth object in the list, or fit the entire coordinate list with
+the chosen parameter set. In batch mode the positions can be read from the
+text file \fIcoords\fR or the image cursor can be redirected to a text file
+containing a list of cursor commands as specified by the parameter
+\fIicommands\fR.
+
+.ih
+CURSOR COMMANDS
+
+The following cursor commands are currently available.
+
+.nf
+ Interactive Keystroke Commands
+
+? Print help
+: Colon commands
+v Verify the critical parameters
+w Save the current parameters
+d Plot radial profile of current star
+i Interactively set parameters using current star
+f Fit center of current star
+spbar Fit center of current star, output results
+m Move to next star in coordinate list
+n Center next star in coordinate list, output results
+l Center remaining stars in coordinate list, output results
+e Print error messages
+r Rewind the coordinate list
+q Exit task
+
+
+ Colon Commands
+
+:show [data/center] List the parameters
+:m [n] Move to next [nth] star in coordinate list
+:n [n] Center next [nth] star in coordinate list,
+ output results
+
+
+ Colon Parameter Editing Commands
+
+# Image and file name parameters
+
+:image [string] Image name
+:coords [string] Coordinate file name
+:output [string] Output file name
+
+# Data dependent parameters
+
+:scale [value] Image scale (units per pixel)
+:fwhmpsf [value] Full-width half-maximum of PSF (scale units)
+:emission [y/n] Emission feature (y), absorption (n)
+:sigma [value] Standard deviation of sky (counts)
+:datamin [value] Minimum good data value (counts)
+:datamax [value] Maximum good data value (counts)
+
+# Noise parameters
+
+:noise [string] Noise model (constant|poisson)
+:gain [string] Gain image header keyword
+:ccdread [string] Readout noise image header keyword
+:epadu [value] Gain (electrons per adu)
+:readnoise [value] Readout noise (electrons)
+
+# Observations parameters
+
+:exposure [string] Exposure time image header keyword
+:airmass [string] Airmass image header keyword
+:filter [string] Filter image header keyword
+:obstime [string] Time of observation image header keyword
+:itime [value] Exposure time (time units)
+:xairmass [value] Airmass value (number)
+:ifilter [string] Filter id string
+:otime [string] Time of observation (time units)
+
+# Centering parameters
+
+:calgorithm [string] Centering algorithm
+:cbox [value] Width of centering box (scale units)
+:cthreshold [value] Centering intensity threshold (sigma)
+:cmaxiter [value] Maximum number of iterations
+:maxshift [value] Maximum center shift (scale units)
+:minsnratio [value] Minimum signal to noise for centering
+:clean [y/n] Clean subraster before centering
+:rclean [value] Cleaning radius (scale units)
+:rclip [value] Clipping radius (scale units)
+:kclean [value] Clean K-sigma rejection limit (sigma)
+
+# Plotting and marking parameters
+
+:mkcenter [y/n] Mark computed centers on the display
+:radplot [y/n] Plot radial profile of object
+
+
+The following keystroke commands are available from the interactive setup
+menu.
+
+ Interactive Center Setup Menu
+
+ v Mark and verify the critical center parameters (f,s,c)
+
+ f Mark and verify the full-width half-maximum of the psf
+ s Mark and verify the standard deviation of the background
+ l Mark and verify the minimum good data value
+ u Mark and verify the maximum good data value
+
+ c Mark and verify the centering box half-width
+ n Mark and verify the cleaning radius
+ p Mark and verify the clipping radius
+.fi
+
+.ih
+ALGORITHMS
+
+Descriptions of the data dependent parameters and the centering
+algorithm parameters can be found in the online manual pages for
+\fIdatapars\fR and \fIcenterpars\fR.
+
+.ih
+OUTPUT
+
+In interactive mode the following quantities are written to the terminal
+as each object is measured. Error is a simple string which indicates
+whether an error condition has been flagged. The centers and their errors are
+in pixel units.
+
+.nf
+ image xinit yinit xcenter ycenter xerr yerr error
+.fi
+
+In both interactive and batch mode the full output is written to the
+text file \fIoutput\fR. At the beginning of each file is a header
+listing the current values of the parameters when the first stellar
+record was written. These parameters can be subsequently altered.
+For each star measured the following record is written
+
+.nf
+ image xinit yinit id coords lid
+ xcenter ycenter xshift yshift xerr yerr cier error
+.fi
+
+Image and coords are the name of the image and coordinate file respectively.
+Id and lid are the sequence numbers of stars in the output and coordinate
+files respectively. Cier and error are the centering error code and accompanying
+error message respectively. Xinit, yinit, xcenter, ycenter, xshift, yshift,
+and xerr, yerr are self explanatory and output in pixel units. The sense of
+the xshift and yshift definitions is the following.
+
+.nf
+ xshift = xcenter - xinit
+ yshift = ycenter - yinit
+.fi
+
+In interactive mode a radial profile of each measured object is plotted
+in the graphics window if \fIradplots\fR is "yes".
+
+In interactive and batch mode a radial profile plot is written to
+\fIplotfile\fR, if it is defined, each time the result of an object
+measurement is written to \fIoutput\fR.
+
+.ih
+ERRORS
+
+If the object centering was error free then the field cier will be zero.
+Non-zero values in the cier column flag the following error conditions.
+
+.nf
+ 0 # No error
+ 101 # The centering box is off the image
+ 102 # The centering box is partially off the image
+ 103 # The S/N ratio is low in the centering box
+    104     # There are too few points for a good fit
+ 105 # The x or y center fit is singular
+ 106 # The x or y center fit did not converge
+ 107 # The x or y center shift is greater than maxshift
+ 108 # There is bad data in the centering box
+.fi
+
+.ih
+EXAMPLES
+
+1. Compute the centers for a few stars in dev$ypix using the image display
+and the image cursor. Set up the task parameters using the interactive
+setup menu defined by the i keystroke command and a radial profile plot.
+
+.nf
+ ap> display dev$ypix 1 fi+
+
+ ... display the image
+
+ ap> center dev$ypix
+
+ ... type ? to see help screen
+
+ ... move image cursor to a star
+ ... type i to enter the interactive setup menu
+ ... enter the maximum radius in pixels for the radial profile or
+ accept the default with a CR
+ ... type v to get the default menu
+ ... set the fwhmpsf, sigma, and centering box half-width using the
+ graphics cursor and the stellar radial profile plot
+ ... typing <CR> after a prompt leaves the parameter at its default
+ value
+ ... type q to exit setup menu
+
+ ... type the v key to verify the critical parameters
+
+ ... type the w key to save the parameters in the parameter files
+
+ ... move the image cursor to the stars of interest and tap
+ the space bar
+
+ ... type q to quit followed by q to confirm the quit
+
+ ... the output will appear in ypix.ctr.1
+
+.fi
+
+2. Compute the centers for a few stars in dev$ypix using the contour plot
+and the graphics cursor. This option is only useful for those (now very few)
+users who have access to a graphics terminal but not to an image display
+server. Set up the task parameters using the interactive setup menu defined by
+the i key command as in example 1.
+
+.nf
+ ap> show stdimcur
+
+ ... record the default value of stdimcur
+
+ ap> set stdimcur = stdgraph
+
+ ... define the image cursor to be the graphics cursor
+
+ ap> contour dev$ypix
+
+ ... make a contour plot of dev$ypix
+
+ ap> contour dev$ypix >G ypix.plot1
+
+    ... store the contour plot of ypix in the file ypix.plot1
+
+ ap> center dev$ypix display=stdgraph
+
+ ... type ? to see the help screen
+
+ ... move graphics cursor to a star
+ ... type i to enter the interactive setup menu
+ ... enter the maximum radius in pixels for the radial profile or
+ accept the default with a CR
+ ... type v key to get the default setup menu
+ ... enter maximum radius in pixels of the radial profile
+ ... set the fwhmpsf, sigma, and centering box half-width
+ using the graphics cursor and the stellar radial profile plot
+ ... typing <CR> after the prompt leaves the parameter at its
+ default value
+ ... type q to quit the setup menu
+
+ ... type the v key to verify critical parameters
+
+ ... type the w key to save the parameters in the parameter files
+
+ ... retype :.read ypix.plot1 to reload the contour plot
+
+ ... move the graphics cursor to the stars of interest and tap
+ the space bar
+
+ ... a one line summary of the answers will appear on the standard
+ output for each star measured
+
+ ... type q to quit followed by q to confirm the quit
+
+ ... full output will appear in the text file ypix.ctr.2
+
+ ap> set stdimcur = <default>
+
+ ... reset stdimcur to its previous value
+.fi
+
+
+3. Set up and run CENTER interactively on a list of objects, temporarily
+overriding the fwhmpsf, sigma, and cbox parameters determined in examples
+1 or 2.
+
+.nf
+ ap> daofind dev$ypix fwhmpsf=2.6 sigma=25.0 verify-
+
+ ... make a coordinate list
+
+ ... the output will appear in the text file ypix.coo.1
+
+ ap> center dev$ypix cbox=7.0 coords=ypix.coo.1
+
+ ... type ? for optional help
+
+
+ ... move the graphics cursor to the stars and tap space bar
+
+ or
+
+ ... select stars from the input coordinate list with m / :m #
+ and measure with spbar
+
+ ... measure stars selected from the input coordinate list
+        with n / :n #
+
+ ... a one line summary of results will appear on the standard output
+ for each star measured
+
+ ... the output will appear in ypix.ctr.3 ...
+.fi
+
+
+4. Display and measure some stars in an image section and write the output
+coordinates in the coordinate system of the parent image.
+
+.nf
+ ap> display dev$ypix[150:450,150:450] 1
+
+ ... display the image section
+
+ ap> center dev$ypix[150:450,150:450] wcsout=tv
+
+ ... move cursor to stars and type spbar
+
+ ... type q to quit and q again to confirm quit
+
+ ... output will appear in ypix.ctr.4
+
+ ap> pdump ypix.ctr.4 xc,yc yes | tvmark 1 STDIN col=204
+.fi
+
+
+5. Run CENTER in batch mode using the coordinate file and the previously
+saved parameters. Verify the critical parameters.
+
+.nf
+ ap> center dev$ypix coords=ypix.coo.1 verify+ inter-
+
+ ... output will appear in ypix.ctr.5 ...
+.fi
+
+
+6. Repeat example 5 but assume that the input coordinates are ra and dec
+in degrees and degrees, turn off verification, and submit the task to
+the background.
+
+.nf
+ ap> display dev$ypix
+
+ ap> rimcursor wcs=world > radec.coo
+
+ ... move to selected stars and type any key
+
+ ... type ^Z to quit
+
+ ap> center dev$ypix coords=radec.coo wcsin=world verify- inter- &
+
+ ... output will appear in ypix.ctr.6
+
+ ap> pdump ypix.ctr.6 xc,yc yes | tvmark 1 STDIN col=204
+
+ ... mark the stars on the display
+.fi
+
+7. Run CENTER interactively without using the image display.
+
+.nf
+ ap> show stdimcur
+
+ ... record the default value of stdimcur
+
+ ap> set stdimcur = text
+
+ ... set the image cursor to the standard input
+
+ ap> center dev$ypix coords=ypix.coo.1
+
+ ... type ? for optional help
+
+ ... type :m 3 to set the initial coordinates to those of the
+ third star in the list
+
+ ... type i to enter the interactive setup menu
+ ... enter the maximum radius in pixels for the radial profile or
+ accept the default with a CR
+ ... type v to enter the default menu
+ ... set the fwhmpsf, sigma, and centering box half-width
+ using the graphics cursor and the stellar radial profile plot
+ ... typing <CR> after the prompt leaves the parameter at its default
+ value
+
+ ... type r to rewind the coordinate list
+
+ ... type l to measure all the stars in the coordinate list
+
+ ... a one line summary of the answers will appear on the standard
+ output for each star measured
+
+ ... type q to quit followed by q to confirm the quit
+
+ ... full output will appear in the text file ypix.ctr.7
+
+ ap> set stdimcur = <default>
+
+ ... reset the value of stdimcur
+.fi
+
+8. Use an image cursor command file to drive the CENTER task. The cursor command
+file shown below sets the fwhmpsf, calgorithm, and cbox parameters, computes
+the centers for 3 stars, updates the parameter files, and quits the task.
+
+.nf
+ ap> type cmdfile
+ : calgorithm gauss
+ : fwhmpsf 2.5
+ : cbox 9.0
+ 442 410 101 \040
+ 349 188 101 \040
+ 225 131 101 \040
+ w
+ q
+
+ ap> center dev$ypix icommands=cmdfile verify-
+
+ ... full output will appear in ypix.ctr.8
+.fi
+
+.ih
+BUGS
+
+It is the responsibility of the user to make sure that the image displayed
+in the image display is the same as the image specified by the image parameter.
+
+Commands which draw to the image display are disabled by default.
+To enable graphics overlay on the image display, set the display
+parameter to "imdr", "imdg", "imdb", or "imdy" to get red, green,
+blue or yellow overlays and set the centerpars mkcenter switch to
+"yes". It may be necessary to run gflush and to redisplay the image
+to get the overlays positioned correctly.
+
+.ih
+SEE ALSO
+datapars, centerpars
+.endhelp
diff --git a/noao/imred/irred/doc/irlincor.hlp b/noao/imred/irred/doc/irlincor.hlp
new file mode 100644
index 00000000..630370a1
--- /dev/null
+++ b/noao/imred/irred/doc/irlincor.hlp
@@ -0,0 +1,81 @@
+.help irlincor Nov94 irred
+.ih
+NAME
+irlincor -- Correct IR imager frames for non-linearity.
+.ih
+USAGE
+irlincor input output
+.ih
+PARAMETERS
+.ls input
+The list of images to be corrected for non-linearity
+.le
+.ls output
+The list of corrected output images
+.le
+
+.ls coeff1 = 1.0
+The first coefficient of the correction function
+.le
+
+.ls coeff2 = 0.0
+The second coefficient of the correction function
+.le
+
+.ls coeff3 = 0.0
+The third coefficient of the correction function
+.le
+
+.ih
+DESCRIPTION
+The IR imager frames specified by \fIinput\fR, which may be a general image
+template including wild cards or an @list, are corrected for non-linearity
+on a pixel by pixel basis and written to \fIoutput\fR. The number of output
+images must match the number input. The pixel type of the output image(s) will
+match that of the input image(s), however, internally all calculations are
+performed as type real. The correction is performed assuming
+that the non-linearity can be represented by the following simple relationship:
+.nf
+
+ADU' = ADU * [ coeff1 + coeff2 * (ADU / 32767) + coeff3 * (ADU / 32767)**2 ]
+
+.fi
+The coefficients which occur in this expression are specified by the
+parameters \fIcoeff1\fR, \fIcoeff2\fR and \fIcoeff3\fR. Their values are
+derived from periodic instrumental calibrations and are believed to be
+fairly constant. The default values specify a \fBnull\fR correction.
+You should consult \fBJay Elias\fR for the latest values.
+Note that the coefficients are expressed in terms of ADU normalised to the
+maximum possible value 32767, in order that their values can be input
+more easily.
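+
+As an illustration only, the relationship above can be sketched outside IRAF
+in plain Python/numpy as follows. The function name and the example
+coefficient value are ours and are not part of the IRLINCOR task; the
+default coefficients again give a null correction.
+.nf
+    import numpy as np
+
+    def correct(adu, coeff1=1.0, coeff2=0.0, coeff3=0.0):
+        # ADU' = ADU * [c1 + c2*(ADU/32767) + c3*(ADU/32767)**2]
+        a = np.asarray(adu, dtype=float)
+        x = a / 32767.0                  # ADU normalized to full scale
+        return a * (coeff1 + coeff2 * x + coeff3 * x**2)
+
+    # A half-scale pixel with a 5% linear term:
+    print(correct(16384, coeff2=0.05))   # ~16793.6
+.fi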
+.ih
+EXAMPLES
+1. Correct input to output using the default values for the coefficients (not a very rewarding operation!)
+
+.nf
+ cl> irlincor input output
+
+.fi
+
+2. Correct a list of images in place using specified values for the coefficients
+
+.nf
+ cl> irlincor @list @list coeff1=1.0 coeff2=0.1 coeff3=0.01
+
+.fi
+.ih
+TIME REQUIREMENTS
+.ih
+AUTHORS
+The IRLINCOR task was originally written by Steve Heathcote as part of the
+CTIO package.
+.ih
+BUGS
+The form of the correction equation is currently experimental;
+a higher order polynomial or a different functional form could be accommodated
+very easily if required.
+It may be advisable to carry out the calculations in double precision.
+.ih
+SEE ALSO
+onedspec.coincor, proto.imfunction
+.endhelp
diff --git a/noao/imred/irred/doc/mosproc.hlp b/noao/imred/irred/doc/mosproc.hlp
new file mode 100644
index 00000000..d0f5c931
--- /dev/null
+++ b/noao/imred/irred/doc/mosproc.hlp
@@ -0,0 +1,170 @@
+.help mosproc May89 irred
+.ih
+NAME
+mosproc -- Prepare images for quick look mosaicing
+.ih
+USAGE
+mosproc input output nxsub nysub
+.ih
+PARAMETERS
+.ls input
+The list of input images to be mosaiced. The images are assumed
+to be ordered either by row, column, or in a raster pattern. If
+the image list is not in order then the IRAF \fBfiles\fR task plus
+the \fBeditor\fR must be used to construct an image list. The images
+in the input list are assumed to all be the same size.
+.le
+.ls output
+The name of the output mosaiced image.
+.le
+.ls nxsub
+The number of subrasters along a row of the output image.
+.le
+.ls nysub
+The number of subrasters along a column of the output image.
+.le
+.ls skysubtract = yes
+Subtract a sky image from all the input images. The sky image
+to be subtracted is either \fIsky\fR or a sky image computed
+by median filtering selected input images after weighting the images
+by the exposure time.
+.le
+.ls sky = ""
+The name of the sky image.
+.le
+.ls exclude = ""
+The input images to be excluded from the computation of the sky image.
+For example if \fIexclude\fR="1,3-5" then input images 1, 3, 4, 5 are
+not used for computing the sky frame.
+.le
+.ls expname = "exptime"
+The image header exposure time keyword. If the sky frame is computed
+internally by median filtering the input images, the individual images
+are weighted by the exposure time defined by the exposure time
+keyword \fIexpname\fR. Weights of 1 are assigned when no exposure time
+is given.
+.le
+.ls flatten = yes
+Divide all the images by a flat field image. Flat fielding is done
+after sky subtraction. If the name of a flat field image \fIflat\fR
+is supplied, that image is divided directly into all the input images.
+Otherwise the sky frame computed above is normalized by the mode of the
+pixels and divided into all the input images.
+.le
+.ls flat = ""
+The name of the flat field image.
+.le
+.ls transpose = no
+Transpose the input images before inserting them into the mosaic.
+.le
+.ls trim_section = "[*,*]"
+The section of the input images to be mosaiced into the output
+image. Section can be used to flip and/or trim the individual
+subrasters before adding them to the mosaic. For example if we
+want to flip each subraster around the y axis before adding it
+to the mosaic, then \fItrim_section\fR = "[*,-*]".
+.le
+.ls corner = "lr"
+The starting position in the output image. The four options are "ll" for
+lower left corner, "lr" for lower right corner, "ul" for upper left
+corner and "ur" for upper right corner.
+.le
+.ls direction = "row"
+Add input images to the output image in row or column order. The options
+are "row" for row order and "column" for column order. The direction
+specified must agree with the order of the input list.
+.le
+.ls raster = no
+Add the columns or rows to the output image in a raster pattern, rather than
+returning to the start of each column or row.
+.le
+.ls median_section = ""
+Compute the median of each input image inserted into the mosaic using the
+specified section.
+.le
+.ls subtract = no
+Subtract the computed median from each input image before inserting it
+into the mosaic.
+.le
+.ls oval = -1.0
+The value of border pixels.
+.le
+.ls delete = yes
+Delete sky subtracted, flat fielded and transposed images upon exit from
+the script.
+.le
+.ls logfile = STDOUT
+The name of the log file.
+.le
+
+.ih
+DESCRIPTION
+
+MOSPROC takes the list of input images \fIinput\fR of identical dimensions and
+inserts them into a single output image \fIoutput\fR. Before mosaicing the user
+can optionally sky subtract, flat field or transpose the input images.
+If \fIskysubtract\fR = yes, a single sky
+image is subtracted from all the input images. The sky image
+may be the externally derived image \fIsky\fR or calculated internally
+by computing the exposure time weighted median of the input images, minus
+those input images specifically excluded by the \fIexclude\fR parameter.
+If \fIflatten\fR = yes, the input images are flat fielded using either
+the externally defined flat field image \fIflat\fR or the internally
+derived sky image normalized by its mode.
+If \fItranspose\fR is enabled, all the input images are transposed
+before mosaicing.
+
+MOSPROC takes the list of processed images and inserts them into the
+output image in positions determined by their order in the input list,
+\fInxsub\fR, \fInysub\fR and the parameters \fIcorner\fR, \fIdirection\fR
+and \fIraster\fR.
+The orientation and size of each individual subraster in the output image
+may be altered by setting the parameter \fItrim_section\fR. The size
+of the output image is determined by nxsub and nysub and the size of
+the individual input images. A one column wide border is drawn between
+each of the output image subrasters with a pixel value of \fIoval\fR.
+The user may optionally compute and subtract the median from each input
+image before inserting it into the mosaic.
+
+MOSPROC produces an output mosaiced image \fIoutput\fR and an accompanying
+database file \fIdboutput\fR. These two files plus an interactively
+generated coordinate list comprise the necessary input for the IRALIGN,
+IRMATCH1D and IRMATCH2D tasks.
+The temporary images generated (sky subtracted, flat fielded, and
+transposed) are deleted automatically before the task completes if
+\fBdelete=yes\fR. Otherwise they are left in the same directory as the
+input images.
+The temporary sky and flat field images if created are not deleted.
+
+The computation of the sky frame is done with IMAGES.IMCOMBINE and the
+subsequent sky subtraction with IMAGES.IMARITH. The computation of
+the flat field is done with PROTO.BSCALE and the flat field division
+with FLATTEN. The task IMAGES.IMTRANS transposes the input images.
+The mosaicing itself is done with PROTO.IRMOSAIC.
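+
+For reference, a roughly equivalent manual computation of the sky frame
+(a sketch only, with placeholder list names; MOSPROC builds the exact
+argument lists internally from the parameters described above) would be
+
+    ir> imcombine @objlist mosaic.sky combine=median reject=none \
+    >>> scale=exposure weight=exposure expname=EXPTIME
+    ir> imarith @objlist - mosaic.sky @redlist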
+
+.ih
+EXAMPLES
+
+1. Mosaic a list of 64 infrared images onto an 8 by 8 grid after sky
+ subtraction and flat fielding. Use an externally derived sky and
+ flat field image
+
+ ir> mosproc @imlist mosaic 8 8 skysub+ sky=skyimage flatten+ \
+ >>> flat=flatfield
+
+2. Mosaic a list of 64 infrared images onto an 8 by 8 grid after sky
+ subtraction and flat fielding. Derive the sky and flat field frames
+ from the data excluding image number 5
+
+ ir> mosproc @imlist mosaic 8 8 skysub+ exclude="5" flatten+
+
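+3. Mosaic the same list without sky subtraction or flat fielding, transposing
+   the input images and flipping each subraster about the y axis as it is
+   inserted (a sketch of a plausible call using the parameters described
+   above)
+
+    ir> mosproc @imlist mosaic 8 8 skysub- flatten- transpose+ \
+    >>> trim_section="[*,-*]"
+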
+.ih
+TIME REQUIREMENTS
+
+.ih
+BUGS
+
+.ih
+SEE ALSO
+images.imcombine, images.imarith, proto.bscale, images.imtrans, proto.irmosaic
+.endhelp
diff --git a/noao/imred/irred/imcombine b/noao/imred/irred/imcombine
new file mode 100644
index 00000000..c95d8302
--- /dev/null
+++ b/noao/imred/irred/imcombine
@@ -0,0 +1,11 @@
+imcombine ("@"//tmptmp, skyframe, sigma="", logfile=logfile,
+ outtype="", option="median", expname=expname, exposure+,
+ sca-, off-, wei+, modesec="", low=3., high=3., blank=-1)
+
+imcombine ("@"//tmptmp, skyframe, plfile="", sigma="", logfile=logfile,
+ combine="median", reject="none", project=no, outtype="", offsets="none",
+ masktype="none", maskvalue=0.0, blank=-1.0, scale="exposure",
+ zero="none", weight="exposure", statsec="", expname=expname,
+ lthreshold=INDEF, hthreshold=INDEF, nlow=1, nhigh=1, mclip=yes,
+ lsigma=3.0, hsigma=3.0, rdnoise="0.0", gain="1.0", sigscale=0.1,
+ pclip=-0.5, grow=0)
diff --git a/noao/imred/irred/iralign.par b/noao/imred/irred/iralign.par
new file mode 100644
index 00000000..0865c07d
--- /dev/null
+++ b/noao/imred/irred/iralign.par
@@ -0,0 +1,20 @@
+# IRALIGN
+
+input,f,a,,,,Input image
+output,f,a,,,,Output image
+database,f,a,,,,Database file
+coords,f,a,,,,Coordinate file
+xshift,r,a,0.0,,,Xshift for align by shifts
+yshift,r,a,0.0,,,Yshift for align by shifts
+alignment,s,h,"coords",,,'Alignment technique (coords|shifts|file)'
+nxrsub,i,h,INDEF,,,Row index of reference subraster
+nyrsub,i,h,INDEF,,,Column index of reference subraster
+xref,i,h,0,,,X offset of reference subraster in pixels
+yref,i,h,0,,,Y offset of reference subraster in pixels
+trimlimits,s,h,"[1:1,1:1]",,,Trim limits for each subraster
+nimcols,i,h,INDEF,,,Number of columns in the output image
+nimlines,i,h,INDEF,,,Number of lines in the output image
+oval,r,h,INDEF,,,The value of undefined regions in the image
+interpolant,s,h,'linear',,,'Interpolant (nearest|linear|poly3|poly5|spline3)'
+verbose,b,h,yes,,,Print messages
+mode,s,h,'ql'
diff --git a/noao/imred/irred/irlincor.par b/noao/imred/irred/irlincor.par
new file mode 100644
index 00000000..701a0483
--- /dev/null
+++ b/noao/imred/irred/irlincor.par
@@ -0,0 +1,7 @@
+# irlincor parameter file
+input,s,a,"",,,Input images
+output,s,a,"",,,Output images
+section,s,h,"",,,Image section to correct
+coeff1,r,h,1.0,,,First coefficient of correction equation
+coeff2,r,h,0.0,,,Second coefficient of correction equation
+coeff3,r,h,0.0,,,Third coefficient of correction equation
diff --git a/noao/imred/irred/irmatch1d.par b/noao/imred/irred/irmatch1d.par
new file mode 100644
index 00000000..a9c40ff6
--- /dev/null
+++ b/noao/imred/irred/irmatch1d.par
@@ -0,0 +1,21 @@
+# IRMATCH1D
+
+input,f,a,,,,Input image
+output,f,a,,,,Output image
+database,f,a,,,,Database file
+coords,f,a,,,,Coordinate file
+xshift,r,a,0.0,,,Xshift for align by shifts
+yshift,r,a,0.0,,,Yshift for align by shifts
+alignment,s,h,"coords",,,'Alignment technique (coords|shifts|file)'
+match,s,h,"*",,,Intensity match the following subrasters
+nxrsub,i,h,INDEF,,,Row index of reference subraster
+nyrsub,i,h,INDEF,,,Column index of reference subraster
+xref,i,h,0,,,Column offset of reference subraster
+yref,i,h,0,,,Line offset of reference subraster
+trimlimits,s,h,"[1:1,1:1]",,,Trim limits for the input subraster
+nimcols,i,h,INDEF,,,Number of columns in the output image
+nimlines,i,h,INDEF,,,Number of lines in the output image
+oval,r,h,INDEF,,,The value of undefined regions in the image
+interpolant,s,h,'linear',,,'Interpolant (nearest|linear|poly3|poly5|spline3)'
+verbose,b,h,yes,,,Print messages
+mode,s,h,'ql'
diff --git a/noao/imred/irred/irmatch2d.par b/noao/imred/irred/irmatch2d.par
new file mode 100644
index 00000000..7a159eba
--- /dev/null
+++ b/noao/imred/irred/irmatch2d.par
@@ -0,0 +1,21 @@
+# IRMATCH2D
+
+input,f,a,,,,Input image
+output,f,a,,,,Output image
+database,f,a,,,,Database file
+coords,f,a,,,,Coordinate file
+xshift,r,a,0.0,,,Xshift for align by shifts
+yshift,r,a,0.0,,,Yshift for align by shifts
+alignment,s,h,"coords",,,'Alignment technique (coords|shifts|file)'
+match,s,h,"*",,,Intensity match the following subrasters
+nxrsub,i,h,INDEF,,,Row index of reference subraster
+nyrsub,i,h,INDEF,,,Column index of reference subraster
+xref,i,h,0,,,Column offset of the reference subraster
+yref,i,h,0,,,Line offset of the reference subraster
+trimlimits,s,h,"[1:1,1:1]",,,Trim limits for the input subraster
+nimcols,i,h,INDEF,,,Number of columns in the output image
+nimlines,i,h,INDEF,,,Number of lines in the output image
+oval,r,h,INDEF,,,The value of undefined regions in the image
+interpolant,s,h,'linear',,,'Interpolant (nearest|linear|poly3|poly5|spline3)'
+verbose,b,h,yes,,,Print messages
+mode,s,h,'ql'
diff --git a/noao/imred/irred/irmosaic.par b/noao/imred/irred/irmosaic.par
new file mode 100644
index 00000000..7fc573ff
--- /dev/null
+++ b/noao/imred/irred/irmosaic.par
@@ -0,0 +1,22 @@
+# IRMOSAIC
+
+input,f,a,,,,List of input images
+output,f,a,,,,Output image
+database,f,a,,,,Output database file
+nxsub,i,a,,,,Number of input images along the x direction
+nysub,i,a,,,,Number of input images along the y direction
+trim_section,s,h,"[*,*]",,,Input image section written to the output image
+null_input,s,h,"",,,List of missing input images
+corner,s,h,"ll",,,Position of first subraster
+direction,s,h,"row",,,Row or column order placement
+raster,b,h,no,,,Raster scan mode
+median_section,s,h,"",,,Input image section used to compute the median
+subtract,b,h,no,,,Subtract median from each input image
+nimcols,i,h,INDEF,,,The number of columns in the output image
+nimrows,i,h,INDEF,,,The number of rows in the output image
+nxoverlap,i,h,-1,,,Number of columns of overlap between input images
+nyoverlap,i,h,-1,,,Number of rows of overlap between input images
+opixtype,s,h,"r",,,Output image pixel type
+oval,r,h,0.0,,,Value of undefined output image pixels
+verbose,b,h,yes,,,Print out messages
+mode,s,h,'ql'
diff --git a/noao/imred/irred/irred.cl b/noao/imred/irred/irred.cl
new file mode 100644
index 00000000..a3b33765
--- /dev/null
+++ b/noao/imred/irred/irred.cl
@@ -0,0 +1,36 @@
+#{ IRRED -- KPNO IR Camera Reduction Package
+
+# Load necessary core packages
+
+images # tasks sections,imcopy,imarith,imcombine,imdelete
+lists # tokens task
+utilities # task translit
+proto # task bscale
+
+# Define necessary paths
+
+set generic = "noao$imred/generic/"
+set nproto = "noao$nproto/"
+
+package irred
+
+task irlincor = "irred$x_irred.e"
+
+task iralign,
+ irmatch1d,
+ irmatch2d,
+ irmosaic = "nproto$x_nproto.e"
+
+# Define the apphot centering and related tasks
+
+task center = "irred$x_apphot.e"
+task centerpars = "irred$centerpars.par"
+task datapars = "irred$datapars.par"
+task txdump = "irred$x_ptools.e"
+
+# Scripts
+
+task flatten = "generic$flatten.cl"
+task mosproc = "irred$mosproc.cl"
+
+clbye()
diff --git a/noao/imred/irred/irred.hd b/noao/imred/irred/irred.hd
new file mode 100644
index 00000000..8a2e6437
--- /dev/null
+++ b/noao/imred/irred/irred.hd
@@ -0,0 +1,8 @@
+# Help directory for the IRRED package.
+
+$doc = "./doc/"
+
+center hlp=doc$center.hlp
+irlincor hlp=doc$irlincor.hlp, src=t_irlincor.x
+mosproc hlp=doc$mosproc.hlp, src=mosproc.cl
+revisions sys=Revisions
diff --git a/noao/imred/irred/irred.men b/noao/imred/irred/irred.men
new file mode 100644
index 00000000..e3500032
--- /dev/null
+++ b/noao/imred/irred/irred.men
@@ -0,0 +1,11 @@
+ txdump - Select fields from the center task output text file
+ center - Compute accurate centers for a list of objects
+ centerpars - Edit the centering parameters
+ datapars - Edit the data dependent parameters
+ flatten - Flatten images using a flat field
+ iralign - Align the image produced by irmosaic
+ irlincor - Correct IR imager frames for non-linearity
+ irmatch1d - Align and intensity match the image produced by irmosaic (1D)
+ irmatch2d - Align and intensity match the image produced by irmosaic (2D)
+ irmosaic - Mosaic an ordered list of images onto a grid
+ mosproc - Prepare images for quick look mosaicing
diff --git a/noao/imred/irred/irred.par b/noao/imred/irred/irred.par
new file mode 100644
index 00000000..18e57b78
--- /dev/null
+++ b/noao/imred/irred/irred.par
@@ -0,0 +1,3 @@
+# PARAMETERS FOR KPNO IR CAMERA REDUCTION PACKAGE
+
+version,s,h,"Mar 1989"
diff --git a/noao/imred/irred/mkpkg b/noao/imred/irred/mkpkg
new file mode 100644
index 00000000..b62021e8
--- /dev/null
+++ b/noao/imred/irred/mkpkg
@@ -0,0 +1,24 @@
+# Make the IRRED package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $set LIBS="-lxtools"
+ $update libpkg.a
+ $omake x_irred.x
+ $link x_irred.o libpkg.a $(LIBS) -o xx_irred.e
+ ;
+
+install:
+ $move xx_irred.e noaobin$x_irred.e
+ ;
+
+libpkg.a:
+ t_irlincor.x <error.h> <imhdr.h>
+ ;
diff --git a/noao/imred/irred/mosproc.cl b/noao/imred/irred/mosproc.cl
new file mode 100644
index 00000000..3fa89405
--- /dev/null
+++ b/noao/imred/irred/mosproc.cl
@@ -0,0 +1,172 @@
+# MOSPROC - Sky subtract, flat field and transpose images before mosaicing.
+
+procedure mosproc (input, output, nxsub, nysub)
+
+string input {prompt="Input images"}
+string output {prompt="Output image"}
+int nxsub {8, prompt="Number of subrasters in x"}
+int nysub {8, prompt="Number of subrasters in y"}
+
+bool skysubtract {yes, prompt="Sky subtract images before mosaicing"}
+string sky {"", prompt="Sky image to subtract"}
+string exclude {"", prompt="Input images excluded from sky frame"}
+string expname {"EXPTIME", prompt="Image exposure time keyword"}
+
+bool flatten {yes, prompt="Flatten images before mosaicing"}
+string flat {"", prompt="Flat field image"}
+bool transpose {no, prompt="Transpose images before mosaicing?"}
+
+string trim_section {"[*,*]", prompt="Input image section to be extracted"}
+string corner {"lr", prompt="Starting corner for the mosaic"}
+string direction {"row", prompt="Starting direction for the mosaic"}
+bool raster {no, prompt="Raster scan?"}
+string median_section {"", prompt="Input subraster section for median?"}
+bool subtract {no, prompt="Subtract median from each subraster?"}
+real oval {-1.0, prompt="Mosaic border pixel values"}
+
+bool delete {yes, prompt="Delete temporary images?"}
+file logfile {"STDOUT", prompt="Log file name"}
+
+struct *list1, *list2
+
+begin
+ file tmpimg, tmptmp, tmpred, tmpexc
+ int nx, ny, i, nin, lo, hi
+ string skyframe, normframe, in, out, img, delim, junk
+
+ tmpimg = mktemp ("MOS")
+ tmptmp = mktemp ("MOS")
+ tmpred = mktemp ("MOS")
+ tmpexc = mktemp ("tmp$MOS")
+
+ # Get positional parameters
+ in = input
+ out = output
+ nx = nxsub
+ ny = nysub
+
+    # Expand the input file name list, strip any ".imh" extensions, and
+    # build the matching list of ".red" output image names.
+ sections (in, option="fullname", > tmptmp)
+ list1 = tmptmp
+ for (nin = 0; fscan (list1, img) != EOF; nin += 1) {
+ i = strlen (img)
+ if (substr (img, i-3, i) == ".imh")
+ img = substr (img, 1, i-4)
+ print (img, >> tmpimg)
+ print (img // ".red", >> tmpred)
+ }
+ list1 = ""; delete (tmptmp, ver-, >& "dev$null")
+
+    # Sky subtract: either subtract a supplied sky image directly, or
+    # compute a sky frame from the input images, excluding any frames
+    # listed in the 'exclude' range list.
+ if (skysubtract && sky != "") {
+
+ skyframe = sky
+ imarith ("@"//tmpimg, "-", skyframe, "@"//tmpred, title="",
+ divzero=0., hparams="", pixtype="", calctype="", verbose+,
+ noact-, >> logfile)
+
+ } else if (skysubtract) {
+
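+        # Reduce the exclude string (e.g. "1,3-5") to one entry per line,
+        # replacing range dashes with "!", so that each line can be read
+        # below as either a single image number or a "lo ! hi" range.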
+ print (exclude, ",") | translit ("", "^-,0-9", del+) |
+ translit ("", "-", "!", del-) | tokens (new-) |
+ translit ("", "\n,", " \n", del-, > tmpexc)
+
+ type (tmpexc, >> logfile)
+
+ list1 = tmpexc
+ while (fscan (list1, lo, delim, hi, junk) != EOF) {
+ if (nscan() == 0)
+ next
+ else if (nscan() == 1 && lo >= 1)
+ print (lo, >> tmptmp)
+ else if (nscan() == 3) {
+ lo = min (max (lo, 1), nin); hi = min (max (hi, 1), nin)
+ for (i = lo; i <= hi; i += 1)
+ print (i, >> tmptmp)
+ }
+ }
+ list1 = ""; delete (tmpexc, ver-, >& "dev$null")
+
+ if (access (tmptmp)) {
+ sort (tmptmp, col=0, ign+, num+, rev-) | unique (> tmpexc)
+ delete (tmptmp, ver-, >& "dev$null")
+
+ list1 = tmpimg; list2 = tmpexc; junk = fscan (list2, nin)
+ for (i = 1; fscan (list1, img) != EOF; i += 1) {
+ if (i == nin) {
+ junk = fscan (list2, nin)
+ next
+ }
+ print (img, >> tmptmp)
+ }
+ list1 = ""; list2 = ""; delete (tmpexc, ver-, >& "dev$null")
+ } else
+ tmptmp = tmpimg
+
+ skyframe = out // ".sky"
+
+ imcombine ("@"//tmptmp, skyframe, rejmask="", plfile="", sigma="",
+ logfile=logfile, combine="median", reject="none", project=no,
+ outtype="real", offsets="none", masktype="none", maskvalue=0.0,
+ blank=-1.0, scale="exposure", zero="none", weight="exposure",
+ statsec="", expname=expname, lthreshold=INDEF,
+ hthreshold=INDEF, nlow=1, nhigh=1, nkeep=1, mclip=yes,
+ lsigma=3.0, hsigma=3.0, rdnoise="0.0", gain="1.0", snoise="0.0",
+ sigscale=0.1, pclip=-0.5, grow=0)
+ print ("\n", >> logfile)
+ imarith ("@"//tmpimg, "-", skyframe, "@"//tmpred, title="",
+ divzero=0., hparams="", pixtype="", calctype="", verbose+,
+ noact-, >> logfile)
+
+ } else {
+
+ skyframe = ""
+ imcopy ("@"//tmpimg, "@"//tmpred, verbose-)
+ }
+
+ if (flatten) {
+ if (flat != "") {
+ print ("\n", >> logfile)
+ flatten ("@"//tmpred, flat, minflat=INDEF, pixtype="",
+ keeplog=yes, logfile=logfile)
+ } else if (skyframe != "") {
+ print ("\n", >> logfile)
+ normframe = out // ".norm"
+ imcopy (skyframe, normframe, verbose-)
+ bscale (normframe, normframe, bzero="0.0", bscale="mode",
+ section="", step=10, lower=INDEF, upper=INDEF,
+ verbose+, >>logfile)
+ print ("\n", >> logfile)
+ flatten ("@"//tmpred, normframe, minflat=INDEF, pixtype="",
+ keeplog=yes, logfile=logfile)
+ }
+ }
+
+ if (transpose) {
+ print ("\nTRANSPOSE: Transpose images", >> logfile)
+ time (, >> logfile)
+ imtrans ("@"//tmpred, "@"//tmpred)
+ time (, >> logfile)
+ print ("TRANSPOSE: done", >> logfile)
+ }
+
+ print ("\nIRMOSAIC: Mosaic images", >> logfile)
+ time (, >> logfile)
+ irmosaic ("@"//tmpred, out, "db"//out, nx, ny,
+ trim_section=trim_section, null_input="", corner=corner,
+ direction=direction, raster=raster, nxover=-1, nyover=-1,
+ nimcols=INDEF, nimrows=INDEF, oval=oval,
+ median_section=median_section, sub=subtract, opixtype="r",
+ verbose+, >> logfile)
+ time (, >> logfile)
+ print ("IRMOSAIC: done", >> logfile)
+
+ if (delete) {
+ if (access (tmpred))
+ imdelete ("@"//tmpred, ver-, >& "dev$null")
+ }
+
+ delete (tmpimg, ver-, >& "dev$null")
+ delete (tmptmp, ver-, >& "dev$null")
+ delete (tmpred, ver-, >& "dev$null")
+end
diff --git a/noao/imred/irred/t_irlincor.x b/noao/imred/irred/t_irlincor.x
new file mode 100644
index 00000000..053c383d
--- /dev/null
+++ b/noao/imred/irred/t_irlincor.x
@@ -0,0 +1,254 @@
+include <imhdr.h>
+include <error.h>
+
+# Maximum number of correction function coefficients.
+define MAXCOEF 3
+
+# Maximum possible ADU value (used to normalize the correction function).
+define MAXADU 32767.0
+
+
+# T_IRLINCOR -- Correct IR imager frames for non-linearity. If an image
+# section is specified only that section is corrected; the rest of the
+# image is copied intact to the output image.
+
+procedure t_irlincor ()
+
+pointer inlist, outlist # input and output image lists
+char section[SZ_LINE] # image section
+pointer coeff # coefficients of correction function
+
+bool sflag
+pointer imin, imout
+pointer input, output, orig, temp
+pointer sp
+
+int strlen()
+int imtgetim(), imtlen()
+real clgetr()
+pointer immap(), imtopenp()
+
+begin
+ # Get parameters
+ inlist = imtopenp ("input")
+ outlist = imtopenp ("output")
+ call clgstr ("section", section, SZ_LINE)
+
+ # Check that the input and output image lists have the
+ # same number of images. Abort if that's not the case.
+ if (imtlen (inlist) != imtlen (outlist)) {
+ call imtclose (inlist)
+ call imtclose (outlist)
+ call error (1, "Input and output image lists don't match")
+ }
+
+ # Set section flag
+ sflag = (strlen (section) > 0)
+
+ # Allocate string space
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (orig, SZ_FNAME, TY_CHAR)
+ call salloc (temp, SZ_FNAME, TY_CHAR)
+
+ # Allocate memory for the correction coefficients and
+ # read them from the parameter file.
+ call malloc (coeff, MAXCOEF, TY_REAL)
+ Memr[coeff] = clgetr ("coeff1")
+ Memr[coeff+1] = clgetr ("coeff2")
+ Memr[coeff+2] = clgetr ("coeff3")
+
+ # Loop over all images in the input and output lists
+ while ((imtgetim (inlist, Memc[input], SZ_FNAME) != EOF) &&
+ (imtgetim (outlist, Memc[output], SZ_FNAME) != EOF)) {
+
+ # Generate temporary output image name to allow for
+ # input and output images having the same name
+ call xt_mkimtemp (Memc[input], Memc[output], Memc[orig], SZ_FNAME)
+
+ # Take different actions depending on whether the image section
+ # is specified or not, in order to optimize speed. When the image
+ # section is specified the input image is copied to the output
+ # image and then the output image opened to work on the section.
+ # Otherwise the output image is created only once.
+ if (sflag) {
+
+ # Copy input image into output image using fast copy
+ iferr (call irl_imcopy (Memc[input], Memc[output])) {
+ call erract (EA_WARN)
+ next
+ }
+
+ # Append section to image names. The output name should
+ # be preserved without the section for later use.
+ call strcat (section, Memc[input], SZ_FNAME)
+ call sprintf (Memc[temp], SZ_FNAME, "%s%s")
+ call pargstr (Memc[output])
+ call pargstr (section)
+
+ # Open input and output images. The output image already
+ # exists, since it was created by the copy operation, so
+ # it is opened as read/write.
+ iferr (imin = immap (Memc[input], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+ iferr (imout = immap (Memc[temp], READ_WRITE, 0)) {
+ call imunmap (imin)
+ call erract (EA_WARN)
+ next
+ }
+
+ } else {
+
+ # Open input and output images. The output image does not
+ # exist already so it is opened as a new copy of the input
+ # image.
+ iferr (imin = immap (Memc[input], READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ next
+ }
+ iferr (imout = immap (Memc[output], NEW_COPY, imin)) {
+ call imunmap (imin)
+ call erract (EA_WARN)
+ next
+ }
+
+ }
+
+ # Perform the linear correction.
+ call irl_correct (imin, imout, Memr[coeff], MAXCOEF)
+
+ # Close images
+ call imunmap (imin)
+ call imunmap (imout)
+
+ # Replace output image with the temporary image. This is a
+ # noop if the input and output images have different names
+ call xt_delimtemp (Memc[output], Memc[orig])
+ }
+
+ # Free memory and close image lists
+ call mfree (coeff, TY_REAL)
+ call imtclose (inlist)
+ call imtclose (outlist)
+end
+
+
+# IRL_CORRECT -- Corrects an IR imager frame for non-linearity using a
+# simple power series polynomial correction function:
+#
+# ADU' = ADU * [ a + b * (ADU / MAXADU) + c * (ADU / MAXADU) **2 ]
+#
+
+procedure irl_correct (imin, imout, coeff, ncoef)
+
+pointer imin # input image pointer
+pointer imout # output image pointer
+real coeff[ncoef] # coefficients of polynomial function
+int ncoef # number of polynomial coefficients
+
+int col, ncols
+long v1[IM_MAXDIM], v2[IM_MAXDIM]
+pointer inbuf, outbuf
+
+int imgeti()
+int imgnlr(), impnlr()
+real apolr()
+
+begin
+        # Initialize counters for line i/o
+ call amovkl (long(1), v1, IM_MAXDIM)
+ call amovkl (long(1), v2, IM_MAXDIM)
+
+ # Number of pixels per line
+ ncols = imgeti (imin, "i_naxis1")
+
+ # Loop over image lines
+ while ((imgnlr (imin, inbuf, v1) != EOF) &&
+ (impnlr (imout, outbuf, v2) != EOF)) {
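+            # Normalize the line by MAXADU, evaluate the correction
+            # polynomial at each normalized value, and then scale the
+            # original ADU values by the result to form the output line.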
+ call adivkr (Memr[inbuf], MAXADU, Memr[outbuf], ncols)
+ do col = 1, ncols {
+ Memr[outbuf+col-1] = apolr (Memr[outbuf+col-1], coeff, ncoef)
+ }
+ call amulr (Memr[inbuf], Memr[outbuf], Memr[outbuf], ncols)
+ }
+end
+
+
+# IRL_IMCOPY -- Copy input image into the output image. Avoid data type
+# conversion in order to optimize speed.
+
+procedure irl_imcopy (input, output)
+
+char input[ARB] # input image name
+char output[ARB] # output image name
+
+int npix
+long vin[IM_MAXDIM], vout[IM_MAXDIM]
+pointer imin, imout
+pointer inline, outline
+
+int imgeti()
+int imgnls(), impnls()
+int imgnli(), impnli()
+int imgnll(), impnll()
+int imgnlr(), impnlr()
+int imgnld(), impnld()
+int imgnlx(), impnlx()
+pointer immap()
+
+begin
+ # Open input and output images
+ iferr (imin = immap (input, READ_ONLY, 0))
+ call erract (EA_ERROR)
+ iferr (imout = immap (output, NEW_COPY, imin)) {
+ call imunmap (imin)
+ call erract (EA_ERROR)
+ }
+
+        # Initialize counters
+ call amovkl (long(1), vin, IM_MAXDIM)
+ call amovkl (long(1), vout, IM_MAXDIM)
+
+ # Copy image lines
+ switch (imgeti (imin, "i_pixtype")) {
+ case TY_SHORT, TY_USHORT:
+ while (imgnls (imin, inline, vin) != EOF) {
+ npix = impnls (imout, outline, vout)
+ call amovs (Mems[inline], Mems[outline], npix)
+ }
+ case TY_INT:
+ while (imgnli (imin, inline, vin) != EOF) {
+ npix = impnli (imout, outline, vout)
+ call amovi (Memi[inline], Memi[outline], npix)
+ }
+ case TY_LONG:
+ while (imgnll (imin, inline, vin) != EOF) {
+ npix = impnll (imout, outline, vout)
+ call amovl (Meml[inline], Meml[outline], npix)
+ }
+ case TY_REAL:
+ while (imgnlr (imin, inline, vin) != EOF) {
+ npix = impnlr (imout, outline, vout)
+ call amovr (Memr[inline], Memr[outline], npix)
+ }
+ case TY_DOUBLE:
+ while (imgnld (imin, inline, vin) != EOF) {
+ npix = impnld (imout, outline, vout)
+ call amovd (Memd[inline], Memd[outline], npix)
+ }
+ case TY_COMPLEX:
+ while (imgnlx (imin, inline, vin) != EOF) {
+ npix = impnlx (imout, outline, vout)
+ call amovx (Memx[inline], Memx[outline], npix)
+ }
+ default:
+ call error (0, "Unsupported pixel type")
+ }
+
+ # Close images
+ call imunmap (imin)
+ call imunmap (imout)
+end
diff --git a/noao/imred/irred/txdump.par b/noao/imred/irred/txdump.par
new file mode 100644
index 00000000..ce400817
--- /dev/null
+++ b/noao/imred/irred/txdump.par
@@ -0,0 +1,8 @@
+# TXDUMP Parameters
+
+textfiles,s,a,,,,Input apphot/daophot text database(s)
+fields,s,a,,,,Fields to be extracted
+expr,s,a,yes,,,Boolean expression for record selection
+headers,b,h,no,,,Print the field headers ?
+parameters,b,h,yes,,,Print the parameters if headers is yes ?
+mode,s,h,"ql",,,Mode of task
diff --git a/noao/imred/irred/x_irred.x b/noao/imred/irred/x_irred.x
new file mode 100644
index 00000000..25e406aa
--- /dev/null
+++ b/noao/imred/irred/x_irred.x
@@ -0,0 +1 @@
+task irlincor = t_irlincor
diff --git a/noao/imred/irs/Revisions b/noao/imred/irs/Revisions
new file mode 100644
index 00000000..ac156735
--- /dev/null
+++ b/noao/imred/irs/Revisions
@@ -0,0 +1,111 @@
+.help revisions Jun88 noao.imred.irs
+.nf
+
+=====
+V2.12
+=====
+
+imred$irs/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+========
+V2.11.3b
+========
+
+imred$irs/identify.par
+ Added new units parameters. (3/11/97, Valdes)
+
+=========
+V2.10.4p2
+=========
+
+imred$irs/
+ Updated to new version of ONEDSPEC (7/11/91, Valdes)
+
+================
+
+imred$irs/standard.par
+    Removed enumerated list. (4/10/89, Valdes)
+
+imred$irs/irs.cl
+imred$irs/irs.men
+specplot.par+
+ Task SPECPLOT added. (4/3/89 ShJ)
+
+imred$irs/batchred.cl
+imred$irs/batchred.par
+imred$irs/irs.cl
+imred$irs/irs.men
+ 1. New BATCHRED script. (4/27/88 Valdes)
+ 2. Eliminated EXTINCT. (4/27/88 Valdes)
+
+imred$irs/sensfunc.par
+ Added aperture selection and query parameters. (4/15/88 Valdes)
+
+imred$irs/irs.cl
+imred$irs/refspectra.par +
+ Refer to new ONEDSPEC executable and tasks. (4/7/88 Valdes)
+
+noao$imred/irs/reidentify.par
+ Valdes, Jan 4, 1988
+ Updated parameter file for new REIDENTIFY parameter.
+
+noao$imred/irs/dispcor.par
+ Valdes, March 5, 1987
+ 1. The DISPCOR default parameter file has been updated because of
+ changes to the task; most notable being that wstart and wpc are
+ list structured.
+
+noao$imred/irs/flatdiv.par
+noao$imred/irs/flatfit.par
+ Valdes, December, 2, 1986
+ 1. New parameter "power" added to these tasks.
+
+noao$imred/irs/coincor.par
+ Valdes, October 20, 1986
+ 1. New parameter "checkdone" added to COINCOR to allow overriding
+ coincidence correction checking.
+
+noao$imred/irs/irs.par
+noao$imred/irs/coincor.par
+noao$imred/irs/irs.men
+ Valdes, October 13, 1986
+ 1. Added new COINCOR parameter "power".
+
+noao$imred/irs/irs.cl
+noao$imred/irs/irs.men
+noao$imred/irs/shedit.par +
+ Valdes, October 6, 1986
+ 1. Added new task SHEDIT.
+
+noao$imred/irs/identify.par
+ Valdes, October 3, 1986
+ 1. Added new IDENTIFY parameter "threshold".
+
+irs: Valdes, July 3, 1986:
+ 1. New coordlist name in IDENTIFY parameter file.
+ 2. New calibration file name in package parameter file.
+
+=====================================
+STScI Pre-release and SUN 2.3 Release
+=====================================
+
+irs$bswitch.par: Valdes, May 19, 1986
+ 1. The parameter "add_const" in BSWITCH is directed to the parameter
+ in SENSFUNC of the same name.
+
+irs: Valdes, May 12, 1986:
+ 1. SPLOT updated. New parameters XMIN, XMAX, YMIN, YMAX.
+
+irs: Valdes, April 7, 1986:
+ 1. Package parameter file changed to delete latitude.
+ 2. DISPCOR, BSWITCH, and STANDARD latitude parameter now obtained from
+ OBSERVATORY.
+
+irs: Valdes, March 27, 1986:
+ 1. New task SETDISP added.
+
+===========
+Release 2.2
+===========
+.endhelp
diff --git a/noao/imred/irs/calibrate.par b/noao/imred/irs/calibrate.par
new file mode 100644
index 00000000..795965b7
--- /dev/null
+++ b/noao/imred/irs/calibrate.par
@@ -0,0 +1,14 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+records,s,a,,,,Record number extensions
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,no,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/irs/dispcor.par b/noao/imred/irs/dispcor.par
new file mode 100644
index 00000000..90257214
--- /dev/null
+++ b/noao/imred/irs/dispcor.par
@@ -0,0 +1,19 @@
+input,s,a,,,,List of input spectra
+output,s,a,,,,List of output spectra
+records,s,a,,,,Record number extensions
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+database,s,h,"database",,,Dispersion solution database
+table,s,h,"",,,Wavelength table for apertures
+w1,r,h,INDEF,,,Starting wavelength
+w2,r,h,INDEF,,,Ending wavelength
+dw,r,h,INDEF,,,Wavelength interval per pixel
+nw,i,h,1024,,,Number of output pixels
+log,b,h,no,,,Logarithmic wavelength scale?
+flux,b,h,yes,,,Conserve total flux?
+samedisp,b,h,yes,,,Same dispersion in all apertures?
+global,b,h,yes,,,Apply global defaults?
+ignoreaps,b,h,no,,,Ignore apertures?
+confirm,b,h,yes,,,Confirm dispersion coordinates?
+listonly,b,h,no,,,List the dispersion coordinates only?
+verbose,b,h,yes,,,Print linear dispersion assignments?
+logfile,s,h,"logfile",,,Log file
diff --git a/noao/imred/irs/flatfit.par b/noao/imred/irs/flatfit.par
new file mode 100644
index 00000000..bd693aed
--- /dev/null
+++ b/noao/imred/irs/flatfit.par
@@ -0,0 +1,24 @@
+# FLATFIT parameter file
+
+input,s,a,,,,Input image root file name
+records,s,a,,,,Range of spectral records
+output,s,a,,,,Output file root name for new spectra
+function,s,h,"chebyshev",,,Function to fit (chebyshev|legendre|spline3|spline1)
+order,i,h,1,1,,Fitting order (number of terms)
+niter,i,h,1,1,,Number of rejection iterations
+lower,r,h,100.,0,,Lower rejection criterion in sigmas
+upper,r,h,100.,0,,Upper rejection criterion in sigmas
+ngrow,i,h,0,,,Growing region
+div_min,r,h,1.0,,,Value to use if division by zero occurs
+interact,b,h,yes,,,Interact with the first accumulation?
+all_interact,b,h,no,,,Interact with all accumulations?
+coincor,b,h,)_.coincor,,,Apply coincidence correction to flats
+ccmode,s,h,)_.ccmode,,,Correction mode (photo|iids)
+deadtime,r,h,)_.deadtime,,,Deadtime in seconds
+power,r,h,)_.power,,,IIDS power law coefficient
+new_order,i,a,4,1,,enter order
+new_lower,r,a,,,,enter nr sigma
+new_upper,r,a,,,,enter nr sigma
+new_niter,i,a,,,,enter nr of iterations
+confirm,b,a,,,,Exit and save solution?
+cursor,*gcur,h,"",,,Graphics cursor input
diff --git a/noao/imred/irs/identify.par b/noao/imred/irs/identify.par
new file mode 100644
index 00000000..d42065e1
--- /dev/null
+++ b/noao/imred/irs/identify.par
@@ -0,0 +1,33 @@
+# Parameters for identify task.
+
+images,s,a,,,,Images containing features to be identified
+section,s,h,"middle line",,,Section to apply to two dimensional images
+database,f,h,database,,,Database in which to record feature data
+coordlist,f,h,linelists$henear.dat,,,User coordinate list
+units,s,h,"",,,Coordinate units
+nsum,s,h,"10",,,Number of lines/columns/bands to sum in 2D images
+match,r,h,50.,,,Coordinate list matching limit
+maxfeatures,i,h,50,,,Maximum number of features for automatic identification
+zwidth,r,h,100.,,,Zoom graph width in user units
+
+ftype,s,h,"emission","emission|absorption",,Feature type
+fwidth,r,h,4.,,,Feature width in pixels
+cradius,r,h,5.,,,Centering radius in pixels
+threshold,r,h,10.,0.,,Feature threshold for centering
+minsep,r,h,2.,0.,,Minimum pixel separation
+
+function,s,h,"chebyshev","legendre|chebyshev|spline1|spline3",,Coordinate function
+order,i,h,8,1,,Order of coordinate function
+sample,s,h,"*",,,Coordinate sample regions
+niterate,i,h,0,0,,Rejection iterations
+low_reject,r,h,3.,0.,,Lower rejection sigma
+high_reject,r,h,3.,0.,,Upper rejection sigma
+grow,r,h,0.,0.,,Rejection growing radius
+
+autowrite,b,h,no,,,"Automatically write to database"
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/irs/irs.cl b/noao/imred/irs/irs.cl
new file mode 100644
index 00000000..6814185f
--- /dev/null
+++ b/noao/imred/irs/irs.cl
@@ -0,0 +1,64 @@
+#{ IRS -- KPNO IRS Spectral Reduction Package
+
+# Load necessary packages
+
+lists # List package for table
+
+# Define necessary paths
+
+set irscal = "onedstds$irscal/"
+set irsiids = "onedspec$irsiids/"
+
+package irs
+
+# Standard ONEDSPEC tasks
+task autoidentify,
+ continuum,
+ deredden,
+ dopcor,
+ mkspec,
+ names,
+ sarith,
+ sflip,
+ sinterp,
+ splot,
+ specplot,
+ specshift = onedspec$x_onedspec.e
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task dispcor1 = onedspec$dispcor1.par
+task scopy = onedspec$scopy.cl
+hidetask dispcor1
+
+# Special IRS/IIDS tasks
+task addsets,
+ bswitch,
+ coefs,
+ flatdiv,
+ slist1d,
+ subsets,
+ sums = irsiids$x_onedspec.e
+task batchred = irsiids$batchred.cl
+task bplot = irsiids$bplot.cl
+task extinct = irsiids$extinct.cl
+
+# Different default parameters
+task calibrate,
+ dispcor,
+ flatfit,
+ identify,
+ lcalib,
+ reidentify,
+ refspectra,
+ sensfunc,
+ standard = irs$x_onedspec.e
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Define a task living in the user's directory - it is created by BATCHRED
+
+task $process = process.cl
+
+clbye()
diff --git a/noao/imred/irs/irs.hd b/noao/imred/irs/irs.hd
new file mode 100644
index 00000000..e5125e4f
--- /dev/null
+++ b/noao/imred/irs/irs.hd
@@ -0,0 +1 @@
+# Help directory for the IRS package.
diff --git a/noao/imred/irs/irs.men b/noao/imred/irs/irs.men
new file mode 100644
index 00000000..60e4a37d
--- /dev/null
+++ b/noao/imred/irs/irs.men
@@ -0,0 +1,35 @@
+ addsets - Add subsets of strings of spectra
+ batchred - Batch processing of IIDS/IRS spectra
+ bplot - Batch plots of spectra
+ bswitch - Beam-switch strings of spectra to make obj-sky pairs
+ calibrate - Apply sensitivity correction to spectra
+ coefs - Extract mtn reduced coefficients from henear scans
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ extinct - Use BSWITCH for extinction correction
+ flatdiv - Divide spectra by flat field
+ flatfit - Sum and normalize flat field spectra
+ identify - Identify features in spectrum for dispersion solution
+ lcalib - List calibration file data
+ mkspec - Generate an artificial spectrum
+ names - Generate a list of image names from a string
+ process - A task generated by BATCHRED
+ refspectra - Assign reference spectra to object spectra
+ reidentify - Automatically identify features in spectra
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra having different wavelength ranges
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ sinterp - Interpolate a table of x,y pairs to create a spectrum
+ slist1d - List spectral header elements
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+ standard - Identify standard stars to be used in sensitivity calc
+ subsets - Subtract pairs in strings of spectra
+ sums - Generate sums of object and sky spectra by aperture
diff --git a/noao/imred/irs/irs.par b/noao/imred/irs/irs.par
new file mode 100644
index 00000000..1a59fd5e
--- /dev/null
+++ b/noao/imred/irs/irs.par
@@ -0,0 +1,17 @@
+# PARAMETERS FOR KPNO IRS SPECTRAL REDUCTION PACKAGE
+
+observatory,s,h,"kpno",,,Observatory for data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+extinction,s,h,"onedstds$kpnoextinct.dat",,,Extinction file
+caldir,s,h,"irscal$",,,Directory containing calibration data
+coincor,b,h,no,,,Apply coincidence correction to flats
+ccmode,s,h,"",,,Correction mode (photo|iids|power)
+deadtime,r,h,,0,,Deadtime in seconds
+power,r,h,,,,IIDS power law coefficient
+
+dispaxis,i,h,1,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,Number of lines/columns/bands to sum for 2D/3D images
+
+next_rec,i,h,1,,,"Next output record"
+
+version,s,h,"IRS V3: July 1991"
diff --git a/noao/imred/irs/lcalib.par b/noao/imred/irs/lcalib.par
new file mode 100644
index 00000000..30436625
--- /dev/null
+++ b/noao/imred/irs/lcalib.par
@@ -0,0 +1,7 @@
+# CALIBLIST parameter file
+
+option,s,a,,,,"List option (bands, ext, mags, fnu, flam, stars)"
+star_name,s,a,,,,Star name in calibration list
+extinction,s,h,,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
diff --git a/noao/imred/irs/refspectra.par b/noao/imred/irs/refspectra.par
new file mode 100644
index 00000000..47cd54f9
--- /dev/null
+++ b/noao/imred/irs/refspectra.par
@@ -0,0 +1,17 @@
+input,s,a,,,,"List of input spectra"
+records,s,a,,,,Record number extensions
+references,s,h,"*.imh",,,"List of reference spectra"
+apertures,s,h,"",,,"Input aperture selection list"
+refaps,s,h,"",,,"Reference aperture selection list"
+ignoreaps,b,h,no,,,Ignore input and reference apertures?
+select,s,h,"interp","match|nearest|preceding|following|interp|average",,"Selection method for reference spectra"
+sort,s,h,"ut",,,"Sort key"
+group,s,h,"none",,,"Group key"
+time,b,h,yes,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting"
+override,b,h,no,,,"Override previous assignments?"
+confirm,b,h,yes,,,"Confirm reference spectrum assignments?"
+assign,b,h,yes,,,"Assign the reference spectra to the input spectrum?"
+logfiles,s,h,"STDOUT,logfile",,,"List of logfiles"
+verbose,b,h,no,,,"Verbose log output?"
+answer,s,q,,"no|yes|YES",,"Accept assignment?"
diff --git a/noao/imred/irs/reidentify.par b/noao/imred/irs/reidentify.par
new file mode 100644
index 00000000..13b21740
--- /dev/null
+++ b/noao/imred/irs/reidentify.par
@@ -0,0 +1,36 @@
+# Parameters for reidentify task.
+
+reference,s,a,,,,Reference image
+images,s,a,,,,Images to be reidentified
+interactive,s,h,"no","no|yes|NO|YES",,Interactive fitting?
+section,s,h,"middle line",,,Section to apply to two dimensional images
+newaps,b,h,yes,,,Reidentify apertures in images not in reference?
+override,b,h,no,,,Override previous solutions?
+refit,b,h,yes,,,"Refit coordinate function?
+"
+trace,b,h,no,,,Trace reference image?
+step,s,h,"10",,,Step in lines/columns/bands for tracing an image
+nsum,s,h,"10",,,Number of lines/columns/bands to sum
+shift,s,h,"0.",,,Shift to add to reference features (INDEF to search)
+search,r,h,0.,,,Search radius
+nlost,i,h,0,0,,"Maximum number of features which may be lost
+"
+cradius,r,h,5.,,,Centering radius
+threshold,r,h,10.,0.,,Feature threshold for centering
+addfeatures,b,h,no,,,Add features from a line list?
+coordlist,f,h,linelists$henear.dat,,,User coordinate list
+match,r,h,10.,,,Coordinate list matching limit
+maxfeatures,i,h,50,,,Maximum number of features for automatic identification
+minsep,r,h,2.,0.,,"Minimum pixel separation
+"
+database,f,h,database,,,Database
+logfiles,s,h,"logfile",,,List of log files
+plotfile,s,h,"",,,Plot file for residuals
+verbose,b,h,no,,,Verbose output?
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,"Graphics cursor input
+"
+answer,s,q,"yes","no|yes|NO|YES",,Fit dispersion function interactively?
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/irs/sensfunc.par b/noao/imred/irs/sensfunc.par
new file mode 100644
index 00000000..022190a4
--- /dev/null
+++ b/noao/imred/irs/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,no,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,")_.extinction",,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/irs/standard.par b/noao/imred/irs/standard.par
new file mode 100644
index 00000000..3abf645a
--- /dev/null
+++ b/noao/imred/irs/standard.par
@@ -0,0 +1,22 @@
+input,f,a,,,,Input image file root name
+records,s,a,,,,Spectral records
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,yes,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/kpnocoude/Revisions b/noao/imred/kpnocoude/Revisions
new file mode 100644
index 00000000..da682fb8
--- /dev/null
+++ b/noao/imred/kpnocoude/Revisions
@@ -0,0 +1,59 @@
+.help revisions Dec94 noao.imred.kpnocoude
+.nf
+
+=====
+V2.12
+=====
+
+imred$kpnocoude/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+========
+V2.11.3b
+========
+
+imred$kpnocoude/doc/do3fiber.hlp
+    Fixed minor formatting problem. (4/22/99, Valdes)
+
+=======
+V2.11.1
+=======
+
+imred$kpnocoude/demos/mkdo3fiber.cl
+imred$kpnocoude/demos/mkdoslit.cl
+ Made the ARTDATA package parameters explicit (4/15/97, Valdes)
+
+imred$kpnocoude/identify.par
+ Added the new units parameter. (3/11/97, Valdes)
+
+imred$kpnocoude/kpnocoude.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$kpnocoude/sparams.par
+ Changed match from 0.2 to -3. (4/5/96, Valdes)
+
+imred$kpnocoude/do3fiber.cl
+imred$kpnocoude/do3fiber.par
+imred$kpnocoude/params.par
+imred$kpnocoude/doc/do3fiber.hlp
+imred$kpnocoude/doc/do3fiber.ms
+ Added crval/cdelt parameters used in new version with automatic arc
+ line identification. (4/5/96, Valdes)
+
+imred$kpnocoude/do3fiber.cl
+    The script needed to be modified for the extra argument to proc for
+ sky alignment. The sky alignment option is not used in DO3FIBER.
+ (7/19/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+kpnocoude$kpnocoude.cl
+kpnocoude$kpnocoude.men
+ 1. Added background, illumination, response, apflatten, apnormalize.
+ 2. Renamed fiber response task to "fibresponse".
+ (12/29/94, Valdes)
+
+.endhelp
diff --git a/noao/imred/kpnocoude/calibrate.par b/noao/imred/kpnocoude/calibrate.par
new file mode 100644
index 00000000..e09457a2
--- /dev/null
+++ b/noao/imred/kpnocoude/calibrate.par
@@ -0,0 +1,13 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,yes,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/kpnocoude/demos/demoarc1.dat b/noao/imred/kpnocoude/demos/demoarc1.dat
new file mode 100644
index 00000000..fa0a179d
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demoarc1.dat
@@ -0,0 +1,38 @@
+ OBJECT = 'First comp ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 60. / actual integration time
+ DARKTIME= 60. / total elapsed time
+ IMAGETYP= 'comp ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:11:30.00 ' / universal time
+ ST = '09:04:54.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:09:03.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '48.760 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDMEAN = 179.398
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnocoude/demos/demoarc2.dat b/noao/imred/kpnocoude/demos/demoarc2.dat
new file mode 100644
index 00000000..4cd9975d
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demoarc2.dat
@@ -0,0 +1,38 @@
+ OBJECT = 'Last comp ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 60. / actual integration time
+ DARKTIME= 60. / total elapsed time
+ IMAGETYP= 'comp ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:41:30.00 ' / universal time
+ ST = '09:34:54.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:09:03.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '48.760 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDMEAN = 179.398
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnocoude/demos/demoobj1.dat b/noao/imred/kpnocoude/demos/demoobj1.dat
new file mode 100644
index 00000000..78f3b9ad
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demoobj1.dat
@@ -0,0 +1,37 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnocoude/demos/demos.cl b/noao/imred/kpnocoude/demos/demos.cl
new file mode 100644
index 00000000..5b065c51
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demos.cl
@@ -0,0 +1,18 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile))
+ cl (< demofile)
+ else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/kpnocoude/demos/demos.men b/noao/imred/kpnocoude/demos/demos.men
new file mode 100644
index 00000000..cdd3d484
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demos.men
@@ -0,0 +1,6 @@
+ MENU of KPNOCOUDE Demonstrations
+
+ doslit - Quick test of DOSLIT (no comments, no delays)
+ do3fiber - Quick test of DO3FIBER (small images, no comments, no delays)
+ mkdoslit - Make DOSLIT test data
+ mkdo3fiber - Make DO3FIBER test data (50x256)
diff --git a/noao/imred/kpnocoude/demos/demos.par b/noao/imred/kpnocoude/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/kpnocoude/demos/demostd1.dat b/noao/imred/kpnocoude/demos/demostd1.dat
new file mode 100644
index 00000000..78f3b9ad
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/demostd1.dat
@@ -0,0 +1,37 @@
+ OBJECT = 'V640Mon 4500 ' / object name
+ OBSERVAT= 'KPNO ' / observatory
+ OBSERVER= 'Massey ' / observers
+ COMMENTS= 'Final New Ice ' / comments
+ EXPTIME = 1200. / actual integration time
+ DARKTIME= 1200. / total elapsed time
+ IMAGETYP= 'object ' / object, dark, bias, etc.
+ DATE-OBS= '26/11/91 ' / date (dd/mm/yy) of obs.
+ UT = '12:19:55.00 ' / universal time
+ ST = '09:13:15.00 ' / sidereal time
+ RA = '06:37:02.00 ' / right ascension
+ DEC = '06:08:52.00 ' / declination
+ EPOCH = 1991.9 / epoch of ra and dec
+ ZD = '44.580 ' / zenith distance
+ AIRMASS = 0. / airmass
+ TELESCOP= 'kpcdf ' / telescope name
+ DETECTOR= 'te1k ' / detector
+ PREFLASH= 0 / preflash time, seconds
+ GAIN = 5.4 / gain, electrons per adu
+ DWELL = 5 / sample integration time
+ RDNOISE = 3.5 / read noise, electrons per adu
+ DELAY0 = 0 / time delay after each pixel
+ DELAY1 = 0 / time delay after each row
+ CAMTEMP = -111 / camera temperature
+ DEWTEMP = -183 / dewar temperature
+ CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+ CCDSUM = '1 1 ' / on chip summation
+ INSTRUME= 'test ' / instrument
+ APERTURE= '250micron slit ' / aperture
+ TVFILT = '4-96 ' / tv filter
+ DISPAXIS= '2 ' / dispersion axis
+ GRATPOS = 4624.3 / grating position
+ TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+ OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+ CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnocoude/demos/do3fiber.cl b/noao/imred/kpnocoude/demos/do3fiber.cl
new file mode 100644
index 00000000..c4ab15a5
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/do3fiber.cl
@@ -0,0 +1,14 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdo3fiber.cl")
+
+unlearn do3fiber
+params.coordlist = "linelists$idhenear.dat"
+params.match = 10.
+delete demologfile,demoplotfile verify=no >& dev$null
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdo3fiber.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/kpnocoude/demos/doslit.cl b/noao/imred/kpnocoude/demos/doslit.cl
new file mode 100644
index 00000000..dd7a0955
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/doslit.cl
@@ -0,0 +1,15 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdoslit.cl")
+
+unlearn doslit
+sparams.extras = no
+sparams.coordlist = "linelists$idhenear.dat"
+sparams.match = 10.
+delete demologfile,demoplotfile verify=no >& dev$null
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdoslit.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/kpnocoude/demos/mkdo3fiber.cl b/noao/imred/kpnocoude/demos/mkdo3fiber.cl
new file mode 100644
index 00000000..00a775f4
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/mkdo3fiber.cl
@@ -0,0 +1,22 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkfibers ("demoobj", type="objnosky", fibers="demos$mkdo3fiber.dat",
+ title="Coude artificial image", header="demos$demoobj1.dat",
+ ncols=50, nlines=256, wstart=5786., wend=7362., seed=1)
+mkfibers ("demoflat", type="flat", fibers="demos$mkdo3fiber.dat",
+ title="Coude artificial image", header="demos$demostd1.dat",
+ ncols=50, nlines=256, wstart=5786., wend=7362., seed=2)
+mkfibers ("demoarc", type="henear", fibers="demos$mkdo3fiber.dat",
+ title="Coude artificial image", header="demos$demoarc1.dat",
+ ncols=50, nlines=256, wstart=5786., wend=7362., seed=3)
diff --git a/noao/imred/kpnocoude/demos/mkdo3fiber.dat b/noao/imred/kpnocoude/demos/mkdo3fiber.dat
new file mode 100644
index 00000000..e0d2d241
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/mkdo3fiber.dat
@@ -0,0 +1,3 @@
+1 2 0.757532 gauss 2.7 0 14.6 0.002
+2 1 0.939866 gauss 2.7 0 25.1 0.002
+3 2 1.015546 gauss 2.7 0 35.5 0.002
diff --git a/noao/imred/kpnocoude/demos/mkdoslit.cl b/noao/imred/kpnocoude/demos/mkdoslit.cl
new file mode 100644
index 00000000..b76f467d
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/mkdoslit.cl
@@ -0,0 +1,25 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkexample ("longslit", "demoarc1", oseed=5, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc1", "demos$demoarc1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoobj1", oseed=1, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoobj1", "demos$demoobj1.dat", append=no, verbose=no)
+mkexample ("longslit", "demostd1", oseed=2, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demostd1", "demos$demostd1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoarc2", oseed=5, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc2", "demos$demoarc2.dat", append=no, verbose=no)
diff --git a/noao/imred/kpnocoude/demos/xgdo3fiber.dat b/noao/imred/kpnocoude/demos/xgdo3fiber.dat
new file mode 100644
index 00000000..498209b4
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/xgdo3fiber.dat
@@ -0,0 +1,60 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\skpnocoude\n
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile
+^Z
+epar\sdo3fiber\n
+demoobj\r
+demoflat\r
+demoflat\r
+demoarc\r
+\r
+rdnoise\r
+gain\r
+\r
+\r
+4\r
+6600\r
+6.1\r
+\r
+\r
+\r
+\r
+y\r
+y\r
+\r
+\r
+y\r
+^Z
+do3fiber\sredo+\n
+\n
+\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\r
+q/<-5\s\s\s\s/=(.\s=\r
+:/<-5\s\s\s\s/=(.\s=\r coord\slinelists$idhenear.dat\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+n\n
+y\n
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+n\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/kpnocoude/demos/xgdoslit.dat b/noao/imred/kpnocoude/demos/xgdoslit.dat
new file mode 100644
index 00000000..4ba38e19
--- /dev/null
+++ b/noao/imred/kpnocoude/demos/xgdoslit.dat
@@ -0,0 +1,71 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\skpnocoude\n
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdoslit\n
+demoobj1\r
+demoarc1,demoarc2\r
+\r
+demostd1\r
+rdnoise\r
+gain\r
+\r
+\r
+5700\r
+6.2\r
+\r
+y\r
+y\r
+y\r
+y\r
+y\r
+^Z
+doslit\sredo+\n
+\n
+\n
+b/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+y\n
+4210\n
+7350\n
+6.2\n
+\n
+n\n
+\n
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+hz14\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+Y\n
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+gkimos\sdemoplotfile\snx=3\sny=3\sdev=stdgraph\n
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/kpnocoude/do3fiber.cl b/noao/imred/kpnocoude/do3fiber.cl
new file mode 100644
index 00000000..649684cb
--- /dev/null
+++ b/noao/imred/kpnocoude/do3fiber.cl
@@ -0,0 +1,60 @@
+# DO3FIBER -- Process Coude fiber spectra from 2D to wavelength calibrated 1D.
+#
+# The task PROC does all of the interactive work and BATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
+
+procedure do3fiber (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+string arcs = "" {prompt="List of arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)\n"}
+
+string readnoise = "RDNOISE" {prompt="Read out noise sigma (photons)"}
+string gain = "GAIN" {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int fibers = 3 {prompt="Number of fibers"}
+real width = 6. {prompt="Width of profiles (pixels)"}
+string crval = "INDEF" {prompt="Approximate central wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion"}
+string objaps = "2" {prompt="Object apertures"}
+string arcaps = "1,3" {prompt="Arc apertures\n"}
+
+bool scattered = no {prompt="Subtract scattered light?"}
+bool fitflat = yes {prompt="Fit and ratio flat field spectrum?"}
+bool recenter = yes {prompt="Recenter object apertures?"}
+bool edit = no {prompt="Edit/review object apertures?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool splot = yes {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = yes {prompt="Update spectra if cal data changes?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset params = "" {prompt="Algorithm parameters"}
+
+begin
+ apscript.readnoise = readnoise
+ apscript.gain = gain
+ apscript.nfind = fibers
+ apscript.width = width
+ apscript.t_width = width
+ apscript.radius = width
+ apscript.clean = clean
+ apscript.order = "increasing"
+ proc.datamax = datamax
+
+ proc (objects, apref, flat, "", arcs, "", "",
+ arctable, fibers, "", crval, cdelt, objaps, "", arcaps, "",
+ "", "", scattered, fitflat, recenter, edit, no, no, clean,
+ dispcor, no, no, no, no, no, splot, redo, update, batch, listonly)
+
+ if (proc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("batch&batch") | cl
+ }
+end
diff --git a/noao/imred/kpnocoude/do3fiber.par b/noao/imred/kpnocoude/do3fiber.par
new file mode 100644
index 00000000..8a30a4e0
--- /dev/null
+++ b/noao/imred/kpnocoude/do3fiber.par
@@ -0,0 +1,30 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+flat,f,h,"",,,"Flat field spectrum"
+arcs,s,h,"",,,"List of arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)
+"
+readnoise,s,h,"RDNOISE",,,"Read out noise sigma (photons)"
+gain,s,h,"GAIN",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+fibers,i,h,3,,,"Number of fibers"
+width,r,h,6.,,,"Width of profiles (pixels)"
+crval,s,h,INDEF,,,"Approximate central wavelength"
+cdelt,s,h,INDEF,,,"Approximate dispersion"
+objaps,s,h,"2",,,"Object apertures"
+arcaps,s,h,"1,3",,,"Arc apertures
+"
+scattered,b,h,no,,,"Subtract scattered light?"
+fitflat,b,h,yes,,,"Fit and ratio flat field spectrum?"
+recenter,b,h,yes,,,"Recenter object apertures?"
+edit,b,h,no,,,"Edit/review object apertures?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+splot,b,h,yes,,,"Plot the final spectrum?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,yes,,,"Update spectra if cal data changes?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+params,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/kpnocoude/doc/do3fiber.hlp b/noao/imred/kpnocoude/doc/do3fiber.hlp
new file mode 100644
index 00000000..af99e15b
--- /dev/null
+++ b/noao/imred/kpnocoude/doc/do3fiber.hlp
@@ -0,0 +1,1146 @@
+.help do3fiber Feb93 noao.imred.kpnocoude
+.ih
+NAME
+do3fiber -- Three fiber data reduction task
+.ih
+USAGE
+do3fiber objects
+.ih
+SUMMARY
+The \fBdo3fiber\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, and wavelength calibration of multifiber data in which some
+fibers are used to take object spectra and other fibers are used to
+take simultaneous arc spectra. A three fiber instrument of this
+type (one object and two arc fibers) is available at the KPNO coude feed.
+The default parameters are set for this configuration.
+If there are a large number of fibers and fiber throughput and sky
+fiber subtraction is needed the \fBdofiber\fR task should be used.
+
+The \fBdo3fiber\fR task is a command language script which collects
+and combines the functions and parameters of many general purpose tasks to
+provide a single complete data reduction path. The task provides a degree
+of guidance, automation, and record keeping necessary when dealing with
+this type of multifiber data.
+.ih
+PARAMETERS
+.ls objects
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.le
+.ls flat = "" (optional)
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field corrections.
+.le
+.ls arcs = "" (at least one if dispersion correcting)
+List of primary, all fiber arc spectra. These spectra are used to define
+the dispersion functions for each fiber apart from a possible zero point
+correction made with simultaneous arc calibration fibers in the object
+spectra. One fiber from the first spectrum is used to mark lines and set
+the dispersion function interactively and dispersion functions for all
+other fibers and arc spectra are derived from it.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIparams.sort\fR, such as the observation time is made.
+.le
+
+.ls readnoise = "RDNOISE" (apsum)
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. A image header keyword
+(case insensitive) may be specified to get the value from the image.
+.le
+.ls gain = "GAIN" (apsum)
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or arc
+spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls fibers = 3 (apfind)
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum.
+.le
+.ls width = 6. (apedit)
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.le
+.ls crval = INDEF, cdelt = INDEF (autoidentify)
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used. If one or both of these parameters are
+specified as INDEF the search for a solution will be slower and more likely
+to fail.
+.le
+.ls objaps = "2", arcaps = "1,3"
+List of object and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object apertures
+for the final results.
+.le
+
+.ls scattered = no (apscatter)
+Smooth and subtract scattered light from the object and flat field
+images. This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The initial
+flat field, or if none is given the aperture reference image, is
+fit interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.le
+.ls fitflat = yes (flat1d)
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.le
+.ls recenter = yes (aprecenter)
+Recenter reference apertures for each object spectrum?
+.le
+.ls edit = no (apedit)
+Review aperture definitions for each object spectrum? Note that this does
+not apply to the initial reference aperture which always allows
+interactive review of the aperture definitions.
+.le
+.ls clean = no (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameters
+can be useful.
+.le
+.ls dispcor = yes
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.le
+.ls splot = yes
+Plot the final spectra with the task \fBsplot\fR?
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.le
+.ls update = yes
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.le
+.ls batch = no
+Process spectra as a background or batch job provided there are no interactive
+options (\fIedit\fR and \fIsplot\fR) selected.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls params = "" (pset)
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.le
+
+.ce
+-- PACKAGE PARAMETERS --
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdo3fiber\fR.
+.ls observatory = "observatory"
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For NOAO data the image headers
+identify the observatory as "kpno" or "ctio" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter.
+.le
+.ls database = "database"
+Database (directory) used for storing aperture and dispersion information.
+.le
+.ls verbose = no
+Print verbose information available with various tasks.
+.le
+.ls logfile = "logfile", plotfile = ""
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.le
+.ls records = ""
+Dummy parameter to be ignored.
+.le
+.ls version = "KPNOCOUDE: ..."
+Version of the package.
+.le
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdo3fiber\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls extras = no (apsum)
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -3., upper = 3. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 2 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+.ls buffer = 1. (apscatter)
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.le
+.ls apscat1 = "" (apscatter)
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.le
+.ls apscat2 = "" (apscatter)
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum) (fit1d|fit2d)
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for most data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+.ls nsubaps = 1 (apsum)
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.le
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+.ls f_interactive = yes (fit1d)
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.le
+.ls f_function = "spline3", f_order = 20 (fit1d)
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (autoidentify/identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.le
+.ls match = -3. (autoidentify/identify)
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 3.5 (autoidentify/identify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 4. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "legendre", i_order = 3 (autoidentify/identify)
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.le
+.ls i_niterate = 3, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (reidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+.ls addfeatures = no (reidentify)
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding or following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd", group = "ljd" (refspectra)
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdo3fiber\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
+.ih
+DESCRIPTION
+The \fBdo3fiber\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, and wavelength calibration of multifiber data in which some
+fibers are used to take object spectra and other fibers are used to
+take simultaneous arc spectra. A three fiber instrument of this
+type (one object and two arc fibers) is available at the KPNO coude feed.
+The default parameters are set for this configuration.
+If there are a large number of fibers and fiber throughput and sky
+fiber subtraction is needed the \fBdofiber\fR task should be used.
+
+The \fBdo3fiber\fR task is a command language script which collects
+and combines the functions and parameters of many general purpose tasks to
+provide a single complete data reduction path. The task provides a degree
+of guidance, automation, and record keeping necessary when dealing with
+this type of multifiber data.
+
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \fIredo\fR and \fIupdate\fR options, skips or
+repeats some or all of the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdo3fiber\fR combines many separate, general purpose tasks the
+description given here refers to these tasks and leaves some of the details
+to their help documentation.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdo3fiber\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed
+outside of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword
+should be added to the image headers, say with \fBhedit\fR.
+.le
+.ls [2]
+Set the \fBdo3fiber\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The apertures are defined using the specified aperture reference image
+which is usually a flat field in which both the object and arc fibers are
+illuminated. The specified number of fibers are found automatically and
+sequential apertures assigned.
+.le
+.ls [5]
+A query is given allowing the apertures to be interactively reviewed.
+In this mode one may adjust the aperture widths as desired either
+explicitly (:lower and :upper), with the cursor ('l' and 'u'), at a
+particular flux level ('y'), or with an automatic algorithm ('z')
+as described by \fBapresize\fR. To exit type 'q'.
+.le
+.ls [6]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.le
+.ls [7]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check whether the fit parameters are satisfactory at other
+points, or quitting and accepting the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.le
+.ls [8]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra.
+The final response spectra are normalized to a unit
+mean over all fibers.
+.le
+.ls [9]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.le
+.ls [10]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.le
+.ls [11]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.le
+.ls [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+The reference apertures are first assigned
+to the object spectra. If the \fIrecenter\fR option is set the apertures
+will have a shift applied based on recentering the fiber profiles.
+If the \fIedit\fR option is set you may review and modify
+the aperture definitions interactively. Any new
+arcs assigned to the object images are automatically extracted and
+dispersion functions determined. A zero point wavelength correction
+is computed from the arc fiber spectra and applied to the object spectrum.
+.le
+.ls [13]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.le
+.ls [14]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added.
+.le
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of multifiber object and calibration spectra
+stored as IRAF images. The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+There are two types of calibration images. These
+are flat fields and comparison lamp arc spectra. The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This is
+generally done using the \fBccdred\fR package.
+The \fBdo3fiber\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed
+outside of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword
+should be added to the image headers, say with \fBhedit\fR.
+Flat fielding is generally
+not done at this stage but as part of \fBdo3fiber\fR. If for some reason
+the flat field or calibration arc spectra have separate exposures through
+different fibers they may be simply added.
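+
+As an illustration of the CCDPROC requirement mentioned above, a dummy
+keyword might be added with a command along the lines of the following
+(the image list and keyword value shown here are arbitrary):
+.nf
+
+    cl> hedit obj*.imh ccdproc "Processed outside IRAF" add+ verify-
+
+.fi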
+
+The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in the task \fBrefspectra\fR.
+
+The final reduced spectra are recorded in one, two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. With a single object spectrum the image will be one dimensional
+and with multiple object spectra the image will be two dimensional.
+When the \fIextras\fR parameter is set the images will be three
+dimensional (regardless of the number of apertures) and the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR.
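+For example, assuming an object image named "demoobj" has been processed,
+the resulting spectrum might be examined with
+.nf
+
+    cl> splot demoobj.ms
+
+.fi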
+
+\fBPackage Parameters\fR
+
+The \fBkpnocoude\fR package parameters set parameters affecting all the tasks
+in the package. Some of the parameters are not applicable to the
+\fBdo3fiber\fR task. The observatory parameter is only required for data
+without an OBSERVAT header parameter (currently included in NOAO data).
+The spectrum interpolation type might be changed to "sinc" but with the
+cautions given in \fBonedspec.package\fR. The dispersion axis parameter is
+only needed if a DISPAXIS image header parameter is not defined. The other
+parameters define the standard I/O functions. The verbose parameter
+selects whether to print everything which goes into the log file on the
+terminal. It is useful for monitoring what the \fBdo3fiber\fR task does. The
+log and plot files are useful for keeping a record of the processing. A
+log file is highly recommended. A plot file provides a record of
+apertures, traces, and extracted spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
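+As an illustration, assuming the package plotfile parameter has been set
+to "demoplotfile", the accumulated plots might be reviewed with a command
+like
+.nf
+
+    cl> gkimosaic demoplotfile nx=3 ny=3 device=stdgraph
+
+.fi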
+
+\fBProcessing Parameters\fR
+
+The input images are specified by image lists. The lists may be
+a list of explicit, comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+The aperture reference spectrum is used to find the spectrum profiles and trace
+them. Thus, this requires an image with good signal in all fibers
+which usually means a flat field spectrum. It is recommended that
+flat field correction be done using one dimensional extracted spectra
+rather than as two dimensional images. This is done if a flat field
+spectrum is specified. The arc assignment table is used to specifically
+assign arc spectra to particular object spectra and the format
+of the file is described in \fBrefspectra\fR.
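+
+As a sketch (the file and image names here are only illustrative), a list
+of object spectra might be prepared and used as an "@ file" with commands
+such as
+.nf
+
+    cl> files obj*.imh > objects.lis
+    cl> do3fiber @objects.lis apref=flat001 flat=flat001 arcs=arc001
+
+.fi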
+
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. The dispersion axis defines the wavelength direction
+of spectra in the image if not defined in the image header by the keyword
+DISPAXIS. The width parameter (in pixels) is used for the profile finding and
+centering algorithm (\fBcenter1d\fR).
+
+The number of fibers is fairly obvious. It is the number of
+fibers, including the arc fibers, to be automatically found and
+assigned apertures. The apertures are assigned aperture
+numbers sequentially. The object and arc fibers are identified
+by these aperture numbers as specified by the \fIobjaps\fR and
+\fIarcaps\fR parameters. The defaults are for the case of three
+fibers in the sequence arc fiber, object fiber, and arc fiber.
+
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
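+For example, approximate values might be set before running the task with
+assignments such as the following (the numbers here, similar to those used
+in the package demonstration, are purely illustrative):
+.nf
+
+    cl> do3fiber.crval = 6600
+    cl> do3fiber.cdelt = 6.1
+
+.fi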
+
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra.
+
+The apertures defined for the aperture reference image are assigned to
+each image. For the object images the apertures may be shifted across
+the dispersion by recentering the strongest profiles and averaging
+the individual shifts to form a single shift for all apertures. This
+corrects for shifts in the detector during the observations. The
+\fIrecenter\fR parameter selects whether to apply this shift or not.
+
+The \fIedit\fR option allows you to be queried to review the apertures
+assigned to each object image. If selected and the query answered
+affirmatively the apertures may be interactively shifted and resized. The
+query may also be answered with "NO" to turn off this option during
+processing. Note that the initial aperture definitions for the aperture
+reference image always allows editing.
+
+The \fIclean\fR option invokes a profile fitting and deviant
+point rejection algorithm as well as a variance weighting of points in the
+aperture. These options require knowing the effective (i.e. accounting for
+any image combining) read out noise and gain. For a discussion of cleaning
+and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR.
+
+The dispersion correction option selects whether to extract arc spectra,
+determine dispersion functions, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale.
+
+The \fIsplot\fR option allows a query (which may be answered with "YES"
+or "NO" to eliminate the query) and then plotting of the final object
+spectra if answered affirmatively. The plotting is done with the
+task \fBsplot\fR.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+reference image, new flat field, and a new arc reference image. If all
+input spectra are to be processed regardless of previous processing the
+\fIredo\fR flag may be used. Note that reprocessing clobbers the
+previously processed output spectra.
+
+The \fIbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if the aperture editing
+and final spectrum plotting have been turned off, either with the task
+option parameter or by answering "NO" to the queries. The \fIlistonly\fR
+option prints a summary of the processing steps which will be performed on
+the input spectra without actually doing anything. This is useful for
+verifying which spectra will be affected if the input list contains
+previously processed spectra. The listing does not include any arc spectra
+which may be extracted to dispersion calibrate an object spectrum.
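+
+As an illustration of the batch option (the image names here are
+arbitrary), a noninteractive batch reduction might be started with
+something like
+.nf
+
+    cl> do3fiber obj* apref=flat001 flat=flat001 arcs=arc001 edit- splot- batch+
+
+.fi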
+
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. The way \fBdo3fiber\fR works
+this may not have any value and the parameter set \fBparams\fR is always
+used. The algorithm parameters are discussed further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the \fBdo3fiber\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdo3fiber\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+this type of data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.nf
+
+ cl> epar params
+
+.fi
+or simply typing \fIparams\fR. The parameter editor can also be
+entered when editing the \fBdo3fiber\fR parameters by typing \fI:e
+params\fR or simply \fI:e\fR if positioned at the \fIparams\fR
+parameter.
+
+\fBAperture Definitions\fR
+
+The first operation is to define the extraction apertures, which include
+the aperture width and position dependence with wavelength, for the object
+and arc fibers. This is done on a reference spectrum which is usually a
+flat field taken through both fibers. Other spectra will inherit the
+reference apertures and may apply a correction for any shift of the orders
+across the dispersion. The reference apertures are defined only once
+unless the \fIredo\fR option is set.
+
+The selected number of fibers are found automatically by selecting the
+highest peaks in a cut across the dispersion. Apertures are assigned with
+limits set by the \fIlower\fR and \fIupper\fR parameters and numbered
+sequentially. A query is then given allowing the apertures to be reviewed
+interactively. If answered affirmatively a cut across the orders is shown
+with the apertures marked and an interactive aperture editing mode is
+entered (see \fBapedit\fR). The main thing to be concerned about is that
+the aperture numbers agree with the \fIobjaps\fR and \fIarcaps\fR
+definitions. The aperture numbers may be changed with the 'i' or 'o'
+keys. The apertures may also be resized from the default limits.
+To exit the background and aperture editing steps type 'q'.
+
+Next the positions of the fiber profiles at various points along the
+dispersion are measured and a "trace function" is fit. The user is asked
+whether to fit the trace function interactively. This is selected to
+adjust the fitting parameters such as function type and order. When
+interactively fitting a query is given for each aperture. After the first
+aperture one may skip reviewing the other traces by responding with "NO".
+Queries made by \fBdo3fiber\fR generally may be answered with either lower
+case "yes" or "no" or with upper case "YES" or "NO". The upper case
+responses apply to all further queries and so are used to eliminate further
+queries of that kind.
+
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the orders and the number of image lines or
+columns to sum are set by the \fIline\fR and \fInsum\fR parameters. A line
+of INDEF (the default) selects the middle of the image. The automatic
+finding algorithm is described for the task \fBapfind\fR and basically
+finds the strongest peaks. The tracing is done as described in
+\fBaptrace\fR and consists of stepping along the image using the specified
+\fIt_step\fR parameter. The function fitting uses the \fBicfit\fR commands
+with the other parameters from the tracing section.
+
+\fBExtraction\fR
+
+The actual extraction of the spectra is done by summing across the fixed
+width apertures at each point along the dispersion. The default is to
+simply sum the pixels using partial pixels at the ends. There is an
+option to weight the sum based on a Poisson noise model using the
+\fIreadnoise\fR and \fIgain\fR detector parameters. Note that if the
+\fIclean\fR option is selected the variance weighted extraction is used
+regardless of the \fIweights\fR parameter. The sigma thresholds for
+cleaning are also set in the \fBparams\fR parameters.
+
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as a background light
+subtraction) or scaling (such as caused by unnormalized flat fielding).
+For optimal extraction and cleaning to work it is recommended that
+a \fIdatamax\fR value be determined for the data and the
+\fIfitflat\fR option be used. For further discussion of cleaning and
+variance weighted extraction see \fBapvariance\fR and \fBapprofiles\fR as
+well as \fBapsum\fR.
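+
+As a rough sketch of choosing a \fIdatamax\fR value (the image name is
+only illustrative), the brightest pixels in an object image might be
+inspected with
+.nf
+
+    cl> imstatistics demoobj fields="image,max"
+
+.fi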
+
+\fBScattered Light Subtraction\fR
+
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+
+\fBFlat Field Correction\fR
+
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdo3fiber\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdo3fiber\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+If the \fIfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \fIf_function\fR and
+\fIf_order\fR. If the parameter \fIf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+
+The final step is to normalize the flat field spectra by the mean counts over
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra.
+
+\fBDispersion Correction\fR
+
+If dispersion correction is not selected, \fIdispcor\fR=no, then the object
+spectra are simply extracted. If it is selected the arc spectra are used
+to dispersion calibrate the object spectra. There are four steps involved;
+determining the dispersion functions relating pixel position to wavelength,
+assigning the appropriate dispersion functions to a particular observation,
+determining a zero point wavelength shift from the arc fibers to be applied
+to the object fiber dispersion functions, and either storing the nonlinear
+dispersion function in the image headers or resampling the spectra to
+evenly spaced pixels in wavelength.
+
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture
+definitions. The interactive task \fBautoidentify\fR is used to
+automatically define the dispersion function in one fiber. Whether or not
+it is successful the user is presented with the interactive identification
+graph. The automatic identifications can be reviewed and a new solution or
+corrections to the automatic solution may be performed. The dispersion
+functions for the other fibers are then determined automatically by
+reference to the first fiber using the task \fBreidentify\fR. Except in
+batch mode a query is given allowing the reidentified arc spectra to be
+examined interactively with \fBidentify\fR. This would normally be done
+only if the information about the reidentification printed on the terminal
+indicates a problem such as a large increase in the RMS. This query may be
+eliminated in the usual way.
+
+The set of arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
+
+If resampling of the spectra is selected by the parameter \fIlinearize\fR,
+the arc dispersion functions are combined to determine a default
+starting wavelength, ending wavelength, dispersion, and number of
+pixels, and the user is queried for any changes. This
+linear dispersion system will be applied to all spectra so that all
+the final processed object spectra will have the same dispersion
+sampling.
+
+Once the reference dispersion functions are defined other arc spectra are
+extracted as they are assigned to the object spectra. The assignment of
+arcs is done either explicitly with an arc assignment table (parameter
+\fIarctable\fR) or based on a header parameter such as a time.
+The assignments are made by the task \fBrefspectra\fR. When two arcs are
+assigned to an object spectrum an interpolation is done between the two
+dispersion functions. This makes an approximate correction for steady
+drifts in the dispersion. Because the arc fibers monitor any zero point
+shifts in the dispersion functions, due to translation and rotation of the
+detector, it is probably only necessary to have one or two arc spectra, one
+at the beginning and/or one at the end of the night.
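+
+For example, an arc assignment table is a simple text file in which each
+line gives an object image followed by the arc image or images to be
+assigned to it (see \fBrefspectra\fR for the exact format; the image
+names below are only illustrative):
+
+.nf
+    obj0032  arc0031
+    obj0033  arc0031  arc0035
+.fi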
+
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
+
+When the object spectra are extracted so are the simultaneous arc spectra.
+A zero point shift of the arc spectra relative to the dispersion solutions
+of an assigned full arc observation is computed using \fBreidentify\fR.
+The zero point shifts from the arc fibers are then
+interpolated across the detector based on the positions of the arc
+apertures to the positions of the object apertures. A linear interpolation
+is used which accounts for a rotation of the detector as well as a
+translation along the dispersion. The interpolated zero point wavelength
+shifts are then added to the dispersion functions from the full arc
+observation for the object fibers. Note that this does not assume that the
+object and arc fiber dispersion functions are the same or have the same
+wavelength origin, but only that the interpolated shifts in wavelength zero
+point apply to all fibers. When there are two assigned full arc spectra
+the above steps are done independently and the final pair of zero point
+corrected dispersion functions for each object fiber are combined using the
+assigned weights. Once the dispersion function correction is determined
+from the extracted arc fiber spectra they are deleted leaving only the
+object spectra.
+
+The last step of dispersion correction is setting the dispersion
+of the object spectra. There are two choices here.
+If the \fIlinearize\fR parameter is not set the nonlinear dispersion
+functions are stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+
+If the \fIlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. The linear dispersion parameters are those defined
+previously for the arc reference image.
+
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is also the sequence performed
+by the test procedure "demos do3fiber".
+
+.nf
+kp> demos mkdo3fiber
+Creating image demoobj ...
+Creating image demoflat ...
+Creating image demoarc ...
+kp> do3fiber demoobj apref=demoflat flat=demoflat arcs=demoarc \
+>>> width=4 edit=yes
+Set reference apertures for demoflat
+Resize apertures for demoflat? (yes):
+Edit apertures for demoflat? (yes):
+<Exit with 'q'>
+Fit traced positions for demoflat interactively? (yes):
+Fit curve to aperture 1 of demoflat interactively (yes):
+<Exit with 'q'>
+Fit curve to aperture 2 of demoflat interactively (yes): N
+Create response function demoflatnorm.ms
+Extract flat field demoflat
+Fit and ratio flat field demoflat
+Create the normalized response demoflatnorm.ms
+demoflatnorm.ms -> demoflatnorm.ms using bzero: 0. and bscale: 1.
+ mean: 1. median: 1.034214 mode: 0.8378798
+ upper: INDEF lower: INDEF
+Average aperture response:
+1. 0.8394014
+2. 1.034403
+3. 1.126194
+Extract arc reference image demoarc
+Determine dispersion solution for demoarc
+<Reset default line list with ":coord linelists$idhenear.dat">
+<A dispersion solution is found automatically.>
+<Examine the fit with 'f'>
+<Exit fit with 'q' and exit task with 'q'>
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 11:04:32 06-Mar-92
+ Reference image = demoarc.ms, New image = demoarc.ms, Refit = yes
+ Image Data Found Fit Pix Shift User Shift Z Shift RMS
+d...ms - Ap 1 30/30 29/30 -0.00675 -0.04 -6.9E-6 0.252
+Fit dispersion function interactively? (no|yes|NO|YES) (yes): n
+d...ms - Ap 3 30/30 29/30 -0.0154 -0.0928 -1.4E-5 0.303
+Fit dispersion function interactively? (no|yes|NO|YES) (no): y
+<Exit with 'q'>
+d...ms - Ap 3 30/30 29/30 -0.0154 -0.0928 -1.4E-5 0.303
+
+Dispersion correct demoarc
+d...ms: w1 = 5785.86, w2 = 7351.59, dw = 6.14, nw = 256
+ Change wavelength coordinate assignments? (yes|no|NO): N
+Extract object spectrum demoobj
+Edit apertures for demoobj? (yes): n
+Assign arc spectra for demoobj
+[demoobj] refspec1='demoarc'
+Reidentify arc fibers in demoobj with respect to demoarc
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 11:04:52 06-Mar-92
+ Reference image = demoarc.ms, New image = demoobjarc.ms, Refit = no
+ Image Data Found Fit Pix Shift User Shift Z Shift RMS
+d...ms - Ap 1 27/30 27/27 0.00502 0.0263 3.99E-6 0.175
+d...ms - Ap 3 27/30 27/27 8.62E-4 0.006 5.07E-7 0.248
+Dispersion correct demoobj
+demoobj.ms.imh: REFSHFT1 = 'demoobjarc.ms interp', shift = -0.0050,
+rms = 0.00282813 intercept = -0.0118401, slope = 2.70764E-4
+d...ms: ap = 2, w1 = 5785.86, w2 = 7351.59, dw = 6.14, nw = 256
+demoobj.ms.imh:
+Splot spectrum? (no|yes|NO|YES) (yes):
+<Exit with 'q'>
+.fi
+.ih
+REVISIONS
+.ls DO3FIBER V2.11
+The initial arc line identification is done with the automatic line
+identification algorithm.
+.le
+.ls DO3FIBER V2.10.3
+The usual output WCS format is "equispec". The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A scattered
+light subtraction processing option has been added.
+.le
+.ih
+SEE ALSO
+apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace, apvariance,
+ccdred, center1d, dispcor, fit1d, icfit, identify, observatory,
+onedspec.package, refspectra, reidentify, setairmass, setjd
+.endhelp
diff --git a/noao/imred/kpnocoude/doc/do3fiber.ms b/noao/imred/kpnocoude/doc/do3fiber.ms
new file mode 100644
index 00000000..d572d035
--- /dev/null
+++ b/noao/imred/kpnocoude/doc/do3fiber.ms
@@ -0,0 +1,1413 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND February 1993
+.TL
+Guide to the Coude Three Fiber Reduction Task DO3FIBER
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+The \fBdo3fiber\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, and wavelength calibration of multifiber data in which some
+fibers are used to take object spectra and other fibers are used to
+take simultaneous arc spectra. A three fiber instrument of this
+type (one object and two arc fibers) is available at the KPNO coude feed.
+The default parameters are set for this configuration.
+If there are a large number of fibers and fiber throughput corrections and
+sky fiber subtraction are needed, the \fBdofibers\fR task should be used.
+.LP
+The \fBdo3fiber\fR task is a command language script which collects
+and combines the functions and parameters of many general purpose tasks to
+provide a single complete data reduction path. The task provides a degree
+of guidance, automation, and record keeping necessary when dealing with
+this type of multifiber data.
+.AE
+.NH
+Introduction
+.LP
+The \fBdo3fiber\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, and wavelength calibration of multifiber data in which some
+fibers are used to take object spectra and other fibers are used to
+take simultaneous arc spectra. A three fiber instrument of this
+type (one object and two arc fibers) is available at the KPNO coude feed.
+The default parameters are set for this configuration.
+If there are a large number of fibers and fiber throughput corrections and
+sky fiber subtraction are needed, the \fBdofibers\fR task should be used.
+.LP
+The \fBdo3fiber\fR task is a command language script which collects
+and combines the functions and parameters of many general purpose tasks to
+provide a single complete data reduction path. The task provides a degree
+of guidance, automation, and record keeping necessary when dealing with
+this type of multifiber data.
+.LP
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \f(CWredo\fR and \f(CWupdate\fR options, skips or
+repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdo3fiber\fR combines many separate, general purpose tasks the
+description given here refers to these tasks and leaves some of the details
+to their help documentation.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdo3fiber\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+.IP [2]
+Set the \fBdo3fiber\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.IP [4]
+The apertures are defined using the specified aperture reference image
+which is usually a flat field in which both the object and arc fibers are
+illuminated. The specified number of fibers are found automatically and
+sequential apertures are assigned.
+.IP [5]
+A query is given allowing the apertures to be interactively reviewed.
+In this mode one may adjust the aperture widths as desired either
+explicitly (:lower and :upper), with the cursor ('l' and 'u'), at a
+particular flux level ('y'), or with an automatic algorithm ('z')
+as described by \fBapresize\fR. To exit type 'q'.
+.IP [6]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.IP [7]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.IP [8]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra.
+The final response spectra are normalized to a unit
+mean over all fibers.
+.IP [9]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.IP [10]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.IP [11]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.IP [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+The reference apertures are first assigned
+to the object spectra. If the \f(CWrecenter\fR option is set the apertures
+will have a shift applied based on recentering the fiber profiles.
+If the \f(CWedit\fR option is set you may review and modify
+the aperture definitions interactively. Any new
+arcs assigned to the object images are automatically extracted and
+dispersion functions determined. A zero point wavelength correction
+is computed from the arc fiber spectra and applied to the object spectrum.
+.IP [13]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.IP [14]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of multifiber object and calibration spectra
+stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+There are two types of calibration images. These
+are flat fields and comparison lamp arc spectra. The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This is
+generally done using the \fBccdred\fR package.
+The \fBdo3fiber\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+Flat fielding is generally
+not done at this stage but as part of \fBdo3fiber\fR. If for some reason
+the flat field or calibration arc spectra have separate exposures through
+different fibers they may be simply added.
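+.LP
+For example, a dummy CCDPROC keyword may be added with \fBhedit\fR; the
+image list and keyword value below are only illustrative.
+.V1
+
+    cl> hedit obj*.imh ccdproc "processed outside IRAF" add+ verify-
+
+.V2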
+.LP
+The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in the task \fBrefspectra\fR.
+.LP
+The final reduced spectra are recorded in one, two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. With a single object spectrum the image will be one dimensional
+and with multiple object spectra the image will be two dimensional.
+When the \f(CWextras\fR parameter is set the images will be three
+dimensional (regardless of the number of apertures) and the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR.
+.NH
+Package Parameters
+.LP
+The \fBkpnocoude\fR package parameters, shown in Figure 1, set parameters
+affecting all the tasks in the package. Some of the parameters are not
+applicable to the \fBdo3fiber\fR task.
+.KS
+.V1
+
+.ce
+Figure 1: Package Parameters for KPNOCOUDE
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = kpnocoude
+
+(extinct= onedstds$kpnoextinct.dat) Extinction file
+(caldir = onedstds$spec50cal/) Standard star calibration directory
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+(dispaxi= 2) Image axis for 2D images
+(nsum = 1) Number of lines/columns to sum for 2D images
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Log file
+(plotfil= ) Plot file
+
+(records= ) Record number extensions
+(version= KPNOCOUDE V3: January 1992)
+
+.KE
+.V2
+The observatory parameter is only required for data
+without an OBSERVAT header parameter (currently included in NOAO data).
+The spectrum interpolation type might be changed to "sinc" but with the
+cautions given in \fBonedspec.package\fR. The dispersion axis parameter is
+only needed if a DISPAXIS image header parameter is not defined. The other
+parameters define the standard I/O functions. The verbose parameter
+selects whether to print everything which goes into the log file on the
+terminal. It is useful for monitoring what the \fBdo3fiber\fR task does. The
+log and plot files are useful for keeping a record of the processing. A
+log file is highly recommended. A plot file provides a record of
+apertures, traces, and extracted spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+.NH
+Processing Parameters
+.LP
+The \fBdo3fiber\fR parameters are shown in Figure 2.
+.KS
+.V1
+
+.ce
+Figure 2: Parameter Set for DO3FIBER
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = kpnocoude
+ TASK = do3fiber
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(flat = ) Flat field spectrum
+(arcs = ) List of arc spectra
+(arctabl= ) Arc assignment table (optional)
+
+.KE
+.V1
+(readnoi= RDNOISE) Read out noise sigma (photons)
+(gain = GAIN) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(fibers = 3) Number of fibers
+(width = 6.) Width of profiles (pixels)
+(crval = INDEF) Approximate wavelength
+(cdelt = INDEF) Approximate dispersion
+(objaps = 2) Object apertures
+(arcaps = 1,3) Arc apertures
+
+(scatter= no) Subtract scattered light?
+(fitflat= yes) Fit and ratio flat field spectrum?
+(recente= yes) Recenter object apertures?
+(edit = no) Edit/review object apertures?
+(clean = no) Detect and replace bad pixels?
+(dispcor= yes) Dispersion correct spectra?
+(splot = yes) Plot the final spectrum?
+(redo = no) Redo operations if previously done?
+(update = yes) Update spectra if cal data changes?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(params = ) Algorithm parameters
+
+.V2
+The input images are specified by image lists. The lists may be
+a list of explicit, comma separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+The aperture reference spectrum is used to find the spectrum profiles and trace
+them. Thus, this requires an image with good signal in all fibers
+which usually means a flat field spectrum. It is recommended that
+flat field correction be done using one dimensional extracted spectra
+rather than as two dimensional images. This is done if a flat field
+spectrum is specified. The arc assignment table is used to specifically
+assign arc spectra to particular object spectra and the format
+of the file is described in \fBrefspectra\fR.
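+.LP
+As an illustration, a typical invocation using an image template for the
+objects and an "@" file for the arcs might be as follows (the image and
+file names are hypothetical):
+.V1
+
+    cl> do3fiber obj*.imh apref=flat0022 flat=flat0022 arcs=@arcs.lis
+
+.V2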
+.LP
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays; ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
+The dispersion axis defines the wavelength direction
+of spectra in the image if not defined in the image header by the keyword
+DISPAXIS. The width parameter (in pixels) is used for the profile finding and
+centering algorithm (\fBcenter1d\fR).
+.LP
+The number of fibers is fairly obvious. It is the number of
+fibers, including the arc fibers, to be automatically found and
+assigned apertures. The apertures are assigned aperture
+numbers sequentially. The object and arc fibers are identified
+by these aperture numbers as specified by the \f(CWobjaps\fR and
+\f(CWarcaps\fR parameters. The defaults are for the case of three
+fibers in the sequence arc fiber, object fiber, and arc fiber.
+.LP
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. If INDEF values are given the
+entire range of the coordinate reference file is searched, but the
+automatic line identification algorithm works much better and faster if
+approximate values are given.
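+.LP
+For example, assuming the remaining parameters have already been set with
+\fBeparam\fR, approximate values could be given on the command line (the
+numbers below are purely illustrative) or image header keyword names could
+be given instead:
+.V1
+
+    cl> do3fiber obj0032 arcs=arc0031 crval=5790. cdelt=6.1
+
+.V2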
+.LP
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra.
+.LP
+The apertures defined for the aperture reference image are assigned to
+each image. For the object images the apertures may be shifted across
+the dispersion by recentering the strongest profiles and averaging
+the individual shifts to form a single shift for all apertures. This
+corrects for shifts in the detector during the observations. The
+\f(CWrecenter\fR parameter selects whether to apply this shift or not.
+.LP
+The \f(CWedit\fR option allows you to be queried to review the apertures
+assigned to each object image. If selected and the query answered
+affirmatively the apertures may be interactively shifted and resized. The
+query may also be answered with "NO" to turn off this option during
+processing. Note that the initial aperture definitions for the aperture
+reference image always allow editing.
+.LP
+The \f(CWclean\fR option invokes a profile fitting and deviant
+point rejection algorithm as well as a variance weighting of points in the
+aperture. These options require knowing the effective (i.e. accounting for
+any image combining) read out noise and gain. For a discussion of cleaning
+and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR.
+.LP
+The dispersion correction option selects whether to extract arc spectra,
+determine dispersion functions, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale.
+.LP
+The \f(CWsplot\fR option presents a query (which may be answered with "YES"
+or "NO" to eliminate further queries) and, if answered affirmatively, plots
+the final object spectra. The plotting is done with the
+task \fBsplot\fR.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+reference image, new flat field, and a new arc reference image. If all
+input spectra are to be processed regardless of previous processing the
+\f(CWredo\fR flag may be used. Note that reprocessing clobbers the
+previously processed output spectra.
+.LP
+The \f(CWbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if the aperture editing
+and final spectrum plotting have been turned off, either with the task
+option parameter or by answering "NO" to the queries. The \f(CWlistonly\fR
+option prints a summary of the processing steps which will be performed on
+the input spectra without actually doing anything. This is useful for
+verifying which spectra will be affected if the input list contains
+previously processed spectra. The listing does not include any arc spectra
+which may be extracted to dispersion calibrate an object spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdo3fiber\fR
+works this parameter may not be changed to another parameter file; the
+parameter set \fBparams\fR is always used. The algorithm parameters are
+discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the \fBdo3fiber\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdo3fiber\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+this type of data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.V1
+
+ cl> epar params
+
+.V2
+or simply typing \f(CWparams\fR. The parameter editor can also be
+entered when editing the \fBdo3fiber\fR parameters by typing \f(CW:e
+params\fR or simply \f(CW:e\fR if positioned at the \f(CWparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = kpnocoude
+ TASK = params
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -3.) Lower aperture limit relative to center
+(upper = 3.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 2) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- SCATTERED LIGHT PARAMETERS --
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+(nsubaps= 1) Number of subapertures
+
+.KE
+.KS
+.V1
+ -- FLAT FIELD FUNCTION FITTING PARAMETERS --
+(f_inter= yes) Fit flat field interactively?
+(f_funct= spline3) Fitting function
+(f_order= 20) Fitting function order
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli= linelists$idhenear.dat) Line list
+(match = 10.) Line list matching limit in Angstroms
+(fwidth = 3.5) Arc line widths in pixels
+(cradius= 4.) Centering radius in pixels
+(i_funct= legendre) Coordinate function
+(i_order= 3) Order of dispersion function
+(i_niter= 3) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+(addfeat= no) Add features when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.V2
+.NH 2
+Aperture Definitions
+.LP
+The first operation is to define the extraction apertures, which include
+the aperture width and position dependence with wavelength, for the object
+and arc fibers. This is done on a reference spectrum which is usually a
+flat field taken through both fibers. Other spectra will inherit the
+reference apertures and may apply a correction for any shift of the orders
+across the dispersion. The reference apertures are defined only once
+unless the \f(CWredo\fR option is set.
+.LP
+The selected number of fibers are found automatically by selecting the
+highest peaks in a cut across the dispersion. Apertures are assigned with
+limits set by the \f(CWlower\fR and \f(CWupper\fR parameters and numbered
+sequentially. A query is then given allowing the apertures to be reviewed
+interactively. If answered affirmatively a cut across the orders is shown
+with the apertures marked and an interactive aperture editing mode is
+entered (see \fBapedit\fR). The main thing to be concerned about is that
+the aperture numbers agree with the \f(CWobjaps\fR and \f(CWarcaps\fR
+definitions. The aperture numbers may be changed with the 'i' or 'o'
+keys. The apertures may also be resized from the default limits.
+To exit the background and aperture editing steps type 'q'.
+.LP
+Next the positions of the fiber profiles at various points along the
+dispersion are measured and a "trace function" is fit. The user is asked
+whether to fit the trace function interactively. This is selected to
+adjust the fitting parameters such as function type and order. When
+interactively fitting a query is given for each aperture. After the first
+aperture one may skip reviewing the other traces by responding with "NO".
+Queries made by \fBdo3fiber\fR generally may be answered with either lower
+case "yes" or "no" or with upper case "YES" or "NO". The upper case
+responses apply to all further queries and so are used to eliminate further
+queries of that kind.
+.LP
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the orders and the number of image lines or
+columns to sum are set by the \f(CWline\fR and \f(CWnsum\fR parameters. A line
+of INDEF (the default) selects the middle of the image. The automatic
+finding algorithm is described for the task \fBapfind\fR and basically
+finds the strongest peaks. The tracing is done as described in
+\fBaptrace\fR and consists of stepping along the image using the specified
+\f(CWt_step\fR parameter. The function fitting uses the \fBicfit\fR commands
+with the other parameters from the tracing section.
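+.LP
+For example, if the default trace fit proves inadequate the fitting
+function and order may be changed in the algorithm parameter set before
+rerunning the task (the values below are only illustrative):
+.V1
+
+    cl> params.t_function = "legendre"
+    cl> params.t_order = 3
+
+.V2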
+.NH 2
+Extraction
+.LP
+The actual extraction of the spectra is done by summing across the fixed
+width apertures at each point along the dispersion. The default is to
+simply sum the pixels using partial pixels at the ends. There is an
+option to weight the sum based on a Poisson noise model using the
+\f(CWreadnoise\fR and \f(CWgain\fR detector parameters. Note that if the
+\f(CWclean\fR option is selected the variance weighted extraction is used
+regardless of the \f(CWweights\fR parameter. The sigma thresholds for
+cleaning are also set in the \fBparams\fR parameters.
+.LP
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as a background light
+subtraction) or scaling (such as caused by unnormalized flat fielding).
+For optimal extraction and cleaning to work it is recommended that the
+\f(CWfitflat\fR option be used. For further discussion of cleaning and
+variance weighted extraction see \fBapvariance\fR and \fBapprofiles\fR as
+well as \fBapsum\fR.
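+.LP
+For example, cleaning with an explicit cosmic ray ceiling might be selected
+as follows, where the \f(CWdatamax\fR value is only illustrative and should
+be set above the highest real data values expected in the spectra:
+.V1
+
+    cl> do3fiber obj0032 clean=yes datamax=60000
+
+.V2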
+.NH 2
+Scattered Light Subtraction
+.LP
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+.LP
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+.LP
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
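+.LP
+The recorded fitting parameters may be reviewed or changed between
+executions with the parameter editor; for example
+.V1
+
+    cl> epar apscat1
+    cl> epar apscat2
+
+.V2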
+.LP
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+.NH 2
+Flat Field Correction
+.LP
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdo3fiber\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdo3fiber\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+.LP
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+If the \f(CWfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \f(CWf_function\fR and
+\f(CWf_order\fR. If the parameter \f(CWf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+.LP
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+.LP
+The final step is to normalize the flat field spectra by the mean counts over
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra.
+.NH 2
+Dispersion Correction
+.LP
+If dispersion correction is not selected, \f(CWdispcor\fR=no, then the object
+spectra are simply extracted. If it is selected the arc spectra are used
+to dispersion calibrate the object spectra. There are four steps involved:
+determining the dispersion functions relating pixel position to wavelength,
+assigning the appropriate dispersion function to a particular observation,
+determining a zero point wavelength shift from the arc fibers to be applied
+to the object fiber dispersion functions, and either storing the nonlinear
+dispersion functions in the image headers or resampling the spectra to
+evenly spaced pixels in wavelength.
+.LP
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted using the reference aperture
+definitions. The interactive task \fBautoidentify\fR is used to
+automatically define the dispersion function in one fiber. Whether or not
+it is successful the user is presented with the interactive identification
+graph. The automatic identifications can be reviewed and a new solution or
+corrections to the automatic solution may be performed. The dispersion
+functions for the other fibers are then determined automatically by
+reference to the first fiber using the task \fBreidentify\fR. Except in
+batch mode a query is given allowing the reidentified arc spectra to be
+examined interactively with \fBidentify\fR. This would normally be done
+only if the information about the reidentification printed on the terminal
+indicates a problem such as a large increase in the RMS. This query may be
+eliminated in the usual way.
+.LP
+The set of arc dispersion function parameters is taken from \fBidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBidentify\fR.
+.LP
+If resampling of the spectra is selected by the parameter \f(CWlinearize\fR,
+the arc dispersion functions are combined to determine a default
+starting wavelength, ending wavelength, dispersion, and number of
+pixels, and the user is queried for any changes. This
+linear dispersion system will be applied to all spectra so that all
+the final processed object spectra will have the same dispersion
+sampling.
+.LP
+Once the reference dispersion functions are defined other arc spectra are
+extracted as they are assigned to the object spectra. The assignment of
+arcs is done either explicitly with an arc assignment table (parameter
+\f(CWarctable\fR) or based on a header parameter such as a time.
+The assignments are made by the task \fBrefspectra\fR. When two arcs are
+assigned to an object spectrum an interpolation is done between the two
+dispersion functions. This makes an approximate correction for steady
+drifts in the dispersion. Because the arc fibers monitor any zero point
+shifts in the dispersion functions, due to translation and rotation of the
+detector, it is probably only necessary to have one or two arc spectra, one
+at the beginning and/or one at the end of the night.
+.LP
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
+.LP
+When the object spectra are extracted so are the simultaneous arc spectra.
+A zero point shift of the arc spectra relative to the dispersion solutions
+of an assigned full arc observation is computed using \fBreidentify\fR.
+The zero point shifts from the arc fibers are then
+interpolated across the detector based on the positions of the arc
+apertures to the positions of the object apertures. A linear interpolation
+is used which accounts for a rotation of the detector as well as a
+translation along the dispersion. The interpolated zero point wavelength
+shifts are then added to the dispersion functions from the full arc
+observation for the object fibers. Note that this does not assume that the
+object and arc fiber dispersion functions are the same or have the same
+wavelength origin, but only that the interpolated shifts in wavelength zero
+point apply to all fibers. When there are two assigned full arc spectra
+the above steps are done independently and the final pair of zero point
+corrected dispersion functions for each object fiber are combined using the
+assigned weights. Once the dispersion function correction is determined
+from the extracted arc fiber spectra they are deleted leaving only the
+object spectra.
+.LP
+The last step of dispersion correction is setting the dispersion
+of the object spectra. There are two choices here.
+If the \f(CWlinearize\fR parameter is not set the nonlinear dispersion
+functions are stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+.LP
+If the \f(CWlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength. The linear dispersion parameters are those defined
+previously for the arc reference image.
+.LP
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
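+.LP
+For example, to resample to a logarithmic wavelength scale for radial
+velocity work the logarithmic binning option could be turned on before
+processing (using the parameter name discussed above):
+.V1
+
+    cl> params.log = yes
+
+.V2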
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+Each task in the \fBkpnocoude\fR package and each task used by \fBdo3fiber\fR
+has a help page describing the parameters and task in some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit and normalize the continuum of multispec spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify arc lines and determine a dispersion function
+ msresp1d - Create fiber response spectra from flat field and sky spectra
+ refspectra - Assign reference spectra to observations
+ reidentify - Reidentify arc lines and determine new dispersion functions
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Copy spectra including aperture selection and format changes
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ slist - List spectrum headers
+ specplot - Stack and plot multiple spectra
+ splot - Plot and analyze spectra
+ standard - Identify standard stars to be used in sensitivity calc
+
+ do3fiber - Process KPNO coude three fiber spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+.V2
+.SH
+Appendix A: DO3FIBER Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.LE
+flat = "" (optional)
+.LS
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field corrections.
+.LE
+arcs = "" (at least one if dispersion correcting)
+.LS
+List of primary, all fiber arc spectra. These spectra are used to define
+the dispersion functions for each fiber apart from a possible zero point
+correction made with simultaneous arc calibration fibers in the object
+spectra. One fiber from the first spectrum is used to mark lines and set
+the dispersion function interactively and dispersion functions for all
+other fibers and arc spectra are derived from it.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \f(CWparams.sort\fR, such as the observation time is made.
+.LE
+
+readnoise = "RDNOISE" (apsum)
+.LS
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.LE
+gain = "GAIN" (apsum)
+.LS
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+fibers = 3 (apfind)
+.LS
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum.
+.LE
+width = 6. (apedit)
+.LS
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.LE
+crval = INDEF, cdelt = INDEF (autoidentify)
+.LS
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used. If one or both of these parameters are
+specified as INDEF the search for a solution will be slower and more likely
+to fail.
+.LE
+objaps = "2", arcaps = "1,3"
+.LS
+List of object and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object apertures
+for the final results.
+.LE
+
+scattered = no (apscatter)
+.LS
+Smooth and subtract scattered light from the object and flat field
+images? This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The initial
+flat field, or if none is given the aperture reference image, is
+fit interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.LE
+fitflat = yes (flat1d)
+.LS
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.LE
+recenter = yes (aprecenter)
+.LS
+Recenter reference apertures for each object spectrum?
+.LE
+edit = no (apedit)
+.LS
+Review aperture definitions for each object spectrum? Note that this does
+not apply to the initial reference aperture which always allows
+interactive review of the aperture definitions.
+.LE
+clean = no (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.LE
+dispcor = yes
+.LS
+Dispersion correct spectra? Depending on the \f(CWparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.LE
+splot = yes
+.LS
+Plot the final spectra with the task \fBsplot\fR?
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.LE
+update = yes
+.LS
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job provided there are no interactive
+options (\f(CWedit\fR and \f(CWsplot\fR) selected.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+params = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.LE
+
+.ce
+PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdo3fiber\fR.
+
+observatory = "observatory"
+.LS
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. For NOAO data the image headers
+identify the observatory as "kpno" or "ctio" so this parameter is not used.
+For data from other observatories this parameter may be used
+as described in \fBobservatory\fR.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
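+
+As an illustration, the package default might be changed to sinc
+interpolation with:
+
+.V1
+    cl> kpnocoude.interp = "sinc"
+.V2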
+.LE
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter.
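+
+If DISPAXIS is missing it might be added to the headers with a task
+such as \fBhedit\fR; the image name below is only illustrative:
+
+.V1
+    cl> hedit obj001 DISPAXIS 2 add+ verify-
+.V2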
+.LE
+database = "database"
+.LS
+Database (directory) used for storing aperture and dispersion information.
+.LE
+verbose = no
+.LS
+Print verbose information available with various tasks.
+.LE
+logfile = "logfile", plotfile = ""
+.LS
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.LE
+records = ""
+.LS
+Dummy parameter to be ignored.
+.LE
+version = "KPNOCOUDE: ..."
+.LS
+Version of the package.
+.LE
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdo3fiber\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+extras = no (apsum)
+.LS
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -3., upper = 3. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 2 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.LE
+apscat1 = "" (apscatter)
+.LS
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.LE
+apscat2 = "" (apscatter)
+.LS
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
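+
+If the \fBapextract\fR package is loaded these parameter sets may be
+reviewed in the usual way, for instance:
+
+.V1
+    cl> epar apscat1
+    cl> epar apscat2
+.V2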
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum) (fit1d|fit2d)
+.LS
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for most data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+nsubaps = 1 (apsum)
+.LS
+During extraction each aperture may be divided equally into
+this number of subapertures.
+.LE
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+
+f_interactive = yes (fit1d)
+.LS
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \f(CWfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.LE
+f_function = "spline3", f_order = 20 (fit1d)
+.LS
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (autoidentify/identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.LE
+match = -3. (autoidentify/identify)
+.LS
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 3.5 (autoidentify/identify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 4. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "legendre", i_order = 3 (autoidentify/identify)
+.LS
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.LE
+i_niterate = 3, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (reidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+addfeatures = no (reidentify)
+.LS
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra, with equal weights, without regard to
+any sorting parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding or following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd", group = "ljd" (refspectra)
+.LS
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdo3fiber\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
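+
+As an example, grouping might be disabled while still sorting by
+Julian date with:
+
+.V1
+    cl> params.sort = "jd"
+    cl> params.group = "none"
+.V2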
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no, the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
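+
+For instance, to keep the nonlinear dispersion functions rather than
+resampling the spectra one might set:
+
+.V1
+    cl> params.linearize = no
+.V2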
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
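+
+For example, the current image type may be checked and changed from the
+CL with:
+
+.V1
+    cl> show imtype
+    cl> reset imtype = imh
+.V2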
diff --git a/noao/imred/kpnocoude/identify.par b/noao/imred/kpnocoude/identify.par
new file mode 100644
index 00000000..4aec4ad2
--- /dev/null
+++ b/noao/imred/kpnocoude/identify.par
@@ -0,0 +1,33 @@
+# Parameters for identify task.
+
+images,s,a,,,,Images containing features to be identified
+section,s,h,"middle line",,,Section to apply to two dimensional images
+database,f,h,database,,,Database in which to record feature data
+coordlist,f,h,linelists$thar.dat,,,User coordinate list
+units,s,h,"",,,Coordinate units
+nsum,s,h,"1",,,Number of lines/columns/bands to sum in 2D images
+match,r,h,0.2,,,Coordinate list matching limit
+maxfeatures,i,h,100,,,Maximum number of features for automatic identification
+zwidth,r,h,100.,,,Zoom graph width in user units
+
+ftype,s,h,"emission","emission|absorption",,Feature type
+fwidth,r,h,3.5,,,Feature width in pixels
+cradius,r,h,4.,,,Centering radius in pixels
+threshold,r,h,10.,0.,,Feature threshold for centering
+minsep,r,h,4.,0.,,Minimum pixel separation
+
+function,s,h,"legendre","legendre|chebyshev|spline1|spline3",,Coordinate function
+order,i,h,3,1,,Order of coordinate function
+sample,s,h,"*",,,Coordinate sample regions
+niterate,i,h,3,0,,Rejection iterations
+low_reject,r,h,3.,0.,,Lower rejection sigma
+high_reject,r,h,3.,0.,,Upper rejection sigma
+grow,r,h,0.,0.,,Rejection growing radius
+
+autowrite,b,h,no,,,"Automatically write to database"
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/kpnocoude/kpnocoude.cl b/noao/imred/kpnocoude/kpnocoude.cl
new file mode 100644
index 00000000..60b45595
--- /dev/null
+++ b/noao/imred/kpnocoude/kpnocoude.cl
@@ -0,0 +1,98 @@
+#{ KPNOCOUDE package definition
+
+proto # bscale
+
+s1 = envget ("min_lenuserarea")
+if (s1 == "")
+ reset min_lenuserarea = 100000
+else if (int (s1) < 100000)
+ reset min_lenuserarea = 100000
+
+# Define KPNOCOUDE package
+package kpnocoude
+
+set demos = "kpnocoude$demos/"
+
+# Slitproc
+cl < doslit$doslittasks.cl
+task sparams = "kpnocoude$sparams.par"
+
+# Dofibers
+task do3fiber = "kpnocoude$do3fiber.cl"
+task params = "kpnocoude$params.par"
+
+task proc = "srcfibers$proc.cl"
+task fibresponse = "srcfibers$fibresponse.cl"
+task arcrefs = "srcfibers$arcrefs.cl"
+task doarcs = "srcfibers$doarcs.cl"
+task doalign = "srcfibers$doalign.cl"
+task skysub = "srcfibers$skysub.cl"
+task batch = "srcfibers$batch.cl"
+task getspec = "srcfibers$getspec.cl"
+task listonly = "srcfibers$listonly.cl"
+task mkfibers = "srcfibers$mkfibers.cl"
+task apscript = "srcfibers$x_apextract.e"
+
+task msresp1d = "specred$msresp1d.cl"
+
+# Onedspec tasks
+task autoidentify,
+ continuum,
+ deredden,
+ dispcor,
+ dopcor,
+ refspectra,
+ sapertures,
+ sarith,
+ sflip,
+ slist,
+ specplot,
+ specshift,
+ splot = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Different default parameters
+task calibrate,
+ identify,
+ reidentify,
+ sensfunc,
+ standard = "kpnocoude$x_onedspec.e"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ apflatten,
+ apnormalize,
+ aprecenter,
+ apresize,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apdefault = "apextract$apdefault.par"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apflat1 = "apextract$apflat1.par"
+task apnorm1 = "apextract$apnorm1.par"
+
+# Longslit tasks
+task illumination,
+ response = "twodspec$longslit/x_longslit.e"
+task background = "generic$background.cl"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Demos
+task demos = "demos$demos.cl"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apflat1, apnorm1, dispcor1, sparams
+hidetask mkfibers, params, doalign
+hidetask apscript, proc, batch, arcrefs, doarcs, getspec, listonly, fibresponse
+
+clbye
diff --git a/noao/imred/kpnocoude/kpnocoude.hd b/noao/imred/kpnocoude/kpnocoude.hd
new file mode 100644
index 00000000..71d12a4d
--- /dev/null
+++ b/noao/imred/kpnocoude/kpnocoude.hd
@@ -0,0 +1,5 @@
+# Help directory for the KPNOCOUDE package.
+
+$doc = "./doc/"
+
+do3fiber hlp=doc$do3fiber.hlp
diff --git a/noao/imred/kpnocoude/kpnocoude.men b/noao/imred/kpnocoude/kpnocoude.men
new file mode 100644
index 00000000..a5f29a3a
--- /dev/null
+++ b/noao/imred/kpnocoude/kpnocoude.men
@@ -0,0 +1,41 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apflatten - Remove overall spectral and profile shapes from flat fields
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ background - Fit and subtract a line or column background
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit and normalize the continuum of multispec spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify arc lines and determine a dispersion function
+ illumination - Determine illumination calibration
+ msresp1d - Create fiber response spectra from flat field and sky spectra
+ refspectra - Assign reference spectra to observations
+ reidentify - Reidentify arc lines and determine new dispersion functions
+ response - Determine response calibration
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Copy spectra including aperture selection and format changes
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum headers
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Plot and analyze spectra
+ standard - Identify standard stars to be used in sensitivity calc
+
+ do3fiber - Process KPNO coude three fiber spectra
+ doslit - Process KPNO coude slit spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/kpnocoude/kpnocoude.par b/noao/imred/kpnocoude/kpnocoude.par
new file mode 100644
index 00000000..b653f211
--- /dev/null
+++ b/noao/imred/kpnocoude/kpnocoude.par
@@ -0,0 +1,15 @@
+# KPNOCOUDE parameter file
+extinction,s,h,onedstds$kpnoextinct.dat,,,Extinction file
+caldir,s,h,onedstds$spec50cal/,,,Standard star calibration directory
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,"",,,Record number extensions
+version,s,h,"KPNOCOUDE V3: January 1992"
diff --git a/noao/imred/kpnocoude/params.par b/noao/imred/kpnocoude/params.par
new file mode 100644
index 00000000..239a0efd
--- /dev/null
+++ b/noao/imred/kpnocoude/params.par
@@ -0,0 +1,61 @@
+line,i,h,INDEF,,,Default dispersion line
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-3.,,,"Lower aperture limit relative to center"
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,2,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- SCATTERED LIGHT PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,Upper rejection threshold
+nsubaps,i,h,1,1,,"Number of subapertures
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,yes,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,20,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$thar.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,3.5,,,"Arc line widths in pixels"
+cradius,r,h,4.,,,Centering radius in pixels
+i_function,s,h,"legendre","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,3,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?"
diff --git a/noao/imred/kpnocoude/reidentify.par b/noao/imred/kpnocoude/reidentify.par
new file mode 100644
index 00000000..74d57b7f
--- /dev/null
+++ b/noao/imred/kpnocoude/reidentify.par
@@ -0,0 +1,36 @@
+# Parameters for reidentify task.
+
+reference,s,a,,,,Reference image
+images,s,a,,,,Images to be reidentified
+interactive,s,h,"no","no|yes|NO|YES",,Interactive fitting?
+section,s,h,"middle line",,,Section to apply to two dimensional images
+newaps,b,h,yes,,,Reidentify apertures in images not in reference?
+override,b,h,no,,,Override previous solutions?
+refit,b,h,yes,,,"Refit coordinate function?
+"
+trace,b,h,no,,,Trace reference image?
+step,s,h,"10",,,Step in lines/columns/bands for tracing an image
+nsum,s,h,"10",,,Number of lines/columns/bands to sum
+shift,s,h,"0.",,,Shift to add to reference features (INDEF to search)
+search,r,h,0.,,,Search radius
+nlost,i,h,3,0,,"Maximum number of features which may be lost
+"
+cradius,r,h,5.,,,Centering radius
+threshold,r,h,10.,0.,,Feature threshold for centering
+addfeatures,b,h,no,,,Add features from a line list?
+coordlist,f,h,linelists$thar.dat,,,User coordinate list
+match,r,h,0.2,,,Coordinate list matching limit
+maxfeatures,i,h,100,,,Maximum number of features for automatic identification
+minsep,r,h,4.,0.,,"Minimum pixel separation
+"
+database,f,h,database,,,Database
+logfiles,s,h,"logfile",,,List of log files
+plotfile,s,h,"",,,Plot file for residuals
+verbose,b,h,no,,,Verbose output?
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,"Graphics cursor input
+"
+answer,s,q,"yes","no|yes|NO|YES",,Fit dispersion function interactively?
+crval,s,q,,,,"Approximate coordinate (at reference pixel)"
+cdelt,s,q,,,,"Approximate dispersion"
+aidpars,pset,h,,,,"Automatic identification algorithm parameters"
diff --git a/noao/imred/kpnocoude/sensfunc.par b/noao/imred/kpnocoude/sensfunc.par
new file mode 100644
index 00000000..94f84f4a
--- /dev/null
+++ b/noao/imred/kpnocoude/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,yes,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,)_.extinction,,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/kpnocoude/sparams.par b/noao/imred/kpnocoude/sparams.par
new file mode 100644
index 00000000..06ccbb94
--- /dev/null
+++ b/noao/imred/kpnocoude/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE PARAMETERS -- "
+lower,r,h,-3.,,,Lower aperture limit relative to center
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,1,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none",,,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- BACKGROUND SUBTRACTION PARAMETERS --"
+background,s,h,"fit","none|average|median|minimum|fit",,Background to subtract
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,"Background function"
+b_order,i,h,1,,,"Background function order"
+b_sample,s,h,"-10:-6,6:10",,,"Background sample regions"
+b_naverage,i,h,-100,,,"Background average or median"
+b_niterate,i,h,1,0,,"Background rejection iterations"
+b_low,r,h,3.,0.,,"Background lower rejection sigma"
+b_high,r,h,3.,0.,,"Background upper rejection sigma
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$thar.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,3.5,,,"Arc line widths in pixels"
+cradius,r,h,4.,,,Centering radius in pixels
+i_function,s,h,"legendre","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,3,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
diff --git a/noao/imred/kpnocoude/standard.par b/noao/imred/kpnocoude/standard.par
new file mode 100644
index 00000000..99b98877
--- /dev/null
+++ b/noao/imred/kpnocoude/standard.par
@@ -0,0 +1,21 @@
+input,f,a,,,,Input image file root name
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,no,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/kpnoslit/Revisions b/noao/imred/kpnoslit/Revisions
new file mode 100644
index 00000000..2d50d554
--- /dev/null
+++ b/noao/imred/kpnoslit/Revisions
@@ -0,0 +1,32 @@
+.help revisions Jun88 noao.imred.kpnoslit
+.nf
+
+=====
+V2.12
+=====
+
+imred$kpnoslit/standard.par
+ Added blackbody query parameters. (5/2/02, Valdes)
+
+========
+V2.11.3b
+========
+
+imred$kpnoslit/demos/mkdoslit.cl
+ Made the ARTDATA package parameters explicit (4/15/97, Valdes)
+
+imred$kpnoslit/sparams.par
+ Changed match from 10 to -3. (4/5/96, Valdes)
+
+imred$kpnoslit/kpnoslit.cl
+imred$kpnoslit/kpnoslit.men
+ Added background, illumination, response, apflatten, apnormalize.
+ (12/29/94, Valdes)
+
+imred$kpnoslit/demos/xdoslit.dat
+ Incorrectly used hz2 as a standard star instead of hz44. (8/10/92, Valdes)
+
+=======
+V2.10.1
+=======
+.endhelp
diff --git a/noao/imred/kpnoslit/calibrate.par b/noao/imred/kpnoslit/calibrate.par
new file mode 100644
index 00000000..e09457a2
--- /dev/null
+++ b/noao/imred/kpnoslit/calibrate.par
@@ -0,0 +1,13 @@
+# CALIBRATE parameter file
+
+input,s,a,,,,Input spectra to calibrate
+output,s,a,,,,Output calibrated spectra
+extinct,b,h,yes,,,Apply extinction correction?
+flux,b,h,yes,,,Apply flux calibration?
+extinction,s,h,)_.extinction,,,Extinction file
+observatory,s,h,)_.observatory,,,Observatory of observation
+ignoreaps,b,h,yes,,,Ignore aperture numbers in flux calibration?
+sensitivity,s,h,"sens",,,Image root name for sensitivity spectra
+fnu,b,h,no,,,Create spectra having units of FNU?
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
diff --git a/noao/imred/kpnoslit/demos/demoarc1.dat b/noao/imred/kpnoslit/demos/demoarc1.dat
new file mode 100644
index 00000000..dd6b0161
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demoarc1.dat
@@ -0,0 +1,38 @@
+OBJECT = 'First comp ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 60. / actual integration time
+DARKTIME= 60. / total elapsed time
+IMAGETYP= 'comp ' / object, dark, bias, etc.
+DATE-OBS= '1991-11-26T12:11:30.00' / date (dd/mm/yy) of obs.
+UT = '12:11:30.00 ' / universal time
+ST = '09:04:54.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:09:03.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '48.760 ' / zenith distance
+AIRMASS = 0. / airmass
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'test ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDMEAN = 179.398
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnoslit/demos/demoarc2.dat b/noao/imred/kpnoslit/demos/demoarc2.dat
new file mode 100644
index 00000000..5c2d8050
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demoarc2.dat
@@ -0,0 +1,38 @@
+OBJECT = 'Last comp ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 60. / actual integration time
+DARKTIME= 60. / total elapsed time
+IMAGETYP= 'comp ' / object, dark, bias, etc.
+DATE-OBS= '1991-11-26T12:41:30.00' / date (dd/mm/yy) of obs.
+UT = '12:41:30.00 ' / universal time
+ST = '09:34:54.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:09:03.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '48.760 ' / zenith distance
+AIRMASS = 0. / airmass
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'test ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDMEAN = 179.398
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnoslit/demos/demoflat.dat b/noao/imred/kpnoslit/demos/demoflat.dat
new file mode 100644
index 00000000..0d5ed308
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demoflat.dat
@@ -0,0 +1,38 @@
+OBJECT = 'Flat field ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 3. / actual integration time
+DARKTIME= 3. / total elapsed time
+IMAGETYP= 'flat ' / object, dark, bias, etc.
+DATE-OBS= '1991-11-26T11:11:30.00' / date (dd/mm/yy) of obs.
+UT = '11:11:30.00 ' / universal time
+ST = '09:04:54.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:09:03.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '48.760 ' / zenith distance
+AIRMASS = 0. / airmass
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'test ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDMEAN = 179.398
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnoslit/demos/demoobj1.dat b/noao/imred/kpnoslit/demos/demoobj1.dat
new file mode 100644
index 00000000..bf571862
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demoobj1.dat
@@ -0,0 +1,37 @@
+OBJECT = 'V640Mon 4500 ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 1200. / actual integration time
+DARKTIME= 1200. / total elapsed time
+IMAGETYP= 'object ' / object, dark, bias, etc.
+DATE-OBS= '1991-11-26T12:19:55.00' / date (dd/mm/yy) of obs.
+UT = '12:19:55.00 ' / universal time
+ST = '09:13:15.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:08:52.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '44.580 ' / zenith distance
+AIRMASS = 0. / airmass
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'test ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnoslit/demos/demos.cl b/noao/imred/kpnoslit/demos/demos.cl
new file mode 100644
index 00000000..5b065c51
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demos.cl
@@ -0,0 +1,18 @@
+# DEMOS -- Run specified demo provided a demo file exists.
+
+procedure demos (demoname)
+
+file demoname {prompt="Demo name"}
+
+begin
+ file demo, demofile
+
+ if ($nargs == 0 && mode != "h")
+ type ("demos$demos.men")
+ demo = demoname
+ demofile = "demos$" // demo // ".cl"
+ if (access (demofile))
+ cl (< demofile)
+ else
+ error (1, "Unknown demo " // demo)
+end
diff --git a/noao/imred/kpnoslit/demos/demos.men b/noao/imred/kpnoslit/demos/demos.men
new file mode 100644
index 00000000..b35ba7be
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demos.men
@@ -0,0 +1,4 @@
+ MENU of KPNOSLIT Demonstrations
+
+ doslit - Quick test of DOSLIT (no comments, no delays)
+ mkdoslit - Make DOSLIT test data
diff --git a/noao/imred/kpnoslit/demos/demos.par b/noao/imred/kpnoslit/demos/demos.par
new file mode 100644
index 00000000..4181ed59
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demos.par
@@ -0,0 +1,2 @@
+demoname,f,a,"",,,"Demo name"
+mode,s,h,"ql",,,
diff --git a/noao/imred/kpnoslit/demos/demostd1.dat b/noao/imred/kpnoslit/demos/demostd1.dat
new file mode 100644
index 00000000..bf571862
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/demostd1.dat
@@ -0,0 +1,37 @@
+OBJECT = 'V640Mon 4500 ' / object name
+OBSERVAT= 'KPNO ' / observatory
+OBSERVER= 'Massey ' / observers
+COMMENTS= 'Final New Ice ' / comments
+EXPTIME = 1200. / actual integration time
+DARKTIME= 1200. / total elapsed time
+IMAGETYP= 'object ' / object, dark, bias, etc.
+DATE-OBS= '1991-11-26T12:19:55.00' / date (dd/mm/yy) of obs.
+UT = '12:19:55.00 ' / universal time
+ST = '09:13:15.00 ' / sidereal time
+RA = '06:37:02.00 ' / right ascension
+DEC = '06:08:52.00 ' / declination
+EPOCH = 1991.9 / epoch of ra and dec
+ZD = '44.580 ' / zenith distance
+AIRMASS = 0. / airmass
+TELESCOP= 'kpcdf ' / telescope name
+DETECTOR= 'te1k ' / detector
+PREFLASH= 0 / preflash time, seconds
+GAIN = 5.4 / gain, electrons per adu
+DWELL = 5 / sample integration time
+RDNOISE = 3.5 / read noise, electrons per adu
+DELAY0 = 0 / time delay after each pixel
+DELAY1 = 0 / time delay after each row
+CAMTEMP = -111 / camera temperature
+DEWTEMP = -183 / dewar temperature
+CCDSEC = '[97:134,2:1023]' / orientation to full frame
+ORIGSEC = '[1:1024,1:1024] ' / original size full frame
+CCDSUM = '1 1 ' / on chip summation
+INSTRUME= 'test ' / instrument
+APERTURE= '250micron slit ' / aperture
+TVFILT = '4-96 ' / tv filter
+DISPAXIS= '2 ' / dispersion axis
+GRATPOS = 4624.3 / grating position
+TRIM = 'Nov 26 5:44 Trim data section is [23:60,2:1023]'
+OVERSCAN= 'Nov 26 5:44 Overscan section is [103:133,2:1023] with mean=611.1
+ZEROCOR = 'Nov 26 5:44 Zero level correction image is Zerof'
+CCDPROC = 'Nov 26 5:44 CCD processing done'
diff --git a/noao/imred/kpnoslit/demos/doslit.cl b/noao/imred/kpnoslit/demos/doslit.cl
new file mode 100644
index 00000000..b2ecbde2
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/doslit.cl
@@ -0,0 +1,14 @@
+# Create demo data if needed.
+
+cl (< "demos$mkdoslit.cl")
+
+unlearn doslit
+sparams.extras = no
+sparams.coordlist = "linelists$idhenear.dat"
+delete demologfile,demoplotfile verify=no >& dev$null
+
+# Execute playback.
+if (substr (envget("stdgraph"), 1, 6) == "xgterm")
+ stty (playback="demos$xgdoslit.dat", nlines=24, verify=no, delay=0)
+else
+ error (1, "Playback for current terminal type not available")
diff --git a/noao/imred/kpnoslit/demos/mkdoslit.cl b/noao/imred/kpnoslit/demos/mkdoslit.cl
new file mode 100644
index 00000000..63acb123
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/mkdoslit.cl
@@ -0,0 +1,28 @@
+# Create demo data if needed.
+
+artdata
+artdata.nxc = 5
+artdata.nyc = 5
+artdata.nxsub = 10
+artdata.nysub = 10
+artdata.nxgsub = 5
+artdata.nygsub = 5
+artdata.dynrange = 100000.
+artdata.psfrange = 10.
+artdata.ranbuf = 0
+
+mkexample ("longslit", "demoflat", oseed=4, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoflat", "demos$demoflat.dat", append=no, verbose=no)
+mkexample ("longslit", "demoarc1", oseed=5, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc1", "demos$demoarc1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoobj1", oseed=1, nseed=1,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoobj1", "demos$demoobj1.dat", append=no, verbose=no)
+mkexample ("longslit", "demostd1", oseed=2, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demostd1", "demos$demostd1.dat", append=no, verbose=no)
+mkexample ("longslit", "demoarc2", oseed=5, nseed=2,
+ errors=no, verbose=yes, list=no)
+mkheader ("demoarc2", "demos$demoarc2.dat", append=no, verbose=no)
diff --git a/noao/imred/kpnoslit/demos/xgdoslit.dat b/noao/imred/kpnoslit/demos/xgdoslit.dat
new file mode 100644
index 00000000..0cd3a8d9
--- /dev/null
+++ b/noao/imred/kpnoslit/demos/xgdoslit.dat
@@ -0,0 +1,71 @@
+\O=NOAO/IRAF IRAFX valdes@puppis Mon 14:58:37 15-Nov-93
+\T=xgterm
+\G=xgterm
+epar\skpnoslit\n
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+y\r
+demologfile\r
+demoplotfile\r
+^Z
+epar\sdoslit\n
+demoobj1\r
+demoarc1,demoarc2\r
+\r
+demostd1\r
+rdnoise\r
+gain\r
+\r
+\r
+5700\r
+6.2\r
+\r
+y\r
+y\r
+y\r
+y\r
+y\r
+^Z
+doslit\sredo+\n
+\n
+\n
+b/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+\r
+\r
+q/<-5\s\s\s\s/=(.\s=\r
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+y\n
+4210\n
+7350\n
+6.2\n
+\n
+n\n
+\n
+f/<-5\s\s\s\s/=(.\s=\r
+l/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+N\n
+hz44\n
+\n
+q/<-5\s\s\s\s/=(.\s=\r
+Y\n
+q/<-5\s\s\s\s/=(.\s=\r
+q/<-5\s\s\s\s/=(.\s=\r
+gkimos\sdemoplotfile\snx=3\sny=3\sdev=stdgraph\n
+q/<-5\s\s\s\s/=(.\s=\r
diff --git a/noao/imred/kpnoslit/kpnoslit.cl b/noao/imred/kpnoslit/kpnoslit.cl
new file mode 100644
index 00000000..f9d79756
--- /dev/null
+++ b/noao/imred/kpnoslit/kpnoslit.cl
@@ -0,0 +1,69 @@
+#{ KPNOSLIT package definition
+
+# Define KPNOSLIT package
+package kpnoslit
+
+set demos = "kpnoslit$demos/"
+
+# Slitproc
+cl < doslit$doslittasks.cl
+task sparams = "kpnoslit$sparams.par"
+
+# Onedspec tasks
+task autoidentify,
+ continuum,
+ deredden,
+ dispcor,
+ dopcor,
+ identify,
+ refspectra,
+ reidentify,
+ sarith,
+ sflip,
+ slist,
+ splot,
+ specplot,
+ specshift = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Different default parameters
+task calibrate,
+ sensfunc,
+ standard = "kpnoslit$x_onedspec.e"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ apflatten,
+ apnormalize,
+ aprecenter,
+ apresize,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apdefault = "apextract$apdefault.par"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apflat1 = "apextract$apflat1.par"
+task apnorm1 = "apextract$apnorm1.par"
+
+# Longslit tasks
+task illumination,
+ response = "twodspec$longslit/x_longslit.e"
+task background = "generic$background.cl"
+
+# Demos
+task demos = "demos$demos.cl"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apflat1, apnorm1, dispcor1, sparams
+
+clbye()
diff --git a/noao/imred/kpnoslit/kpnoslit.hd b/noao/imred/kpnoslit/kpnoslit.hd
new file mode 100644
index 00000000..e5125e4f
--- /dev/null
+++ b/noao/imred/kpnoslit/kpnoslit.hd
@@ -0,0 +1 @@
+# Help directory for the KPNOSLIT package.
diff --git a/noao/imred/kpnoslit/kpnoslit.men b/noao/imred/kpnoslit/kpnoslit.men
new file mode 100644
index 00000000..891ff0b7
--- /dev/null
+++ b/noao/imred/kpnoslit/kpnoslit.men
@@ -0,0 +1,38 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apflatten - Remove overall spectral and profile shapes from flat fields
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ background - Fit and subtract a line or column background
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Apply extinction and flux calibrations to spectra
+ continuum - Fit and normalize the continuum of multispec spectra
+ deredden - Apply interstellar extinction corrections
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ identify - Identify arc lines and determine a dispersion function
+ illumination - Determine illumination calibration
+ refspectra - Assign reference spectra to observations
+ reidentify - Reidentify arc lines and determine new dispersion functions
+ response - Determine response calibration
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Copy spectra including aperture selection and format changes
+ sensfunc - Create sensitivity function
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sflip - Flip data and/or dispersion coordinates in spectra
+ slist - List spectrum headers
+ specplot - Stack and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Plot and analyze spectra
+ standard - Identify standard stars to be used in sensitivity calc
+
+ doslit - Process slit spectra
+ demos - Demonstrations and tests
diff --git a/noao/imred/kpnoslit/kpnoslit.par b/noao/imred/kpnoslit/kpnoslit.par
new file mode 100644
index 00000000..bd61b8de
--- /dev/null
+++ b/noao/imred/kpnoslit/kpnoslit.par
@@ -0,0 +1,15 @@
+# KPNOSLIT parameter file
+extinction,s,h,onedstds$kpnoextinct.dat,,,Extinction file
+caldir,s,h,onedstds$spec50cal/,,,Standard star calibration directory
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,"",,,Record number extensions
+version,s,h,"KPNOSLIT V3: January 1992"
diff --git a/noao/imred/kpnoslit/sensfunc.par b/noao/imred/kpnoslit/sensfunc.par
new file mode 100644
index 00000000..94f84f4a
--- /dev/null
+++ b/noao/imred/kpnoslit/sensfunc.par
@@ -0,0 +1,17 @@
+standards,s,a,std,,,Input standard star data file (from STANDARD)
+sensitivity,s,a,"sens",,,Output root sensitivity function imagename
+apertures,s,h,"",,,Aperture selection list
+ignoreaps,b,h,yes,,,Ignore apertures and make one sensitivity function?
+logfile,f,h,"logfile",,,Output log for statistics information
+extinction,f,h,)_.extinction,,,Extinction file
+newextinction,f,h,"extinct.dat",,,Output revised extinction file
+observatory,s,h,)_.observatory,,,Observatory of data
+function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,Fitting function
+order,i,h,6,1,,Order of fit
+interactive,b,h,yes,,,Determine sensitivity function interactively?
+graphs,s,h,"sr",,,Graphs per frame
+marks,s,h,"plus cross box",,,Data mark types (marks deleted added)
+colors,s,h,"2 1 3 4",,,Colors (lines marks deleted added)
+cursor,*gcur,h,"",,,Graphics cursor input
+device,s,h,"stdgraph",,,Graphics output device
+answer,s,q, yes,"no|yes|NO|YES",,"(no|yes|NO|YES)"
diff --git a/noao/imred/kpnoslit/sparams.par b/noao/imred/kpnoslit/sparams.par
new file mode 100644
index 00000000..cfdf1f4f
--- /dev/null
+++ b/noao/imred/kpnoslit/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE PARAMETERS -- "
+lower,r,h,-3.,,,Lower aperture limit relative to center
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,1,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none",,,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- BACKGROUND SUBTRACTION PARAMETERS --"
+background,s,h,"fit","none|average|median|minimum|fit",,Background to subtract
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,"Background function"
+b_order,i,h,1,,,"Background function order"
+b_sample,s,h,"-10:-6,6:10",,,"Background sample regions"
+b_naverage,i,h,-100,,,"Background average or median"
+b_niterate,i,h,1,0,,"Background rejection iterations"
+b_low,r,h,3.,0.,,"Background lower rejection sigma"
+b_high,r,h,3.,0.,,"Background upper rejection sigma
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,10.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,1,1,,"Order of dispersion function"
+i_niterate,i,h,1,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
diff --git a/noao/imred/kpnoslit/standard.par b/noao/imred/kpnoslit/standard.par
new file mode 100644
index 00000000..99b98877
--- /dev/null
+++ b/noao/imred/kpnoslit/standard.par
@@ -0,0 +1,21 @@
+input,f,a,,,,Input image file root name
+output,s,a,std,,,Output flux file (used by SENSFUNC)
+samestar,b,h,yes,,,Same star in all apertures?
+beam_switch,b,h,no,,,Beam switch spectra?
+apertures,s,h,"",,,Aperture selection list
+bandwidth,r,h,INDEF,,,Bandpass widths
+bandsep,r,h,INDEF,,,Bandpass separation
+fnuzero,r,h,3.68e-20,,,Absolute flux zero point
+extinction,s,h,)_.extinction,,,Extinction file
+caldir,s,h,)_.caldir,,,Directory containing calibration data
+observatory,s,h,)_.observatory,,,Observatory for data
+interact,b,h,yes,,,Graphic interaction to define new bandpasses
+graphics,s,h,"stdgraph",,,Graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+star_name,s,q,,,,Star name in calibration list
+airmass,r,q,,1.,,Airmass
+exptime,r,q,,,,Exposure time (seconds)
+mag,r,q,,,,Magnitude of star
+magband,s,q,,"U|B|V|R|I|J|H|K|L|Lprime|M",,"Magnitude type"
+teff,s,q,,,,Effective temperature or spectral type
+answer,s,q,no,,,"(no|yes|NO|YES|NO!|YES!)"
diff --git a/noao/imred/mkpkg b/noao/imred/mkpkg
new file mode 100644
index 00000000..30057f0d
--- /dev/null
+++ b/noao/imred/mkpkg
@@ -0,0 +1,20 @@
+# Make the IMRED package.
+
+update:
+ $echo "------------------ IMRED.BIAS ----------------------"
+ $call update@bias
+ $echo "------------------ IMRED.CCDRED --------------------"
+ $call update@ccdred
+ $echo "------------------ IMRED.CRUTIL --------------------"
+ $call update@crutil
+ $echo "------------------ IMRED.DTOI ----------------------"
+ $call update@dtoi
+ $echo "------------------ IMRED.GENERIC -------------------"
+ $call update@generic
+ $echo "------------------ IMRED.IRRED ---------------------"
+ $call update@irred
+ $echo "------------------ IMRED.QUADRED -------------------"
+ $call update@quadred
+ $echo "------------------ IMRED.VTEL ----------------------"
+ $call update@vtel
+ ;
diff --git a/noao/imred/quadred/doc/package.hlp b/noao/imred/quadred/doc/package.hlp
new file mode 100644
index 00000000..ffea9446
--- /dev/null
+++ b/noao/imred/quadred/doc/package.hlp
@@ -0,0 +1,142 @@
+.help package Sep93 quadred
+.ih
+NAME
+quadred -- CCD reductions of images in multi-amp readout format
+.ih
+SYNOPSIS
+This package is a varient of \fBccdred\fR that operates on a
+multi-amplifier data format in which the various amplifier readouts are
+recorded in sections of a regular two-dimensional image. The CTIO Arcon
+dual or quad readout data is an example of this format. See help on
+\fBquadformat\fR for details. Most tasks are the same as in the
+\fBccdred\fR package. The difference is the version of \fBccdproc\fR in
+this package also works on the multi-amp format. An alternative to using
+this version of \fBccdproc\fR is \fBquadproc\fR and the alternate
+calibration combining tasks based on it.
+.ih
+USAGE
+quadred
+.ih
+PARAMETERS
+The following are "package" parameters. This means that they apply to
+many of the tasks in this package.
+
+.ls pixeltype = "real real"
+Output pixel datatype and calculation datatype. When images are processed
+or created the output pixel datatype is determined by this parameter.
+The allowed types are "short" for short integer, and "real" for real
+floating point. The calculation datatypes are also short and real with a
+default of real if none is specified.
+.le
+.ls verbose = no
+Print log information to the standard output?
+.le
+.ls logfile = "logfile"
+Text log file. If no filename is specified then no log file is kept.
+.le
+.ls plotfile = ""
+Log metacode plot file for the overscan bias vector fits. If
+no filename is specified then no metacode plot file is kept.
+.le
+.ls backup = ""
+Backup prefix for backup images. If no prefix is specified then no backup
+images are kept when processing. If specified then the backup image
+has the specified prefix.
+.le
+.ls instrument = ""
+CCD instrument translation file. This is usually set with \fBsetinstrument\fR.
+.le
+.ls ssfile = "subsets"
+Subset translation file used to define the subset identifier. See
+\fBsubsets\fR for more.
+.le
+.ls graphics = "stdgraph"
+Interactive graphics output device when fitting the overscan bias vector.
+.le
+.ls cursor = ""
+Graphics cursor input. The default is the standard graphics cursor.
+.le
+.ls version = "Version 1.0 - August 22, 2001"
+Package version.
+.le
+.ih
+DESCRIPTION
+The \fBquadred\fR package contains all basic tasks necessary for the
+reduction of CCD data in single image format. This includes both single
+amplifier readout data and multi-amplifier data stored as sections in a
+single two-dimensional image. One example of this type of multi-amplifier
+data is the CTIO Arcon "dual" or "quad" readout format. The format is
+described in the help topic \fBquadformat\fR. This package is a
+combination of two earlier packages called \fBxccdred\fR and
+\fBared.quad\fR, each of which is a variant of the original \fBccdred\fR
+package.
+
+The raw data contains overscan/prescan regions in the image. For multi-amp
+data there are multiple overscan/prescan regions. The first steps in
+processing the data are to use the overscan/prescan regions to determine
+the amplifier bias, subtract this bias, and trim the regions out of
+the data. Once this is done the data are just simple images. It is
+the special step of dealing with the overscan/prescan regions with
+the multi-amp format that is different from the standard \fBccdred\fR
+package.
+
+Two methods are provided for dealing with the special format. One is a
+special version of \fBccdproc\fR which processes the sections directly. If
+one uses this task then the reduction steps appear identical to using the
+\fBccdred\fR package. The other method is to use the tasks \fBquadproc\fR,
+\fBqzerocombine\fR, \fBqdarkcombine\fR, and \fBqflatcombine\fR. The latter
+calibration combining tasks are the same as the standard versions except
+they use \fBquadproc\fR instead of \fBccdproc\fR. The task \fBquadproc\fR
+operates internally by splitting the multiple regions into temporary single
+amplifier images, processing them with \fBccdproc\fR, and then joining the
+pieces back together.
+
+The recommended method is to use \fBccdproc\fR. However, the \fBquadproc\fR
+related tasks have a history of usage for CTIO data and so may also be
+used.
+
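+For example, a complete reduction with the quad version of \fBccdproc\fR
+might use a sequence like the following sketch (the image names are
+illustrative and the calibration image names and processing switches are
+taken from the task parameters as in the standard \fBccdred\fR package):
+
+.nf
+    qu> zerocombine zero*.imh
+    qu> flatcombine flat*.imh
+    qu> ccdproc obj*.imh
+.fi
+
+The same reduction with the alternative tasks would substitute
+\fBqzerocombine\fR, \fBqflatcombine\fR, and \fBquadproc\fR for the tasks
+above.
+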
+The \fBquadred\fR package itself has several parameters which are
+common to many of the tasks in the package. When images are processed or
+new images are created the output pixel datatype is that specified by the
+parameter \fBpixeltype\fR. Note that CCD processing replaces the original
+image by the processed image so the pixel type of the CCD images may change
+during processing. It is unlikely that real images will be processed to
+short images but the reverse is quite likely. Processing images from short
+to real pixel datatypes will generally increase the amount of disk space
+required (a factor of 2 on most computers).
+
+The tasks produce log output which may be printed on the standard
+output (the terminal unless redirected) and appended to a file. The
+parameter \fIverbose\fR determines whether processing information
+is printed. This may be desirable initially, but when using background
+jobs the verbose output should be turned off. The user may look at
+the end of the log file (for example with \fBtail\fR) to determine
+the status of the processing.
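+
+For example, assuming the default log file name "logfile", the end of the
+log might be examined with
+
+.nf
+    qu> tail logfile
+.fi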
+
+The package was designed to work with data from many different observatories
+and instruments. In order to accomplish this an instrument translation
+file is used to define a mapping between the package parameters and
+the particular image header format. The instrument translation file
+is specified to the package by the parameter \fIinstrument\fR. This
+parameter is generally set by the task \fBsetinstrument\fR. The other
+file used is a subset file. This is generally created and maintained
+by the package and the user need not do anything. For more sophisticated
+users see \fBinstruments\fR and \fBsubsets\fR.
+
+The package has very little graphics output. The exception is the overscan
+bias subtraction. The bias vector is logged in the metacode plot file if
+given. The plot file may be examined with the tasks in the \fBplot\fR
+package such as \fBgkimosaic\fR. When interactively fitting the overscan
+vector the graphics input and output devices must be specified. The
+defaults should apply in most cases.
+
+Because processing replaces the input image by the processed image it may
+be desired to save the original image. This may be done by specifying a
+backup prefix with the parameter \fIbackup\fR. For example, if the prefix
+is "orig" and the image is "ccd001", the backup image will be
+"origccd001". The prefix may be a directory but if so it must end with '/'
+or '$' (for logical directories) and the directory must already exist.
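+
+For example, the following sketch (assuming images in the "imh" disk
+format) saves the original of "ccd001" with the prefix "orig":
+
+.nf
+    qu> quadred.backup = "orig"
+    qu> ccdproc ccd001
+    qu> dir orig*
+    origccd001.imh
+.fi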
+.ih
+SEE ALSO
+quadformat, mscred
+.endhelp
diff --git a/noao/imred/quadred/doc/qhistogram.hlp b/noao/imred/quadred/doc/qhistogram.hlp
new file mode 100644
index 00000000..a34e412c
--- /dev/null
+++ b/noao/imred/quadred/doc/qhistogram.hlp
@@ -0,0 +1,37 @@
+.help qhistogram Aug01 noao.imred.quadred
+.ih
+NAME
+qhistogram -- Compute and plot histograms for multi-amp data
+.ih
+USAGE
+qhistogram images
+.ih
+PARAMETERS
+.ls images
+List of image names in \fBquadformat\fR.
+.le
+.ls window = "datasec" (datasec|trimsec|biassec)
+Type of section to use for histogram. The choices are "datasec" for the
+amplifier section which includes the bias if any is present, "trimsec" for
+the trim section, and "biassec" for the bias section.
+.le
+
+The remaining parameters come from the \fBimhistogram\fR task.
+.ih
+DESCRIPTION
+This script task uses the \fBquadsections\fR task to break the
+\fBquadformat\fR data into separate sections and runs the \fBimhistogram\fR
+task on the sections. The graphics output is collected onto a single page.
+.ih
+EXAMPLES
+
+1. To graph the histograms (default behavior).
+
+.nf
+ qu> qhist quad0072
+ [graph appears]
+.fi
+.ih
+SEE ALSO
+quadformat, quadsections, imhistogram
+.endhelp
diff --git a/noao/imred/quadred/doc/qstatistics.hlp b/noao/imred/quadred/doc/qstatistics.hlp
new file mode 100644
index 00000000..222ae778
--- /dev/null
+++ b/noao/imred/quadred/doc/qstatistics.hlp
@@ -0,0 +1,52 @@
+.help qstatistics Aug01 noao.imred.quadred
+.ih
+NAME
+qstatistics -- Compute and print statistics for multi-amp data
+.ih
+USAGE
+qstatistics images
+.ih
+PARAMETERS
+.ls images
+List of image names in \fBquadformat\fR.
+.le
+.ls window = "datasec" (datasec|trimsec|biassec)
+Type of section to output. The choices are "datasec" for the amplifier
+section which includes the bias if any is present, "trimsec" for the trim
+section, and "biassec" for the bias section.
+.le
+
+The remaining parameters come from the \fBimstatistics\fR task.
+.ih
+DESCRIPTION
+This script task uses the \fBquadsections\fR task to break the
+\fBquadformat\fR data into separate sections and runs the \fBimstatistics\fR
+task on the sections.
+.ih
+EXAMPLES
+
+1. To compute the mean and stddev of the data section.
+
+.nf
+ qu> qstat quad0072 fields=image,mean,stddev
+ # IMAGE MEAN STDDEV
+ quad0072[1:1034,1:1024] 5537. 2647.
+ quad0072[1163:2196,1:1024] 6210. 5439.
+ quad0072[1:1034,1025:2048] 5364. 2535.
+ quad0072[1163:2196,1025:2048] 5862. 1327.
+.fi
+
+2. To compute the mean and stddev of the bias section.
+
+.nf
+ qu> qstat quad0072 fields=image,mean,stddev window=biassec
+ # IMAGE MEAN STDDEV
+ quad0072[1045:1098,1:1024] 713. 1.272
+ quad0072[1099:1152,1:1024] 516.2 1.425
+ quad0072[1045:1098,1025:2048] 554.3 1.347
+ quad0072[1099:1152,1025:2048] 530.3 1.377
+.fi
+.ih
+SEE ALSO
+quadformat, quadsections, imstatistics
+.endhelp
diff --git a/noao/imred/quadred/doc/quadformat.hlp b/noao/imred/quadred/doc/quadformat.hlp
new file mode 100644
index 00000000..eb5fbfbd
--- /dev/null
+++ b/noao/imred/quadred/doc/quadformat.hlp
@@ -0,0 +1,392 @@
+.help quadformat Aug01 imred.quadred
+.ih
+NAME
+quadformat - Description of the special multi-amplifier CCD format
+.ih
+DESCRIPTION
+CCDs may be read out from multiple amplifiers at the same time to increase
+the readout speed. This produces multiple images of rectangular regions in
+the full CCD exposure. The amplifier readout images may be recorded in
+various ways. One way is as extensions in a multiextension FITS file.
+This type of format can be reduced using the MSCRED package.
+
+Another way is to paste the regions into a single two-dimensional image.
+This, along with specific keywords to describe the locations of the
+regions, constitutes the \fIquadformat\fR described here and used by the
+QUADRED package. The term "quad" originates from the possibility of using
+four amplifiers in quadrants, but the format also accommodates any other
+number of amplifiers.
+
+It is important to realize that this is a special format only as long as
+the overscan or prescan data is included in the image data. Once this
+information is used and removed as part of the processing the resulting
+image can be treated in the same way as a single amplifier CCD image.
+However, the image can still contain the format keywords allowing the
+regions from the different amplifiers to be identified and extracted as
+needed.
+
+The \fIquadformat\fR consists of a single 2D image for a single CCD
+exposure. The image storage format may be any standard image type such
+as imh or fits. Within the image are regions containing the CCD
+pixel data and regions containing overscan or prescan, which we will
+call bias regions, for each amplifier. The \fIquadformat\fR requires
+the bias regions to be attached to the CCD regions such that a single
+rectangular region contains both.
+
+Generally the rectangular regions are of equal size in order to sequence
+the amplifiers simultaneously. However, it is possible for the regions
+to be unequal in cases of subregion readouts with certain CCD controllers.
+The figure below illustrates "quad" and "dual" readouts with equal
+size regions.
+
+.nf
+ +-----+-+-+-----+ +-----+-+-+-----+ +----------+-+
+ | D !B|B! D | | D !B|B! D | | D !B|
+ | 3 !3|4! 4 | | 1 !1|2! 2 | | 2 !2|
+ | ! | ! | | ! | ! | | ! |
+ +-----+-+-+-----+ | ! | ! | +----------+-+
+ | D !B|B! D | | ! | ! | | D !B|
+ | 1 !1|2! 2 | | ! | ! | | 1 !1|
+ | ! | ! | | ! | ! | | ! |
+ +-----+-+-+-----+ +-----+-+-+-----+ +----------+-+
+.fi
+
+The areas labeled D are the data sections and those labeled B are the
+bias sections. The data and bias sections are matched by the amplifier
+labels, which are 1-4 in these examples. The combination of the data and
+bias sections for an amplifier is called the amplifier section.
+
+The regions are identified in the header by various keywords. There is
+a header translation facility which allows for alternative keyword names.
+Below we describe the default keyword names in the absence of a translation.
+The number of regions and the amplifier labels are described by the
+string keyword AMPLIST. The value is a string of space separated
+amplifier labels. For the above four amplifier example it would be
+
+.nf
+ AMPLIST = '1 2 3 4'
+.fi
+
+For CTIO data the labels are '11 12 21 22'. Note that the labels
+are appended to rootnames so they should be relatively short.
+
+The amplifier labels are appended to various root names. The important
+ones define "section" keywords. The values are image sections that
+describe regions in a raster such as the image or the CCD. The format
+of a section follows the standard IRAF notation "[c1:c2,l1:l2]" where
+c1 and c2 are inclusive column endpoints and l1 and l2 are inclusive
+line endpoints.
+
+The various sections are defined below. The labels again show the default
+untranslated keyword roots.
+
+.ls ASEC
+The section of the image containing the amplifier readout. This is the
+combination of the data and bias regions as shown in the figures.
+.le
+.ls DSEC
+The section of the image containing the actual CCD data exclusive of
+bias data. In the figures these are the D regions.
+.le
+.ls BSEC
+The section of the image containing the bias data. In the figures these
+are the B regions.
+.le
+.ls TSEC
+The section of the image containing the useful CCD data. This defines
+a "trimming" area and lies within the data section. It may also be
+the same as the data region. During trimming the final image will only
+include the regions in the trim sections. Note that it generally does
+not make sense to trim between amplifier regions but does make sense to
+trim regions at the edges of the CCD.
+.le
+.ls CSEC
+The section of the CCD corresponding to the data section in the image.
+The CCD is considered an ideal raster (without bias regions) and a
+section corresponds to the pixels in the CCD. The CCD section must be
+the same size as the data section. It is the CCD sections that define
+how the amplifiers will be pieced together to form a single image
+after trimming the bias region.
+.le
+
+There may be other keyword root names for things such as gains which
+have the amplifier labels appended. However, none of these are used
+by the current software. Example image headers are given
+in the EXAMPLES section.
+
+There is a limitation in the current software that the regions be recorded
+without horizontal or vertical flips. In other words, where amplifiers
+from opposite corners are used, some of them must be flipped by the
+data acquisition system before recording them in this \fBquadformat\fR.
+
+.ih
+EXAMPLES
+
+1. The following is an example of a full 2048x2048 CCD readout with
+four amplifiers at CTIO.
+
+.nf
+qu> imhead quad0020
+quad0020[2196,2048][ushort]: IC 1257 5290 180s
+No bad pixels, min=435., max=61973.
+Line storage mode, physdim [2304,2048], length of user area 3079 s.u.
+Created Thu 08:35:57 23-Aug-2001, Last modified Thu 08:35:57 23-Aug-2001
+Pixel file "HDR$pixels/quad0020.pix" [ok]
+'KPNO-IRAF' /
+'06-07-99' /
+IRAF-MAX= 6.197300E4 / DATA MAX
+IRAF-MIN= 4.350000E2 / DATA MIN
+IRAF-BPX= 16 / DATA BITS/PIXEL
+IRAFTYPE= 'USHORT ' / PIXEL TYPE
+OPICNUM = 123 / Original picture number
+HDR_REV = '2.000 13Feb96 (add mode and group to hdrs)' /
+IMAGETYP= 'OBJECT ' / Type of picture (object, dark, etc.)
+DETECTOR= 'Site2K_6' / Detector (CCD type, photon counter, etc.)
+PREFLASH= 0.000000 / Preflash time in secs
+CCDSUM = '1 1 ' / On chip summation (X,Y)
+DATE-OBS= '07/07/99' / Date (dd/mm/yy) of observation
+UTSHUT = '01:14:40.0' / UT of shutter open
+UT = ' 1:14:41.50' / UT of TCS coords
+OBSERVAT= 'CTIO ' / Origin of data
+TELESCOP= 'CTIO 1.5 meter telescope' / Specific system
+NAMPSYX = '2 2 ' / Num amps in y & x (eg. '2 2'=quad)
+AMPLIST = '11 21 12 22' / Readout order in y,x
+ASEC11 = '[1:1098,1:1024]' / Section read with Amp11
+CSEC11 = '[1:1034,1:1024]' / Section in full CCD for DSEC11
+DSEC11 = '[1:1034,1:1024]' / Image area in raw frame for Amp11
+TSEC11 = '[11:1034,1:1024]' / Trim section definition for Amp11
+BSEC11 = '[1045:1098,1:1024]' / Bias section definition for Amp11
+BSEC12 = '[1099:1152,1:1024]' / Bias section definition for Amp12
+ASEC12 = '[1099:2196,1:1024]' / Section read with Amp12
+CSEC12 = '[1035:2068,1:1024]' / Section in full CCD for DSEC12
+DSEC12 = '[1163:2196,1:1024]' / Image area in raw frame for Amp12
+TSEC12 = '[1163:2186,1:1024]' / Trim section definition for Amp12
+ASEC21 = '[1:1098,1025:2048]' / Section read with Amp21
+CSEC21 = '[1:1034,1025:2048]' / Section in full CCD for DSEC21
+DSEC21 = '[1:1034,1025:2048]' / Image area in raw frame for Amp21
+TSEC21 = '[11:1034,1025:2048]' / Trim section definition for Amp21
+BSEC21 = '[1045:1098,1025:2048]' / Bias section definition for Amp21
+BSEC22 = '[1099:1152,1025:2048]' / Bias section definition for Amp22
+ASEC22 = '[1099:2196,1025:2048]' / Section read with Amp22
+CSEC22 = '[1035:2068,1025:2048]' / Section in full CCD for DSEC22
+DSEC22 = '[1163:2196,1025:2048]' / Image area in raw frame for Amp22
+TSEC22 = '[1163:2186,1025:2048]' / Trim section definition for Amp22
+WAVEFILE= 'Obs Tue Jul 6 20:11:59 1999' /
+NOTE = 'WARNING: Lower amps reaching full well before ADCs saturate' /
+WAVEMODE= 'MPP OverlapXmit EarlyReset' / Waveform mode switches on
+GTRON22 = 4.100 / (e-) predicted read noise, upper right
+GTRON21 = 3.900 / (e-) predicted read noise, upper left
+GTRON12 = 4.200 / (e-) predicted read noise, lower right
+GTRON11 = 4.200 / (e-) predicted read noise, lower left
+GTGAIN22= 2.800 / (e-/ADU), predicted gain, upper right
+GTGAIN21= 3.100 / (e-/ADU) predicted gain, upper left
+GTGAIN12= 2.900 / (e-/ADU) predicted gain, lower right
+GTGAIN11= 3.200 / (e-/ADU) predicted gain, lower left
+GTINDEX = 2 / Gain selection (index into Gain Table)
+PIXELT = 29520 / (ns) unbinned pixel read time
+DCS_TIME= 7000 / (ns) Double Correlated Sample time
+RA = '17:27:10.82' / right ascension (telescope)
+DEC = '-7:06:35.40' / declination (telescope)
+EPOCH = 2000.0 / epoch of RA & DEC
+ZD = 35.9 / zenith distance (degrees)
+HA = '-01:57:23.7' / hour angle (H:M:S)
+ST = '15:29:46.00' / sidereal time
+AIRMASS = 1.234 / airmass
+EXPTIME = 180.000 / Exposure time in secs
+DARKTIME= 181.309 / Total elapsed time in secs
+OBSERVER= 'Jacoby' / Observers
+PROPID = '92' / Proposal Id
+COMMENT Globular PNe
+TELID = 'ct60' / CTIO 1.5-m Telescope
+ARCONVER= '17Oct97ver7_22' / Arcon software version
+COMMENT INSTRUMENT PARAMETERS
+INSTRUME= 'cfccd' / cassegrain direct imager
+FILTER1 = 'dia' / Filter in wheel one
+FNAME1 = 'diaphragm' / Full name of filter in wheel1
+FILTER2 = 'ocon' / Filter in wheel two
+FNAME2 = 'O cont' / Full name of filter in wheel2
+FILTERS = 'dia ocon' / Filter positions
+TELFOCUS= 57550 / Telescope focus
+XPIXSIZE= 0.432 / Pixel size in X (arcsec/pix)
+YPIXSIZE= 0.432 / Pixel size in Y (arcsec/pix)
+RECID = 'ct60.990707.011817' / NOAO Archive record ID
+.fi
+
+2. The following is a more complex readout of a region where the
+full 2Kx2K CCD is not read out and where even the regions are not the
+same size.
+
+.nf
+qu> imhead quad0013
+quad0013[1686,1538][ushort]: R sky flat 7s
+No bad pixels, min=393., max=65535.
+Line storage mode, physdim [1792,1538], length of user area 3079 s.u.
+Created Thu 08:34:00 23-Aug-2001, Last modified Thu 08:34:00 23-Aug-2001
+Pixel file "HDR$pixels/quad0013.pix" [ok]
+'KPNO-IRAF' /
+'06-07-99' /
+IRAF-MAX= 6.553500E4 / DATA MAX
+IRAF-MIN= 3.930000E2 / DATA MIN
+IRAF-BPX= 16 / DATA BITS/PIXEL
+IRAFTYPE= 'USHORT ' / PIXEL TYPE
+OPICNUM = 15 / Original picture number
+HDR_REV = '2.000 13Feb96 (add mode and group to hdrs)' /
+IMAGETYP= 'SKY FLAT' / Type of picture (object, dark, etc.)
+DETECTOR= 'Site2K_6' / Detector (CCD type, photon counter, etc.)
+PREFLASH= 0.000000 / Preflash time in secs
+CCDSUM = '1 1 ' / On chip summation (X,Y)
+DATE-OBS= '06/07/99' / Date (dd/mm/yy) of observation
+UTSHUT = '22:25:22.0' / UT of shutter open
+UT = '22:25:34.00' / UT of TCS coords
+OBSERVAT= 'CTIO ' / Origin of data
+TELESCOP= 'CTIO 1.5 meter telescope' / Specific system
+NAMPSYX = '2 2 ' / Num amps in y & x (eg. '2 2'=quad)
+AMPLIST = '11 21 12 22' / Readout order in y,x
+ASEC11 = '[1:843,1:769]' / Section read with Amp11
+CSEC11 = '[256:1034,256:1024]' / Section in full CCD for DSEC11
+DSEC11 = '[1:779,1:769]' / Image area in raw frame for Amp11
+TSEC11 = '[11:779,1:769]' / Trim section definition for Amp11
+BSEC11 = '[790:843,1:769]' / Bias section definition for Amp11
+BSEC12 = '[844:897,1:769]' / Bias section definition for Amp12
+ASEC12 = '[844:1686,1:769]' / Section read with Amp12
+CSEC12 = '[1035:1813,256:1024]' / Section in full CCD for DSEC12
+DSEC12 = '[908:1686,1:769]' / Image area in raw frame for Amp12
+TSEC12 = '[908:1418,1:769]' / Trim section definition for Amp12
+ASEC21 = '[1:843,770:1538]' / Section read with Amp21
+CSEC21 = '[256:1034,1025:1793]' / Section in full CCD for DSEC21
+DSEC21 = '[1:779,770:1538]' / Image area in raw frame for Amp21
+TSEC21 = '[11:779,770:1280]' / Trim section definition for Amp21
+BSEC21 = '[790:843,770:1538]' / Bias section definition for Amp21
+BSEC22 = '[844:897,770:1538]' / Bias section definition for Amp22
+ASEC22 = '[844:1686,770:1538]' / Section read with Amp22
+CSEC22 = '[1035:1813,1025:1793]' / Section in full CCD for DSEC22
+DSEC22 = '[908:1686,770:1538]' / Image area in raw frame for Amp22
+TSEC22 = '[908:1418,770:1280]' / Trim section definition for Amp22
+WAVEFILE= 'Obs Tue Jul 6 18:07:56 1999' /
+NOTE = 'WARNING: Lower amps reaching full well before ADCs saturate' /
+WAVEMODE= 'MPP OverlapXmit EarlyReset' / Waveform mode switches on
+GTRON22 = 4.100 / (e-) predicted read noise, upper right
+GTRON21 = 3.900 / (e-) predicted read noise, upper left
+GTRON12 = 4.200 / (e-) predicted read noise, lower right
+GTRON11 = 4.200 / (e-) predicted read noise, lower left
+GTGAIN22= 2.800 / (e-/ADU), predicted gain, upper right
+GTGAIN21= 3.100 / (e-/ADU) predicted gain, upper left
+GTGAIN12= 2.900 / (e-/ADU) predicted gain, lower right
+GTGAIN11= 3.200 / (e-/ADU) predicted gain, lower left
+GTINDEX = 2 / Gain selection (index into Gain Table)
+PIXELT = 29520 / (ns) unbinned pixel read time
+DCS_TIME= 7000 / (ns) Double Correlated Sample time
+RA = '14:53:52.67' / right ascension (telescope)
+DEC = '-19:20:10.70' / declination (telescope)
+EPOCH = 2000.0 / epoch of RA & DEC
+ZD = 32.1 / zenith distance (degrees)
+HA = '-02:13:40.3' / hour angle (H:M:S)
+ST = '12:40:10.80' / sidereal time
+AIRMASS = 1.180 / airmass
+EXPTIME = 7.000 / Exposure time in secs
+DARKTIME= 8.239 / Total elapsed time in secs
+OBSERVER= 'Jacoby' / Observers
+PROPID = '92' / Proposal Id
+COMMENT
+TELID = 'ct60' / CTIO 1.5-m Telescope
+ARCONVER= '17Oct97ver7_22' / Arcon software version
+COMMENT INSTRUMENT PARAMETERS
+INSTRUME= 'cfccd' / cassegrain direct imager
+FILTER1 = 'dia' / Filter in wheel one
+FNAME1 = 'diaphragm' / Full name of filter in wheel1
+FILTER2 = 'r' / Filter in wheel two
+FNAME2 = 'R' / Full name of filter in wheel2
+FILTERS = 'dia r' / Filter positions
+TELFOCUS= 0 / Telescope focus
+XPIXSIZE= 0.432 / Pixel size in X (arcsec/pix)
+YPIXSIZE= 0.432 / Pixel size in Y (arcsec/pix)
+RECID = 'ct60.990706.222551' / NOAO Archive record ID
+.fi
+
+3. The following is for the raw image of example 2 after it has been
+processed by CCDPROC. Note that the various bias, trim, data, and CCD
+section keywords have been removed. The AMPLIST and ASEC keywords remain
+and may be used to split or evaluate the individual amplifier regions with
+tasks such as QUADSECTIONS,
+QUADSPLIT, and QSTATISTICS.
+
+.nf
+qu> imhead quad0013
+quad0013[1280,1280][real]: R sky flat 7s
+No bad pixels, min=unknown, max=unknown
+Line storage mode, physdim [1280,1280], length of user area 2795 s.u.
+Created Fri 13:29:40 24-Aug-2001, Last modified Fri 13:29:40 24-Aug-2001
+Pixel file "HDR$pixels/quad0013.pix" [ok]
+'KPNO-IRAF' /
+'06-07-99' /
+New copy of quad0013
+IRAF-MAX= 6.553500E4 / DATA MAX
+IRAF-MIN= 3.930000E2 / DATA MIN
+IRAF-BPX= 16 / DATA BITS/PIXEL
+IRAFTYPE= 'USHORT ' / PIXEL TYPE
+OPICNUM = 15 / Original picture number
+HDR_REV = '2.000 13Feb96 (add mode and group to hdrs)' /
+IMAGETYP= 'SKY FLAT' / Type of picture (object, dark, etc.)
+DETECTOR= 'Site2K_6' / Detector (CCD type, photon counter, etc.)
+PREFLASH= 0.000000 / Preflash time in secs
+CCDSUM = '1 1 ' / On chip summation (X,Y)
+DATE-OBS= '06/07/99' / Date (dd/mm/yy) of observation
+UTSHUT = '22:25:22.0' / UT of shutter open
+UT = '22:25:34.00' / UT of TCS coords
+OBSERVAT= 'CTIO ' / Origin of data
+TELESCOP= 'CTIO 1.5 meter telescope' / Specific system
+NAMPSYX = '2 2 ' / Num amps in y & x (eg. '2 2'=quad)
+AMPLIST = '11 21 12 22' / Readout order in y,x
+ASEC11 = '[1:769,1:769]' / Section read with Amp11
+ASEC12 = '[770:1280,1:769]' / Section read with Amp12
+ASEC21 = '[1:769,770:1280]' / Section read with Amp21
+ASEC22 = '[770:1280,770:1280]' / Section read with Amp22
+WAVEFILE= 'Obs Tue Jul 6 18:07:56 1999' /
+NOTE = 'WARNING: Lower amps reaching full well before ADCs saturate' /
+WAVEMODE= 'MPP OverlapXmit EarlyReset' / Waveform mode switches on
+GTRON22 = 4.100 / (e-) predicted read noise, upper right
+GTRON21 = 3.900 / (e-) predicted read noise, upper left
+GTRON12 = 4.200 / (e-) predicted read noise, lower right
+GTRON11 = 4.200 / (e-) predicted read noise, lower left
+GTGAIN22= 2.800 / (e-/ADU), predicted gain, upper right
+GTGAIN21= 3.100 / (e-/ADU) predicted gain, upper left
+GTGAIN12= 2.900 / (e-/ADU) predicted gain, lower right
+GTGAIN11= 3.200 / (e-/ADU) predicted gain, lower left
+GTINDEX = 2 / Gain selection (index into Gain Table)
+PIXELT = 29520 / (ns) unbinned pixel read time
+DCS_TIME= 7000 / (ns) Double Correlated Sample time
+RA = '14:53:52.67' / right ascension (telescope)
+DEC = '-19:20:10.70' / declination (telescope)
+EPOCH = 2000.0 / epoch of RA & DEC
+ZD = 32.1 / zenith distance (degrees)
+HA = '-02:13:40.3' / hour angle (H:M:S)
+ST = '12:40:10.80' / sidereal time
+AIRMASS = 1.180 / airmass
+EXPTIME = 7.000 / Exposure time in secs
+DARKTIME= 8.239 / Total elapsed time in secs
+OBSERVER= 'Jacoby' / Observers
+PROPID = '92' / Proposal Id
+COMMENT
+TELID = 'ct60' / CTIO 1.5-m Telescope
+ARCONVER= '17Oct97ver7_22' / Arcon software version
+COMMENT INSTRUMENT PARAMETERS
+INSTRUME= 'cfccd' / cassegrain direct imager
+FILTER1 = 'dia' / Filter in wheel one
+FNAME1 = 'diaphragm' / Full name of filter in wheel1
+FILTER2 = 'r' / Filter in wheel two
+FNAME2 = 'R' / Full name of filter in wheel2
+FILTERS = 'dia r' / Filter positions
+TELFOCUS= 0 / Telescope focus
+XPIXSIZE= 0.432 / Pixel size in X (arcsec/pix)
+YPIXSIZE= 0.432 / Pixel size in Y (arcsec/pix)
+RECID = 'ct60.990706.222551' / NOAO Archive record ID
+TRIM = 'Aug 24 13:29 Trim multiple overscan sections'
+OVERSCAN= 'Aug 24 13:29 Overscan is [790:843,1:769] with mean=714.3438'
+OVRSCN2 = 'Aug 24 13:29 Overscan is [790:843,770:1538] with mean=554.01'
+OVRSCN3 = 'Aug 24 13:29 Overscan is [844:897,1:769] with mean=519.7755'
+OVRSCN4 = 'Aug 24 13:29 Overscan is [844:897,770:1538] with mean=531.69'
+CCDSEC = '[266:1545,256:1535]'
+CCDMEAN = 9727.605
+CCDMEANT= 683126983
+CCDPROC = 'Aug 24 13:29 CCD processing done'
+.fi
+.endhelp
diff --git a/noao/imred/quadred/doc/quadjoin.hlp b/noao/imred/quadred/doc/quadjoin.hlp
new file mode 100644
index 00000000..2a3a075e
--- /dev/null
+++ b/noao/imred/quadred/doc/quadjoin.hlp
@@ -0,0 +1,43 @@
+.help quadjoin Aug01 noao.imred.quadred
+.ih
+NAME
+quadjoin -- Rejoin single amplifier images into quadformat data
+.ih
+USAGE
+quadjoin input
+.ih
+PARAMETERS
+.ls input
+Root name of images to be joined. Extensions based on the AMPLIST
+keyword are applied to the root name. This task does not
+allow a list of input root names.
+.le
+.ls output = ""
+Output image name. If one is not given then the input root name is used.
+.le
+.ls delete = no
+Delete subimages on completion?
+.le
+.ih
+DESCRIPTION
+Images split from "quadformat" (see the help topics \fBquadformat\fR and
+\fBquadsplit\fR) are rejoined into "quadformat". The input images
+have a common root name to which extensions given by the amplifier
+labels in the AMPLIST keyword are appended. The output name may be specified
+or the input root name may be used.
+.ih
+EXAMPLES
+1. To join a split set of images:
+
+.nf
+ qu> dir quad0072*
+ quad0072.11.imh quad0072.21.imh
+ quad0072.12.imh quad0072.22.imh
+ qu> quadjoin quad0072 delete+
+ qu> dir quad0072*
+ quad0072.imh
+.fi
+.ih
+SEE ALSO
+quadformat, quadsplit
+.endhelp
diff --git a/noao/imred/quadred/doc/quadscale.hlp b/noao/imred/quadred/doc/quadscale.hlp
new file mode 100644
index 00000000..7493bcf6
--- /dev/null
+++ b/noao/imred/quadred/doc/quadscale.hlp
@@ -0,0 +1,37 @@
+.help quadscale Aug01 noao.imred.quadred
+.ih
+NAME
+quadscale -- Scale amplifier sections by separate gains
+.ih
+USAGE
+quadscale input output
+.ih
+PARAMETERS
+.ls input
+Input image in \fBquadformat\fR to be scaled.
+.le
+.ls output
+Output scaled image in \fBquadformat\fR.
+.le
+.ls gain11 = 1., gain12 = 1., gain21 = 1., gain22 = 1.
+Gain factors for each quadrant.
+.le
+.ls operation = "multiply" (multiply|divide)
+The operation to apply with the gains.
+.le
+.ih
+DESCRIPTION
+This task multiplies or divides each amplifier section of an image in
+\fBquadformat\fR by its gain factor.
+.ih
+EXAMPLES
+
+1. To multiply by different gain factors.
+
+.nf
+ qu> quadscale quad0072 test gain11=1.2 gain12=1.3 gain21=1.4
+.fi
+.ih
+SEE ALSO
+quadformat
+.endhelp
diff --git a/noao/imred/quadred/doc/quadsections.hlp b/noao/imred/quadred/doc/quadsections.hlp
new file mode 100644
index 00000000..2735e3d5
--- /dev/null
+++ b/noao/imred/quadred/doc/quadsections.hlp
@@ -0,0 +1,81 @@
+.help quadsections Aug01 noao.imred.quadred
+.ih
+NAME
+quadsections -- Produce image sections for quadformat images
+.ih
+USAGE
+quadsections images
+.ih
+PARAMETERS
+.ls images
+List of image names for images in \fBquadformat\fR.
+.le
+.ls window = "datasec" (datasec|trimsec|biassec)
+Type of section to output. The choices are "datasec" for the amplifier
+section which includes the bias if any is present, "trimsec" for the trim
+section, and "biassec" for the bias section.
+.le
+.ls section = ""
+Overlap section. The output sections will be the parts of the
+amplifier windows which fall within this section.
+.le
+.ls template = ""
+Template for producing the output. The template replaces occurrences of
+$I with the image name, $S with the section, and $A with the amplifier
+label. If none is specified then the default template "$I$S\\n" is
+used, which produces the image name with the section appended, one entry
+per line. The special character sequence "\n" is the new-line and the
+extra "\" is required to pass the new-line through to the formatting routine.
+.le
+.ih
+DESCRIPTION
+Images in "quadformat" (see help topic \fBquadformat\fR) are broken down
+into sections and written to the standard output in a specified format.
+.ih
+EXAMPLES
+1. To print the default data sections.
+
+.nf
+ qu> quadsec quad0072
+ quad0072[1:1034,1:1024]
+ quad0072[1163:2196,1:1024]
+ quad0072[1:1034,1025:2048]
+ quad0072[1163:2196,1025:2048]
+.fi
+
+2. To apply an overlap section.
+
+.nf
+ qu> quadsec quad0072 section=[1000:2000,1000:2000]
+ quad0072[1000:1034,1000:1024]
+ quad0072[1163:2000,1000:1024]
+ quad0072[1000:1034,1025:2000]
+ quad0072[1163:2000,1025:2000]
+.fi
+
+3. To print the trim sections.
+
+.nf
+ qu> quadsec quad0072 window=trimsec
+ quad0072[11:1034,1:1024]
+ quad0072[1163:2186,1:1024]
+ quad0072[11:1034,1025:2048]
+ quad0072[1163:2186,1025:2048]
+.fi
+
+
+4. To make a custom output.
+
+.nf
+ qu> quadsec quad0072 template="image=$I, section=$S, amplifier=$A\\n"
+ image=quad0072, section=[1:1034,1:1024], amplifier=11
+ image=quad0072, section=[1163:2196,1:1024], amplifier=12
+ image=quad0072, section=[1:1034,1025:2048], amplifier=21
+ image=quad0072, section=[1163:2196,1025:2048], amplifier=22
+ qu> quadsec quad0072 template="$I.$A,"
+ quad0072.11,quad0072.12,quad0072.21,quad0072.22,
+.fi
+.ih
+SEE ALSO
+quadformat
+.endhelp
diff --git a/noao/imred/quadred/doc/quadsplit.hlp b/noao/imred/quadred/doc/quadsplit.hlp
new file mode 100644
index 00000000..4a0adf66
--- /dev/null
+++ b/noao/imred/quadred/doc/quadsplit.hlp
@@ -0,0 +1,49 @@
+.help quadsplit Aug01 noao.imred.quadred
+.ih
+NAME
+quadsplit -- Split quadformat data into single amplifier images
+.ih
+USAGE
+quadsplit input
+.ih
+PARAMETERS
+.ls input
+Image name of \fIquadformat\fR image to be split. This task does not
+allow a list of input names.
+.le
+.ls output = ""
+Output root name to which the AMPLIST amplifier identifiers will be
+appended to form the split images. If no output name is given then
+the input name is used as the root name.
+.le
+.ls clobber = yes
+Clobber any existing images?
+.le
+.ih
+DESCRIPTION
+Images in "quadformat" (see help topic \fBquadformat\fR) are separated
+into images containing data from only one amplifier. The output images
+have a common root name and then an extension given by the amplifier
+labels in the AMPLIST keyword. The output root name may be specified
+or default to the input name.
+
+In addition to producing the individual images, keywords are added that
+are understood by the standard \fBccdproc\fR task for single amplifier
+CCD reductions.
+
+The task \fBquadjoin\fR may be used to rejoin images that were split
+by this task.
+.ih
+EXAMPLES
+1. To split an image:
+
+.nf
+ qu> quadsplit quad0072
+ qu> dir quad0072*
+ quad0072.11.imh quad0072.21.imh quad0072.imh
+ quad0072.12.imh quad0072.22.imh
+.fi
+.ih
+SEE ALSO
+quadformat, quadjoin
+.endhelp
diff --git a/noao/imred/quadred/mkpkg b/noao/imred/quadred/mkpkg
new file mode 100644
index 00000000..3a55a03a
--- /dev/null
+++ b/noao/imred/quadred/mkpkg
@@ -0,0 +1,8 @@
+# Make the package.
+
+$call update@src
+$exit
+
+update:
+ $call update@src
+ ;
diff --git a/noao/imred/quadred/quadred.cl b/noao/imred/quadred/quadred.cl
new file mode 100644
index 00000000..74592010
--- /dev/null
+++ b/noao/imred/quadred/quadred.cl
@@ -0,0 +1,68 @@
+#{ QUADRED -- QUAD CCD Reduction Package
+
+set ccddb = "ccdred$ccddb/"
+
+package quadred
+
+# Special version of CCDPROC.
+
+set quadsrc = "quadred$src/ccdproc/"
+
+task ccdproc = quadsrc$x_quadred.e
+task qccdproc = quad$x_ccdred.e
+
+# Task from the CTIO QUAD package.
+
+set quad = "quadred$src/quad/"
+
+task quadsplit,
+ quadjoin,
+ quadscale,
+ quadsections,
+ ccddelete,
+ ccdprcselect,
+ ccdssselect,
+ ccdsection,
+ qpcalimage,
+ qpselect,
+ gainmeasure,
+ ccdgetparam = "quad$x_quad.e"
+
+task quadproc = "quad$quadproc.cl"
+task qproc = "quad$qproc.cl"
+task qnoproc = "quad$qnoproc.cl"
+task qstatistics = "quad$qstatistics.cl"
+task qhistogram = "quad$qhistogram.cl"
+
+task setinstrument = "quad$setinstrument.cl"
+
+hidetask ccdgetparam, ccddelete, ccdprcselect, ccdssselect, ccdsection
+hidetask qpcalimage, qpselect, qproc, qnoproc, qccdproc
+
+# Special versions which run quadproc rather than ccdproc
+task qdarkcombine = quad$qdarkcombine.cl
+task qflatcombine = quad$qflatcombine.cl
+task qzerocombine = quad$qzerocombine.cl
+
+
+# Tasks from the standard CCDRED package.
+
+task badpiximage,
+ ccdgroups,
+ ccdhedit,
+ ccdinstrument,
+ ccdlist,
+ ccdmask,
+ combine,
+ mkfringecor,
+ mkillumcor,
+ mkillumflat,
+ mkskycor,
+ mkskyflat = ccdred$x_ccdred.e
+
+task darkcombine = ccdred$darkcombine.cl
+task flatcombine = ccdred$flatcombine.cl
+#task setinstrument = ccdred$setinstrument.cl
+task zerocombine = ccdred$zerocombine.cl
+
+clbye()
diff --git a/noao/imred/quadred/quadred.hd b/noao/imred/quadred/quadred.hd
new file mode 100644
index 00000000..b542fec9
--- /dev/null
+++ b/noao/imred/quadred/quadred.hd
@@ -0,0 +1,22 @@
+# Help directory for the QUADRED package.
+
+$doc = "./doc/"
+$cdoc = "./src/ccdproc/doc/"
+$qdoc = "./src/quad/doc/"
+
+package hlp=doc$quad.hlp
+quadformat hlp=doc$quadformat.hlp
+quadsplit hlp=doc$quadsplit.hlp
+quadjoin hlp=doc$quadjoin.hlp
+quadsections hlp=doc$quadsections.hlp
+qstatistics hlp=doc$qstatistics.hlp
+qhistogram hlp=doc$qhistogram.hlp
+quadscale hlp=doc$quadscale.hlp
+
+ccdproc hlp=cdoc$ccdproc.hlp
+
+quadproc hlp=qdoc$quadproc.hlp
+#quadreadout hlp=qdoc$quadreadout.hlp
+#quadman hlp=qdoc$quadman.hlp
+
+revisions sys=Revisions
diff --git a/noao/imred/quadred/quadred.men b/noao/imred/quadred/quadred.men
new file mode 100644
index 00000000..6a0225a5
--- /dev/null
+++ b/noao/imred/quadred/quadred.men
@@ -0,0 +1,61 @@
+ SPECIAL VERSION OF CCDRED PACKAGE FOR MULTI-AMPLIFIER CCD IMAGES
+
+The package has a special quad version of CCDPROC that processes
+multi-amplifier CCD images in a particular format. See help topic
+"quadformat" for a description of the format. The task QUADPROC is
+largely obsoleted by the quad version of CCDPROC but may be used by
+those familiar with the ARED.QUAD package or to use features in the
+standard CCDPROC that are not in the quad version. Those features
+include line-by-line overscan functions.
+
+
+ STANDARD CCDRED TASKS
+
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdinstrument - Review and edit instrument translation files
+ ccdlist - List CCD processing information
+ ccdmask - Make a bad pixel mask from CCD data
+ ccdproc - Process CCD images (including quadformat data)
+ combine - Combine CCD images
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+ setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+
+ SPECIAL TASKS FOR MULTI-AMPLIFIER CCD IMAGES IN QUADFORMAT
+
+ gainmeasure - Measure gains in quadformat images
+ quadscale - Scale sections by gain factors
+ qstatistics - Calculate image statistics for multi-amplifier CCD images
+ quadsections - Produce image section list for sections of quadformat images
+ qhistogram - Make histogram of multi-amplifier CCD image
+ quadsplit - Split quadformat data into individual single amplifier images
+ quadjoin - Rejoin single amplifier images produced by quadsplit
+
+ ALTERNATIVE TASKS
+
+ quadproc - Process multi-amplifier CCD images (see also ccdproc)
+ qdarkcombine - Combine and process dark count images using quadproc
+ qflatcombine - Combine and process flat field images using quadproc
+ qzerocombine - Combine and process zero level images using quadproc
+
+ There is no separate help for the quadproc versions of the combining
+ tasks. See the help for the standard versions.
+
+ ADDITIONAL HELP TOPICS
+
+ package - Package parameters and overview
+ quadformat - Format for multi-amplifier CCD images
+ ccdgeometry - Discussion of CCD coordinate/geometry keywords
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ subsets - Description of CCD subsets
diff --git a/noao/imred/quadred/quadred.par b/noao/imred/quadred/quadred.par
new file mode 100644
index 00000000..05c8d112
--- /dev/null
+++ b/noao/imred/quadred/quadred.par
@@ -0,0 +1,13 @@
+# QUADRED package parameter file
+
+proctask,s,h,"ccdproc","ccdproc|quadproc",,Processing task
+pixeltype,s,h,"real real",,,Output and calculation pixel datatypes
+verbose,b,h,no,,,Print log information to the standard output?
+logfile,f,h,"logfile",,,Text log file
+plotfile,f,h,"",,,Log metacode plot file
+backup,s,h,"",,,Backup directory or prefix
+instrument,s,h,"",,,CCD instrument file
+ssfile,s,h,"subsets",,,Subset translation file
+graphics,s,h,"stdgraph",,,Interactive graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+version,s,h,"V1.0: August 22, 2001"
diff --git a/noao/imred/quadred/src/Revisions b/noao/imred/quadred/src/Revisions
new file mode 100644
index 00000000..bbdfcc7d
--- /dev/null
+++ b/noao/imred/quadred/src/Revisions
@@ -0,0 +1,42 @@
+.help revisions Jun88 noao.imred.quadred
+.nf
+
+ccdproc/ccdcache.x
+ The 'bufs' pointer was declared as TY_REAL instead of TY_SHORT (5/4/13)
+
+quad/qproc.cl
+quad/quad.cl
+quad/quadjoin.x
+quad/quadproc.cl
+quad/setinstrument.cl
+quad/qccdproc.par +
+../quadred.cl
+../quadred.men
+ When using quadproc the latest CCDPROC is used with the alias QCCDPROC.
+ This is to allow using the line-by-line overscan function. Other features
+ in CCDPROC would also be available. It was too hard to update the
+ quad version of CCDPROC. (3/12/08, Valdes)
+
+=====
+V2.14
+=====
+
+=======
+V2.12.1
+=======
+
+quad/qproc.cl
+ For some reason the quadsplit call was commented out. So when quadproc
+ is run the pieces are not split and then the quadjoin call results in
+ a divide by zero error. The call was uncommented. Due to my lack
+ of understanding with QUAD and that multipiece CCDPROC is used which
+ does not support trims, the quadsplit with trimming is not used.
+ (7/5/02, Valdes)
+
+=====
+V2.12
+=====
+
+New package consisting of XCCDRED and ARED.QUAD was added.
+
+.endhelp
diff --git a/noao/imred/quadred/src/ccdproc/calimage.x b/noao/imred/quadred/src/ccdproc/calimage.x
new file mode 100644
index 00000000..8a6007c1
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/calimage.x
@@ -0,0 +1,367 @@
+include <error.h>
+include <imset.h>
+include "ccdtypes.h"
+
+define SZ_SUBSET 16 # Maximum size of subset string
+define IMAGE Memc[$1+($2-1)*SZ_FNAME] # Image string
+define SUBSET Memc[$1+($2-1)*SZ_SUBSET] # Subset string
+
+# CAL_IMAGE -- Return a calibration image for a specified input image.
+# CAL_OPEN -- Open the calibration image list.
+# CAL_CLOSE -- Close the calibration image list.
+# CAL_LIST -- Add images to the calibration image list.
+#
+# The open procedure is called first to get the calibration image
+# lists and add them to an internal list. Calibration images from the
+# input list are also added so that calibration images may be specified
+# either from the calibration image list parameters or in the input image list.
+# Existence errors and duplicate calibration images are ignored.
+# Validity checks are made when the calibration images are requested.
+#
+# During processing the calibration image names are requested for each input
+# image. The calibration image list is searched for a calibration image of
+# the right type and subset. If more than one is found the first one is
+# returned and a warning given for the others. The warning is only issued
+# once. If no calibration image is found then an error is returned.
+#
+# The calibration image list must be closed at the end of processing the
+# input images.
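+#
+# A typical calling sequence (a sketch only; processing details omitted):
+#
+#	call cal_open (list)
+#	while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+#	    ...
+#	    call cal_image (im, ccdtype, nscan, Memc[cal], SZ_FNAME)
+#	    ...
+#	}
+#	call cal_close ()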
+
+
+# CAL_IMAGE -- Return a calibration image of a particular type.
+# Search the calibration list for the first calibration image of the desired
+# type and subset. Print a warning if there is more than one possible
+# calibration image and return an error if there is no calibration image.
+
+procedure cal_image (im, ccdtype, nscan, image, maxchars)
+
+pointer im # Image to be processed
+int	ccdtype			# Calibration CCD image type desired
+int nscan # Number of scan rows desired
+char image[maxchars] # Calibration image (returned)
+int maxchars # Maximum number chars in image name
+
+int i, m, n
+pointer sp, subset, str
+bool strne(), ccd_cmp()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (subset, SZ_SUBSET, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ m = 0
+ n = 0
+ switch (ccdtype) {
+ case ZERO, DARK:
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ n = n + 1
+ if (n == 1) {
+ m = i
+ } else {
+ if (Memi[nscans+i-1] == Memi[nscans+m-1]) {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ } else if (Memi[nscans+m-1] != nscan &&
+ (Memi[nscans+i-1] == nscan ||
+ Memi[nscans+i-1] == 1)) {
+ m = i
+ }
+ }
+ }
+ case FLAT, ILLUM, FRINGE:
+ call ccdsubset (im, Memc[subset], SZ_SUBSET)
+
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ if (strne (SUBSET(subsets,i), Memc[subset]))
+ next
+ n = n + 1
+ if (n == 1) {
+ m = i
+ } else {
+ if (Memi[nscans+i-1] == Memi[nscans+m-1]) {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ } else if (Memi[nscans+m-1] != nscan &&
+ (Memi[nscans+i-1] == nscan ||
+ Memi[nscans+i-1] == 1)) {
+ m = i
+ }
+ }
+ }
+ }
+
+ # If no calibration image is found then it is an error.
+ if (m == 0) {
+ switch (ccdtype) {
+ case ZERO:
+ call error (0, "No zero level calibration image found")
+ case DARK:
+ call error (0, "No dark count calibration image found")
+ case FLAT:
+ call sprintf (Memc[str], SZ_LINE,
+ "No flat field calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case ILLUM:
+ call sprintf (Memc[str], SZ_LINE,
+ "No illumination calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case FRINGE:
+ call sprintf (Memc[str], SZ_LINE,
+ "No fringe calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ }
+ }
+
+ call strcpy (IMAGE(images,m), image, maxchars)
+ if (nscan != Memi[nscans+m-1]) {
+ if (nscan != 1 && Memi[nscans+m-1] == 1)
+ call cal_scan (nscan, image, maxchars)
+ else {
+ call sprintf (Memc[str], SZ_LINE,
+ "Cannot find or create calibration with nscan of %d")
+ call pargi (nscan)
+ call error (0, Memc[str])
+ }
+ }
+
+ # Check that the input image is not the same as the calibration image.
+ call imstats (im, IM_IMAGENAME, Memc[str], SZ_LINE)
+ if (ccd_cmp (Memc[str], IMAGE(images,m))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Calibration image %s is the same as the input image")
+ call pargstr (image)
+ call error (0, Memc[str])
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_OPEN -- Create a list of calibration images from the input image list
+# and the calibration image lists.
+
+procedure cal_open (list)
+
+int list # List of input images
+int list1 # List of calibration images
+
+pointer sp, str
+int ccdtype, strdic(), imtopenp()
+bool clgetb()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset numbers
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+errchk cal_list
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ call clgstr ("ccdtype", Memc[str], SZ_LINE)
+ call xt_stripwhite (Memc[str])
+ if (Memc[str] == EOS)
+ ccdtype = NONE
+ else
+ ccdtype = strdic (Memc[str], Memc[str], SZ_LINE, CCDTYPES)
+
+ # Add calibration images to list.
+ nimages = 0
+ if (ccdtype != ZERO && clgetb ("zerocor")) {
+ list1 = imtopenp ("zero")
+ call cal_list (list1, ZERO)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && clgetb ("darkcor")) {
+ list1 = imtopenp ("dark")
+ call cal_list (list1, DARK)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ clgetb ("flatcor")) {
+ list1 = imtopenp ("flat")
+ call cal_list (list1, FLAT)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != ILLUM && clgetb ("illumcor")) {
+ list1 = imtopenp ("illum")
+ call cal_list (list1, ILLUM)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != FRINGE && clgetb ("fringecor")) {
+ list1 = imtopenp ("fringe")
+ call cal_list (list1, FRINGE)
+ call imtclose (list1)
+ }
+ if (list != NULL) {
+ call cal_list (list, UNKNOWN)
+ call imtrew (list)
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_CLOSE -- Free memory from the internal calibration image list.
+
+procedure cal_close ()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ if (nimages > 0) {
+ call mfree (ccdtypes, TY_INT)
+ call mfree (subsets, TY_CHAR)
+ call mfree (nscans, TY_INT)
+ call mfree (images, TY_CHAR)
+ }
+end
+
+
+# CAL_LIST -- Add calibration images to an internal list.
+# Map each image and get the CCD image type and subset.
+# If the ccdtype is given as a procedure argument this overrides the
+# image header type. For the calibration images add the type, subset,
+# and image name to dynamic arrays. Ignore duplicate names.
+
+procedure cal_list (list, listtype)
+
+pointer list # Image list
+int listtype # CCD type of image in list.
+ # Overrides header type if not UNKNOWN.
+
+int i, ccdtype, ccdtypei(), ccdnscan(), imtgetim()
+pointer sp, image, im, immap()
+bool streq()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer nscans # Pointer to array of calibration nscan values
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, nscans, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+
+ while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+ # Open the image. If an explicit type is given it is an
+ # error if the image can't be opened.
+ iferr (im = immap (Memc[image], READ_ONLY, 0)) {
+ if (listtype == UNKNOWN)
+ next
+ else
+ call erract (EA_ERROR)
+ }
+
+ # Override image header CCD type if a list type is given.
+ if (listtype == UNKNOWN)
+ ccdtype = ccdtypei (im)
+ else
+ ccdtype = listtype
+
+ switch (ccdtype) {
+ case ZERO, DARK, FLAT, ILLUM, FRINGE:
+ # Check for duplication.
+ for (i=1; i<=nimages; i=i+1)
+ if (streq (Memc[image], IMAGE(images,i)))
+ break
+ if (i <= nimages)
+ break
+
+ # Allocate memory for a new image.
+ if (i == 1) {
+ call malloc (ccdtypes, i, TY_INT)
+ call malloc (subsets, i * SZ_SUBSET, TY_CHAR)
+ call malloc (nscans, i, TY_INT)
+ call malloc (images, i * SZ_FNAME, TY_CHAR)
+ } else {
+ call realloc (ccdtypes, i, TY_INT)
+ call realloc (subsets, i * SZ_FNAME, TY_CHAR)
+ call realloc (nscans, i, TY_INT)
+ call realloc (images, i * SZ_FNAME, TY_CHAR)
+ }
+
+ # Enter the ccdtype, subset, and image name.
+ Memi[ccdtypes+i-1] = ccdtype
+ Memi[nscans+i-1] = ccdnscan (im, ccdtype)
+ call ccdsubset (im, SUBSET(subsets,i), SZ_SUBSET-1)
+ call strcpy (Memc[image], IMAGE(images,i), SZ_FNAME-1)
+ nimages = i
+ }
+ call imunmap (im)
+ }
+ call sfree (sp)
+end
+
+
+# CAL_SCAN -- Generate name for scan corrected calibration image.
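+# For example (illustrative), with nscan = 16 a root name "Flat1" becomes
+# "Flat1.16" (any image extension is preserved at the end); if nscan is
+# INDEFI the suffix ".1d" is used instead.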
+
+procedure cal_scan (nscan, image, maxchar)
+
+int nscan #I Number of scan lines
+char image[maxchar] #U Input root name, output scan name
+int maxchar #I Maximum number of chars in image name
+
+bool clgetb()
+pointer sp, root, ext
+
+begin
+ # Check if this operation is desired.
+ if (!clgetb ("scancor") || nscan == 1)
+ return
+
+ call smark (sp)
+ call salloc (root, SZ_FNAME, TY_CHAR)
+ call salloc (ext, SZ_FNAME, TY_CHAR)
+
+ call xt_imroot (image, Memc[root], SZ_FNAME)
+ call xt_imext (image, Memc[ext], SZ_FNAME)
+ if (IS_INDEFI (nscan)) {
+ call sprintf (image, maxchar, "%s.1d%s")
+ call pargstr (Memc[root])
+ call pargstr (Memc[ext])
+ } else {
+ call sprintf (image, maxchar, "%s.%d%s")
+ call pargstr (Memc[root])
+ call pargi (nscan)
+ call pargstr (Memc[ext])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdcache.com b/noao/imred/quadred/src/ccdproc/ccdcache.com
new file mode 100644
index 00000000..91ffae12
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdcache.com
@@ -0,0 +1,10 @@
+# Common data defining the cached images and data.
+
+int ccd_ncache # Number of images cached
+int ccd_maxcache # Maximum size of cache
+int ccd_szcache # Current size of cache
+int ccd_oldsize # Original memory size
+int ccd_pcache # Pointer to image cache structures
+
+common /ccdcache_com/ ccd_ncache, ccd_maxcache, ccd_szcache, ccd_oldsize,
+ ccd_pcache
diff --git a/noao/imred/quadred/src/ccdproc/ccdcache.h b/noao/imred/quadred/src/ccdproc/ccdcache.h
new file mode 100644
index 00000000..f7de3a2c
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdcache.h
@@ -0,0 +1,10 @@
+# Definition for image cache structure.
+
+define CCD_LENCACHE 6
+
+define CCD_IM Memi[$1] # IMIO pointer
+define CCD_NACCESS Memi[$1+1] # Number of accesses requested
+define CCD_SZDATA Memi[$1+2] # Size of data in cache in chars
+define CCD_DATA Memi[$1+3] # Pointer to data cache
+define CCD_BUFR Memi[$1+4] # Pointer to real image line
+define CCD_BUFS Memi[$1+5] # Pointer to short image line
diff --git a/noao/imred/quadred/src/ccdproc/ccdcache.x b/noao/imred/quadred/src/ccdproc/ccdcache.x
new file mode 100644
index 00000000..78f84ace
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdcache.x
@@ -0,0 +1,381 @@
+include <imhdr.h>
+include <imset.h>
+include <mach.h>
+include "ccdcache.h"
+
+.help ccdcache Jun87
+.nf ---------------------------------------------------------------------
+The purpose of the CCD image caching package is to minimize image mapping
+time, to prevent multiple mapping of the same image, and to keep entire
+calibration images in memory for extended periods to minimize disk
+I/O. It is selected by specifying a maximum caching size based on the
+available memory. When there is not enough memory for caching (or by
+setting the size to 0) then standard IMIO is used. When there is
+enough memory then as many images as will fit into the specified cache
+size are kept in memory. Images are also kept mapped until explicitly
+flushed or the entire package is closed.
+
+This is a special purpose interface intended only for the CCDRED package.
+It has the following restrictions.
+
+ 1. Images must be processed to be cached.
+ 2. Images must be 2 dimensional to be cached
+ 3. Images must be real or short to be cached.
+ 4. Images must be read_only to be cached.
+ 5. Cached images remain in memory until they are displaced,
+ flushed, or the package is closed.
+
+The package consists of the following procedures.
+
+ ccd_open ()
+	im = ccd_cache (image, ccdtype)
+ ptr = ccd_glr (im, col1, col2, line)
+ ptr = ccd_gls (im, col1, col2, line)
+ ccd_unmap (im)
+ ccd_flush (im)
+ ccd_close ()
+
+
+CCD_OPEN: Initialize the image cache. Called at the beginning.
+CCD_CLOSE: Flush the image cache and restore memory. Called at the end.
+
+CCD_CACHE: Open an image and save the IMIO pointer. If the image has been
+opened previously it need not be opened again. If image data caching
+is specified the image data may be read it into memory. In order for
+image data caching to occur the the image has to have been processed,
+be two dimensional, be real or short, and the total cache memory not
+be exceeded. If an error occurs in reading the image into memory
+the data is not cached.
+
+CCD_UNMAP: The image access count is decremented but the image
+is not closed, in case it will be used again.
+
+CCD_FLUSH: The image is closed and flushed from the cache.
+
+CCD_GLR, CCD_GLS: Get a real or short image line. If the image data is cached
+then a pointer to the line is quickly returned. If the data is not cached then
+IMIO is used to get the pointer.
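+
+A typical calling sequence (a sketch only; declarations omitted):
+
+	call ccd_open (max_cache)
+	im = ccd_cache (image, ccdtype)
+	do line = 1, IM_LEN(im,2)
+	    ptr = ccd_glr (im, 1, IM_LEN(im,1), line)
+	call ccd_unmap (im)
+	call ccd_close ()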
+.endhelp ---------------------------------------------------------------------
+
+
+
+# CCD_CACHE -- Open an image and possibly cache it in memory.
+
+pointer procedure ccd_cache (image, ccdtype)
+
+char image[ARB] # Image to be opened
+int ccdtype # Image type
+
+int i, nc, nl, nbytes
+pointer sp, str, pcache, pcache1, im
+
+int sizeof()
+pointer immap(), imgs2r(), imgs2s()
+bool streq(), ccdcheck()
+errchk immap, imgs2r, imgs2s
+
+include "ccdcache.com"
+
+define done_ 99
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Check if the image is cached.
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ im = CCD_IM(pcache)
+ call imstats (im, IM_IMAGENAME, Memc[str], SZ_LINE)
+ if (streq (image, Memc[str]))
+ break
+ }
+
+ # If the image is not cached open it and allocate memory.
+ if (i > ccd_ncache) {
+ im = immap (image, READ_ONLY, 0)
+ ccd_ncache = i
+ call realloc (ccd_pcache, ccd_ncache, TY_INT)
+ call malloc (pcache, CCD_LENCACHE, TY_STRUCT)
+ Memi[ccd_pcache+i-1] = pcache
+ CCD_IM(pcache) = im
+ CCD_NACCESS(pcache) = 0
+ CCD_SZDATA(pcache) = 0
+ CCD_DATA(pcache) = NULL
+ CCD_BUFR(pcache) = NULL
+ CCD_BUFS(pcache) = NULL
+ }
+
+ # If not caching the image data or if the image data has already
+ # been cached we are done.
+ if ((ccd_maxcache == 0) || (CCD_SZDATA(pcache) > 0))
+ goto done_
+
+ # Don't cache unprocessed calibration image data.
+ # This is the only really CCDRED specific code.
+ if (ccdcheck (im, ccdtype))
+ goto done_
+
+ # Check image is 2D and a supported pixel type.
+ if (IM_NDIM(im) != 2)
+ goto done_
+ if ((IM_PIXTYPE(im) != TY_REAL) && (IM_PIXTYPE(im) !=TY_SHORT))
+ goto done_
+
+ # Compute the size of the image data.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ nbytes = nc * nl * sizeof (IM_PIXTYPE(im)) * SZB_CHAR
+
+ # Free memory not in use.
+ if (ccd_szcache + nbytes > ccd_maxcache) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache1 = Memi[ccd_pcache+i-1]
+ if (CCD_NACCESS(pcache1) == 0) {
+ if (CCD_SZDATA(pcache1) > 0) {
+ ccd_szcache = ccd_szcache - CCD_SZDATA(pcache1)
+ CCD_SZDATA(pcache1) = 0
+ CCD_DATA(pcache1) = NULL
+ call mfree (CCD_BUFR(pcache1), TY_REAL)
+ call mfree (CCD_BUFS(pcache1), TY_SHORT)
+ call imseti (CCD_IM(pcache1), IM_CANCEL, YES)
+ if (ccd_szcache + nbytes > ccd_maxcache)
+ break
+ }
+ }
+ }
+ }
+ if (ccd_szcache + nbytes > ccd_maxcache)
+ goto done_
+
+ # Cache the image data
+ iferr {
+ switch (IM_PIXTYPE (im)) {
+ case TY_SHORT:
+ CCD_DATA(pcache) = imgs2s (im, 1, nc, 1, nl)
+ case TY_REAL:
+ CCD_DATA(pcache) = imgs2r (im, 1, nc, 1, nl)
+ }
+ ccd_szcache = ccd_szcache + nbytes
+ CCD_SZDATA(pcache) = nbytes
+ } then {
+ call imunmap (im)
+ im = immap (image, READ_ONLY, 0)
+ CCD_IM(pcache) = im
+ CCD_SZDATA(pcache) = 0
+ }
+
+done_
+ CCD_NACCESS(pcache) = CCD_NACCESS(pcache) + 1
+ call sfree (sp)
+ return (im)
+end
+
+
+# CCD_OPEN -- Initialize the CCD image cache.
+
+procedure ccd_open (max_cache)
+
+int max_cache # Maximum cache size in bytes
+
+int max_size, begmem()
+include "ccdcache.com"
+
+begin
+ ccd_ncache = 0
+ ccd_maxcache = max_cache
+ ccd_szcache = 0
+ call malloc (ccd_pcache, 1, TY_INT)
+
+ # Ask for the maximum physical memory.
+ if (ccd_maxcache > 0) {
+ ccd_oldsize = begmem (0, ccd_oldsize, max_size)
+ call fixmem (max_size)
+ }
+end
+
+
+# CCD_UNMAP -- Unmap an image.
+# Don't actually unmap the image since it may be opened again.
+
+procedure ccd_unmap (im)
+
+pointer im # IMIO pointer
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ CCD_NACCESS(pcache) = CCD_NACCESS(pcache) - 1
+ return
+ }
+ }
+
+ call imunmap (im)
+end
+
+
+# CCD_FLUSH -- Close image and flush from cache.
+
+procedure ccd_flush (im)
+
+pointer im # IMIO pointer
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ ccd_ncache = ccd_ncache - 1
+ ccd_szcache = ccd_szcache - CCD_SZDATA(pcache)
+ call mfree (CCD_BUFR(pcache), TY_REAL)
+ call mfree (CCD_BUFS(pcache), TY_SHORT)
+ call mfree (pcache, TY_STRUCT)
+ for (; i<=ccd_ncache; i=i+1)
+ Memi[ccd_pcache+i-1] = Memi[ccd_pcache+i]
+ break
+ }
+ }
+
+ call imunmap (im)
+end
+
+
+# CCD_CLOSE -- Close the image cache.
+
+procedure ccd_close ()
+
+int i
+pointer pcache
+include "ccdcache.com"
+
+begin
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ call imunmap (CCD_IM(pcache))
+ call mfree (CCD_BUFR(pcache), TY_REAL)
+ call mfree (CCD_BUFS(pcache), TY_SHORT)
+ call mfree (pcache, TY_STRUCT)
+ }
+ call mfree (ccd_pcache, TY_INT)
+
+ # Restore memory.
+ call fixmem (ccd_oldsize)
+end
+
+
+# CCD_GLR -- Get a line of real data from the image.
+# If the image data is cached this is fast (particularly if the datatype
+# matches). If the image data is not cached then use IMIO.
+
+pointer procedure ccd_glr (im, col1, col2, line)
+
+pointer im # IMIO pointer
+int col1, col2 # Columns
+int line # Line
+
+int i
+pointer pcache, data, bufr, imgs2r()
+errchk malloc
+include "ccdcache.com"
+
+begin
+ # Quick test for cached data.
+ if (ccd_maxcache == 0)
+ return (imgs2r (im, col1, col2, line, line))
+
+ # Return cached data.
+ if (IM_PIXTYPE(im) == TY_REAL) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0)
+ return (CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1)
+ else
+ break
+ }
+ }
+ } else {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0) {
+ data = CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1
+ bufr = CCD_BUFR(pcache)
+ if (bufr == NULL) {
+ call malloc (bufr, IM_LEN(im,1), TY_REAL)
+ CCD_BUFR(pcache) = bufr
+ }
+ call achtsr (Mems[data], Memr[bufr], IM_LEN(im,1))
+ return (bufr)
+ } else
+ break
+ }
+ }
+ }
+
+ # Return uncached data.
+ return (imgs2r (im, col1, col2, line, line))
+end
+
+
+# CCD_GLS -- Get a line of short data from the image.
+# If the image data is cached this is fast (particularly if the datatype
+# matches). If the image data is not cached then use IMIO.
+
+pointer procedure ccd_gls (im, col1, col2, line)
+
+pointer im # IMIO pointer
+int col1, col2 # Columns
+int line # Line
+
+int i
+pointer pcache, data, bufs, imgs2s()
+errchk malloc
+include "ccdcache.com"
+
+begin
+ # Quick test for cached data.
+ if (ccd_maxcache == 0)
+ return (imgs2s (im, col1, col2, line, line))
+
+ # Return cached data.
+ if (IM_PIXTYPE(im) == TY_SHORT) {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0)
+ return (CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1)
+ else
+ break
+ }
+ }
+ } else {
+ for (i=1; i<=ccd_ncache; i=i+1) {
+ pcache = Memi[ccd_pcache+i-1]
+ if (CCD_IM(pcache) == im) {
+ if (CCD_SZDATA(pcache) > 0) {
+ data = CCD_DATA(pcache)+(line-1)*IM_LEN(im,1)+col1-1
+ bufs = CCD_BUFS(pcache)
+ if (bufs == NULL) {
+ call malloc (bufs, IM_LEN(im,1), TY_SHORT)
+ CCD_BUFS(pcache) = bufs
+ }
+ call achtrs (Memr[data], Mems[bufs], IM_LEN(im,1))
+ return (bufs)
+ } else
+ break
+ }
+ }
+ }
+
+ # Return uncached data.
+ return (imgs2s (im, col1, col2, line, line))
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdcheck.x b/noao/imred/quadred/src/ccdproc/ccdcheck.x
new file mode 100644
index 00000000..0dde14f9
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdcheck.x
@@ -0,0 +1,67 @@
+include <imhdr.h>
+include "ccdtypes.h"
+
+# CCDCHECK -- Check processing status.
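+#
+# Returns true if any requested correction has apparently not yet been
+# applied; flat field and illumination images are also checked for a valid
+# "ccdmean" entry.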
+
+bool procedure ccdcheck (im, ccdtype)
+
+pointer im # IMIO pointer
+int ccdtype # CCD type
+
+real ccdmean, hdmgetr()
+bool clgetb(), ccdflag()
+long time
+int hdmgeti()
+
+begin
+ if (clgetb ("trim") && !ccdflag (im, "trim"))
+ return (true)
+ if (clgetb ("fixpix") && !ccdflag (im, "fixpix"))
+ return (true)
+ if (clgetb ("overscan") && !ccdflag (im, "overscan"))
+ return (true)
+
+ switch (ccdtype) {
+ case ZERO:
+ if (clgetb ("readcor") && !ccdflag (im, "readcor"))
+ return (true)
+ case DARK:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ case FLAT:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("scancor") && !ccdflag (im, "scancor"))
+ return (true)
+ iferr (ccdmean = hdmgetr (im, "ccdmean"))
+ return (true)
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (time < IM_MTIME(im))
+ return (true)
+ case ILLUM:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("flatcor") && !ccdflag (im, "flatcor"))
+ return (true)
+ iferr (ccdmean = hdmgetr (im, "ccdmean"))
+ return (true)
+ default:
+ if (clgetb ("zerocor") && !ccdflag (im, "zerocor"))
+ return (true)
+ if (clgetb ("darkcor") && !ccdflag (im, "darkcor"))
+ return (true)
+ if (clgetb ("flatcor") && !ccdflag (im, "flatcor"))
+ return (true)
+ if (clgetb ("illumcor") && !ccdflag (im, "illumcor"))
+ return (true)
+ if (clgetb ("fringecor") && !ccdflag (im, "fringcor"))
+ return (true)
+ }
+
+ return (false)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdcmp.x b/noao/imred/quadred/src/ccdproc/ccdcmp.x
new file mode 100644
index 00000000..a2687934
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdcmp.x
@@ -0,0 +1,23 @@
+# CCD_CMP -- Compare two image names with extensions ignored.
+
+bool procedure ccd_cmp (image1, image2)
+
+char image1[ARB] # First image
+char image2[ARB] # Second image
+
+int i, j, strmatch(), strlen(), strncmp()
+bool streq()
+
+begin
+ if (streq (image1, image2))
+ return (true)
+
+ i = max (strmatch (image1, ".imh"), strmatch (image1, ".hhh"))
+ if (i == 0)
+ i = strlen (image1)
+ j = max (strmatch (image2, ".imh"), strmatch (image2, ".hhh"))
+ if (j == 0)
+ j = strlen (image2)
+
+ return (strncmp (image1, image2, max (i, j)) == 0)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccddelete.x b/noao/imred/quadred/src/ccdproc/ccddelete.x
new file mode 100644
index 00000000..90931135
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccddelete.x
@@ -0,0 +1,55 @@
+# CCDDELETE -- Delete an image by renaming it to a backup image.
+#
+# 1. Get the backup prefix which may be a path name.
+# 2. If no prefix is specified then delete the image without a backup.
+# 3. If there is a prefix then make a backup image name.
+# Rename the image to the backup image name.
+#
+# The backup image name is formed by prepending the backup prefix to the
+# image name. If a previous backup exists, integers are appended to the backup
+# prefix until a nonexistent image name is created.
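+#
+# For example (the names here are illustrative only), with a backup prefix
+# of "B" the image "obj012" is renamed to "Bobj012"; if that name already
+# exists the names "B1obj012", "B2obj012", ... are tried in turn.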
+
+procedure ccddelete (image)
+
+char image[ARB] # Image to delete (backup)
+
+int i, imaccess()
+pointer sp, prefix, backup
+errchk imdelete, imrename
+
+begin
+ call smark (sp)
+ call salloc (prefix, SZ_FNAME, TY_CHAR)
+ call salloc (backup, SZ_FNAME, TY_CHAR)
+
+ # Get the backup prefix.
+ call clgstr ("backup", Memc[prefix], SZ_FNAME)
+ call xt_stripwhite (Memc[prefix])
+
+ # If there is no prefix then simply delete the image.
+ if (Memc[prefix] == EOS)
+ call imdelete (image)
+
+ # Otherwise create a backup image name which does not exist and
+ # rename the image to the backup image.
+
+ else {
+ i = 0
+ repeat {
+ if (i == 0) {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%s")
+ call pargstr (Memc[prefix])
+ call pargstr (image)
+ } else {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%d%s")
+ call pargstr (Memc[prefix])
+ call pargi (i)
+ call pargstr (image)
+ }
+ i = i + 1
+ } until (imaccess (Memc[backup], READ_ONLY) == NO)
+ call imrename (image, Memc[backup])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdflag.x b/noao/imred/quadred/src/ccdproc/ccdflag.x
new file mode 100644
index 00000000..427365d2
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdflag.x
@@ -0,0 +1,27 @@
+# CCDFLAG -- Determine if a CCD processing flag is set. This is less than
+# obvious because of the need to use the default value to indicate a
+# false flag.
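+#
+# For example (an illustrative sketch of the convention), if the default
+# value of the "flatcor" flag is a null string and a processed image carries
+# a non-null value for it, the strings differ and the flag is returned as
+# true.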
+
+bool procedure ccdflag (im, name)
+
+pointer im # IMIO pointer
+char name[ARB] # CCD flag name
+
+bool flag, strne()
+pointer sp, str1, str2
+
+begin
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the flag string value and the default value.
+ # The flag is true if the value and the default do not match.
+
+ call hdmgstr (im, name, Memc[str1], SZ_LINE)
+ call hdmgdef (name, Memc[str2], SZ_LINE)
+ flag = strne (Memc[str1], Memc[str2])
+
+ call sfree (sp)
+ return (flag)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdlog.x b/noao/imred/quadred/src/ccdproc/ccdlog.x
new file mode 100644
index 00000000..48453704
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdlog.x
@@ -0,0 +1,46 @@
+include <imhdr.h>
+include <imset.h>
+
+# CCDLOG -- Log information about the processing with the image name.
+#
+# 1. If the package "verbose" parameter is set print the string preceded
+# by the image name.
+# 2. If the package "logfile" parameter is not null append the string,
+# preceded by the image name, to the file.
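+#
+# Each log line has the form "image: message", e.g. (illustrative text only)
+# "obj012: Trimmed image section is [1:512,1:512]".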
+
+procedure ccdlog (im, str)
+
+pointer im # IMIO pointer
+char str[ARB] # Log string
+
+int fd, open()
+bool clgetb()
+pointer sp, fname
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+
+ # Write to the standard error output if "verbose".
+ if (clgetb ("verbose")) {
+ call imstats (im, IM_IMAGENAME, Memc[fname], SZ_FNAME)
+ call eprintf ("%s: %s\n")
+ call pargstr (Memc[fname])
+ call pargstr (str)
+ }
+
+ # Append to the "logfile" if not null.
+ call clgstr ("logfile", Memc[fname], SZ_FNAME)
+ call xt_stripwhite (Memc[fname])
+ if (Memc[fname] != EOS) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call imstats (im, IM_IMAGENAME, Memc[fname], SZ_FNAME)
+ call fprintf (fd, "%s: %s\n")
+ call pargstr (Memc[fname])
+ call pargstr (str)
+ call close (fd)
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdmean.x b/noao/imred/quadred/src/ccdproc/ccdmean.x
new file mode 100644
index 00000000..d38ea97b
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdmean.x
@@ -0,0 +1,50 @@
+include <imhdr.h>
+
+
+# CCDMEAN -- Compute mean and add to header if needed.
+
+procedure ccdmean (input)
+
+char input[ARB] # Input image
+
+int i, nc, nl, hdmgeti()
+long time, clktime()
+bool clgetb()
+real mean, hdmgetr(), asumr()
+pointer in, immap(), imgl2r()
+errchk immap
+
+begin
+ # Check if this operation has been done.
+
+ in = immap (input, READ_WRITE, 0)
+ ifnoerr (mean = hdmgetr (in, "ccdmean")) {
+ iferr (time = hdmgeti (in, "ccdmeant"))
+ time = IM_MTIME(in)
+ if (time >= IM_MTIME(in)) {
+ call imunmap (in)
+ return
+ }
+ }
+
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Compute mean of %s\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ # Compute and record the mean.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ mean = 0.
+ do i = 1, nl
+ mean = mean + asumr (Memr[imgl2r(in,i)], nc)
+ mean = mean / (nc * nl)
+ time = clktime (long(0))
+ call hdmputr (in, "ccdmean", mean)
+ call hdmputi (in, "ccdmeant", int (time))
+
+ call imunmap (in)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdnscan.x b/noao/imred/quadred/src/ccdproc/ccdnscan.x
new file mode 100644
index 00000000..3a9fbeba
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdnscan.x
@@ -0,0 +1,38 @@
+include "ccdtypes.h"
+
+
+# CCDNSCAN -- Return the number of CCD scan rows.
+#
+# If not found in the header return the "nscan" parameter for objects and
+# 1 for calibration images.
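+#
+# For example (illustrative only), a flat field with no "nscanrow" keyword
+# returns 1, while an object image with no keyword and scantype "shortscan"
+# returns the value of the "nscan" parameter.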
+
+int procedure ccdnscan (im, ccdtype)
+
+pointer im #I Image
+int ccdtype #I CCD type
+int nscan #O Number of scan lines
+
+bool clgetb()
+char type, clgetc()
+int hdmgeti(), clgeti()
+
+begin
+ iferr (nscan = hdmgeti (im, "nscanrow")) {
+ switch (ccdtype) {
+ case ZERO, DARK, FLAT, ILLUM, FRINGE:
+ nscan = 1
+ default:
+ type = clgetc ("scantype")
+ if (type == 's')
+ nscan = clgeti ("nscan")
+ else {
+ if (clgetb ("scancor"))
+ nscan = INDEFI
+ else
+ nscan = 1
+ }
+ }
+ }
+
+ return (nscan)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdproc.par b/noao/imred/quadred/src/ccdproc/ccdproc.par
new file mode 100644
index 00000000..f20207a7
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdproc.par
@@ -0,0 +1,43 @@
+images,s,a,"",,,List of CCD images to correct
+ccdtype,s,h,"",,,CCD image type to correct
+max_cache,i,h,0,0,,Maximum image caching memory (in Mbytes)
+noproc,b,h,no,,,"List processing steps only?
+"
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+zerocor,b,h,yes,,,Apply zero level correction?
+darkcor,b,h,no,,,Apply dark count correction?
+flatcor,b,h,yes,,,Apply flat field correction?
+illumcor,b,h,no,,,Apply illumination correction?
+fringecor,b,h,no,,,Apply fringe correction?
+readcor,b,h,no,,,Convert zero level image to readout correction?
+scancor,b,h,no,,,"Convert flat field image to scan correction?
+"
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+biassec,s,h,"",,,Overscan strip image section
+trimsec,s,h,"",,,Trim data section
+zero,s,h,"",,,Zero level calibration image
+dark,s,h,"",,,Dark count calibration image
+flat,s,h,"",,,Flat field images
+illum,s,h,"",,,Illumination correction images
+fringe,s,h,"",,,Fringe correction images
+minreplace,r,h,1.,,,Minimum flat field value
+scantype,s,h,"shortscan","shortscan|longscan",,Scan type (shortscan|longscan)
+nscan,i,h,1,1,,"Number of short scan lines
+"
+interactive,b,h,no,,,Fit overscan interactively?
+function,s,h,"legendre",,,Fitting function
+order,i,h,1,1,,Number of polynomial terms or spline pieces
+sample,s,h,"*",,,Sample points to fit
+naverage,i,h,1,,,Number of sample points to combine
+niterate,i,h,1,0,,Number of rejection iterations
+low_reject,r,h,3.,0.,,Low sigma rejection factor
+high_reject,r,h,3.,0.,,High sigma rejection factor
+grow,r,h,0.,0.,,"Rejection growing radius
+"
+verbose,b,h,)_.verbose,,,Print log information to the standard output?
+logfile,f,h,)_.logfile,,,Text log file
+backup,s,h,)_.backup,,,Backup directory or prefix
+output,s,h,"",,,Not used
diff --git a/noao/imred/quadred/src/ccdproc/ccdproc.x b/noao/imred/quadred/src/ccdproc/ccdproc.x
new file mode 100644
index 00000000..1b2a133c
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdproc.x
@@ -0,0 +1,106 @@
+include <error.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# CCDPROC -- Process a CCD image of a specified CCD image type.
+#
+# The input image is corrected for bad pixels, overscan levels, zero
+# levels, dark counts, flat field, illumination, and fringing. It may also
+# be trimmed. The checking of whether to apply each correction, getting the
+# required parameters, and logging the operations is left to separate
+# procedures, one for each correction. The actual processing is done by
+# a specialized procedure designed to be very efficient. These
+# procedures may also process calibration images if necessary.
+# The specified image type overrides the image type in the image header.
+# There are two data type paths; one for short data types and one for
+# all other data types (usually real).
+
+procedure ccdproc (input, ccdtype)
+
+char input[ARB] # CCD image to process
+int ccdtype # CCD type of image (independent of header).
+
+pointer sp, output, str, in, out, ccd, immap()
+errchk immap, set_output, ccddelete
+errchk set_fixpix, set_zero, set_dark, set_flat, set_illum, set_fringe
+
+begin
+ call smark (sp)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Map the image, make a working output image and set the processing
+ # parameters.
+
+ in = immap (input, READ_ONLY, 0)
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ call set_output (in, out, Memc[output])
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+
+ # Set processing appropriate for the various image types.
+ switch (ccdtype) {
+ case ZERO:
+ case DARK:
+ call set_zero (ccd)
+ case FLAT:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ CORS(ccd, MINREP) = YES
+ case ILLUM:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ case OBJECT, COMP:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ default:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ }
+
+ # Do the processing if the COR flag is set.
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Replace the input by the output image.
+ call imunmap (in)
+ call imunmap (out)
+ iferr (call ccddelete (input)) {
+ call imdelete (Memc[output])
+ call error (1,
+ "Can't delete or make backup of original image")
+ }
+ call imrename (Memc[output], input)
+ } else {
+ # Delete the temporary output image leaving the input unchanged.
+ call imunmap (in)
+ iferr (call imunmap (out))
+ ;
+ iferr (call imdelete (Memc[output]))
+ ;
+ }
+ call free_proc (ccd)
+
+ # Do special processing for calibration images.
+ switch (ccdtype) {
+ case ZERO:
+ call readcor (input)
+ case FLAT:
+ call ccdmean (input)
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdred.h b/noao/imred/quadred/src/ccdproc/ccdred.h
new file mode 100644
index 00000000..ef41f592
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdred.h
@@ -0,0 +1,155 @@
+# CCDRED Data Structures and Definitions
+
+# The CCD structure: This structure is used to communicate processing
+# parameters between the package procedures. It contains pointers to
+# data, calibration image IMIO pointers, scaling parameters, and the
+# correction flags. The correction flags indicate which processing
+# operations are to be performed. The subsection parameters do not
+# include a step size. A step size is assumed. If arbitrary subsampling
+# is desired this would be the next generalization.
+
+define LEN_CCD 75 # Length of CCD structure
+
+# CCD data coordinates
+define CCD_C1 Memi[$1] # CCD starting column
+define CCD_C2 Memi[$1+1] # CCD ending column
+define CCD_L1 Memi[$1+2] # CCD starting line
+define CCD_L2 Memi[$1+3] # CCD ending line
+
+# Input data
+define IN_IM Memi[$1+4] # Input image pointer
+define IN_C1 Memi[$1+5] # Input data starting column
+define IN_C2 Memi[$1+6] # Input data ending column
+define IN_L1 Memi[$1+7] # Input data starting line
+define IN_L2 Memi[$1+8] # Input data ending line
+define IN_NSEC Memi[$1+71] # Number of input pieces
+define IN_SEC Memi[$1+72] # Pointer to sections (c1,c2,l1,l2)xn
+
+# Output data
+define OUT_IM Memi[$1+9] # Output image pointer
+define OUT_C1 Memi[$1+10] # Output data starting column
+define OUT_C2 Memi[$1+11] # Output data ending column
+define OUT_L1 Memi[$1+12] # Output data starting line
+define OUT_L2 Memi[$1+13] # Output data ending line
+define OUT_SEC Memi[$1+73] # Pointer to sections (c1,c2,l1,l2)xn
+
+# Zero level data
+define ZERO_IM Memi[$1+14] # Zero level image pointer
+define ZERO_C1 Memi[$1+15] # Zero level data starting column
+define ZERO_C2 Memi[$1+16] # Zero level data ending column
+define ZERO_L1 Memi[$1+17] # Zero level data starting line
+define ZERO_L2 Memi[$1+18] # Zero level data ending line
+
+# Dark count data
+define DARK_IM Memi[$1+19] # Dark count image pointer
+define DARK_C1 Memi[$1+20] # Dark count data starting column
+define DARK_C2 Memi[$1+21] # Dark count data ending column
+define DARK_L1 Memi[$1+22] # Dark count data starting line
+define DARK_L2 Memi[$1+23] # Dark count data ending line
+
+# Flat field data
+define FLAT_IM Memi[$1+24] # Flat field image pointer
+define FLAT_C1 Memi[$1+25] # Flat field data starting column
+define FLAT_C2 Memi[$1+26] # Flat field data ending column
+define FLAT_L1 Memi[$1+27] # Flat field data starting line
+define FLAT_L2 Memi[$1+28] # Flat field data ending line
+
+# Illumination data
+define ILLUM_IM Memi[$1+29] # Illumination image pointer
+define ILLUM_C1 Memi[$1+30] # Illumination data starting column
+define ILLUM_C2 Memi[$1+31] # Illumination data ending column
+define ILLUM_L1 Memi[$1+32] # Illumination data starting line
+define ILLUM_L2 Memi[$1+33] # Illumination data ending line
+
+# Fringe data
+define FRINGE_IM Memi[$1+34] # Fringe image pointer
+define FRINGE_C1 Memi[$1+35] # Fringe data starting column
+define FRINGE_C2 Memi[$1+36] # Fringe data ending column
+define FRINGE_L1 Memi[$1+37] # Fringe data starting line
+define FRINGE_L2 Memi[$1+38] # Fringe data ending line
+
+# Trim section
+define TRIM_C1 Memi[$1+39] # Trim starting column
+define TRIM_C2 Memi[$1+40] # Trim ending column
+define TRIM_L1 Memi[$1+41] # Trim starting line
+define TRIM_L2 Memi[$1+42] # Trim ending line
+
+# Bias section
+define BIAS_C1 Memi[$1+43] # Bias starting column
+define BIAS_C2 Memi[$1+44] # Bias ending column
+define BIAS_L1 Memi[$1+45] # Bias starting line
+define BIAS_L2 Memi[$1+46] # Bias ending line
+define BIAS_SEC Memi[$1+74] # Multiple bias sections
+
+define READAXIS Memi[$1+47] # Read out axis (1=cols, 2=lines)
+define CALCTYPE Memi[$1+48] # Calculation data type
+define NBADCOLS Memi[$1+49] # Number of column interpolation regions
+define BADCOLS Memi[$1+50] # Pointer to col interpolation regions
+define NBADLINES Memi[$1+51] # Number of line interpolation regions
+define BADLINES Memi[$1+52] # Pointer to line interpolation regions
+define OVERSCAN_VEC Memi[$1+53] # Pointer to overscan vector
+define DARKSCALE Memr[P2R($1+54)] # Dark count scale factor
+define FRINGESCALE Memr[P2R($1+55)] # Fringe scale factor
+define FLATSCALE Memr[P2R($1+56)] # Flat field scale factor
+define ILLUMSCALE Memr[P2R($1+57)] # Illumination scale factor
+define MINREPLACE Memr[P2R($1+58)] # Minimum replacement value
+define MEAN Memr[P2R($1+59)] # Mean of output image
+define COR Memi[$1+60] # Overall correction flag
+define CORS Memi[$1+61+($2-1)] # Individual correction flags
+
+# Individual components of input, output, and bias section pieces.
+define IN_SC1 Memi[IN_SEC($1)+4*$2-4]
+define IN_SC2 Memi[IN_SEC($1)+4*$2-3]
+define IN_SL1 Memi[IN_SEC($1)+4*$2-2]
+define IN_SL2 Memi[IN_SEC($1)+4*$2-1]
+define OUT_SC1 Memi[OUT_SEC($1)+4*$2-4]
+define OUT_SC2 Memi[OUT_SEC($1)+4*$2-3]
+define OUT_SL1 Memi[OUT_SEC($1)+4*$2-2]
+define OUT_SL2 Memi[OUT_SEC($1)+4*$2-1]
+define BIAS_SC1 Memi[BIAS_SEC($1)+4*$2-4]
+define BIAS_SC2 Memi[BIAS_SEC($1)+4*$2-3]
+define BIAS_SL1 Memi[BIAS_SEC($1)+4*$2-2]
+define BIAS_SL2 Memi[BIAS_SEC($1)+4*$2-1]
+
+# The correction array contains the following elements with array indices
+# given by the macro definitions.
+
+define NCORS 10 # Number of corrections
+
+define FIXPIX 1 # Fix bad pixels
+define TRIM 2 # Trim image
+define OVERSCAN 3 # Apply overscan correction
+define ZEROCOR 4 # Apply zero level correction
+define DARKCOR 5 # Apply dark count correction
+define FLATCOR 6 # Apply flat field correction
+define ILLUMCOR 7 # Apply illumination correction
+define FRINGECOR 8 # Apply fringe correction
+define FINDMEAN 9 # Find the mean of the output image
+define MINREP 10 # Check and replace minimum value
+
+# The following definitions identify the correction values in the correction
+# array. They are defined in terms of bit fields so that it is possible to
+# add corrections to form unique combination corrections. Some of
+# these combinations are implemented as compound operations for efficiency.
+
+define O 001B # overscan
+define Z 002B # zero level
+define D 004B # dark count
+define F 010B # flat field
+define I 020B # Illumination
+define Q 040B # Fringe
+
+# The following correction combinations are recognized.
+
+define ZO 003B # zero level + overscan
+define DO 005B # dark count + overscan
+define DZ 006B # dark count + zero level
+define DZO 007B # dark count + zero level + overscan
+define FO 011B # flat field + overscan
+define FZ 012B # flat field + zero level
+define FZO 013B # flat field + zero level + overscan
+define FD 014B # flat field + dark count
+define FDO 015B # flat field + dark count + overscan
+define FDZ 016B # flat field + dark count + zero level
+define FDZO 017B # flat field + dark count + zero level + overscan
+define QI 060B # fringe + illumination
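+
+# For example, the zero level and overscan corrections combine as
+# Z + O = 002B + 001B = 003B, which is the ZO code handled as a single
+# compound operation in the cor procedures.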
diff --git a/noao/imred/quadred/src/ccdproc/ccdsection.x b/noao/imred/quadred/src/ccdproc/ccdsection.x
new file mode 100644
index 00000000..aced216a
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdsection.x
@@ -0,0 +1,100 @@
+include <ctype.h>
+
+# CCD_SECTION -- Parse a 2D image section into its elements.
+# 1. The default values must be set by the caller.
+# 2. A null image section is OK.
+# 3. The first nonwhitespace character must be '['.
+# 4. The last interpreted character must be ']'.
+#
+# This procedure should be replaced with an IMIO procedure at some
+# point.
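+#
+# For example (illustrative only), parsing "[1:100,5:50]" sets x1=1, x2=100,
+# y1=5, y2=50 and leaves the step values at their caller-supplied defaults,
+# while a null section string leaves all of the defaults unchanged.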
+
+procedure ccd_section (section, x1, x2, xstep, y1, y2, ystep)
+
+char section[ARB] # Image section
+int x1, x2, xstep # X image section parameters
+int y1, y2, ystep # X image section parameters
+
+int i, ip, a, b, c, temp, ctoi()
+define error_ 99
+
+begin
+ # Decode the section string.
+ ip = 1
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == '[')
+ ip = ip + 1
+ else if (section[ip] == EOS)
+ return
+ else
+ goto error_
+
+ do i = 1, 2 {
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+
+ # Default values
+ if (i == 1) {
+ a = x1
+ b = x2
+ c = xstep
+ } else {
+ a = y1
+ b = y2
+ c = ystep
+ }
+
+ # Get a:b:c. Allow notation such as "-*:c"
+ # (or even "-:c") where the step is obviously negative.
+
+ if (ctoi (section, ip, temp) > 0) { # a
+ a = temp
+ if (section[ip] == ':') {
+ ip = ip + 1
+ if (ctoi (section, ip, b) == 0) # a:b
+ goto error_
+ } else
+ b = a
+ } else if (section[ip] == '-') { # -*
+ temp = a
+ a = b
+ b = temp
+ ip = ip + 1
+ if (section[ip] == '*')
+ ip = ip + 1
+ } else if (section[ip] == '*') # *
+ ip = ip + 1
+ if (section[ip] == ':') { # ..:step
+ ip = ip + 1
+ if (ctoi (section, ip, c) == 0)
+ goto error_
+ else if (c == 0)
+ goto error_
+ }
+ if (a > b && c > 0)
+ c = -c
+
+ if (i == 1) {
+ x1 = a
+ x2 = b
+ xstep = c
+ } else {
+ y1 = a
+ y2 = b
+ ystep = c
+ }
+
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ',')
+ ip = ip + 1
+ }
+
+ if (section[ip] != ']')
+ goto error_
+
+ return
+error_
+ call error (0, "Error in image section specification")
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdsubsets.x b/noao/imred/quadred/src/ccdproc/ccdsubsets.x
new file mode 100644
index 00000000..6152897f
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdsubsets.x
@@ -0,0 +1,92 @@
+# CCDSUBSET -- Return the CCD subset identifier.
+#
+# 1. Get the subset string and search the subset record file for the ID string.
+# 2. If the subset string is not in the record file define a default ID string
+# based on the first word of the subset string. If the first word is not
+# unique append an integer to the first word until it is unique.
+# 3. Add the new subset string and identifier to the record file.
+# 4. Since the ID string is used to generate image names replace all
+# nonimage name characters with '_'.
+#
+# It is an error if the record file cannot be created or written when needed.
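+#
+# For example (the strings here are illustrative only), a subset string
+# "V 600 sec" gets the default ID "V" and is recorded in the subset file as
+# 'V 600 sec' followed by V; if "V" is already assigned to a different
+# subset string the IDs "V1", "V2", ... are tried in turn.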
+
+procedure ccdsubset (im, subset, sz_name)
+
+pointer im # Image
+char subset[sz_name] # CCD subset identifier
+int sz_name # Size of subset string
+
+bool streq()
+int i, fd, ctowrd(), open(), fscan()
+pointer sp, fname, str1, str2, subset1, subset2, subset3
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+ call salloc (subset1, SZ_LINE, TY_CHAR)
+ call salloc (subset2, SZ_LINE, TY_CHAR)
+ call salloc (subset3, SZ_LINE, TY_CHAR)
+
+ # Get the subset record file and the subset string.
+ call clgstr ("ssfile", Memc[fname], SZ_LINE)
+ call hdmgstr (im, "subset", Memc[str1], SZ_LINE)
+
+ # The default subset identifier is the first word of the subset string.
+ i = 1
+ i = ctowrd (Memc[str1], i, Memc[subset1], SZ_LINE)
+
+ # A null subset string is ok. If not null check for conflict
+ # with previous subset IDs.
+ if (Memc[str1] != EOS) {
+ call strcpy (Memc[subset1], Memc[subset3], SZ_LINE)
+
+ # Search the subset record file for the same subset string.
+ # If found use the ID string. If the subset ID has been
+ # used for another subset string then increment an integer
+ # suffix to the default ID and check the list again.
+
+ i = 1
+ ifnoerr (fd = open (Memc[fname], READ_ONLY, TEXT_FILE)) {
+ while (fscan (fd) != EOF) {
+ call gargwrd (Memc[str2], SZ_LINE)
+ call gargwrd (Memc[subset2], SZ_LINE)
+ if (streq (Memc[str1], Memc[str2])) {
+ i = 0
+ call strcpy (Memc[subset2], Memc[subset1], SZ_LINE)
+ break
+ } if (streq (Memc[subset1], Memc[subset2])) {
+ call sprintf (Memc[subset1], SZ_LINE, "%s%d")
+ call pargstr (Memc[subset3])
+ call pargi (i)
+ i = i + 1
+ call seek (fd, BOF)
+ }
+ }
+ call close (fd)
+ }
+
+ # If the subset is not in the record file add it.
+ if (i > 0) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call fprintf (fd, "'%s'\t%s\n")
+ call pargstr (Memc[str1])
+ call pargstr (Memc[subset1])
+ call close (fd)
+ }
+ }
+
+ # Set the subset ID string and replace magic characters by '_'
+ # since the subset ID is used in forming image names.
+
+ call strcpy (Memc[subset1], subset, sz_name)
+ for (i=1; subset[i]!=EOS; i=i+1)
+ switch (subset[i]) {
+ case '-','+','?','*','[',']',' ','\t':
+ subset[i] = '_'
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/ccdtypes.h b/noao/imred/quadred/src/ccdproc/ccdtypes.h
new file mode 100644
index 00000000..0d5d4caf
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdtypes.h
@@ -0,0 +1,14 @@
+# Standard CCD image types.
+
+define CCDTYPES "|object|zero|dark|flat|illum|fringe|other|comp|"
+
+define NONE -1
+define UNKNOWN 0
+define OBJECT 1
+define ZERO 2
+define DARK 3
+define FLAT 4
+define ILLUM 5
+define FRINGE 6
+define OTHER 7
+define COMP 8
diff --git a/noao/imred/quadred/src/ccdproc/ccdtypes.x b/noao/imred/quadred/src/ccdproc/ccdtypes.x
new file mode 100644
index 00000000..bf6d29e2
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/ccdtypes.x
@@ -0,0 +1,72 @@
+include "ccdtypes.h"
+
+# CCDTYPES -- Return the CCD type name string.
+# CCDTYPEI -- Return the CCD type code.
+
+
+# CCDTYPES -- Return the CCD type name string.
+
+procedure ccdtypes (im, name, sz_name)
+
+pointer im # Image
+char name[sz_name] # CCD type name
+int sz_name # Size of name string
+
+int strdic()
+pointer sp, str
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the image type string. If none then return "none".
+ # Otherwise get the corresponding package image type string.
+ # If the image type is unknown return "unknown" otherwise return
+ # the package name.
+
+ call hdmgstr (im, "imagetyp", Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call strcpy ("none", name, sz_name)
+ } else {
+ call hdmname (Memc[str], name, sz_name)
+ if (name[1] == EOS)
+ call strcpy (Memc[str], name, sz_name)
+ if (strdic (name, name, sz_name, CCDTYPES) == UNKNOWN)
+ call strcpy ("unknown", name, sz_name)
+ }
+
+ call sfree (sp)
+end
+
+
+# CCDTYPEI -- Return the CCD type code.
+
+int procedure ccdtypei (im)
+
+pointer im # Image
+int ccdtype # CCD type (returned)
+
+pointer sp, str1, str2
+int strdic()
+
+begin
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the image type and if there is none then return the NONE code.
+ call hdmgstr (im, "imagetyp", Memc[str1], SZ_LINE)
+ if (Memc[str1] == EOS) {
+ ccdtype = NONE
+
+ # Otherwise get the package type and convert to an image type code.
+ } else {
+ call hdmname (Memc[str1], Memc[str2], SZ_LINE)
+ if (Memc[str2] == EOS)
+ call strcpy (Memc[str1], Memc[str2], SZ_LINE)
+ ccdtype = strdic (Memc[str2], Memc[str2], SZ_LINE, CCDTYPES)
+ }
+
+ call sfree (sp)
+ return (ccdtype)
+end
diff --git a/noao/imred/quadred/src/ccdproc/cor.gx b/noao/imred/quadred/src/ccdproc/cor.gx
new file mode 100644
index 00000000..189f9437
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/cor.gx
@@ -0,0 +1,362 @@
+include "ccdred.h"
+
+
+.help cor Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+cor -- Process CCD image lines
+
+These procedures are the heart of the CCD processing. They do the desired
+set of processing operations on the image line data as efficiently as
+possible. They are called by the PROC procedures. There are four procedures
+one for each readout axis and one for short and real image data.
+Some sets of operations are coded as single compound operations for efficiency.
+To keep the number of combinations managable only the most common
+combinations are coded as compound operations. The combinations
+consist of any set of line overscan, column overscan, zero level, dark
+count, and flat field and any set of illumination and fringe
+correction. The corrections are applied in place to the output vector.
+
+The column readout procedure is more complicated in order to handle
+zero level and flat field corrections specified as one dimensional
+readout corrections instead of two dimensional calibration images.
+Column readout format is probably extremely rare and the 1D readout
+corrections are used only for special types of data.
+.ih
+SEE ALSO
+proc, ccdred.h
+.endhelp -----------------------------------------------------------------------
+
+$for (sr)
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1$t (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+PIXEL out[n] # Output data
+real overscan # Overscan value
+PIXEL zero[n] # Zero level correction
+PIXEL dark[n] # Dark count correction
+PIXEL flat[n] # Flat field correction
+PIXEL illum[n] # Illumination correction
+PIXEL fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2$t (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+PIXEL out[n] # Output data
+real overscan[n] # Overscan value
+PIXEL zero[n] # Zero level correction
+PIXEL dark[n] # Dark count correction
+PIXEL flat[n] # Flat field correction
+PIXEL illum[n] # Illumination correction
+PIXEL fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+PIXEL zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+$endfor
diff --git a/noao/imred/quadred/src/ccdproc/corinput.gx b/noao/imred/quadred/src/ccdproc/corinput.gx
new file mode 100644
index 00000000..241cc34d
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/corinput.gx
@@ -0,0 +1,220 @@
+include <imhdr.h>
+include "ccdred.h"
+
+$for (sr)
+# CORINPUT -- Get an input image line, fix the bad pixels, and trim.
+# Return the corrected input line in the output array.
+
+procedure corinput$t (in, line, ccd, output, ncols)
+
+pointer in # Input IMIO pointer
+int line # Corrected output line
+pointer ccd # CCD pointer
+PIXEL output[ncols] # Output data (returned)
+int ncols # Number of output columns
+
+int i, inline
+pointer inbuf, imgl2$t()
+
+begin
+ # Determine the input line in terms of the trimmed output line.
+ if (IN_SEC(ccd) == NULL)
+ inline = IN_L1(ccd) + line - 1
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (line < OUT_SL1(ccd,i) || line > OUT_SL2(ccd,i))
+ next
+ inline = IN_SL1(ccd,i) + line - OUT_SL1(ccd,i)
+ break
+ }
+ }
+
+ # If there are bad lines call a procedure to fix them. Otherwise
+ # read the image line directly.
+
+ if (NBADLINES(ccd) != 0)
+ call lfix$t (in, inline, Mems[BADLINES(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADLINES(ccd), inbuf)
+ else
+ inbuf = imgl2$t (in, inline)
+
+ # If there are bad columns call a procedure to fix them.
+ if (NBADCOLS(ccd) != 0)
+ call cfix$t (inline, Mems[BADCOLS(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADCOLS(ccd), Mem$t[inbuf])
+
+ # Move the pixels to the output line.
+ if (IN_SEC(ccd) == NULL)
+ call amov$t (Mem$t[inbuf+IN_C1(ccd)-OUT_C1(ccd)], output, ncols)
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (inline < IN_SL1(ccd,i) || inline > IN_SL2(ccd,i))
+ next
+ call amov$t (Mem$t[inbuf+IN_SC1(ccd,i)-OUT_C1(ccd)],
+ output[OUT_SC1(ccd,i)], OUT_SC2(ccd,i)-OUT_SC1(ccd,i)+1)
+ }
+ }
+end
+
+
+# CFIX -- Interpolate across bad columns defined in the bad column array.
+
+procedure cfix$t (line, badcols, ncols, nlines, nbadcols, data)
+
+int line # Line to be fixed
+short badcols[2, nlines, nbadcols] # Bad column array
+int ncols # Number of columns
+int nlines # Number of lines
+int nbadcols # Number of bad column regions
+PIXEL data[ncols] # Data to be fixed
+
+PIXEL val
+real del
+int i, j, col1, col2
+
+begin
+ do i = 1, nbadcols {
+ col1 = badcols[1, line, i]
+ if (col1 == 0) # No bad columns
+ return
+ col2 = badcols[2, line, i]
+ if (col1 == 1) { # Bad first column
+ val = data[col2+1]
+ do j = col1, col2
+ data[j] = val
+ } else if (col2 == ncols) { # Bad last column
+ val = data[col1-1]
+ do j = col1, col2
+ data[j] = val
+ } else { # Interpolate
+ del = (data[col2+1] - data[col1-1]) / (col2 - col1 + 2)
+ val = data[col1-1] + del
+ do j = col1, col2
+ data[j] = val + (j - col1) * del
+ }
+ }
+end
+
+
+# LFIX -- Get image line and replace bad pixels by interpolation from
+# neighboring lines. Internal buffers are used to keep the last fixed
+# line and the next good line. They are allocated with LFIXINIT and
+# freed with LFIXFREE.
+
+procedure lfix$t (im, line, badlines, ncols, nlines, nbadlines, data)
+
+pointer im # IMIO pointer
+int line # Line to be obtained and fixed
+short badlines[2,nlines,nbadlines] # Bad line region array
+int ncols # Number of columns in image
+int nlines # Number of lines in images
+int nbadlines # Number of bad line regions
+pointer data # Data line pointer (returned)
+
+real wt1, wt2
+int i, nextgood, lastgood, col1, col2
+pointer imgl2$t()
+
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ # If this line has bad pixels replace them. Otherwise just
+ # read the line.
+
+ if (badlines[1, line, 1] != 0) {
+ # Save the last line which has already been fixed.
+ if (line != 1)
+ call amov$t (Mem$t[data], Mem$t[lastbuf], ncols)
+
+ # Determine the next line with no bad line pixels. Note that
+ # this requirement is overly strict since the bad columns
+ # may not be the same in neighboring lines.
+
+ nextgood = 0
+ do i = line+1, nlines {
+ if (badlines[1, i, 1] == 0) {
+ nextgood = i
+ break
+ }
+ }
+
+ # If the next good line is not the same as previously
+ # read the data line and store it in a buffer.
+
+ if ((nextgood != lastgood) && (nextgood != 0)) {
+ data = imgl2$t (im, nextgood)
+ call amov$t (Mem$t[data], Mem$t[nextbuf], ncols)
+ lastgood = nextgood
+ }
+
+ # Get the data line.
+ data = imgl2$t (im, line)
+
+ # Interpolate the bad columns. At the ends of the image use
+ # extension otherwise use linear interpolation.
+
+ if (line == 1) { # First line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amov$t (Mem$t[nextbuf+col1], Mem$t[data+col1],
+ col2-col1)
+ }
+ } else if (nextgood == 0) { # Last line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amov$t (Mem$t[lastbuf+col1], Mem$t[data+col1],
+ col2-col1)
+ }
+ } else { # Interpolate
+ wt1 = 1. / (nextgood - line + 1)
+ wt2 = 1. - wt1
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i] - 1
+ call awsu$t (Mem$t[nextbuf+col1], Mem$t[lastbuf+col1],
+ Mem$t[data+col1], col2-col1+1, wt1, wt2)
+ }
+ }
+ } else
+ data = imgl2$t (im, line)
+end
+
+
+# LFIXINIT -- Allocate internal buffers.
+
+procedure lfixinit$t (im)
+
+pointer im # IMIO pointer
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call malloc (lastbuf, IM_LEN(im,1), TY_PIXEL)
+ call malloc (nextbuf, IM_LEN(im,1), TY_PIXEL)
+ lastgood=0
+end
+
+# LFIXFREE -- Free memory when the last line has been obtained.
+
+procedure lfixfree$t ()
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call mfree (lastbuf, TY_PIXEL)
+ call mfree (nextbuf, TY_PIXEL)
+end
+$endfor
diff --git a/noao/imred/quadred/src/ccdproc/doc/ccdproc.hlp b/noao/imred/quadred/src/ccdproc/doc/ccdproc.hlp
new file mode 100644
index 00000000..e942a299
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/doc/ccdproc.hlp
@@ -0,0 +1,778 @@
+.help ccdproc Aug01 noao.imred.quadred
+.ih
+NAME
+ccdproc -- Process CCD images
+.ih
+SYNOPSIS
+This is the main processing task for CCD data in single image or
+\fBquadformat\fR image formats.
+.ih
+USAGE
+ccdproc images
+.ih
+PARAMETERS
+.ls images
+List of input CCD images to process. The list may include processed
+images and calibration images.
+.le
+.ls output = ""
+List of output images. If no list is given then the processing will replace
+the input images with the processed images. If a list is given it must
+match the input image list. \fINote that any dependent calibration images
+will still be processed in-place with optional backup.\fR
+.le
+.ls ccdtype = ""
+CCD image type to select from the input image list. If no type is given
+then all input images will be selected. The recognized types are described
+in \fBccdtypes\fR.
+.le
+.ls max_cache = 0
+Maximum image caching memory (in Mbytes). If there is sufficient memory
+the calibration images, such as zero level, dark count, and flat fields,
+will be cached in memory when processing many input images. This
+reduces the disk I/O and makes the task run a little faster. If the
+value is zero image caching is not used.
+.le
+.ls noproc = no
+List processing steps only?
+.le
+
+.ce
+PROCESSING SWITCHES
+.ls fixpix = yes
+Fix bad CCD lines and columns by linear interpolation from neighboring
+lines and columns? If yes then a bad pixel mask, image, or file must be
+specified.
+.le
+.ls overscan = yes
+Apply overscan or prescan bias correction? If yes then the overscan
+image section and the readout axis must be specified.
+.le
+.ls trim = yes
+Trim the image of the overscan region and bad edge lines and columns?
+If yes then the trim section must be specified.
+.le
+.ls zerocor = yes
+Apply zero level correction? If yes a zero level image must be specified.
+.le
+.ls darkcor = yes
+Apply dark count correction? If yes a dark count image must be specified.
+.le
+.ls flatcor = yes
+Apply flat field correction? If yes flat field images must be specified.
+.le
+.ls illumcor = no
+Apply illumination correction? If yes illumination images must be specified.
+.le
+.ls fringecor = no
+Apply fringe correction? If yes fringe images must be specified.
+.le
+.ls readcor = no
+Convert zero level images to readout correction images? If yes then
+zero level images are averaged across the readout axis to form one
+dimensional zero level readout correction images.
+.le
+.ls scancor = no
+Convert zero level, dark count and flat field images to scan mode flat
+field images? If yes then the form of scan mode correction is specified by
+the parameter \fIscantype\fR.
+.le
+
+.ce
+PROCESSING PARAMETERS
+.ls readaxis = "line"
+Read out axis specified as "line" or "column".
+.le
+.ls fixfile
+Bad pixel mask, image, or file. If "image" is specified then the name is
+specified in the image header or instrument translation file. If "BPM" is
+specified then the standard BPM image header keyword defines a bad pixel
+mask. A bad pixel mask is a compact format (".pl" extension) with zero
+values indicating good pixels and non-zero values indicating bad pixels. A
+bad pixel image is a regular image in which zero values are good pixels and
+non-zero values are bad pixels. A bad pixel file specifies bad pixels or
+rectangular bad pixel regions as described later. The direction of
+interpolation is determined by the mask value with a value of two
+interpolating across columns, a value of three interpolating across lines,
+and any other non-zero value interpolating along the narrowest dimension.
+.le
+.ls biassec
+Overscan bias strip image section. If "image" is specified then the overscan
+bias section is specified in the image header or instrument translation file.
+Only the part of the bias section along the readout axis is used. The
+length of the bias region fit is defined by the trim section. If one
+wants to limit the region of the overscan used in the fit to be less
+than that of the trim section then the sample region parameter,
+\fIsample\fR, should be used. It is an error if no section or the
+whole image is specified.
+.le
+.ls trimsec
+Image section for trimming. If "image" is specified then the trim image
+section is specified in the image header or instrument translation file.
+However, for \fIquadformat\fR data this parameter is not used and the trim
+sections are assumed to be in the image header.
+.le
+.ls zero = ""
+Zero level calibration image. The zero level image may be one or two
+dimensional. The CCD image type and subset are not checked for these
+images and they take precedence over any zero level calibration images
+given in the input list.
+.le
+.ls dark = ""
+Dark count calibration image. The CCD image type and subset are not checked
+for these images and they take precedence over any dark count calibration
+images given in the input list.
+.le
+.ls flat = ""
+Flat field calibration images. The flat field images may be one or
+two dimensional. The CCD image type is not checked for these
+images and they take precedence over any flat field calibration images given
+in the input list. The flat field image with the same subset as the
+input image being processed is selected.
+.le
+.ls illum = ""
+Illumination correction images. The CCD image type is not checked for these
+images and they take precedence over any illumination correction images given
+in the input list. The illumination image with the same subset as the
+input image being processed is selected.
+.le
+.ls fringe = ""
+Fringe correction images. The CCD image type is not checked for these
+images and they take precedence over any fringe correction images given
+in the input list. The fringe image with the same subset as the
+input image being processed is selected.
+.le
+.ls minreplace = 1.
+When processing flat fields, pixel values below this value (after
+all other processing such as overscan, zero, and dark corrections) are
+replaced by this value. This allows flat fields processed by \fBccdproc\fR
+to be certain to avoid divide by zero problems when applied to object
+images.
+.le
+.ls scantype = "shortscan"
+Type of scan format used in creating the CCD images. The modes are:
+.ls "shortscan"
+The CCD is scanned over a number of lines and then read out as a regular
+two dimensional image. In this mode unscanned zero level, dark count and
+flat field images are numerically scanned to form scanned calibration images
+comparable to the observations.
+.le
+.ls "longscan"
+In this mode the CCD is clocked and read out continuously to form a long
+strip. Flat fields are averaged across the readout axis to
+form a one dimensional flat field readout correction image. This assumes
+that all recorded image lines are clocked over the entire active area of the
+CCD.
+.le
+.le
+.ls nscan
+Number of object scan readout lines used in short scan mode. This parameter
+is used when the scan type is "shortscan" and the number of scan lines
+cannot be determined from the object image header (using the keyword
+nscanrows or its translation).
+.le
+
+
+.ce
+OVERSCAN FITTING PARAMETERS
+
+There are two types of overscan (or prescan) determinations. One determines
+an independent overscan value for each line and is only available for a
+\fIreadaxis\fR of 1. The other averages the overscan along the readout
+direction to make an overscan vector, fits a smoothing function to the vector,
+and then evaluates the smoothed function at each readout
+line or column. The line-by-line determination only uses the
+\fIfunction\fR parameter while the smoothing determination uses all
+the following parameters. A brief example of setting these parameters
+is given after the parameter descriptions below.
+
+.ls function = "legendre"
+Line-by-line determination of the overscan is specified by:
+
+.nf
+ mean - the mean of the biassec columns at each line
+ median - the median of the biassec columns at each line
+ minmax - the mean at each line with the min and max excluded
+.fi
+
+The smoothed overscan vector may be fit by one of the functions:
+
+.nf
+ legendre - legendre polynomial
+ chebyshev - chebyshev polynomial
+ spline1 - linear spline
+ spline3 - cubic spline
+.fi
+.le
+.ls order = 1
+Number of polynomial terms or spline pieces in the overscan fit.
+.le
+.ls sample = "*"
+Sample points to use in the overscan fit. The string "*" specifies all
+points; otherwise an \fBicfit\fR range string is used.
+.le
+.ls naverage = 1
+Number of points to average or median to form fitting points. Positive
+numbers specify averages and negative numbers specify medians.
+.le
+.ls niterate = 1
+Number of rejection iterations to remove deviant points from the overscan fit.
+If 0 then no points are rejected.
+.le
+.ls low_reject = 3., high_reject = 3.
+Low and high sigma rejection factors for rejecting deviant points from the
+overscan fit.
+.le
+.ls grow = 0.
+One dimensional growing radius for rejection of neighbors to deviant points.
+.le
+.ls interactive = no
+Fit the overscan vector interactively? If yes and the overscan function type
+is one of the \fBicfit\fR types then the average overscan vector is fit
+interactively using the \fBicfit\fR package. If no then the fitting parameters
+given above are used.
+.le
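+
+For example, the smoothed overscan fit might be configured from the CL as
+follows (the function, order, and rejection values shown are purely
+illustrative and should be chosen for the data at hand):
+
+.nf
+    cl> ccdproc.function = "chebyshev"
+    cl> ccdproc.order = 3
+    cl> ccdproc.niterate = 2
+.fi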
+
+The parameters \fIverbose\fR, \fIlogfile\fR, and \fIbackup\fR default to
+the package parameters but may be specified to override the package
+values. This is used by the \fBquadproc\fR script task. These parameters
+are described in the help topic "quadred.package".
+.ih
+DESCRIPTION
+\fBCcdproc\fR processes CCD images to correct and calibrate for
+detector defects, readout bias, zero level bias, dark counts,
+response, illumination, and fringing. It also trims unwanted
+lines and columns and changes the pixel datatype. It is efficient
+and easy to use; all one has to do is set the parameters and then
+begin processing the images. The task takes care of most of the
+record keeping and automatically does the prerequisite processing
+of calibration images. Beneath this simplicity there is much going on.
+In this section a simple description of the usage is
+given. The following sections present more detailed discussions
+on the different operations performed and the order and logic
+of the processing steps. For a user's guide to the \fBccdred\fR
+package see \fBguide\fR. Much of the ease of use derives from using
+information in the image header. If this information is missing
+see section 13.
+
+One begins by setting the task parameters. There are many parameters
+but they may be easily reviewed and modified using the task \fBeparam\fR.
+The input CCD images to be processed are given as an image list.
+Previously processed images are ignored and calibration images are
+recognized, provided the CCD image types are in the image header (see
+\fBinstruments\fR and \fBccdtypes\fR). Therefore it is permissible to
+use simple image templates such as "*.imh". The \fIccdtype\fR parameter
+may be used to select only certain types of CCD images to process
+(see \fBccdtypes\fR).
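+
+For example, to process only the object images in a directory one might
+enter (the image template is illustrative):
+
+.nf
+    cl> ccdproc *.imh ccdtype="object"
+.fi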
+
+The processing operations are selected by boolean (yes/no) parameters.
+Because calibration images are recognized and processed appropriately,
+the processing operations should be set to those needed for the object images.
+Any combination of operations may be specified and the operations are
+performed simultaneously. While it is possible to do operations in
+separate steps this is much less efficient. Two of the operation
+parameters apply only to zero level and flat field images. These
+are used for certain types of CCDs and modes of operation.
+
+The processing steps selected have related parameters which must be
+set. These are things like image sections defining the overscan and
+trim regions and calibration images. There are a number of parameters
+used for fitting the overscan or prescan bias section. These are
+parameters used by the standard IRAF curve fitting package \fBicfit\fR.
+The parameters are described in more detail in the following sections.
+
+In addition to the task parameters there are package parameters
+which affect \fBccdproc\fR. These include the instrument and subset
+files, the text and plot log files, the output pixel datatype,
+the amount of memory available for calibration image caching,
+the verbose parameter for logging to the terminal, and the backup
+prefix. These are described in \fBccdred\fR.
+
+Calibration images are specified by task parameters and/or in the
+input image list. If more than one calibration image is specified
+then the first one encountered is used and a warning is issued for the
+extra images. Calibration images specified by
+task parameters take precedence over calibration images in the input list.
+These images also need not have a CCD image type parameter since the task
+parameter identifies the type of calibration image. This method is
+best if there is only one calibration image for all images
+to be processed. This is almost always true for zero level and dark
+count images. If no calibration image is specified by task parameter
+then calibration images in the input image list are identified and
+used. This requires that the images have CCD image types recognized
+by the package. This method is useful if one may simply say "*.imh"
+as the image list to process all images or if the images are broken
+up into groups, in "@" files for example, each with their own calibration
+frames.
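+
+For example, a single zero level image and a set of flat fields might be
+given explicitly by task parameter (the calibration image names here are
+only illustrative):
+
+.nf
+    cl> ccdproc obj*.imh zero="Zero" dark="Dark" flat="Flat*"
+.fi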
+
+When an input image is processed the task first determines the processing
+parameters and calibration images. If a requested operation has been
+done it is skipped and if all requested operations have been completed then
+no processing takes place. When it determines that a calibration image
+is required it checks for the image from the task parameter and then
+for a calibration image of the proper type in the input list.
+
+Having
+selected a calibration image it checks if it has been processed by
+looking for the image header flag CCDPROC. If it is not present then
+the calibration image is processed. When any image has been processed
+the CCDPROC flag is added. For images processed directly by \fBccdproc\fR
+the individual processing flags are checked even if the CCDPROC flag is
+present. However, the automatic processing of the calibration images is
+only done if the CCDPROC flag is absent! This is to make the task more
+efficient by not having to check every flag for every calibration image
+for every input image. Thus, if additional processing
+steps are added after images have been partially reduced then input images
+will be processed for the new steps but calibration images will not be
+processed automatically.
+
+After the calibration images have been identified, and processed if
+necessary, the images may be cached in memory. This is done when there
+are more than two input images (it is actually less efficient to
+cache the calibration images for one or two input images) and the parameter
+\fImax_cache\fR is greater than zero. When caching, as many calibration
+images as allowed by the specified memory are read into memory and
+kept there for all the input images. Cached images are, therefore,
+only read once from disk which reduces the amount of disk I/O. This
+makes a modest decrease in the execution time. It is not dramatic
+because the actual processing is fairly CPU intensive.
+
+Once the processing parameters and calibration images have been determined
+the input image is processed for all the desired operations in one step;
+i.e. there are no intermediate results or images. This makes the task
+efficient. If a matching list of output images is given then the processed
+image is written to the specified output image name. If no output image
+list is given then the corrected image is output as a temporary image until
+the entire image has been processed. When the image has been completely
+processed then the original image is deleted (or renamed using the
+specified backup prefix) and the corrected image replaces the original
+image. Using a temporary image protects the data in the event of an abort
+or computer failure. Keeping the original image name eliminates much of
+the record keeping and the need to generate new image names.
+.sh
+1. Fixpix
+Regions of bad lines and columns may be replaced by linear
+interpolation from neighboring lines and columns when the parameter
+\fIfixpix\fR is set. This algorithm is the same as used in the
+task \fBfixpix\fR. The bad pixels may be specified by a pixel mask,
+an image, or a text file. For the mask or image, values of zero indicate
+good pixels and other values indicate bad pixels to be replaced.
+
+The text file consists of lines with four fields, the starting and
+ending columns and the starting and ending lines. Any number of
+regions may be specified. Comment lines beginning with the character
+'#' may be included. The description applies directly to the input
+image (before trimming) so different files are needed for previously
+trimmed or subsection readouts. The data in this file is internally
+converted to the same description as a bad pixel mask, with a value of
+two for regions which are narrower (or equal) across the columns and
+a value of three for regions which are narrower across lines.
+
+The direction of interpolation is determined from the values in the
+mask, image, or the converted text file. A value of two interpolates
+across columns, a value of three interpolates across lines, and any
+other value interpolates across the narrowest dimension of bad pixels,
+using column interpolation if the two dimensions are equal.
+
+The bad pixel description may be specified explicitly with the parameter
+\fIfixfile\fR or indirectly if the parameter has the value "image". In the
+latter case the instrument file must contain the name of the file.
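+
+For example, a bad pixel text file describing one bad column and a short
+bad line segment might look like the following (the coordinates are purely
+illustrative):
+
+.nf
+    # start_col end_col start_line end_line
+       25   25     1   512
+      101  103   251   251
+.fi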
+.sh
+2. Overscan
+If an overscan or prescan correction is specified (\fIoverscan\fR
+parameter) then the image section (\fIbiassec\fR parameter) defines
+the overscan region.
+
+There are two types of overscan (or prescan) determinations. One determines
+an independent overscan value for each line and is only available for a
+\fIreadaxis\fR of 1. The other averages the overscan along the readout
+direction to make an overscan vector, fits a smoothing function to the vector,
+and then evaluates the smoothed function at each readout
+line or column.
+
+The line-by-line determination provides a mean, a median, or a
+mean with the minimum and maximum values excluded. When the number of
+overscan columns is even the median is taken as the lower of the two
+middle values rather than their mean.
+
+The smoothed overscan vector determination uses the \fBicfit\fR options
+including interactive fitting. The fitting function is generally either a
+constant (polynomial of 1 term) or a high order function which fits the
+large scale shape of the overscan vector. Bad pixel rejection is also
+available to eliminate cosmic ray events. The function fitting may be done
+interactively using the standard \fBicfit\fR interactive graphical curve
+fitting tool. Regardless of whether the fit is done interactively, the
+overscan vector and the fit may be recorded for later review in a metacode
+plot file named by the parameter \fIccdred.plotfile\fR. The mean value of
+the bias function is also recorded in the image header and log file.
+.sh
+3. Trim
+When the parameter \fItrim\fR is set the input image will be trimmed to
+the image section given by the parameter \fItrimsec\fR. This trim
+should, of course, be the same as that used for the calibration images.
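+
+For example, the overscan and trim regions might be set as follows (the
+image sections shown are purely illustrative and depend on the detector
+and readout format):
+
+.nf
+    cl> ccdproc.biassec = "[2049:2080,1:1024]"
+    cl> ccdproc.trimsec = "[1:2048,1:1024]"
+.fi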
+.sh
+4. Zerocor
+After the readout bias is subtracted, as defined by the overscan or prescan
+region, there may still be a zero level bias. This level may be two
+dimensional or one dimensional (the same for every readout line). A
+zero level calibration is obtained by taking zero length exposures;
+generally many are taken and combined. To apply this zero
+level calibration the parameter \fIzerocor\fR is set. In addition if
+the zero level bias is only readout dependent then the parameter \fIreadcor\fR
+is set to reduce two dimensional zero level images to one dimensional
+images. The zero level images may be specified by the parameter \fIzero\fR
+or given in the input image list (provided the CCD image type is defined).
+
+When the zero level image is needed to correct an input image it is checked
+to see if it has been processed and, if not, it is processed automatically.
+Processing of zero level images consists of bad pixel replacement,
+overscan correction, trimming, and averaging to one dimension if the
+readout correction is specified.
+.sh
+5. Darkcor
+Dark counts are subtracted by scaling a dark count calibration image to
+the same exposure time as the input image and subtracting. The
+exposure time used is the dark time which may be different than the
+actual integration or exposure time. A dark count calibration image is
+obtained by taking a very long exposure with the shutter closed; i.e.
+an exposure with no light reaching the detector. The dark count
+correction is selected with the parameter \fIdarkcor\fR and the dark
+count calibration image is specified either with the parameter
+\fIdark\fR or as one of the input images. The dark count image is
+automatically processed as needed. Processing of dark count images
+consists of bad pixel replacement, overscan and zero level correction,
+and trimming.
+.sh
+6. Flatcor
+The relative detector pixel response is calibrated by dividing by a
+scaled flat field calibration image. A flat field image is obtained by
+exposure to a spatially uniform source of light such as a lamp or
+twilight sky. Flat field images may be corrected for the spectral
+signature in spectroscopic images (see \fBresponse\fR and
+\fBapnormalize\fR), or for illumination effects (see \fBmkillumflat\fR
+or \fBmkskyflat\fR). For more on flat fields and illumination corrections
+see \fBflatfields\fR. The flat field response is dependent on the
+wavelength of light so if different filters or spectroscopic wavelength
+coverage are used a flat field calibration for each one is required.
+The different flat fields are automatically selected by a subset
+parameter (see \fBsubsets\fR).
+
+Flat field calibration is selected with the parameter \fBflatcor\fR
+and the flat field images are specified with the parameter \fBflat\fR
+or as part of the input image list. The appropriate subset is automatically
+selected for each input image processed. The flat field image is
+automatically processed as needed. Processing consists of bad pixel
+replacement, overscan subtraction, zero level subtraction, dark count
+subtraction, and trimming. Also if a scan mode is used and the
+parameter \fIscancor\fR is specified then a scan mode correction is
+applied (see below). The processing also computes the mean of the
+flat field image which is used later to scale the flat field before
+division into the input image. For scan mode flat fields the ramp
+part is included in computing the mean which will affect the level
+of images processed with this flat field. Note that, in the interest of
+efficiency, there is no check for division by zero. If division by zero
+does occur a fatal error results. The flat field can be fixed by
+replacing small values using a task such as \fBimreplace\fR or
+during processing using the \fIminreplace\fR parameter. Note that the
+\fIminreplace\fR parameter only applies to flat fields processed by
+\fBccdproc\fR.
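+
+For example, small or zero values in a flat field might be replaced before
+processing with a command such as the following (the image name and
+replacement threshold are only illustrative):
+
+.nf
+    cl> imreplace Flat1.imh 1. upper=1.
+.fi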
+.sh
+7. Illumcor
+CCD images processed through the flat field calibration may not be
+completely flat (in the absence of objects). In particular, a blank
+sky image may still show gradients. This residual nonflatness is called
+the illumination pattern. It may be introduced even if the detector is
+uniformly illuminated by the sky because the flat field lamp
+illumination may be nonuniform. The illumination pattern is found from a
+blank sky, or even an object image, by heavily smoothing and rejecting
+objects using sigma clipping. The illumination calibration image is
+divided into the data being processed to remove the illumination
+pattern. The illumination pattern is a function of the subset so there
+must be an illumination correction image for each subset to be
+processed. The tasks \fBmkillumcor\fR and \fBmkskycor\fR are used to
+create the illumination correction images. For more on illumination
+corrections see \fBflatfields\fR.
+
+An alternative to treating the illumination correction as a separate
+operation is to combine the flat field and illumination correction
+into a corrected flat field image before processing the object
+images. This will save some processing time but does require creating
+the corrected flat field first rather than correcting the images at the same
+time or later. There are two methods: removing the large scale
+shape of the flat field, and combining a blank sky image illumination
+with the flat field. These methods are discussed further in the
+tasks which create them, \fBmkillumcor\fR and \fBmkskycor\fR.
+.sh
+8. Fringecor
+There may be a fringe pattern in the images due to the night sky lines.
+To remove this fringe pattern a blank sky image is heavily smoothed
+to produce an illumination image which is then subtracted from the
+original sky image. The residual fringe pattern is scaled to the
+exposure time of the image to be fringe corrected and then subtracted.
+Because the intensity of the night sky lines varies with time an
+additional scaling factor may be given in the image header.
+The fringe pattern is a function of the subset so there must be
+a fringe correction image for each subset to be processed.
+The task \fBmkfringecor\fR is used to create the fringe correction images.
+.sh
+9. Readcor
+If a zero level correction is desired (\fIzerocor\fR parameter)
+and the parameter \fIreadcor\fR is yes then a single zero level
+correction vector is applied to each readout line or column. Use of a
+readout correction rather than a two dimensional zero level image
+depends on the nature of the detector or if the CCD is operated in
+longscan mode (see below). The readout correction is specified by a
+one dimensional image (\fIzero\fR parameter) and the readout axis
+(\fIreadaxis\fR parameter). If the zero level image is two dimensional
+then it is automatically processed to a one dimensional image by
+averaging across the readout axis. Note that this modifies the zero
+level calibration image.
+.sh
+10. Scancor
+CCD detectors may be operated in several modes in astronomical
+applications. The most common is as a direct imager where each pixel
+integrates one point in the sky or spectrum. However, the design of most CCD's
+allows the sky to be scanned across the CCD while shifting the
+accumulating signal at the same rate. \fBCcdproc\fR provides for two
+scanning modes called "shortscan" and "longscan". The type of scan
+mode is set with the parameter \fIscantype\fR.
+
+In "shortscan" mode the detector is scanned over a specified number of
+lines (not necessarily at sidereal rates). The lines that scroll off the
+detector during the integration are thrown away. At the end of the
+integration the detector is read out in the same way as an unscanned
+observation. The advantage of this mode is that the small scale, zero
+level, dark count and flat field responses are averaged in one dimension
+over the number of lines scanned. A zero level, dark count or flat field may be
+observed in the same way in which case there is no difference in the
+processing from unscanned imaging and the parameter \fIscancor\fR may be
+no. If it is yes, though, checking is done to ensure that the calibration
+image used has the same number of scan lines as the object being
+processed. However, one obtains an increase in the statistical accuracy
+of the calibrations if they are not scanned during the observation but
+digitally scanned during the processing. In shortscan mode with
+\fIscancor\fR set to yes, zero level, dark count and flat field images are
+digitally scanned, if needed, by the same number of scan lines as the
+object. The number of scan lines is determined from the object image
+header using the keyword nscanrow (or its translation). If not found the
+object is assumed to have been scanned with the value given by the
+\fInscan\fR parameter. Zero, dark and flat calibration images are assumed
+to be unscanned if the header keyword is not found.
+
+If a scanned zero level, dark count or flat field image is not found
+matching the object then one may be created from the unscanned calibration
+image. The image will have the root name of the unscanned image with an
+extension of the number of scan rows; i.e. Flat1.32 is created from Flat1
+with a digital scanning of 32 lines.
+
+In "longscan" mode the detector is continuously read out to produce an
+arbitrarily long strip. Provided data which has not passed over the entire
+detector is thrown away, the zero level, dark count, and flat field
+corrections will be one dimensional. If \fIscancor\fR is specified and the
+scan mode is "longscan" then a one dimensional zero level, dark count, and
+flat field correction will be applied.
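+
+For example, shortscan data taken with 32 scan lines might be processed
+with settings such as the following (the number of scan lines is
+illustrative):
+
+.nf
+    cl> ccdproc.scancor = yes
+    cl> ccdproc.scantype = "shortscan"
+    cl> ccdproc.nscan = 32
+.fi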
+.sh
+11. Processing Steps
+The following outline describes the steps taken by the task and provides
+the most detailed specification of its operation.
+
+.ls 5 (1)
+An image to be processed is first checked that it is of the specified
+CCD image type. If it is not the desired type then go on to the next image.
+.le
+.ls (2)
+A temporary output image is created of the specified pixel data type
+(\fBccdred.pixeltype\fR). The header parameters are copied from the
+input image.
+.le
+.ls (3)
+If trimming is specified and the image has not been trimmed previously,
+the trim section is determined.
+.le
+.ls (4)
+If bad pixel replacement is specified and this has not been done
+previously, the bad pixel file is determined either from the task
+parameter or the instrument translation file. The bad pixel regions
+are read. If the image has been trimmed previously and the bad pixel
+file contains the word "untrimmed" then the bad pixel coordinates are
+translated to those of the trimmed image.
+.le
+.ls (5)
+If an overscan correction is specified and this correction has not been
+applied, the overscan section is averaged along the readout axis. If
+trimming is to be done the overscan section is trimmed to the same
+limits. A function is fit either interactively or noninteractively to
+the overscan vector. The function is used to produce the overscan
+vector to be subtracted from the image. This is done in real
+arithmetic.
+.le
+.ls (6)
+If the image is a zero level image go to processing step 12.
+If a zero level correction is desired and this correction has not been
+performed, find the zero level calibration image. If the zero level
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (7)
+If the image is a dark count image go to processing step 12.
+If a dark count correction is desired and this correction has not been
+performed, find the dark count calibration image. If the dark count
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point. The ratio of the input image dark time
+to the dark count image dark time is determined; this ratio scales
+the dark count image before it is subtracted from the input
+image.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (8)
+If the image is a flat field image go to processing step 12. If a flat
+field correction is desired and this correction has not been performed,
+find the flat field calibration image of the appropriate subset. If
+the flat field calibration image has not been processed it is processed
+at this point. This is done by going to processing step 1 for this
+image. After the calibration image has been processed, processing of
+the input image continues from this point. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (9)
+If the image is an illumination image go to processing step 12. If an
+illumination correction is desired and this correction has not been performed,
+find the illumination calibration image of the appropriate subset.
+The illumination image must have the "mkillum" processing flag or
+\fBccdproc\fR will abort with an error. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used. The processed calibration
+image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (10)
+If the image is a fringe image go to processing step 12. If a fringe
+correction is desired and this correction has not been performed,
+find the fringe calibration image of the appropriate subset.
+The fringe image must have the "mkfringe" processing flag or
+\fBccdproc\fR will abort with an error. The ratio of the input
+image exposure time to the fringe image exposure time is determined.
+If there is a fringe scaling in the image header then this factor
+is multiplied by the exposure time ratio. This factor is used
+for scaling. The processed calibration image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (11)
+If there are no processing operations flagged, delete the temporary output
+image, which has been opened but not used, and go to 14.
+.le
+.ls (12)
+The input image is processed line by line with trimmed lines ignored.
+A line of the input image is read. Bad pixel replacement and trimming
+are applied to the image. Image lines from the calibration images
+are read from disk or the image cache. If the calibration is one
+dimensional (such as a readout zero
+level correction or a longscan flat field correction) then the image
+vector is read only once. Note that IRAF image I/O is buffered for
+efficiency and accessing a line at a time does not mean that image
+lines are read from disk a line at a time. Given the input line, the
+calibration images, the overscan vector, and the various scale factors
+a special data path for each combination of corrections is used to
+perform all the processing in the most efficient manner. If the
+image is a flat field any pixels less than the \fIminreplace\fR
+parameter are replaced by that minimum value. Also a mean is
+computed for the flat field and stored as the CCDMEAN keyword and
+the time, in an internal format, when this value was calculated is stored
+in the CCDMEANT keyword. The time is checked against the image modify
+time to determine if the value is valid or needs to be recomputed.
+.le
+.ls (13)
+The input image is deleted or renamed to a backup image. The temporary
+output image is renamed to the input image name.
+.le
+.ls (14)
+If the image is a zero level image and the readout correction is specified
+then it is averaged to a one dimensional readout correction.
+.le
+.ls (15)
+If the image is a zero level, dark count, or flat field image and the scan
+mode correction is specified then the correction is applied. For shortscan
+mode a modified two dimensional image is produced while for longscan mode a
+one dimensional average image is produced.
+.le
+.ls (16)
+The processing is completed and either the next input image is processed
+beginning at step 1 or, if it is a calibration image which is being
+processed for an input image, control returns to the step which initiated
+the calibration image processing.
+.le
+.sh
+12. Processing Arithmetic
+The \fBccdproc\fR task has two data paths, one for real image pixel datatypes
+and one for the short integer pixel datatype. In addition internal arithmetic
+follows the rules of FORTRAN. For efficiency there is
+no checking for division by zero in the flat field calibration.
+The following rules describe the processing arithmetic and data paths;
+an example of selecting the output pixel datatype follows the rules.
+
+.ls (1)
+If the input, output, or any calibration image is of type real the
+real data path is used. This means all image data is converted to
+real on input. If all the images are of type short all input data
+is kept as short integers. Thus, if all the images are of the same type
+there is no datatype conversion on input resulting in greater
+image I/O efficiency.
+.le
+.ls (2)
+In the real data path the processing arithmetic is always real and,
+if the output image is of short pixel datatype, the result
+is truncated.
+.le
+.ls (3)
+The overscan vector and the scale factors for dark count, flat field,
+illumination, and fringe calibrations are always of type real. Therefore,
+in the short data path any processing which includes these operations
+will be coerced to real arithmetic and the result truncated at the end
+of the computation.
+.le
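+
+For example, the real data path can be forced regardless of the input image
+datatypes by selecting a real output pixel datatype in the package
+parameters (this assumes the package parameter accepts the single value
+"real"):
+
+.nf
+    cl> ccdred.pixeltype = "real"
+.fi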
+.sh
+13. In the Absence of Image Header Information
+The tasks in the \fBccdred\fR package are most convenient to use when
+the CCD image type, subset, and exposure time are contained in the
+image header. The ability to redefine which header parameters contain
+this information makes it possible to use the package at many different
+observatories (see \fBinstruments\fR). However, in the absence of any
+image header information the tasks may still be used effectively.
+There are two ways to proceed. One way is to use \fBccdhedit\fR
+to place the information in the image header.
+
+The second way is to specify the processing operations more explicitly
+than is needed when the header information is present. The parameter
+\fIccdtype\fR is set to "" or to "none". The calibration images are
+specified explicitly by task parameter since they cannot be recognized
+in the input list. Only one subset at a time may be processed.
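+
+For example, images lacking header information might be processed with an
+explicit command such as the following (the image and calibration names
+are only illustrative):
+
+.nf
+    cl> ccdproc obj*.imh ccdtype="" zero="Zero" flat="FlatV"
+.fi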
+
+If dark count and fringe corrections are to be applied the exposure
+times must be added to all the images. Alternatively, the dark count
+and fringe images may be scaled explicitly for each input image. This
+works because the exposure times default to 1 if they are not given in
+the image header.
+.ih
+EXAMPLES
+The user's \fBguide\fR presents a tutorial in the use of this task.
+
+1. In general all that needs to be done is to set the task parameters
+and enter
+
+ cl> ccdproc *.imh &
+
+This will run in the background and process all images which have not
+been processed previously.
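+
+2. To process a specific set of object images with explicit calibration
+images, logging the operations to the terminal, one might enter (the image
+and calibration names here are only illustrative):
+
+    cl> ccdproc obj012,obj013 zero="Zero" flat="Flat*" verbose+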
+.ih
+SEE ALSO
+package, quadformat, instruments, ccdtypes, flatfields, icfit, ccdred,
+guide, mkillumcor, mkskycor, mkfringecor
+.endhelp
diff --git a/noao/imred/quadred/src/ccdproc/doproc.x b/noao/imred/quadred/src/ccdproc/doproc.x
new file mode 100644
index 00000000..909c6f12
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/doproc.x
@@ -0,0 +1,29 @@
+include "ccdred.h"
+
+# DOPROC -- Call the appropriate processing procedure.
+#
+# There are four data type paths depending on the readout axis and
+# the calculation data type.
+
+procedure doproc (ccd)
+
+pointer ccd # CCD processing structure
+
+begin
+ switch (READAXIS (ccd)) {
+ case 1:
+ switch (CALCTYPE (ccd)) {
+ case TY_SHORT:
+ call proc1s (ccd)
+ default:
+ call proc1r (ccd)
+ }
+ case 2:
+ switch (CALCTYPE (ccd)) {
+ case TY_SHORT:
+ call proc2s (ccd)
+ default:
+ call proc2r (ccd)
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/ccdproc/generic/ccdred.h b/noao/imred/quadred/src/ccdproc/generic/ccdred.h
new file mode 100644
index 00000000..ef41f592
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/generic/ccdred.h
@@ -0,0 +1,155 @@
+# CCDRED Data Structures and Definitions
+
+# The CCD structure: This structure is used to communicate processing
+# parameters between the package procedures. It contains pointers to
+# data, calibration image IMIO pointers, scaling parameters, and the
+# correction flags. The corrections flags indicate which processing
+# operations are to be performed. The subsection parameters do not
+# include a step size. A step size is assumed. If arbitrary subsampling
+# is desired this would be the next generalization.
+
+define LEN_CCD 75 # Length of CCD structure
+
+# CCD data coordinates
+define CCD_C1 Memi[$1] # CCD starting column
+define CCD_C2 Memi[$1+1] # CCD ending column
+define CCD_L1 Memi[$1+2] # CCD starting line
+define CCD_L2 Memi[$1+3] # CCD ending line
+
+# Input data
+define IN_IM Memi[$1+4] # Input image pointer
+define IN_C1 Memi[$1+5] # Input data starting column
+define IN_C2 Memi[$1+6] # Input data ending column
+define IN_L1 Memi[$1+7] # Input data starting line
+define IN_L2 Memi[$1+8] # Input data ending line
+define IN_NSEC Memi[$1+71] # Number of input pieces
+define IN_SEC Memi[$1+72] # Pointer to sections (c1,c2,l1,l2)xn
+
+# Output data
+define OUT_IM Memi[$1+9] # Output image pointer
+define OUT_C1 Memi[$1+10] # Output data starting column
+define OUT_C2 Memi[$1+11] # Output data ending column
+define OUT_L1 Memi[$1+12] # Output data starting line
+define OUT_L2 Memi[$1+13] # Output data ending line
+define OUT_SEC Memi[$1+73] # Pointer to sections (c1,c2,l1,l2)xn
+
+# Zero level data
+define ZERO_IM Memi[$1+14] # Zero level image pointer
+define ZERO_C1 Memi[$1+15] # Zero level data starting column
+define ZERO_C2 Memi[$1+16] # Zero level data ending column
+define ZERO_L1 Memi[$1+17] # Zero level data starting line
+define ZERO_L2 Memi[$1+18] # Zero level data ending line
+
+# Dark count data
+define DARK_IM Memi[$1+19] # Dark count image pointer
+define DARK_C1 Memi[$1+20] # Dark count data starting column
+define DARK_C2 Memi[$1+21] # Dark count data ending column
+define DARK_L1 Memi[$1+22] # Dark count data starting line
+define DARK_L2 Memi[$1+23] # Dark count data ending line
+
+# Flat field data
+define FLAT_IM Memi[$1+24] # Flat field image pointer
+define FLAT_C1 Memi[$1+25] # Flat field data starting column
+define FLAT_C2 Memi[$1+26] # Flat field data ending column
+define FLAT_L1 Memi[$1+27] # Flat field data starting line
+define FLAT_L2 Memi[$1+28] # Flat field data ending line
+
+# Illumination data
+define ILLUM_IM Memi[$1+29] # Illumination image pointer
+define ILLUM_C1 Memi[$1+30] # Illumination data starting column
+define ILLUM_C2 Memi[$1+31] # Illumination data ending column
+define ILLUM_L1 Memi[$1+32] # Illumination data starting line
+define ILLUM_L2 Memi[$1+33] # Illumination data ending line
+
+# Fringe data
+define FRINGE_IM Memi[$1+34] # Fringe image pointer
+define FRINGE_C1 Memi[$1+35] # Fringe data starting column
+define FRINGE_C2 Memi[$1+36] # Fringe data ending column
+define FRINGE_L1 Memi[$1+37] # Fringe data starting line
+define FRINGE_L2 Memi[$1+38] # Fringe data ending line
+
+# Trim section
+define TRIM_C1 Memi[$1+39] # Trim starting column
+define TRIM_C2 Memi[$1+40] # Trim ending column
+define TRIM_L1 Memi[$1+41] # Trim starting line
+define TRIM_L2 Memi[$1+42] # Trim ending line
+
+# Bias section
+define BIAS_C1 Memi[$1+43] # Bias starting column
+define BIAS_C2 Memi[$1+44] # Bias ending column
+define BIAS_L1 Memi[$1+45] # Bias starting line
+define BIAS_L2 Memi[$1+46] # Bias ending line
+define BIAS_SEC Memi[$1+74] # Multiple bias sections
+
+define READAXIS Memi[$1+47] # Read out axis (1=cols, 2=lines)
+define CALCTYPE Memi[$1+48] # Calculation data type
+define NBADCOLS Memi[$1+49] # Number of column interpolation regions
+define BADCOLS Memi[$1+50] # Pointer to col interpolation regions
+define NBADLINES Memi[$1+51] # Number of line interpolation regions
+define BADLINES Memi[$1+52] # Pointer to line interpolation regions
+define OVERSCAN_VEC Memi[$1+53] # Pointer to overscan vector
+define DARKSCALE Memr[P2R($1+54)] # Dark count scale factor
+define FRINGESCALE Memr[P2R($1+55)] # Fringe scale factor
+define FLATSCALE Memr[P2R($1+56)] # Flat field scale factor
+define ILLUMSCALE Memr[P2R($1+57)] # Illumination scale factor
+define MINREPLACE Memr[P2R($1+58)] # Minimum replacement value
+define MEAN Memr[P2R($1+59)] # Mean of output image
+define COR Memi[$1+60] # Overall correction flag
+define CORS Memi[$1+61+($2-1)] # Individual correction flags
+
+# Individual components of input, output, and bias section pieces.
+define IN_SC1 Memi[IN_SEC($1)+4*$2-4]
+define IN_SC2 Memi[IN_SEC($1)+4*$2-3]
+define IN_SL1 Memi[IN_SEC($1)+4*$2-2]
+define IN_SL2 Memi[IN_SEC($1)+4*$2-1]
+define OUT_SC1 Memi[OUT_SEC($1)+4*$2-4]
+define OUT_SC2 Memi[OUT_SEC($1)+4*$2-3]
+define OUT_SL1 Memi[OUT_SEC($1)+4*$2-2]
+define OUT_SL2 Memi[OUT_SEC($1)+4*$2-1]
+define BIAS_SC1 Memi[BIAS_SEC($1)+4*$2-4]
+define BIAS_SC2 Memi[BIAS_SEC($1)+4*$2-3]
+define BIAS_SL1 Memi[BIAS_SEC($1)+4*$2-2]
+define BIAS_SL2 Memi[BIAS_SEC($1)+4*$2-1]
+
+# The correction array contains the following elements with array indices
+# given by the macro definitions.
+
+define NCORS 10 # Number of corrections
+
+define FIXPIX 1 # Fix bad pixels
+define TRIM 2 # Trim image
+define OVERSCAN 3 # Apply overscan correction
+define ZEROCOR 4 # Apply zero level correction
+define DARKCOR 5 # Apply dark count correction
+define FLATCOR 6 # Apply flat field correction
+define ILLUMCOR 7 # Apply illumination correction
+define FRINGECOR 8 # Apply fringe correction
+define FINDMEAN 9 # Find the mean of the output image
+define MINREP 10 # Check and replace minimum value
+
+# The following definitions identify the correction values in the correction
+# array. They are defined in terms of bit fields so that it is possible to
+# add corrections to form unique combination corrections. Some of
+# these combinations are implemented as compound operations for efficiency.
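+# For example, a zero level plus overscan correction is Z+O = 002B + 001B =
+# 003B, which is the ZO combination defined below.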
+
+define O 001B # overscan
+define Z 002B # zero level
+define D 004B # dark count
+define F 010B # flat field
+define I 020B # Illumination
+define Q 040B # Fringe
+
+# The following correction combinations are recognized.
+
+define ZO 003B # zero level + overscan
+define DO 005B # dark count + overscan
+define DZ 006B # dark count + zero level
+define DZO 007B # dark count + zero level + overscan
+define FO 011B # flat field + overscan
+define FZ 012B # flat field + zero level
+define FZO 013B # flat field + zero level + overscan
+define FD 014B # flat field + dark count
+define FDO 015B # flat field + dark count + overscan
+define FDZ 016B # flat field + dark count + zero level
+define FDZO 017B # flat field + dark count + zero level + overscan
+define QI 060B # fringe + illumination
diff --git a/noao/imred/quadred/src/ccdproc/generic/cor.x b/noao/imred/quadred/src/ccdproc/generic/cor.x
new file mode 100644
index 00000000..0dc21310
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/generic/cor.x
@@ -0,0 +1,695 @@
+include "ccdred.h"
+
+
+.help cor Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+cor -- Process CCD image lines
+
+These procedures are the heart of the CCD processing. They do the desired
+set of processing operations on the image line data as efficiently as
+possible. They are called by the PROC procedures. There are four procedures
+one for each readout axis and one for short and real image data.
+Some sets of operations are coded as single compound operations for efficiency.
+To keep the number of combinations managable only the most common
+combinations are coded as compound operations. The combinations
+consist of any set of line overscan, column overscan, zero level, dark
+count, and flat field and any set of illumination and fringe
+correction. The corrections are applied in place to the output vector.
+
+The column readout procedure is more complicated in order to handle
+zero level and flat field corrections specified as one dimensional
+readout corrections instead of two dimensional calibration images.
+Column readout format is probably extremely rare and the 1D readout
+corrections are used only for special types of data.
+.ih
+SEE ALSO
+proc, ccdred.h
+.endhelp -----------------------------------------------------------------------
+
+
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1s (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+short out[n] # Output data
+real overscan # Overscan value
+short zero[n] # Zero level correction
+short dark[n] # Dark count correction
+short flat[n] # Flat field correction
+short illum[n] # Illumination correction
+short fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2s (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+short out[n] # Output data
+real overscan[n] # Overscan value
+short zero[n] # Zero level correction
+short dark[n] # Dark count correction
+short flat[n] # Flat field correction
+short illum[n] # Illumination correction
+short fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+short zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+# COR1 -- Correct image lines with readout axis 1 (lines).
+
+procedure cor1r (cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, darkscale, flatscale, illumscale, frgscale)
+
+int cors[ARB] # Correction flags
+real out[n] # Output data
+real overscan # Overscan value
+real zero[n] # Zero level correction
+real dark[n] # Dark count correction
+real flat[n] # Flat field correction
+real illum[n] # Illumination correction
+real fringe[n] # Fringe correction
+int n # Number of pixels
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan
+ case Z: # zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+
+ case ZO: # zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i]
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ case DZO: # dark count + zero level + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan - zero[i] - darkscale * dark[i]
+
+ case F: # flat field
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ case FO: # flat field + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan) * flatscale / flat[i]
+ case FZ: # flat field + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ case FZO: # flat field + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale /
+ flat[i]
+ case FD: # flat field + dark count
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale / flat[i]
+ case FDO: # flat field + dark count + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZ: # flat field + dark count + zero level
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ case FDZO: # flat field + dark count + zero level + overscan
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
+
+# COR2 -- Correct lines for readout axis 2 (columns). This procedure is
+# more complex than when the readout is along the image lines because the
+# zero level and/or flat field corrections may be single readout column
+# vectors.
+
+procedure cor2r (line, cors, out, overscan, zero, dark, flat, illum,
+ fringe, n, zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+int line # Line to be corrected
+int cors[ARB] # Correction flags
+real out[n] # Output data
+real overscan[n] # Overscan value
+real zero[n] # Zero level correction
+real dark[n] # Dark count correction
+real flat[n] # Flat field correction
+real illum[n] # Illumination correction
+real fringe[n] # Fringe correction
+int n # Number of pixels
+pointer zeroim # Zero level IMIO pointer (NULL if 1D vector)
+pointer flatim # Flat field IMIO pointer (NULL if 1D vector)
+real darkscale # Dark count scale factor
+real flatscale # Flat field scale factor
+real illumscale # Illumination scale factor
+real frgscale # Fringe scale factor
+
+real zeroval
+real flatval
+int i, op
+
+begin
+ op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+ switch (op) {
+ case O: # overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i]
+ case Z: # zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval
+ }
+
+ case ZO: # zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval
+ }
+
+ case D: # dark count
+ do i = 1, n
+ out[i] = out[i] - darkscale * dark[i]
+ case DO: # dark count + overscan
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - darkscale * dark[i]
+ case DZ: # dark count + zero level
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - zero[i] - darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - zeroval - darkscale * dark[i]
+ }
+ case DZO: # dark count + zero level + overscan
+ if (zeroim != NULL)
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]
+ else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]
+ }
+
+ case F: # flat field
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = out[i] * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = out[i] * flatval
+ }
+ case FO: # flat field + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i]) * flatval
+ }
+ case FZ: # flat field + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval) * flatval
+ }
+ }
+ case FZO: # flat field + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval) * flatval
+ }
+ }
+ case FD: # flat field + dark count
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatscale/flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - darkscale * dark[i]) * flatval
+ }
+ case FDO: # flat field + dark count + overscan
+ if (flatim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ flatval = flatscale / flat[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - darkscale * dark[i]) *
+ flatval
+ }
+ case FDZ: # flat field + dark count + zero level
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - zero[i] - darkscale * dark[i]) *
+ flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - zeroval - darkscale * dark[i]) *
+ flatval
+ }
+ }
+ case FDZO: # flat field + dark count + zero level + overscan
+ if (flatim != NULL) {
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatscale / flat[i]
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatscale / flat[i]
+ }
+ } else {
+ flatval = flatscale / flat[line]
+ if (zeroim != NULL) {
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zero[i] -
+ darkscale * dark[i]) * flatval
+ } else {
+ zeroval = zero[line]
+ do i = 1, n
+ out[i] = (out[i] - overscan[i] - zeroval -
+ darkscale * dark[i]) * flatval
+ }
+ }
+ }
+
+ # Often these operations will not be performed so test for no
+ # correction rather than go through the switch.
+
+ op = cors[ILLUMCOR] + cors[FRINGECOR]
+ if (op != 0) {
+ switch (op) {
+ case I: # illumination
+ do i = 1, n
+ out[i] = out[i] * illumscale / illum[i]
+ case Q: # fringe
+ do i = 1, n
+ out[i] = out[i] - frgscale * fringe[i]
+ case QI: # fringe + illumination
+ do i = 1, n
+ out[i] = out[i]*illumscale/illum[i] - frgscale*fringe[i]
+ }
+ }
+end
+
diff --git a/noao/imred/quadred/src/ccdproc/generic/corinput.x b/noao/imred/quadred/src/ccdproc/generic/corinput.x
new file mode 100644
index 00000000..07afaa41
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/generic/corinput.x
@@ -0,0 +1,436 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+# CORINPUT -- Get an input image line, fix the bad pixels, and trim.
+# Return the corrected input line in the output array.
+
+procedure corinputs (in, line, ccd, output, ncols)
+
+pointer in # Input IMIO pointer
+int line # Corrected output line
+pointer ccd # CCD pointer
+short output[ncols] # Output data (returned)
+int ncols # Number of output columns
+
+int i, inline
+pointer inbuf, imgl2s()
+
+begin
+ # Determine the input line in terms of the trimmed output line.
+ if (IN_SEC(ccd) == NULL)
+ inline = IN_L1(ccd) + line - 1
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (line < OUT_SL1(ccd,i) || line > OUT_SL2(ccd,i))
+ next
+ inline = IN_SL1(ccd,i) + line - OUT_SL1(ccd,i)
+ break
+ }
+ }
+
+ # If there are bad lines call a procedure to fix them. Otherwise
+ # read the image line directly.
+
+ if (NBADLINES(ccd) != 0)
+ call lfixs (in, inline, Mems[BADLINES(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADLINES(ccd), inbuf)
+ else
+ inbuf = imgl2s (in, inline)
+
+    # If there are bad columns call a procedure to fix them.
+ if (NBADCOLS(ccd) != 0)
+ call cfixs (inline, Mems[BADCOLS(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADCOLS(ccd), Mems[inbuf])
+
+ # Move the pixels to the output line.
+ if (IN_SEC(ccd) == NULL)
+ call amovs (Mems[inbuf+IN_C1(ccd)-OUT_C1(ccd)], output, ncols)
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (inline < IN_SL1(ccd,i) || inline > IN_SL2(ccd,i))
+ next
+ call amovs (Mems[inbuf+IN_SC1(ccd,i)-OUT_C1(ccd)],
+ output[OUT_SC1(ccd,i)], OUT_SC2(ccd,i)-OUT_SC1(ccd,i)+1)
+ }
+ }
+end
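+
+# Note on the section mapping above: when the image was read out through
+# multiple amplifiers (IN_SEC != NULL), each output line is located in the
+# section whose output line range contains it and the matching input line
+# is found by offsetting from that section's origin; the final copy then
+# moves each section's columns into place in the output line.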
+
+
+# CFIX -- Interpolate across bad columns defined in the bad column array.
+
+procedure cfixs (line, badcols, ncols, nlines, nbadcols, data)
+
+int line # Line to be fixed
+short badcols[2, nlines, nbadcols] # Bad column array
+int ncols # Number of columns
+int nlines # Number of lines
+int nbadcols # Number of bad column regions
+short data[ncols] # Data to be fixed
+
+short val
+real del
+int i, j, col1, col2
+
+begin
+ do i = 1, nbadcols {
+ col1 = badcols[1, line, i]
+ if (col1 == 0) # No bad columns
+ return
+ col2 = badcols[2, line, i]
+ if (col1 == 1) { # Bad first column
+ val = data[col2+1]
+ do j = col1, col2
+ data[j] = val
+ } else if (col2 == ncols) { # Bad last column
+ val = data[col1-1]
+ do j = col1, col2
+ data[j] = val
+ } else { # Interpolate
+ del = (data[col2+1] - data[col1-1]) / (col2 - col1 + 2)
+ val = data[col1-1] + del
+ do j = col1, col2
+ data[j] = val + (j - col1) * del
+ }
+ }
+end
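+
+# A worked example of the interpolation above (hypothetical values): if
+# columns 5-6 of a line are bad, with data[4] = 10 and data[7] = 16, then
+#
+#     del     = (data[7] - data[4]) / (6 - 5 + 2) = 6 / 3 = 2
+#     data[5] = data[4] + del         = 12
+#     data[6] = data[4] + del + 1*del = 14
+#
+# so the replaced pixels lie on the straight line joining the two good
+# neighbors (10, 12, 14, 16).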
+
+
+# LFIX -- Get image line and replace bad pixels by interpolation from
+# neighboring lines. Internal buffers are used to keep the last fixed
+# line and the next good line. They are allocated with LFIXINIT and
+# freed with LFIXFREE.
+
+procedure lfixs (im, line, badlines, ncols, nlines, nbadlines, data)
+
+pointer im # IMIO pointer
+int line # Line to be obtained and fixed
+short badlines[2,nlines,nbadlines] # Bad line region array
+int ncols # Number of columns in image
+int nlines # Number of lines in images
+int nbadlines # Number of bad line regions
+pointer data # Data line pointer (returned)
+
+real wt1, wt2
+int i, nextgood, lastgood, col1, col2
+pointer imgl2s()
+
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ # If this line has bad pixels replace them. Otherwise just
+ # read the line.
+
+ if (badlines[1, line, 1] != 0) {
+ # Save the last line which has already been fixed.
+ if (line != 1)
+ call amovs (Mems[data], Mems[lastbuf], ncols)
+
+ # Determine the next line with no bad line pixels. Note that
+ # this requirement is overly strict since the bad columns
+ # may not be the same in neighboring lines.
+
+ nextgood = 0
+ do i = line+1, nlines {
+ if (badlines[1, i, 1] == 0) {
+ nextgood = i
+ break
+ }
+ }
+
+        # If the next good line is not the one already buffered,
+        # read the data line and store it in the buffer.
+
+ if ((nextgood != lastgood) && (nextgood != 0)) {
+ data = imgl2s (im, nextgood)
+ call amovs (Mems[data], Mems[nextbuf], ncols)
+ lastgood = nextgood
+ }
+
+ # Get the data line.
+ data = imgl2s (im, line)
+
+ # Interpolate the bad columns. At the ends of the image use
+ # extension otherwise use linear interpolation.
+
+ if (line == 1) { # First line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amovs (Mems[nextbuf+col1], Mems[data+col1],
+ col2-col1)
+ }
+ } else if (nextgood == 0) { # Last line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amovs (Mems[lastbuf+col1], Mems[data+col1],
+ col2-col1)
+ }
+ } else { # Interpolate
+ wt1 = 1. / (nextgood - line + 1)
+ wt2 = 1. - wt1
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i] - 1
+ call awsus (Mems[nextbuf+col1], Mems[lastbuf+col1],
+ Mems[data+col1], col2-col1+1, wt1, wt2)
+ }
+ }
+ } else
+ data = imgl2s (im, line)
+end
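+
+# A worked example of the interpolation weights above (hypothetical line
+# numbers): if line 5 is bad, line 4 is the last fixed line, and line 7 is
+# the next good line, then
+#
+#     wt1 = 1. / (7 - 5 + 1) = 1/3    # weight of the next good line
+#     wt2 = 1. - wt1         = 2/3    # weight of the previous line
+#
+# so the nearer line contributes twice the weight of the farther one,
+# i.e. ordinary inverse-distance (linear) interpolation between the two.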
+
+
+# LFIXINIT -- Allocate internal buffers.
+
+procedure lfixinits (im)
+
+pointer im # IMIO pointer
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call malloc (lastbuf, IM_LEN(im,1), TY_SHORT)
+ call malloc (nextbuf, IM_LEN(im,1), TY_SHORT)
+ lastgood=0
+end
+
+# LFIXFREE -- Free memory when the last line has been obtained.
+
+procedure lfixfrees ()
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call mfree (lastbuf, TY_SHORT)
+ call mfree (nextbuf, TY_SHORT)
+end
+
+# CORINPUT -- Get an input image line, fix the bad pixels, and trim.
+# Return the corrected input line in the output array.
+
+procedure corinputr (in, line, ccd, output, ncols)
+
+pointer in # Input IMIO pointer
+int line # Corrected output line
+pointer ccd # CCD pointer
+real output[ncols] # Output data (returned)
+int ncols # Number of output columns
+
+int i, inline
+pointer inbuf, imgl2r()
+
+begin
+ # Determine the input line in terms of the trimmed output line.
+ if (IN_SEC(ccd) == NULL)
+ inline = IN_L1(ccd) + line - 1
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (line < OUT_SL1(ccd,i) || line > OUT_SL2(ccd,i))
+ next
+ inline = IN_SL1(ccd,i) + line - OUT_SL1(ccd,i)
+ break
+ }
+ }
+
+ # If there are bad lines call a procedure to fix them. Otherwise
+ # read the image line directly.
+
+ if (NBADLINES(ccd) != 0)
+ call lfixr (in, inline, Mems[BADLINES(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADLINES(ccd), inbuf)
+ else
+ inbuf = imgl2r (in, inline)
+
+    # If there are bad columns call a procedure to fix them.
+ if (NBADCOLS(ccd) != 0)
+ call cfixr (inline, Mems[BADCOLS(ccd)], IM_LEN(in,1),
+ IM_LEN(in,2), NBADCOLS(ccd), Memr[inbuf])
+
+ # Move the pixels to the output line.
+ if (IN_SEC(ccd) == NULL)
+ call amovr (Memr[inbuf+IN_C1(ccd)-OUT_C1(ccd)], output, ncols)
+ else {
+ do i = 1, IN_NSEC(ccd) {
+ if (inline < IN_SL1(ccd,i) || inline > IN_SL2(ccd,i))
+ next
+ call amovr (Memr[inbuf+IN_SC1(ccd,i)-OUT_C1(ccd)],
+ output[OUT_SC1(ccd,i)], OUT_SC2(ccd,i)-OUT_SC1(ccd,i)+1)
+ }
+ }
+end
+
+
+# CFIX -- Interpolate across bad columns defined in the bad column array.
+
+procedure cfixr (line, badcols, ncols, nlines, nbadcols, data)
+
+int line # Line to be fixed
+short badcols[2, nlines, nbadcols] # Bad column array
+int ncols # Number of columns
+int nlines # Number of lines
+int nbadcols # Number of bad column regions
+real data[ncols] # Data to be fixed
+
+real val
+real del
+int i, j, col1, col2
+
+begin
+ do i = 1, nbadcols {
+ col1 = badcols[1, line, i]
+ if (col1 == 0) # No bad columns
+ return
+ col2 = badcols[2, line, i]
+ if (col1 == 1) { # Bad first column
+ val = data[col2+1]
+ do j = col1, col2
+ data[j] = val
+ } else if (col2 == ncols) { # Bad last column
+ val = data[col1-1]
+ do j = col1, col2
+ data[j] = val
+ } else { # Interpolate
+ del = (data[col2+1] - data[col1-1]) / (col2 - col1 + 2)
+ val = data[col1-1] + del
+ do j = col1, col2
+ data[j] = val + (j - col1) * del
+ }
+ }
+end
+
+
+# LFIX -- Get image line and replace bad pixels by interpolation from
+# neighboring lines. Internal buffers are used to keep the last fixed
+# line and the next good line. They are allocated with LFIXINIT and
+# freed with LFIXFREE.
+
+procedure lfixr (im, line, badlines, ncols, nlines, nbadlines, data)
+
+pointer im # IMIO pointer
+int line # Line to be obtained and fixed
+short badlines[2,nlines,nbadlines] # Bad line region array
+int ncols # Number of columns in image
+int nlines # Number of lines in images
+int nbadlines # Number of bad line regions
+pointer data # Data line pointer (returned)
+
+real wt1, wt2
+int i, nextgood, lastgood, col1, col2
+pointer imgl2r()
+
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ # If this line has bad pixels replace them. Otherwise just
+ # read the line.
+
+ if (badlines[1, line, 1] != 0) {
+ # Save the last line which has already been fixed.
+ if (line != 1)
+ call amovr (Memr[data], Memr[lastbuf], ncols)
+
+ # Determine the next line with no bad line pixels. Note that
+ # this requirement is overly strict since the bad columns
+ # may not be the same in neighboring lines.
+
+ nextgood = 0
+ do i = line+1, nlines {
+ if (badlines[1, i, 1] == 0) {
+ nextgood = i
+ break
+ }
+ }
+
+        # If the next good line is not the one already buffered,
+        # read the data line and store it in the buffer.
+
+ if ((nextgood != lastgood) && (nextgood != 0)) {
+ data = imgl2r (im, nextgood)
+ call amovr (Memr[data], Memr[nextbuf], ncols)
+ lastgood = nextgood
+ }
+
+ # Get the data line.
+ data = imgl2r (im, line)
+
+ # Interpolate the bad columns. At the ends of the image use
+ # extension otherwise use linear interpolation.
+
+ if (line == 1) { # First line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amovr (Memr[nextbuf+col1], Memr[data+col1],
+ col2-col1)
+ }
+ } else if (nextgood == 0) { # Last line is bad
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i]
+ call amovr (Memr[lastbuf+col1], Memr[data+col1],
+ col2-col1)
+ }
+ } else { # Interpolate
+ wt1 = 1. / (nextgood - line + 1)
+ wt2 = 1. - wt1
+ do i = 1, nbadlines {
+ col1 = badlines[1,line,i] - 1
+ if (col1 == -1)
+ break
+ col2 = badlines[2,line,i] - 1
+ call awsur (Memr[nextbuf+col1], Memr[lastbuf+col1],
+ Memr[data+col1], col2-col1+1, wt1, wt2)
+ }
+ }
+ } else
+ data = imgl2r (im, line)
+end
+
+
+# LFIXINIT -- Allocate internal buffers.
+
+procedure lfixinitr (im)
+
+pointer im # IMIO pointer
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call malloc (lastbuf, IM_LEN(im,1), TY_REAL)
+ call malloc (nextbuf, IM_LEN(im,1), TY_REAL)
+ lastgood=0
+end
+
+# LFIXFREE -- Free memory when the last line has been obtained.
+
+procedure lfixfreer ()
+
+int lastgood
+pointer lastbuf, nextbuf
+common /lfixcom/ lastbuf, nextbuf, lastgood
+
+begin
+ call mfree (lastbuf, TY_REAL)
+ call mfree (nextbuf, TY_REAL)
+end
+
diff --git a/noao/imred/quadred/src/ccdproc/generic/mkpkg b/noao/imred/quadred/src/ccdproc/generic/mkpkg
new file mode 100644
index 00000000..0f12b368
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/generic/mkpkg
@@ -0,0 +1,12 @@
+# Make CCDRED Package.
+
+$checkout libpkg.a ../
+$update libpkg.a
+$checkin libpkg.a ../
+$exit
+
+libpkg.a:
+ cor.x ccdred.h
+ corinput.x ccdred.h <imhdr.h>
+ proc.x ccdred.h <imhdr.h>
+ ;
diff --git a/noao/imred/quadred/src/ccdproc/generic/proc.x b/noao/imred/quadred/src/ccdproc/generic/proc.x
new file mode 100644
index 00000000..0251f4f8
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/generic/proc.x
@@ -0,0 +1,678 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+.help proc Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+proc -- Process CCD images
+
+These are the main CCD reduction procedures. There is one for each
+readout axis (lines or columns) and one for short and real image data.
+They apply corrections for bad pixels, overscan levels, zero levels,
+dark counts, flat field response, illumination response, and fringe
+effects. The image is also trimmed if it was mapped with an image
+section. The mean value for the output image is computed when the flat
+field or illumination image is processed to form the scale factor for
+these calibrations in order to avoid reading through these images a
+second time.
+
+The processing information and parameters are specified in the CCD
+structure. The processing operations to be performed are specified by
+the correction array CORS in the ccd structure. There is one array
+element for each operation with indices defined symbolically by macro
+definitions (see ccdred.h); i.e. FLATCOR. The value of the array
+element is an integer bit field in which the bit set is the same as the
+array index; i.e element 3 will have the third bit set for an operation
+with array value 2**(3-1)=4. If an operation is not to be performed
+the bit is not set and the array element has the numeric value zero.
+Note that the addition of several correction elements gives a unique
+bit field describing a combination of operations. For efficiency the
+most common combinations are implemented as separate units.
+
+The CCD structure also contains the correction or calibration data
+consisting of either pointers to the data or IMIO pointers for the calibration
+images, and scale factors.
+
+The processing is performed line-by-line. The procedure CORINPUT is
+called to get an input line. This procedure trims and fixes bad pixels by
+interpolation. The output line and lines from the various calibration
+images are read. The image vectors as well as the overscan vector and
+the scale factors are passed to the procedure COR (which also
+dereferences the pointer data into simple arrays and variables). That
+procedure does the actual corrections apart from bad pixel
+corrections.
+
+The final optional step is to add each corrected output line to form a
+mean. This adds efficiency since the operation is done only if desired
+and the output image data is already in memory so there is no I/O
+penalty.
+
+SEE ALSO
+ ccdred.h, cor, fixpix, setfixpix, setoverscan, settrim,
+ setzero, setdark, setflat, setillum, setfringe
+.endhelp ----------------------------------------------------------------------
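+
+# For example, if only the zero level and flat field corrections are
+# selected, cors[ZEROCOR] and cors[FLATCOR] hold their single-bit values
+# (2**(index-1), as described above) and the other elements are zero, so
+# the sum
+#
+#     op = cors[OVERSCAN] + cors[ZEROCOR] + cors[DARKCOR] + cors[FLATCOR]
+#
+# is the unique code decoded by the switch in COR as the combined case FZ
+# (flat field + zero level).  The actual bit positions are taken from the
+# symbolic definitions in ccdred.h.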
+
+
+
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1s (ccd)
+
+pointer ccd # CCD structure
+
+int i, line, nlin, ncols, nlines, findmean, rep
+int c1, c2, l1, l2
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+short minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asums()
+pointer imgl2s(), impl2s(), ccd_gls()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ nlin = IM_LEN(in,2)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinits (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_gls (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_gls (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+	    darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_gls (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR1 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2s (out, OUT_L1(ccd)+line-1)
+ call corinputs (in, line, ccd, Mems[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+line-1]
+ if (zeroim != NULL)
+ zerobuf = ccd_gls (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gls (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gls (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gls (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gls (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ if (OUT_SEC(ccd) == NULL) {
+ call cor1s (CORS(ccd,1), Mems[outbuf],
+ overscan, Mems[zerobuf], Mems[darkbuf],
+ Mems[flatbuf], Mems[illumbuf], Mems[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ } else {
+ do i = 1, IN_NSEC(ccd) {
+ l1 = OUT_SL1(ccd,i)
+ l2 = OUT_SL2(ccd,i)
+ if (line < l1 || line > l2)
+ next
+ c1 = OUT_SC1(ccd,i) - 1
+ c2 = OUT_SC2(ccd,i) - 1
+ ncols = c2 - c1 + 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+(i-1)*nlin+line-l1]
+
+ call cor1s (CORS(ccd,1), Mems[outbuf+c1],
+ overscan, Mems[zerobuf+c1], Mems[darkbuf+c1],
+ Mems[flatbuf+c1], Mems[illumbuf+c1],
+ Mems[fringebuf+c1], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ }
+ }
+
+ if (rep == YES)
+ call amaxks (Mems[outbuf], minrep, Mems[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asums (Mems[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfrees ()
+end
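+
+# Note on the multi-section (OUT_SEC != NULL) branch above: the overscan
+# values appear to be stored as one vector of length nlin per section,
+# concatenated in section order, which is why the value for section i at
+# a given output line is indexed as Memr[overscan_vec+(i-1)*nlin+line-l1].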
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2s (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+short minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asums()
+pointer imgl2s(), impl2s(), imgs2s(), ccd_gls()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinits (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2s (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2s (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2s (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2s (out, OUT_L1(ccd)+line-1)
+ call corinputs (in, line, ccd, Mems[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_gls (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gls (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gls (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gls (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gls (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor2s (line, CORS(ccd,1), Mems[outbuf],
+ Memr[overscan_vec], Mems[zerobuf], Mems[darkbuf],
+ Mems[flatbuf], Mems[illumbuf], Mems[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxks (Mems[outbuf], minrep, Mems[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asums (Mems[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovs (
+ Mems[imgl2s(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mems[impl2s(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfrees ()
+end
+
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1r (ccd)
+
+pointer ccd # CCD structure
+
+int i, line, nlin, ncols, nlines, findmean, rep
+int c1, c2, l1, l2
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+real minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asumr()
+pointer imgl2r(), impl2r(), ccd_glr()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ nlin = IM_LEN(in,2)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinitr (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_glr (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_glr (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+	    darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_glr (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR1 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2r (out, OUT_L1(ccd)+line-1)
+ call corinputr (in, line, ccd, Memr[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+line-1]
+ if (zeroim != NULL)
+ zerobuf = ccd_glr (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_glr (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_glr (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_glr (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_glr (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ if (OUT_SEC(ccd) == NULL) {
+ call cor1r (CORS(ccd,1), Memr[outbuf],
+ overscan, Memr[zerobuf], Memr[darkbuf],
+ Memr[flatbuf], Memr[illumbuf], Memr[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ } else {
+ do i = 1, IN_NSEC(ccd) {
+ l1 = OUT_SL1(ccd,i)
+ l2 = OUT_SL2(ccd,i)
+ if (line < l1 || line > l2)
+ next
+ c1 = OUT_SC1(ccd,i) - 1
+ c2 = OUT_SC2(ccd,i) - 1
+ ncols = c2 - c1 + 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+(i-1)*nlin+line-l1]
+
+ call cor1r (CORS(ccd,1), Memr[outbuf+c1],
+ overscan, Memr[zerobuf+c1], Memr[darkbuf+c1],
+ Memr[flatbuf+c1], Memr[illumbuf+c1],
+ Memr[fringebuf+c1], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ }
+ }
+
+ if (rep == YES)
+ call amaxkr (Memr[outbuf], minrep, Memr[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asumr (Memr[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfreer ()
+end
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2r (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+real minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+real asumr()
+pointer imgl2r(), impl2r(), imgs2r(), ccd_glr()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinitr (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2r (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2r (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2r (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2r (out, OUT_L1(ccd)+line-1)
+ call corinputr (in, line, ccd, Memr[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_glr (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_glr (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_glr (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_glr (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_glr (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ call cor2r (line, CORS(ccd,1), Memr[outbuf],
+ Memr[overscan_vec], Memr[zerobuf], Memr[darkbuf],
+ Memr[flatbuf], Memr[illumbuf], Memr[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxkr (Memr[outbuf], minrep, Memr[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asumr (Memr[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amovr (
+ Memr[imgl2r(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Memr[impl2r(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfreer ()
+end
+
diff --git a/noao/imred/quadred/src/ccdproc/hdrmap.com b/noao/imred/quadred/src/ccdproc/hdrmap.com
new file mode 100644
index 00000000..5aa74185
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/hdrmap.com
@@ -0,0 +1,4 @@
+# Common for HDRMAP package.
+
+pointer stp # Symbol table pointer
+common /hdmcom/ stp
diff --git a/noao/imred/quadred/src/ccdproc/hdrmap.x b/noao/imred/quadred/src/ccdproc/hdrmap.x
new file mode 100644
index 00000000..ebcb253e
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/hdrmap.x
@@ -0,0 +1,544 @@
+include <error.h>
+include <syserr.h>
+
+.help hdrmap
+.nf-----------------------------------------------------------------------------
+HDRMAP -- Map translation between task parameters and image header parameters.
+
+In order for tasks to be partially independent of the image header
+parameter names used by different instruments and observatories a
+translation is made between task parameters and image header
+parameters. This translation is given in a file consisting of the task
+parameter name, the image header parameter name, and an optional
+default value. This file is turned into a symbol table. If the
+translation file is not found a null pointer is returned. The package will
+then use the task parameter names directly. Also if there is no
+translation given in the file for a particular parameter it is passed
+on directly. If a parameter is not in the image header then the symbol
+table default value, if given, is returned. This package is layered on
+the IMIO header package.
+
+ hdmopen (fname)
+ hdmclose ()
+ hdmwrite (fname, mode)
+	hdmname (parameter, str, max_char)
+	hdmpname (parameter, str)
+	hdmgdef (parameter, str, max_char)
+	hdmpdef (parameter, str)
+ y/n = hdmaccf (im, parameter)
+ hdmgstr (im, parameter, str, max_char)
+ ival = hdmgeti (im, parameter)
+ rval = hdmgetr (im, parameter)
+ hdmpstr (im, parameter, str)
+ hdmputi (im, parameter, value)
+ hdmputr (im, parameter, value)
+ hdmgstp (stp)
+ hdmpstp (stp)
+ hdmdelf (im, parameter)
+ hdmparm (name, parameter, max_char)
+
+hdmopen -- Open the translation file and map it into a symbol table pointer.
+hdmclose -- Close the symbol table pointer.
+hdmwrite -- Write out translation file.
+hdmname -- Return the image header parameter name.
+hdmpname -- Put the image header parameter name.
+hdmgdef -- Get the default value as a string (null if none).
+hdmpdef -- Put the default value as a string.
+hdmaccf -- Return whether the image header parameter exists (regardless of
+ whether there is a default value).
+hdmgstr -- Get a string valued parameter. Return default value if not in the
+ image header. Return null string if no default or image value.
+hdmgeti -- Get an integer valued parameter. Return default value if not in
+ the image header and error condition if no default or image value.
+hdmgetr -- Get a real valued parameter. Return default value if not in
+ the image header or error condition if no default or image value.
+hdmpstr -- Put a string valued parameter in the image header.
+hdmputi -- Put an integer valued parameter in the image header.
+hdmputr -- Put a real valued parameter in the image header.
+hdmgstp -- Get the symbol table pointer to save it while another map is used.
+hdmpstp -- Put the symbol table pointer to restore a map.
+hdmdelf -- Delete a field.
+hdmparm -- Return the parameter name corresponding to an image header name.
+.endhelp -----------------------------------------------------------------------
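+
+# A translation file is a plain text table of up to three words per line:
+# the task parameter name, the image header keyword it maps to, and an
+# optional default value.  A minimal hypothetical example:
+#
+#     exptime         itime           1.
+#     darktime        darktime
+#     imagetyp        data-typ
+#
+# Lines whose first word begins with '#' are treated as comments and
+# skipped when the file is read.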
+
+# Symbol table definitions.
+define LEN_INDEX 32 # Length of symtab index
+define LEN_STAB 1024 # Length of symtab string buffer
+define SZ_SBUF 128 # Size of symtab string buffer
+
+define SZ_NAME 79 # Size of translation symbol name
+define SZ_DEFAULT 79 # Size of default string
+define SYMLEN 80 # Length of symbol structure
+
+# Symbol table structure
+define NAME Memc[P2C($1)] # Translation name for symbol
+define DEFAULT Memc[P2C($1+40)] # Default value of parameter
+
+
+# HDMOPEN -- Open the translation file and map it into a symbol table pointer.
+
+procedure hdmopen (fname)
+
+char fname[ARB] # Image header map file
+
+int fd, open(), fscan(), nscan(), errcode()
+pointer sp, parameter, sym, stopen(), stenter()
+include "hdrmap.com"
+
+begin
+ # Create an empty symbol table.
+ stp = stopen (fname, LEN_INDEX, LEN_STAB, SZ_SBUF)
+
+ # Return if file not found.
+ iferr (fd = open (fname, READ_ONLY, TEXT_FILE)) {
+ if (errcode () != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ call smark (sp)
+ call salloc (parameter, SZ_NAME, TY_CHAR)
+
+    # Read the file and enter the translations in the symbol table.
+ while (fscan(fd) != EOF) {
+ call gargwrd (Memc[parameter], SZ_NAME)
+ if ((nscan() == 0) || (Memc[parameter] == '#'))
+ next
+ sym = stenter (stp, Memc[parameter], SYMLEN)
+ call gargwrd (NAME(sym), SZ_NAME)
+ call gargwrd (DEFAULT(sym), SZ_DEFAULT)
+ }
+
+ call close (fd)
+ call sfree (sp)
+end
+
+
+# HDMCLOSE -- Close the symbol table pointer.
+
+procedure hdmclose ()
+
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ call stclose (stp)
+end
+
+
+# HDMWRITE -- Write out translation file.
+
+procedure hdmwrite (fname, mode)
+
+char fname[ARB] # Image header map file
+int mode # Access mode (APPEND, NEW_FILE)
+
+int fd, open(), stridxs()
+pointer sym, sthead(), stnext(), stname()
+errchk open
+include "hdrmap.com"
+
+begin
+ # If there is no symbol table do nothing.
+ if (stp == NULL)
+ return
+
+ fd = open (fname, mode, TEXT_FILE)
+
+ sym = sthead (stp)
+ for (sym = sthead (stp); sym != NULL; sym = stnext (stp, sym)) {
+ if (stridxs (" ", Memc[stname (stp, sym)]) > 0)
+ call fprintf (fd, "'%s'%30t")
+ else
+ call fprintf (fd, "%s%30t")
+ call pargstr (Memc[stname (stp, sym)])
+ if (stridxs (" ", NAME(sym)) > 0)
+ call fprintf (fd, " '%s'%10t")
+ else
+ call fprintf (fd, " %s%10t")
+ call pargstr (NAME(sym))
+ if (DEFAULT(sym) != EOS) {
+ if (stridxs (" ", DEFAULT(sym)) > 0)
+ call fprintf (fd, " '%s'")
+ else
+ call fprintf (fd, " %s")
+ call pargstr (DEFAULT(sym))
+ }
+ call fprintf (fd, "\n")
+ }
+
+ call close (fd)
+end
+
+
+# HDMNAME -- Return the image header parameter name
+
+procedure hdmname (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing mapped parameter name
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (NAME(sym), str, max_char)
+ else
+ call strcpy (parameter, str, max_char)
+end
+
+
+# HDMPNAME -- Put the image header parameter name
+
+procedure hdmpname (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing mapped parameter name
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ DEFAULT(sym) = EOS
+ }
+
+ call strcpy (str, NAME(sym), SZ_NAME)
+end
+
+
+# HDMGDEF -- Get the default value as a string (null string if none).
+
+procedure hdmgdef (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing default value
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (DEFAULT(sym), str, max_char)
+ else
+ str[1] = EOS
+end
+
+
+# HDMPDEF -- Put the default value as a string.
+
+procedure hdmpdef (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing default value
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ call strcpy (parameter, NAME(sym), SZ_NAME)
+ }
+
+ call strcpy (str, DEFAULT(sym), SZ_DEFAULT)
+end
+
+
+# HDMACCF -- Return whether the image header parameter exists (regardless of
+# whether there is a default value).
+
+int procedure hdmaccf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int imaccf()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ return (imaccf (im, NAME(sym)))
+ else
+ return (imaccf (im, parameter))
+end
+
+
+# HDMGSTR -- Get a string valued parameter. Return default value if not in
+# the image header. Return null string if no default or image value.
+
+procedure hdmgstr (im, parameter, str, max_char)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[max_char] # String value to return
+int max_char # Maximum characters in returned string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (call imgstr (im, NAME(sym), str, max_char))
+ call strcpy (DEFAULT(sym), str, max_char)
+ } else {
+ iferr (call imgstr (im, parameter, str, max_char))
+ str[1] = EOS
+ }
+end
+
+
+# HDMGETR -- Get a real valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+real procedure hdmgetr (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctor()
+real value, imgetr()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgetr (im, NAME(sym))) {
+ ip = 1
+ if (ctor (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETR: No value found")
+ }
+ } else
+ value = imgetr (im, parameter)
+
+ return (value)
+end
+
+
+# HDMGETI -- Get an integer valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+int procedure hdmgeti (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctoi()
+int value, imgeti()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgeti (im, NAME(sym))) {
+ ip = 1
+ if (ctoi (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETI: No value found")
+ }
+ } else
+ value = imgeti (im, parameter)
+
+ return (value)
+end
+
+
+# HDMPSTR -- Put a string valued parameter in the image header.
+
+procedure hdmpstr (im, parameter, str)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[ARB] # String value
+
+int imaccf(), imgftype()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ if (imaccf (im, NAME(sym)) == YES)
+ if (imgftype (im, NAME(sym)) != TY_CHAR)
+ call imdelf (im, NAME(sym))
+ call imastr (im, NAME(sym), str)
+ } else {
+ if (imaccf (im, parameter) == YES)
+ if (imgftype (im, parameter) != TY_CHAR)
+ call imdelf (im, parameter)
+ call imastr (im, parameter, str)
+ }
+end
+
+
+# HDMPUTI -- Put an integer valued parameter in the image header.
+
+procedure hdmputi (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+int value # Integer value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddi (im, NAME(sym), value)
+ else
+ call imaddi (im, parameter, value)
+end
+
+
+# HDMPUTR -- Put a real valued parameter in the image header.
+
+procedure hdmputr (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+real value # Real value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddr (im, NAME(sym), value)
+ else
+ call imaddr (im, parameter, value)
+end
+
+
+# HDMGSTP -- Get the symbol table pointer to save a translation map.
+# The symbol table is restored with HDMPSTP.
+
+procedure hdmgstp (ptr)
+
+pointer ptr # Symbol table pointer to return
+
+include "hdrmap.com"
+
+begin
+ ptr = stp
+end
+
+
+# HDMPSTP -- Put a symbol table pointer to restore a header map.
+# The symbol table is obtained with HDMGSTP.
+
+procedure hdmpstp (ptr)
+
+pointer ptr # Symbol table pointer to restore
+
+include "hdrmap.com"
+
+begin
+ stp = ptr
+end
+
+
+# HDMDELF -- Delete a field. It is an error if the field does not exist.
+
+procedure hdmdelf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imdelf (im, NAME(sym))
+ else
+ call imdelf (im, parameter)
+end
+
+
+# HDMPARAM -- Get parameter given the image header name.
+
+procedure hdmparam (name, parameter, max_char)
+
+char name[ARB] # Image header name
+char parameter[max_char] # Parameter
+int max_char # Maximum size of parameter string
+
+bool streq()
+pointer sym, sthead(), stname(), stnext()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = sthead (stp)
+ else
+ sym = NULL
+
+ while (sym != NULL) {
+ if (streq (NAME(sym), name)) {
+ call strcpy (Memc[stname(stp, sym)], parameter, max_char)
+ return
+ }
+ sym = stnext (stp, sym)
+ }
+ call strcpy (name, parameter, max_char)
+end
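+
+# A minimal usage sketch of this interface (hypothetical file name and
+# keyword; declarations and error handling omitted):
+#
+#     call hdmopen ("mymap.dat")               # build the symbol table
+#     if (hdmaccf (im, "exptime") == YES)      # access via the translation
+#         exptime = hdmgetr (im, "exptime")
+#     call hdmclose ()                         # free the symbol table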
diff --git a/noao/imred/quadred/src/ccdproc/mkpkg b/noao/imred/quadred/src/ccdproc/mkpkg
new file mode 100644
index 00000000..7f263c15
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/mkpkg
@@ -0,0 +1,78 @@
+# Make QUADRED Package.
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $call quadred
+ ;
+
+install:
+ $move xx_quadred.e noaobin$x_quadred.e
+ ;
+
+quadred:
+ $omake x_quadred.x
+ $link x_quadred.o libpkg.a -lxtools -lcurfit -lgsurfit -lncar -lgks\
+ -o xx_quadred.e
+ ;
+
+generic:
+ $set GEN = "$$generic -k"
+
+ $ifolder (generic/ccdred.h, ccdred.h)
+ $copy ccdred.h generic/ccdred.h $endif
+ $ifolder (generic/proc.x, proc.gx)
+ $(GEN) proc.gx -o generic/proc.x $endif
+ $ifolder (generic/cor.x, cor.gx)
+ $(GEN) cor.gx -o generic/cor.x $endif
+ $ifolder (generic/corinput.x, corinput.gx)
+ $(GEN) corinput.gx -o generic/corinput.x $endif
+ ;
+
+libpkg.a:
+ $ifeq (USE_GENERIC, yes) $call generic $endif
+ @generic
+
+ calimage.x ccdtypes.h <error.h> <imset.h>
+	ccdcache.x	ccdcache.com ccdcache.h <imhdr.h> <imset.h> <mach.h>
+ ccdcheck.x ccdtypes.h <imhdr.h>
+ ccdcmp.x
+ ccddelete.x
+ ccdflag.x
+ ccdlog.x <imhdr.h> <imset.h>
+ ccdmean.x <imhdr.h>
+ ccdnscan.x ccdtypes.h
+ ccdproc.x ccdred.h ccdtypes.h <error.h>
+ ccdsection.x <ctype.h>
+ ccdsubsets.x
+ ccdtypes.x ccdtypes.h
+ doproc.x ccdred.h
+ hdrmap.x hdrmap.com <error.h>
+ readcor.x <imhdr.h>
+ scancor.x <imhdr.h> <imset.h>
+ setdark.x ccdred.h ccdtypes.h <imhdr.h>
+ setfixpix.x ccdred.h <imhdr.h>
+ setflat.x ccdred.h ccdtypes.h <imhdr.h>
+ setfringe.x ccdred.h ccdtypes.h <imhdr.h>
+ setheader.x ccdred.h <imhdr.h>
+ setillum.x ccdred.h ccdtypes.h <imhdr.h>
+ setinput.x ccdtypes.h <error.h>
+ setinteract.x <pkg/xtanswer.h>
+ setoutput.x <imhdr.h> <imset.h>
+ setoverscan.x ccdred.h <imhdr.h> <imset.h> <pkg/xtanswer.h>\
+ <pkg/gtools.h>
+ setproc.x ccdred.h <imhdr.h>
+ setsections.x ccdred.h <imhdr.h>
+ settrim.x ccdred.h <imhdr.h> <imset.h>
+ setzero.x ccdred.h ccdtypes.h <imhdr.h>
+ t_ccdproc.x ccdred.h ccdtypes.h <error.h> <imhdr.h>
+ timelog.x <time.h>
+ ;
diff --git a/noao/imred/quadred/src/ccdproc/proc.gx b/noao/imred/quadred/src/ccdproc/proc.gx
new file mode 100644
index 00000000..b6604179
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/proc.gx
@@ -0,0 +1,379 @@
+include <imhdr.h>
+include "ccdred.h"
+
+
+.help proc Feb87 noao.imred.ccdred
+.nf ----------------------------------------------------------------------------
+proc -- Process CCD images
+
+These are the main CCD reduction procedures. There is one for each
+readout axis (lines or columns) and one for short and real image data.
+They apply corrections for bad pixels, overscan levels, zero levels,
+dark counts, flat field response, illumination response, and fringe
+effects. The image is also trimmed if it was mapped with an image
+section. The mean value for the output image is computed when the flat
+field or illumination image is processed to form the scale factor for
+these calibrations in order to avoid reading through these images a
+second time.
+
+The processing information and parameters are specified in the CCD
+structure. The processing operations to be performed are specified by
+the correction array CORS in the ccd structure. There is one array
+element for each operation with indices defined symbolically by macro
+definitions (see ccdred.h); i.e. FLATCOR. The value of the array
+element is an integer bit field in which the bit set is the same as the
+array index; i.e. element 3 will have the third bit set for an operation
+with array value 2**(3-1)=4. If an operation is not to be performed
+the bit is not set and the array element has the numeric value zero.
+Note that the addition of several correction elements gives a unique
+bit field describing a combination of operations. For efficiency the
+most common combinations are implemented as separate units.
+
+The CCD structure also contains the correction or calibration data
+consisting of either pointers to the data or IMIO pointers for the calibration
+images, and scale factors.
+
+The processing is performed line-by-line. The procedure CORINPUT is
+called to get an input line. This procedure trims and fixes bad pixels by
+interpolation. The output line and lines from the various calibration
+images are read. The image vectors as well as the overscan vector and
+the scale factors are passed to the procedure COR (which also
+dereferences the pointer data into simple arrays and variables). That
+procedure does the actual corrections apart from bad pixel
+corrections.
+
+The final optional step is to add each corrected output line to form a
+mean. This adds efficiency since the operation is done only if desired
+and the output image data is already in memory so there is no I/O
+penalty.
+
+SEE ALSO
+ ccdred.h, cor, fixpix, setfixpix, setoverscan, settrim,
+ setzero, setdark, setflat, setillum, setfringe
+.endhelp ----------------------------------------------------------------------
+
+
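+# The $for (sr) block below is a template for the generic preprocessor:
+# it is expanded once per listed pixel type, with $t replaced by the type
+# suffix (s for short, r for real) and PIXEL by the corresponding data
+# type, producing the proc1s/proc2s and proc1r/proc2r procedures kept in
+# the generic directory.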
+$for (sr)
+# PROC1 -- Process CCD images with readout axis 1 (lines).
+
+procedure proc1$t (ccd)
+
+pointer ccd # CCD structure
+
+int i, line, nlin, ncols, nlines, findmean, rep
+int c1, c2, l1, l2
+real overscan, darkscale, flatscale, illumscale, frgscale, mean
+PIXEL minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+$if (datatype == csir)
+real asum$t()
+$else $if (datatype == ld)
+double asum$t()
+$else
+PIXEL asum$t()
+$endif $endif
+pointer imgl2$t(), impl2$t(), ccd_gl$t()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ nlin = IM_LEN(in,2)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinit$t (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),2) == 1) {
+ zeroim = NULL
+ zerobuf = ccd_gl$t (ZERO_IM(ccd), ZERO_C1(ccd), ZERO_C2(ccd), 1)
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),2) == 1) {
+ darkim = NULL
+ darkbuf = ccd_gl$t (DARK_IM(ccd), DARK_C1(ccd), DARK_C2(ccd), 1)
+	    darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),2) == 1) {
+ flatim = NULL
+ flatbuf = ccd_gl$t (FLAT_IM(ccd), FLAT_C1(ccd), FLAT_C2(ccd), 1)
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR1 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2$t (out, OUT_L1(ccd)+line-1)
+ call corinput$t (in, line, ccd, Mem$t[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+line-1]
+ if (zeroim != NULL)
+ zerobuf = ccd_gl$t (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gl$t (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gl$t (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gl$t (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gl$t (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
+ if (OUT_SEC(ccd) == NULL) {
+ call cor1$t (CORS(ccd,1), Mem$t[outbuf],
+ overscan, Mem$t[zerobuf], Mem$t[darkbuf],
+ Mem$t[flatbuf], Mem$t[illumbuf], Mem$t[fringebuf], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ } else {
+ do i = 1, IN_NSEC(ccd) {
+ l1 = OUT_SL1(ccd,i)
+ l2 = OUT_SL2(ccd,i)
+ if (line < l1 || line > l2)
+ next
+ c1 = OUT_SC1(ccd,i) - 1
+ c2 = OUT_SC2(ccd,i) - 1
+ ncols = c2 - c1 + 1
+ if (overscan_vec != NULL)
+ overscan = Memr[overscan_vec+(i-1)*nlin+line-l1]
+
+ call cor1$t (CORS(ccd,1), Mem$t[outbuf+c1],
+ overscan, Mem$t[zerobuf+c1], Mem$t[darkbuf+c1],
+ Mem$t[flatbuf+c1], Mem$t[illumbuf+c1],
+ Mem$t[fringebuf+c1], ncols,
+ darkscale, flatscale, illumscale, frgscale)
+ }
+ }
+
+ if (rep == YES)
+ call amaxk$t (Mem$t[outbuf], minrep, Mem$t[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asum$t (Mem$t[outbuf], ncols)
+ }
+
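+ # Copy lines above the output data section directly from the input.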
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfree$t ()
+end
+
+
+# PROC2 -- Process CCD images with readout axis 2 (columns).
+
+procedure proc2$t (ccd)
+
+pointer ccd # CCD structure
+
+int line, ncols, nlines, findmean, rep
+real darkscale, flatscale, illumscale, frgscale, mean
+PIXEL minrep
+pointer in, out, zeroim, darkim, flatim, illumim, fringeim
+pointer outbuf, overscan_vec, zerobuf, darkbuf, flatbuf, illumbuf, fringebuf
+
+$if (datatype == csir)
+real asum$t()
+$else $if (datatype == ld)
+double asum$t()
+$else
+PIXEL asum$t()
+$endif $endif
+pointer imgl2$t(), impl2$t(), imgs2$t(), ccd_gl$t()
+
+begin
+ # Initialize. If the correction image is 1D then just get the
+ # data once.
+
+ in = IN_IM(ccd)
+ out = OUT_IM(ccd)
+ ncols = OUT_C2(ccd) - OUT_C1(ccd) + 1
+ nlines = OUT_L2(ccd) - OUT_L1(ccd) + 1
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixinit$t (in)
+
+ findmean = CORS(ccd, FINDMEAN)
+ if (findmean == YES)
+ mean = 0.
+ rep = CORS(ccd, MINREP)
+ if (rep == YES)
+ minrep = MINREPLACE(ccd)
+
+ overscan_vec = OVERSCAN_VEC(ccd)
+
+ if (CORS(ccd, ZEROCOR) == 0) {
+ zeroim = NULL
+ zerobuf = 1
+ } else if (IM_LEN(ZERO_IM(ccd),1) == 1) {
+ zeroim = NULL
+ zerobuf = imgs2$t (ZERO_IM(ccd), 1, 1, ZERO_L1(ccd), ZERO_L2(ccd))
+ } else
+ zeroim = ZERO_IM(ccd)
+
+ if (CORS(ccd, DARKCOR) == 0) {
+ darkim = NULL
+ darkbuf = 1
+ } else if (IM_LEN(DARK_IM(ccd),1) == 1) {
+ darkim = NULL
+ darkbuf = imgs2$t (DARK_IM(ccd), 1, 1, DARK_L1(ccd), DARK_L2(ccd))
+ darkscale = DARKSCALE(ccd)
+ } else {
+ darkim = DARK_IM(ccd)
+ darkscale = DARKSCALE(ccd)
+ }
+
+ if (CORS(ccd, FLATCOR) == 0) {
+ flatim = NULL
+ flatbuf = 1
+ } else if (IM_LEN(FLAT_IM(ccd),1) == 1) {
+ flatim = NULL
+ flatbuf = imgs2$t (FLAT_IM(ccd), 1, 1, FLAT_L1(ccd), FLAT_L2(ccd))
+ flatscale = FLATSCALE(ccd)
+ } else {
+ flatim = FLAT_IM(ccd)
+ flatscale = FLATSCALE(ccd)
+ }
+
+ if (CORS(ccd, ILLUMCOR) == 0) {
+ illumim = NULL
+ illumbuf = 1
+ } else {
+ illumim = ILLUM_IM(ccd)
+ illumscale = ILLUMSCALE(ccd)
+ }
+
+ if (CORS(ccd, FRINGECOR) == 0) {
+ fringeim = NULL
+ fringebuf = 1
+ } else {
+ fringeim = FRINGE_IM(ccd)
+ frgscale = FRINGESCALE(ccd)
+ }
+
+ # For each line read lines from the input. Procedure CORINPUT
+ # replaces bad pixels by interpolation and applies a trim to the
+ # input. Get lines from the output image and from the zero level,
+ # dark count, flat field, illumination, and fringe images.
+ # Call COR2 to do the actual pixel corrections. Finally, add the
+ # output pixels to a sum for computing the mean.
+ # We must copy data outside of the output data section.
+
+ do line = 2 - OUT_L1(ccd), 0
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ do line = 1, nlines {
+ outbuf = impl2$t (out, OUT_L1(ccd)+line-1)
+ call corinput$t (in, line, ccd, Mem$t[outbuf], IM_LEN(out,1))
+
+ outbuf = outbuf + OUT_C1(ccd) - 1
+ if (zeroim != NULL)
+ zerobuf = ccd_gl$t (zeroim, ZERO_C1(ccd), ZERO_C2(ccd),
+ ZERO_L1(ccd)+line-1)
+ if (darkim != NULL)
+ darkbuf = ccd_gl$t (darkim, DARK_C1(ccd), DARK_C2(ccd),
+ DARK_L1(ccd)+line-1)
+ if (flatim != NULL)
+ flatbuf = ccd_gl$t (flatim, FLAT_C1(ccd), FLAT_C2(ccd),
+ FLAT_L1(ccd)+line-1)
+ if (illumim != NULL)
+ illumbuf = ccd_gl$t (illumim, ILLUM_C1(ccd), ILLUM_C2(ccd),
+ ILLUM_L1(ccd)+line-1)
+ if (fringeim != NULL)
+ fringebuf = ccd_gl$t (fringeim, FRINGE_C1(ccd), FRINGE_C2(ccd),
+ FRINGE_L1(ccd)+line-1)
+
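+ # For column readout the overscan varies along the line, so the full
+ # overscan vector is passed to COR2.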
+ call cor2$t (line, CORS(ccd,1), Mem$t[outbuf],
+ Memr[overscan_vec], Mem$t[zerobuf], Mem$t[darkbuf],
+ Mem$t[flatbuf], Mem$t[illumbuf], Mem$t[fringebuf], ncols,
+ zeroim, flatim, darkscale, flatscale, illumscale, frgscale)
+
+ if (rep == YES)
+ call amaxk$t (Mem$t[outbuf], minrep, Mem$t[outbuf], ncols)
+ if (findmean == YES)
+ mean = mean + asum$t (Mem$t[outbuf], ncols)
+ }
+
+ do line = nlines+1, IM_LEN(out,2)-OUT_L1(ccd)+1
+ call amov$t (
+ Mem$t[imgl2$t(in,IN_L1(ccd)+line-1)+IN_C1(ccd)-OUT_C1(ccd)],
+ Mem$t[impl2$t(out,OUT_L1(ccd)+line-1)], IM_LEN(out,1))
+
+ # Compute the mean from the sum of the output pixels.
+ if (findmean == YES)
+ MEAN(ccd) = mean / ncols / nlines
+
+ if (CORS(ccd, FIXPIX) == YES)
+ call lfixfree$t ()
+end
+$endfor
diff --git a/noao/imred/quadred/src/ccdproc/readcor.x b/noao/imred/quadred/src/ccdproc/readcor.x
new file mode 100644
index 00000000..61fbd836
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/readcor.x
@@ -0,0 +1,138 @@
+include <imhdr.h>
+
+# READCOR -- Create a readout image.
+# Assume it is appropriate to perform this operation on the input image.
+# There is no CCD type checking.
+
+procedure readcor (input)
+
+char input[ARB] # Input image
+int readaxis # Readout axis
+
+int i, nc, nl, c1, c2, cs, l1, l2, ls
+int in_c1, in_c2, in_l1, in_l2, ccd_c1, ccd_c2, ccd_l1, ccd_l2
+pointer sp, output, str, in, out, data
+
+real asumr()
+int clgwrd()
+bool clgetb(), ccdflag()
+pointer immap(), imgl2r(), impl2r(), imps2r()
+errchk immap, ccddelete
+
+begin
+ # Check if this operation is desired.
+ if (!clgetb ("readcor"))
+ return
+
+ # Check if this operation has been done. Unfortunately this requires
+ # mapping the image.
+
+ in = immap (input, READ_ONLY, 0)
+ if (ccdflag (in, "readcor")) {
+ call imunmap (in)
+ return
+ }
+
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Convert %s to readout correction\n")
+ call pargstr (input)
+ call imunmap (in)
+ return
+ }
+
+ call smark (sp)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ in_c1 = c1
+ in_c2 = c2
+ in_l1 = l1
+ in_l2 = l2
+
+ # The default ccd section is the data section.
+ call hdmgstr (in, "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ ccd_c1 = c1
+ ccd_c2 = c2
+ ccd_l1 = l1
+ ccd_l2 = l2
+ if ((in_c2-in_c1 != ccd_c2-ccd_c1) || (in_l2-in_l1 != ccd_l2-ccd_l1))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ # Determine the readout axis.
+ readaxis = clgwrd ("readaxis", Memc[str], SZ_LINE, "|lines|columns|")
+
+ # Create output.
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ call set_output (in, out, Memc[output])
+
+ # Average across the readout axis.
+ switch (readaxis) {
+ case 1:
+ IM_LEN(out,2) = 1
+ data = impl2r (out, 1)
+ call aclrr (Memr[data], nc)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ data = data + in_c1 - 1
+ do i = in_l1, in_l2
+ call aaddr (Memr[imgl2r(in,i)+in_c1-1], Memr[data],
+ Memr[data], nc)
+ call adivkr (Memr[data], real (nl), Memr[data], nc)
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,1:1]")
+ call pargi (in_c1)
+ call pargi (in_c2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,*]")
+ call pargi (ccd_c1)
+ call pargi (ccd_c2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ case 2:
+ IM_LEN(out,1) = 1
+ data = imps2r (out, 1, 1, 1, nl)
+ call aclrr (Memr[data], nl)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ do i = in_l1, in_l2
+ Memr[data+i-1] = asumr (Memr[imgl2r(in,i)+in_c1-1], nc) / nc
+ call sprintf (Memc[str], SZ_LINE, "[1:1,%d:%d]")
+ call pargi (in_l1)
+ call pargi (in_l2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[*,%d:%d]")
+ call pargi (ccd_l1)
+ call pargi (ccd_l2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ }
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Converted to readout format")
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (in, Memc[str])
+ call hdmpstr (out, "readcor", Memc[str])
+
+ # Replace the input image by the output image.
+ call imunmap (in)
+ call imunmap (out)
+ call ccddelete (input)
+ call imrename (Memc[output], input)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/scancor.x b/noao/imred/quadred/src/ccdproc/scancor.x
new file mode 100644
index 00000000..6a5eb84c
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/scancor.x
@@ -0,0 +1,340 @@
+include <imhdr.h>
+include <imset.h>
+
+define SCANTYPES "|shortscan|longscan|"
+define SHORTSCAN 1 # Short scan accumulation, normal readout
+define LONGSCAN 2 # Long scan continuous readout
+
+# SCANCOR -- Create a scanned image from an unscanned image.
+
+procedure scancor (input, output, nscan, minreplace)
+
+char input[ARB] # Input image
+char output[ARB] # Output image (must be new image)
+int nscan # Number of scan lines
+real minreplace # Minimum value of output
+
+int scantype # Type of scan format
+int readaxis # Readout axis
+
+int clgwrd()
+pointer sp, str, in, out, immap()
+errchk immap
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Determine readout axis and create the temporary output image.
+ scantype = clgwrd ("scantype", Memc[str], SZ_LINE, SCANTYPES)
+ readaxis = clgwrd ("readaxis", Memc[str], SZ_LINE, "|lines|columns|")
+
+ # Make the output scanned image.
+ in = immap (input, READ_ONLY, 0)
+ call set_output (in, out, output)
+
+ switch (scantype) {
+ case SHORTSCAN:
+ call shortscan (in, out, nscan, minreplace, readaxis)
+ case LONGSCAN:
+ call longscan (in, out, readaxis)
+ }
+
+ # Log the operation.
+ switch (scantype) {
+ case SHORTSCAN:
+ call sprintf (Memc[str], SZ_LINE,
+ "Converted to shortscan from %s with nscan=%d")
+ call pargstr (input)
+ call pargi (nscan)
+ call hdmputi (out, "nscanrow", nscan)
+ case LONGSCAN:
+ call sprintf (Memc[str], SZ_LINE, "Converted to longscan from %s")
+ call pargstr (input)
+ }
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (out, Memc[str])
+ call hdmpstr (out, "scancor", Memc[str])
+
+ call imunmap (in)
+ call imunmap (out)
+
+ call sfree (sp)
+end
+
+
+# SHORTSCAN -- Make a shortscan mode image by using a moving average.
+#
+# NOTE!! The value of nscan used here is increased by 1 because the
+# current information in the image header is actually the number of
+# scan steps and NOT the number of rows.
+
+procedure shortscan (in, out, nscan, minreplace, readaxis)
+
+pointer in # Input image
+pointer out # Output image
+int nscan # Number of lines scanned before readout
+real minreplace # Minimum output value
+int readaxis # Readout axis
+
+bool replace
+real nscanr, sum, mean, asumr()
+int i, j, k, l, len1, len2, nc, nl, nscani, c1, c2, cs, l1, l2, ls
+pointer sp, str, bufs, datain, dataout, data, imgl2r(), impl2r()
+long clktime()
+errchk malloc, calloc
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ len1 = IM_LEN(in,1)
+ len2 = IM_LEN(in,2)
+ c1 = 1
+ c2 = len1
+ cs = 1
+ l1 = 1
+ l2 = len2
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>len1)||(l1<1)||(l2>len2)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ nc = c2 - c1 + 1
+ nl = l2 - l1 + 1
+
+ # Copy initial lines.
+ do i = 1, l1 - 1
+ call amovr (Memr[imgl2r(in,i)], Memr[impl2r(out,i)], len1)
+
+ replace = !IS_INDEF(minreplace)
+ mean = 0.
+ switch (readaxis) {
+ case 1:
+ nscani = max (1, min (nscan, nl) + 1)
+ nscanr = nscani
+ call imseti (in, IM_NBUFS, nscani)
+ call malloc (bufs, nscani, TY_INT)
+ call calloc (data, nc, TY_REAL)
+ j = 1
+ k = 1
+ l = 1
+
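+ # Keep the last nscani line buffers resident (IM_NBUFS) so the saved
+ # pointers stay valid and maintain a running sum of those lines.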
+ # Ramp up
+ while (j <= nscani) {
+ i = j + l1 - 1
+ datain = imgl2r (in, i)
+ if (nc < len1)
+ call amovr (Memr[datain], Memr[impl2r(out,i)], len1)
+ datain = datain + c1 - 1
+ Memi[bufs+mod(j,nscani)] = datain
+ call aaddr (Memr[data], Memr[datain], Memr[data], nc)
+ j = j + 1
+ }
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+ l = l + 1
+
+ # Moving average
+ while (j <= nl) {
+ datain = Memi[bufs+mod(k,nscani)]
+ call asubr (Memr[data], Memr[datain], Memr[data], nc)
+ i = j + l1 - 1
+ datain = imgl2r (in, i)
+ if (nc < len1)
+ call amovr (Memr[datain], Memr[impl2r(out,i)], len1)
+ datain = datain + c1 - 1
+ Memi[bufs+mod(j,nscani)] = datain
+ call aaddr (Memr[data], Memr[datain], Memr[data], nc)
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+
+ j = j + 1
+ k = k + 1
+ l = l + 1
+ }
+
+ # Ramp down.
+ while (l <= nl) {
+ datain = Memi[bufs+mod(k,nscani)]
+ call asubr (Memr[data], Memr[datain], Memr[data], nc)
+ dataout = impl2r (out, l+l1-1) + c1 - 1
+ call adivkr (Memr[data], nscanr, Memr[dataout], nc)
+ if (replace)
+ call amaxkr (Memr[dataout], minreplace, Memr[dataout], nc)
+ mean = mean + asumr (Memr[dataout], nc)
+
+ k = k + 1
+ l = l + 1
+ }
+
+ call mfree (bufs, TY_INT)
+ call mfree (data, TY_REAL)
+
+ case 2:
+ nscani = max (1, min (nscan, nc) + 1)
+ nscanr = nscani
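+ # For readout along columns apply a running average of nscani pixels
+ # along each image line.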
+ do i = 1, nl {
+ datain = imgl2r (in, i + l1 - 1)
+ data = impl2r (out, i + l1 - 1)
+ call amovr (Memr[datain], Memr[data], len1)
+ datain = datain + c1 - 1
+ data = data + c1 - 1
+ sum = 0
+ j = 0
+ k = 0
+ l = 0
+
+ # Ramp up
+ while (j < nscani) {
+ sum = sum + Memr[datain+j]
+ j = j + 1
+ }
+ if (replace)
+ Memr[data] = max (minreplace, sum / nscani)
+ else
+ Memr[data] = sum / nscani
+ mean = mean + Memr[data]
+ l = l + 1
+
+ # Moving average
+ while (j < nc) {
+ sum = sum + Memr[datain+j] - Memr[datain+k]
+ if (replace)
+ Memr[data+l] = max (minreplace, sum / nscani)
+ else
+ Memr[data+l] = sum / nscani
+ mean = mean + Memr[data+l]
+ j = j + 1
+ k = k + 1
+ l = l + 1
+ }
+
+ # Ramp down
+ while (l < nc) {
+ sum = sum - Memr[datain+k]
+ if (replace)
+ Memr[data+l] = max (minreplace, sum / nscani)
+ else
+ Memr[data+l] = sum / nscani
+ mean = mean + Memr[data+l]
+ k = k + 1
+ l = l + 1
+ }
+ }
+ }
+
+ # Copy final lines.
+ do i = l2+1, len2
+ call amovr (Memr[imgl2r(in,i)], Memr[impl2r(out,i)], len1)
+
+ mean = mean / nc / nl
+ call hdmputr (out, "ccdmean", mean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ call sfree (sp)
+end
+
+
+# LONGSCAN -- Make a longscan mode readout flat field correction by averaging
+# across the readout axis.
+
+procedure longscan (in, out, readaxis)
+
+pointer in # Input image
+pointer out # Output image
+int readaxis # Readout axis
+
+int i, nc, nl, c1, c2, cs, l1, l2, ls
+int in_c1, in_c2, in_l1, in_l2, ccd_c1, ccd_c2, ccd_l1, ccd_l2
+real mean, asumr()
+long clktime()
+pointer sp, str, data, imgl2r(), impl2r(), imps2r()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # The default data section is the entire image.
+ nc = IM_LEN(in,1)
+ nl = IM_LEN(in,2)
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (in, "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ in_c1 = c1
+ in_c2 = c2
+ in_l1 = l1
+ in_l2 = l2
+
+ # The default ccd section is the data section.
+ call hdmgstr (in, "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ ccd_c1 = c1
+ ccd_c2 = c2
+ ccd_l1 = l1
+ ccd_l2 = l2
+ if ((in_c2-in_c1 != ccd_c2-ccd_c1) || (in_l2-in_l1 != ccd_l2-ccd_l1))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ switch (readaxis) {
+ case 1:
+ IM_LEN(out,2) = 1
+ data = impl2r (out, 1)
+ call aclrr (Memr[data], nc)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ data = data + in_c1 - 1
+ do i = in_l1, in_l2
+ call aaddr (Memr[imgl2r(in,i)+in_c1-1], Memr[data],
+ Memr[data], nc)
+ call adivkr (Memr[data], real (nl), Memr[data], nc)
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,1:1]")
+ call pargi (in_c1)
+ call pargi (in_c2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,*]")
+ call pargi (ccd_c1)
+ call pargi (ccd_c2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ mean = asumr (Memr[data], nc) / nc
+ case 2:
+ IM_LEN(out,1) = 1
+ data = imps2r (out, 1, 1, 1, nl)
+ call aclrr (Memr[data], nl)
+ nc = in_c2 - in_c1 + 1
+ nl = in_l2 - in_l1 + 1
+ do i = in_l1, in_l2
+ Memr[data+i-1] = asumr (Memr[imgl2r(in,i)+in_c1-1], nc) / nc
+ call sprintf (Memc[str], SZ_LINE, "[1:1,%d:%d]")
+ call pargi (in_l1)
+ call pargi (in_l2)
+ call hdmpstr (out, "datasec", Memc[str])
+ call sprintf (Memc[str], SZ_LINE, "[*,%d:%d]")
+ call pargi (ccd_l1)
+ call pargi (ccd_l2)
+ call hdmpstr (out, "ccdsec", Memc[str])
+ mean = asumr (Memr[data], nl) / nl
+ }
+
+ call hdmputr (out, "ccdmean", mean)
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setdark.x b/noao/imred/quadred/src/ccdproc/setdark.x
new file mode 100644
index 00000000..bf3c7354
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setdark.x
@@ -0,0 +1,155 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+
+# SET_DARK -- Set parameters for dark count correction.
+#
+# 1. Return immediately if the dark count correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the dark count correction image and return an error if not found.
+# 3. If the dark count image has not been processed call PROC.
+# 4. Compute the dark count integration time scale factor.
+# 5. Set the processing flags.
+# 6. Log the operation (to user, logfile, and output image header).
+
+procedure set_dark (ccd)
+
+pointer ccd # CCD structure
+
+int nscan, nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+real darktime1, darktime2
+pointer sp, image, str, im
+
+bool clgetb(), ccdflag(), ccdcheck()
+int ccdnscan(), ccdtypei()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or it has already been done.
+ if (!clgetb ("darkcor") || ccdflag (IN_IM(ccd), "darkcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the dark count correction image name.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), DARK, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print dark count image and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Dark count correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the dark count image if necessary.
+ # If nscan > 1 then the dark may not yet exist so create it
+ # from the unscanned dark.
+
+ iferr (im = ccd_cache (Memc[image], DARK)) {
+ call cal_image (IN_IM(ccd), DARK, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], DARK)
+ if (ccdcheck (im, DARK)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], DARK)
+ }
+ call scancor (Memc[str], Memc[image], nscan, INDEF)
+ im = ccd_cache (Memc[image], DARK)
+ }
+
+ if (ccdcheck (im, DARK)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], DARK)
+ im = ccd_cache (Memc[image], DARK)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ DARK_IM(ccd) = im
+ DARK_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ DARK_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ DARK_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ DARK_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # Get the dark count integration times. Return an error if not found.
+ iferr (darktime1 = hdmgetr (IN_IM(ccd), "darktime"))
+ darktime1 = hdmgetr (IN_IM(ccd), "exptime")
+ iferr (darktime2 = hdmgetr (im, "darktime"))
+ darktime2 = hdmgetr (im, "exptime")
+
+ DARKSCALE(ccd) = darktime1 / darktime2
+ CORS(ccd, DARKCOR) = D
+ COR(ccd) = YES
+
+ # Record the operation in the output image and write a log record.
+ call sprintf (Memc[str], SZ_LINE,
+ "Dark count correction image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (DARKSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "darkcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setfixpix.x b/noao/imred/quadred/src/ccdproc/setfixpix.x
new file mode 100644
index 00000000..05866bed
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setfixpix.x
@@ -0,0 +1,181 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_FIXPIX -- Setup for fixing bad pixels.
+#
+# 1. Return immediately if the bad pixel correction is not requested or
+# if the image has been previously corrected.
+# 2. Determine the bad pixel correction file. This may be specified
+# directly or indirectly through the image header or symbol table.
+# Return warning if not found.
+# 3. Read through the file collecting the bad pixel regions into a
+# bad column array (regions to be interpolated across columns) and
+# a bad line array (regions to be interpolated across lines).
+# 4. Set the processing flag.
+# 5. Log the operation (to user, logfile, and output image header).
+
+procedure set_fixpix (ccd)
+
+pointer ccd # CCD structure
+
+int fd, nc, nl, c1, c2, l1, l2, dc, dl, nbadcols, nbadlines
+pointer sp, image, str, badcols, badlines
+
+int open(), fscan(), nscan(), strmatch()
+bool clgetb(), streq(), ccdflag()
+errchk open
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("fixpix") || ccdflag (IN_IM(ccd), "fixpix"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the bad pixel file. If the name is "image" then get the file
+ # name from the image header or symbol table.
+
+ call clgstr ("fixfile", Memc[image], SZ_FNAME)
+ if (streq (Memc[image], "image"))
+ call hdmgstr (IN_IM(ccd), "fixfile", Memc[image], SZ_FNAME)
+
+ # If no processing is desired print message and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Bad pixel file is %s\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Open the file and read the bad pixel regions. Use dynamic memory.
+ # Set the bad pixel coordinates. By default the bad pixel coordinates
+ # refer to the image directly but if the word "untrimmed" appears
+ # in a comment then the coordinates refer to the CCD coordinates.
+
+ fd = open (Memc[image], READ_ONLY, TEXT_FILE)
+ dc = 0
+ dl = 0
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+ nbadcols = 0
+ nbadlines = 0
+ while (fscan (fd) != EOF) {
+ call gargwrd (Memc[str], SZ_LINE)
+ if (Memc[str] == '#') {
+ call gargstr (Memc[str], SZ_LINE)
+ if (strmatch (Memc[str], "{untrimmed}") != 0) {
+ dc = IN_C1(ccd) - CCD_C1(ccd)
+ dl = IN_L1(ccd) - CCD_L1(ccd)
+ }
+ next
+ }
+
+ call reset_scan()
+ call gargi (c1)
+ call gargi (c2)
+ call gargi (l1)
+ call gargi (l2)
+
+ # Ignore badly specified lines.
+ if (nscan() != 4) {
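+ # A two-number entry specifies a single pixel as column and line.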
+ if (nscan() == 2) {
+ l1 = c2
+ c2 = c1
+ l2 = l1
+ } else
+ next
+ }
+
+ # Do the coordinate conversion.
+ c1 = max (IN_C1(ccd), c1 + dc)
+ c2 = min (IN_C2(ccd), c2 + dc)
+ l1 = max (IN_L1(ccd), l1 + dl)
+ l2 = min (IN_L2(ccd), l2 + dl)
+
+ # Ignore an improperly specified region.
+ if ((c1 > c2) || (l1 > l2))
+ next
+
+ # Interpolate across the shortest direction.
+ if ((l2 - l1) < (c2 - c1)) {
+ nbadlines = nbadlines + 1
+ if (nbadlines == 1)
+ call calloc (badlines, 2*nl*nbadlines, TY_SHORT)
+ else {
+ call realloc (badlines, 2*nl*nbadlines, TY_SHORT)
+ call aclrs (Mems[badlines+2*nl*(nbadlines-1)], 2*nl)
+ }
+ call set_badcols (c1, c2, l1, l2, Mems[badlines],
+ nl, nbadlines)
+
+ } else {
+ nbadcols = nbadcols + 1
+ if (nbadcols == 1)
+ call calloc (badcols, 2*nl*nbadcols, TY_SHORT)
+ else {
+ call realloc (badcols, 2*nl*nbadcols, TY_SHORT)
+ call aclrs (Mems[badcols+2*nl*(nbadcols-1)], 2*nl)
+ }
+ call set_badcols (c1, c2, l1, l2, Mems[badcols],
+ nl, nbadcols)
+ }
+ }
+ call close (fd)
+
+ # Set structure parameters and the correction flags.
+ if (nbadcols != 0) {
+ NBADCOLS(ccd) = nbadcols
+ BADCOLS(ccd) = badcols
+ CORS(ccd, FIXPIX) = YES
+ COR(ccd) = YES
+ }
+ if (nbadlines != 0) {
+ NBADLINES(ccd) = nbadlines
+ BADLINES(ccd) = badlines
+ CORS(ccd, FIXPIX) = YES
+ COR(ccd) = YES
+ }
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE, "Bad pixel file is %s")
+ call pargstr (Memc[image])
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "fixpix", Memc[str])
+
+ call sfree (sp)
+end
+
+
+# SET_BADCOLS -- Enter bad columns in a bad column array.
+# This procedure is used both for the line and column interpolation arrays.
+# The bad column array contains the starting and ending bad columns for
+# each line. This allows quick look up when processing the image at the
+# expense of memory. A column index of zero indicates no further bad columns
+# in the line.
+
+procedure set_badcols (c1, c2, l1, l2, array, nl, nbadcols)
+
+int c1, c2, l1, l2 # Bad column
+short array[2,nl,nbadcols] # Bad column array
+int nl # Number of image lines
+int nbadcols # Number of bad column areas
+
+int i, j
+
+begin
+ # For each line in the bad columns set the columns
+ # in the first unused entry in the array.
+
+ do i = l1, l2 {
+ do j = 1, nbadcols {
+ if (array[1,i,j] == 0) {
+ array[1,i,j] = c1
+ array[2,i,j] = c2
+ break
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/ccdproc/setflat.x b/noao/imred/quadred/src/ccdproc/setflat.x
new file mode 100644
index 00000000..87713404
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setflat.x
@@ -0,0 +1,146 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_FLAT -- Set parameters for flat field correction.
+#
+# 1. Return immediately if the flat field correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the flat field image and return on an error.
+# 3. If the flat field image has not been processed call PROC.
+# 4. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_flat (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+pointer sp, str, image, im, ccd_cache()
+bool clgetb(), ccdflag(), ccdcheck()
+int nscan, ccdnscan(), ccdtypei()
+real hdmgetr()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("flatcor") || ccdflag (IN_IM(ccd), "flatcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the flat field correction image.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), FLAT, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print flat field image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Flat correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the flat field image if necessary.
+ # If nscan > 1 then the flat field may not yet exist so create it
+ # from the unscanned flat field.
+
+ iferr (im = ccd_cache (Memc[image], FLAT)) {
+ call cal_image (IN_IM(ccd), FLAT, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], FLAT)
+ if (ccdcheck (im, FLAT)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], FLAT)
+ }
+ call scancor (Memc[str], Memc[image], nscan, MINREPLACE(ccd))
+ im = ccd_cache (Memc[image], FLAT)
+ }
+
+ if (ccdcheck (im, FLAT)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], FLAT)
+ im = ccd_cache (Memc[image], FLAT)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ FLAT_IM(ccd) = im
+ FLAT_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ FLAT_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ FLAT_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ FLAT_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # If no mean value use 1 as the scale factor.
+ iferr (FLATSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ FLATSCALE(ccd) = 1.
+ CORS(ccd, FLATCOR) = F
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Flat field image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (FLATSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "flatcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setfringe.x b/noao/imred/quadred/src/ccdproc/setfringe.x
new file mode 100644
index 00000000..7055f35f
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setfringe.x
@@ -0,0 +1,123 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_FRINGE -- Set parameters for fringe correction.
+#
+# 1. Return immediately if the fringe correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the fringe image and return error if the mkfringe flag is missing.
+# 3. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_fringe (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+real exptime1, exptime2, fringescale
+pointer sp, str, image, im
+
+bool clgetb(), ccdflag()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("fringecor") || ccdflag (IN_IM(ccd), "fringcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the fringe correction image.
+ call cal_image (IN_IM(ccd), FRINGE, 1, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print fringe image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Fringe correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Return an error if the fringe flag is missing.
+ im = ccd_cache (Memc[image], FRINGE)
+ if (!ccdflag (im, "mkfringe"))
+ call error (0, "MKFRINGE flag missing from fringe image.")
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ FRINGE_IM(ccd) = im
+ FRINGE_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ FRINGE_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ FRINGE_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ FRINGE_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ # Get the scaling factors. If no fringe scale factor assume 1.
+ exptime1 = hdmgetr (IN_IM(ccd), "exptime")
+ exptime2 = hdmgetr (im, "exptime")
+ iferr (fringescale = hdmgetr (im, "fringscl"))
+ fringescale = 1.
+
+ FRINGESCALE(ccd) = exptime1 / exptime2 * fringescale
+ CORS(ccd, FRINGECOR) = Q
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Fringe image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (FRINGESCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "fringcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setheader.x b/noao/imred/quadred/src/ccdproc/setheader.x
new file mode 100644
index 00000000..5687612d
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setheader.x
@@ -0,0 +1,76 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_HEADER -- Set the output image header.
+
+procedure set_header (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl
+pointer sp, str, out
+long clktime()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ out = OUT_IM(ccd)
+ nc = IM_LEN(out,1)
+ nl = IM_LEN(out,2)
+
+ # Set the data section if it is not the whole image.
+ if ((OUT_C1(ccd) != 1) || (OUT_C2(ccd) != nc) ||
+ (OUT_L1(ccd) != 1) || (OUT_L2(ccd) != nl)) {
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (OUT_C1(ccd))
+ call pargi (OUT_C2(ccd))
+ call pargi (OUT_L1(ccd))
+ call pargi (OUT_L2(ccd))
+ call hdmpstr (out, "datasec", Memc[str])
+ } else {
+ iferr (call hdmdelf (out, "datasec"))
+ ;
+ }
+
+ # Set the CCD section.
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call hdmpstr (out, "ccdsec", Memc[str])
+
+ # If trimming update the trim and bias section parameters.
+ if (CORS(ccd, TRIM) == YES) {
+ iferr (call hdmdelf (out, "trimsec"))
+ ;
+ iferr (call hdmdelf (out, "biassec"))
+ ;
+ BIAS_C1(ccd) = max (1, BIAS_C1(ccd) - TRIM_C1(ccd) + 1)
+ BIAS_C2(ccd) = min (nc, BIAS_C2(ccd) - TRIM_C1(ccd) + 1)
+ BIAS_L1(ccd) = max (1, BIAS_L1(ccd) - TRIM_L1(ccd) + 1)
+ BIAS_L2(ccd) = min (nl, BIAS_L2(ccd) - TRIM_L1(ccd) + 1)
+ if ((BIAS_C1(ccd)<=BIAS_C2(ccd)) && (BIAS_L1(ccd)<=BIAS_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (BIAS_C1(ccd))
+ call pargi (BIAS_C2(ccd))
+ call pargi (BIAS_L1(ccd))
+ call pargi (BIAS_L2(ccd))
+ call hdmpstr (out, "biassec", Memc[str])
+ }
+ }
+
+ # Set mean value if desired.
+ if (CORS(ccd, FINDMEAN) == YES) {
+ call hdmputr (out, "ccdmean", MEAN(ccd))
+ call hdmputi (out, "ccdmeant", int (clktime (long (0))))
+ }
+
+ # Mark image as processed.
+ call sprintf (Memc[str], SZ_LINE, "CCD processing done")
+ call timelog (Memc[str], SZ_LINE)
+ call hdmpstr (out, "ccdproc", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setillum.x b/noao/imred/quadred/src/ccdproc/setillum.x
new file mode 100644
index 00000000..d1677301
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setillum.x
@@ -0,0 +1,132 @@
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_ILLUM -- Set parameters for illumination correction.
+#
+# 1. Return immediately if the illumination correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the illumination image and return error if mkillum flag missing.
+# 3. Set the processing flags and record the operation in the output
+# image and write a log record.
+
+procedure set_illum (ccd)
+
+pointer ccd # CCD structure
+
+int nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+long time
+pointer sp, str, image, im
+
+bool clgetb(), ccdflag()
+long hdmgeti()
+real hdmgetr()
+pointer ccd_cache()
+errchk cal_image, ccd_cache, ccdproc, hdmgetr, hdmgeti
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("illumcor") || ccdflag (IN_IM(ccd), "illumcor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the illumcor correction image.
+ call cal_image (IN_IM(ccd), ILLUM, 1, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print illumination image name and return.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Illumination correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Return an error if the illumination flag is missing.
+ im = ccd_cache (Memc[image], ILLUM)
+ if (!ccdflag (im, "mkillum")) {
+ call ccd_flush (im)
+ call error (0, "MKILLUM flag missing from illumination image")
+ }
+
+ # If there is no mean value, or it predates the last image
+ # modification, compute it.
+ iferr (ILLUMSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ ILLUMSCALE(ccd) = INDEF
+ iferr (time = hdmgeti (im, "ccdmeant"))
+ time = IM_MTIME(im)
+ if (IS_INDEF(ILLUMSCALE(ccd)) || time < IM_MTIME(im)) {
+ call ccd_flush (im)
+ call ccdmean (Memc[image])
+ im = ccd_cache (Memc[image], ILLUM)
+ }
+ iferr (ILLUMSCALE(ccd) = hdmgetr (im, "ccdmean"))
+ ILLUMSCALE(ccd) = 1.
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ ILLUM_IM(ccd) = im
+ ILLUM_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ ILLUM_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ ILLUM_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ ILLUM_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ CORS(ccd, ILLUMCOR) = I
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE,
+ "Illumination image is %s with scale=%g")
+ call pargstr (Memc[image])
+ call pargr (ILLUMSCALE(ccd))
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "illumcor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setinput.x b/noao/imred/quadred/src/ccdproc/setinput.x
new file mode 100644
index 00000000..3d3170db
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setinput.x
@@ -0,0 +1,48 @@
+include <error.h>
+include "ccdtypes.h"
+
+# SET_INPUT -- Set the input image and image type.
+#
+# 1. Open the input image. Return warning and NULL pointer for an error.
+# 2. Get the requested CCD image type.
+# a. If no type is requested then accept the image.
+# b. If a type is requested then match against the image type.
+# Unmap the image if no match.
+# 3. If the image is acceptable then get the CCD type code.
+
+procedure set_input (image, im, ccdtype)
+
+char image[ARB] # Input image name
+pointer im # IMIO pointer (returned)
+int ccdtype # CCD image type
+
+bool strne()
+int ccdtypei()
+pointer sp, str1, str2, immap()
+
+begin
+ # Open the image. Return a warning and NULL pointer for an error.
+ iferr (im = immap (image, READ_ONLY, 0)) {
+ call erract (EA_WARN)
+ im = NULL
+ return
+ }
+
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the requested CCD type.
+ call clgstr ("ccdtype", Memc[str1], SZ_LINE)
+ call xt_stripwhite (Memc[str1])
+ if (Memc[str1] != EOS) {
+ call ccdtypes (im, Memc[str2], SZ_LINE)
+ if (strne (Memc[str1], Memc[str2]))
+ call imunmap (im)
+ }
+
+ if (im != NULL)
+ ccdtype = ccdtypei (im)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setinteract.x b/noao/imred/quadred/src/ccdproc/setinteract.x
new file mode 100644
index 00000000..05bc0f71
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setinteract.x
@@ -0,0 +1,31 @@
+include <pkg/xtanswer.h>
+
+# SET_INTERACTIVE -- Set the interactive flag. Query the user if necessary.
+#
+# This procedure initializes the interactive flag if there is no query.
+# If there is a query it is issued by XT_ANSWER. The four valued
+# interactive flag is returned.
+
+procedure set_interactive (query, interactive)
+
+char query[ARB] # Query prompt
+int interactive # Fit overscan interactively? (returned)
+
+int interact # Saves last value of interactive flag
+bool clgetb()
+
+begin
+ # If the query is null then initialize from the CL otherwise
+ # query the user. This response is four valued to allow the user
+ # to turn off the query when processing multiple images.
+
+ if (query[1] == EOS) {
+ if (clgetb ("interactive"))
+ interact = YES
+ else
+ interact = ALWAYSNO
+ } else
+ call xt_answer (query, interact)
+
+ interactive = interact
+end
diff --git a/noao/imred/quadred/src/ccdproc/setoutput.x b/noao/imred/quadred/src/ccdproc/setoutput.x
new file mode 100644
index 00000000..0c4e608f
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setoutput.x
@@ -0,0 +1,51 @@
+include <imhdr.h>
+include <imset.h>
+
+# SET_OUTPUT -- Set up the output image.
+# The output image is a NEW_COPY of the input image.
+# The user may select a pixel datatype of higher precision than the input
+# but not lower.
+
+procedure set_output (in, out, output)
+
+pointer in # Input IMIO pointer to copy
+pointer out # Output IMIO pointer
+char output[SZ_FNAME] # Output image name
+
+int i, clscan(), nscan()
+char type[1]
+pointer immap()
+errchk immap
+
+begin
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+ if (clscan ("pixeltype") != EOF) {
+ call gargwrd (type, 1)
+ if (nscan() == 1) {
+ i = IM_PIXTYPE(in)
+ IM_PIXTYPE(out) = i
+ switch (type[1]) {
+ case 's':
+ ;
+ case 'u':
+ if (i == TY_SHORT)
+ IM_PIXTYPE(out) = TY_USHORT
+ case 'i':
+ if (i == TY_SHORT || i == TY_USHORT)
+ IM_PIXTYPE(out) = TY_INT
+ case 'l':
+ if (i == TY_SHORT || i == TY_USHORT || i == TY_INT)
+ IM_PIXTYPE(out) = TY_LONG
+ case 'r':
+ if (i != TY_DOUBLE)
+ IM_PIXTYPE(out) = TY_REAL
+ case 'd':
+ IM_PIXTYPE(out) = TY_DOUBLE
+ default:
+ call imunmap (out)
+ call error (0, "Unknown pixel type")
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/ccdproc/setoverscan.x b/noao/imred/quadred/src/ccdproc/setoverscan.x
new file mode 100644
index 00000000..2fef378a
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setoverscan.x
@@ -0,0 +1,344 @@
+include <imhdr.h>
+include <imset.h>
+include <pkg/gtools.h>
+include <pkg/xtanswer.h>
+include "ccdred.h"
+
+
+# SET_OVERSCAN -- Set the overscan vector.
+#
+# 1. Return immediately if the overscan correction is not requested or
+# if the image has been previously corrected.
+# 2. Determine the overscan columns or lines. This may be specified
+# directly or indirectly through the image header or symbol table.
+# 3. Average the overscan columns or lines.
+# 4. Fit a function with the ICFIT routines to smooth the overscan vector.
+# 5. Set the processing flag.
+# 6. Log the operation (to user, logfile, and output image header).
+
+procedure set_overscan (ccd)
+
+pointer ccd # CCD structure pointer
+
+int i, j, nsec, navg, npts, first, last
+int nc, nl, c1, c2, l1, l2
+pointer sp, str, errstr, buf, overscan, x, y, z
+
+real asumr()
+bool clgetb(), ccdflag()
+pointer imgl2r()
+errchk imgl2r, fit_overscan
+
+begin
+ # Check if the user wants this operation or if it has been done.
+ if (!clgetb ("overscan") || ccdflag (IN_IM(ccd), "overscan"))
+ return
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (errstr, SZ_LINE, TY_CHAR)
+ call imstats (IN_IM(ccd), IM_IMAGENAME, Memc[str], SZ_LINE)
+
+ # Check bias section.
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+
+ c1 = BIAS_C1(ccd)
+ c2 = BIAS_C2(ccd)
+ l1 = BIAS_L1(ccd)
+ l2 = BIAS_L2(ccd)
+ navg = c2 - c1 + 1
+ npts = CCD_L2(ccd) - CCD_L1(ccd) + 1
+
+ nsec = max (1, IN_NSEC(ccd))
+ do i = 1, nsec {
+ if (BIAS_SEC(ccd) != NULL) {
+ c1 = BIAS_SC1(ccd,i)
+ c2 = BIAS_SC2(ccd,i)
+ l1 = BIAS_SL1(ccd,i)
+ l2 = BIAS_SL2(ccd,i)
+ }
+ if ((c1 < 1) || (c2 > nc) || (l1 < 1) || (l2 > nl)) {
+ call sprintf (Memc[errstr], SZ_LINE,
+ "Error in bias section: image=%s[%d,%d], biassec=[%d:%d,%d:%d]")
+ call pargstr (Memc[str])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[errstr])
+ }
+ if ((c1 == 1) && (c2 == nc) && (l1 == 1) && (l2 == nl)) {
+ call error (0,
+ "Bias section not specified or given as full image")
+ }
+
+ # If no processing is desired then print overscan strip and return.
+ if (clgetb ("noproc")) {
+ call eprintf (
+ " [TO BE DONE] Overscan section is [%d:%d,%d:%d].\n")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call sfree (sp)
+ return
+ }
+ }
+
+ # Determine the overscan section parameters. The readout axis
+ # determines the type of overscan. The step sizes are ignored.
+ # The limits in the long dimension are replaced by the trim limits.
+
+ if (READAXIS(ccd) == 1) {
+ call salloc (buf, nsec*nl, TY_REAL)
+ z = buf
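+ # For each image line store the sum over the bias columns;
+ # one overscan vector is kept per bias section.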
+ do i = 1, nl {
+ y = imgl2r (IN_IM(ccd), i)
+ do j = 1, nsec {
+ if (BIAS_SEC(ccd) != NULL) {
+ l1 = BIAS_SL1(ccd,j)
+ l2 = BIAS_SL2(ccd,j)
+ if (i < l1 || i > l2)
+ next
+ c1 = BIAS_SC1(ccd,j)
+ c2 = BIAS_SC2(ccd,j)
+ navg = c2 - c1 + 1
+ z = buf + (j - 1) * nl
+ }
+ Memr[z+i-1] = asumr (Memr[y+c1-1], navg)
+ }
+ }
+
+ # Trim the overscan vector and set the pixel coordinate.
+ call salloc (x, nl, TY_REAL)
+ call malloc (overscan, nsec*nl, TY_REAL)
+ y = overscan
+ do i = 1, nsec {
+ if (BIAS_SEC(ccd) != NULL) {
+ c1 = BIAS_SC1(ccd,i)
+ c2 = BIAS_SC2(ccd,i)
+ l1 = BIAS_SL1(ccd,i)
+ l2 = BIAS_SL2(ccd,i)
+ navg = c2 - c1 + 1
+ npts = l2 - l1 + 1
+ y = overscan + (i - 1) * nl
+ z = buf + (i - 1) * nl
+ }
+ if (navg > 1)
+ call adivkr (Memr[z+l1-1], real (navg), Memr[z+l1-1],
+ npts)
+ call trim_overscan (Memr[z], npts, l1, Memr[x], Memr[y])
+ call fit_overscan (Memc[str], c1, c2, l1, l2, Memr[x],
+ Memr[y], npts)
+ }
+
+ } else {
+ first = l1
+ last = l2
+ navg = last - first + 1
+ npts = nc
+ call salloc (buf, npts, TY_REAL)
+ call aclrr (Memr[buf], npts)
+ do i = first, last
+ call aaddr (Memr[imgl2r(IN_IM(ccd),i)], Memr[buf], Memr[buf],
+ npts)
+ if (navg > 1)
+ call adivkr (Memr[buf], real (navg), Memr[buf], npts)
+
+ # Trim the overscan vector and set the pixel coordinate.
+ npts = CCD_C2(ccd) - CCD_C1(ccd) + 1
+ call malloc (overscan, npts, TY_REAL)
+ call salloc (x, npts, TY_REAL)
+ call trim_overscan (Memr[buf], npts, IN_C1(ccd), Memr[x],
+ Memr[overscan])
+
+ call fit_overscan (Memc[str], c1, c2, l1, l2, Memr[x],
+ Memr[overscan], npts)
+ }
+
+ # Set the CCD structure overscan parameters.
+ CORS(ccd, OVERSCAN) = O
+ COR(ccd) = YES
+ OVERSCAN_VEC(ccd) = overscan
+
+ # Log the operation.
+ call strcpy ("overscan", Memc[errstr], SZ_LINE)
+ y = overscan
+ do i = 1, nsec {
+ if (BIAS_SEC(ccd) != NULL) {
+ c1 = BIAS_SC1(ccd,i)
+ c2 = BIAS_SC2(ccd,i)
+ l1 = BIAS_SL1(ccd,i)
+ l2 = BIAS_SL2(ccd,i)
+ y = overscan + (i - 1) * nl
+ npts = c2 - c1 + 1
+ if (i > 1) {
+ call sprintf (Memc[errstr], SZ_LINE, "ovrscn%d")
+ call pargi (i)
+ }
+ }
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan section is [%d:%d,%d:%d] with mean=%g")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call pargr (asumr (Memr[y], npts) / npts)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), Memc[errstr], Memc[str])
+ }
+
+ call sfree (sp)
+end
+
+
+# FIT_OVERSCAN -- Fit a function to smooth the overscan vector.
+# The fitting uses the ICFIT procedures which may be interactive.
+# Changes to these parameters are "learned". The user is queried with a four
+# valued logical query (XT_ANSWER routine) which may be turned off when
+# multiple images are processed.
+
+procedure fit_overscan (image, c1, c2, l1, l2, x, overscan, npts)
+
+char image[ARB] # Image name for query and title
+int c1, c2, l1, l2 # Overscan strip
+real x[npts] # Pixel coordinates of overscan
+real overscan[npts] # Input overscan and output fitted overscan
+int npts # Number of data points
+
+int interactive, fd
+pointer sp, str, w, ic, cv, gp, gt
+
+int clgeti(), ic_geti(), open()
+real clgetr(), ic_getr()
+pointer gopen(), gt_init()
+errchk gopen, open
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+ call salloc (w, npts, TY_REAL)
+ call amovkr (1., Memr[w], npts)
+
+ # Open the ICFIT procedures, get the fitting parameters, and
+ # set the fitting limits.
+
+ call ic_open (ic)
+ call clgstr ("function", Memc[str], SZ_LINE)
+ call ic_pstr (ic, "function", Memc[str])
+ call ic_puti (ic, "order", clgeti ("order"))
+ call clgstr ("sample", Memc[str], SZ_LINE)
+ call ic_pstr (ic, "sample", Memc[str])
+ call ic_puti (ic, "naverage", clgeti ("naverage"))
+ call ic_puti (ic, "niterate", clgeti ("niterate"))
+ call ic_putr (ic, "low", clgetr ("low_reject"))
+ call ic_putr (ic, "high", clgetr ("high_reject"))
+ call ic_putr (ic, "grow", clgetr ("grow"))
+ call ic_putr (ic, "xmin", min (x[1], x[npts]))
+ call ic_putr (ic, "xmax", max (x[1], x[npts]))
+ call ic_pstr (ic, "xlabel", "Pixel")
+ call ic_pstr (ic, "ylabel", "Overscan")
+
+ # If the fitting is done interactively set the GTOOLS and GIO
+ # pointers. Also "learn" the fitting parameters since they may
+ # be changed when fitting interactively.
+
+ call sprintf (Memc[str], SZ_LINE,
+ "Fit overscan vector for %s[%d:%d,%d:%d] interactively")
+ call pargstr (image)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call set_interactive (Memc[str], interactive)
+ if ((interactive == YES) || (interactive == ALWAYSYES)) {
+ gt = gt_init ()
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan vector for %s from section [%d:%d,%d:%d]\n")
+ call pargstr (image)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call gt_sets (gt, GTTITLE, Memc[str])
+ call gt_sets (gt, GTTYPE, "line")
+ call gt_setr (gt, GTXMIN, x[1])
+ call gt_setr (gt, GTXMAX, x[npts])
+ call clgstr ("graphics", Memc[str], SZ_FNAME)
+ gp = gopen (Memc[str], NEW_FILE, STDGRAPH)
+
+ call icg_fit (ic, gp, "cursor", gt, cv, x, overscan, Memr[w], npts)
+
+ call ic_gstr (ic, "function", Memc[str], SZ_LINE)
+ call clpstr ("function", Memc[str])
+ call clputi ("order", ic_geti (ic, "order"))
+ call ic_gstr (ic, "sample", Memc[str], SZ_LINE)
+ call clpstr ("sample", Memc[str])
+ call clputi ("naverage", ic_geti (ic, "naverage"))
+ call clputi ("niterate", ic_geti (ic, "niterate"))
+ call clputr ("low_reject", ic_getr (ic, "low"))
+ call clputr ("high_reject", ic_getr (ic, "high"))
+ call clputr ("grow", ic_getr (ic, "grow"))
+
+ call gclose (gp)
+ call gt_free (gt)
+ } else
+ call ic_fit (ic, cv, x, overscan, Memr[w], npts, YES, YES, YES, YES)
+
+ # Make a log of the fit in the plot file if given.
+ call clgstr ("plotfile", Memc[str], SZ_LINE)
+ call xt_stripwhite (Memc[str])
+ if (Memc[str] != EOS) {
+ fd = open (Memc[str], APPEND, BINARY_FILE)
+ gp = gopen ("stdvdm", NEW_FILE, fd)
+ gt = gt_init ()
+ call sprintf (Memc[str], SZ_LINE,
+ "Overscan vector for %s from section [%d:%d,%d:%d]\n")
+ call pargstr (image)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call gt_sets (gt, GTTITLE, Memc[str])
+ call gt_sets (gt, GTTYPE, "line")
+ call gt_setr (gt, GTXMIN, 1.)
+ call gt_setr (gt, GTXMAX, real (npts))
+ call icg_graphr (ic, gp, gt, cv, x, overscan, Memr[w], npts)
+ call gclose (gp)
+ call close (fd)
+ call gt_free (gt)
+ }
+
+ # Replace the raw overscan vector with the smooth fit.
+ call cvvector (cv, x, overscan, npts)
+
+ # Finish up.
+ call ic_closer (ic)
+ call cvfree (cv)
+ call sfree (sp)
+end
+
+
+# TRIM_OVERSCAN -- Trim the overscan vector.
+
+procedure trim_overscan (data, npts, start, x, overscan)
+
+real data[ARB] # Full overscan vector
+int npts # Length of trimmed vector
+int start # Trim start
+real x[npts] # Trimmed pixel coordinates (returned)
+real overscan[npts] # Trimmed overscan vector (returned)
+
+int i, j
+
+begin
+ do i = 1, npts {
+ j = start + i - 1
+ x[i] = j
+ overscan[i] = data[j]
+ }
+end
diff --git a/noao/imred/quadred/src/ccdproc/setproc.x b/noao/imred/quadred/src/ccdproc/setproc.x
new file mode 100644
index 00000000..595acd76
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setproc.x
@@ -0,0 +1,80 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_PROC -- Set the processing parameter structure pointer.
+
+procedure set_proc (in, out, ccd)
+
+pointer in # Input IMIO pointer
+pointer out # Output IMIO pointer
+pointer ccd # CCD structure (returned)
+
+int clgwrd(), clscan(), nscan()
+real clgetr()
+pointer sp, str
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Allocate the ccd structure.
+ call calloc (ccd, LEN_CCD, TY_STRUCT)
+
+ IN_IM(ccd) = in
+ OUT_IM(ccd) = out
+ COR(ccd) = NO
+ CORS(ccd, FIXPIX) = NO
+ CORS(ccd, OVERSCAN) = NO
+ CORS(ccd, TRIM) = NO
+ READAXIS(ccd) = clgwrd ("readaxis",Memc[str],SZ_LINE,"|line|columns|")
+ MINREPLACE(ccd) = clgetr ("minreplace")
+
+ CALCTYPE(ccd) = TY_REAL
+ if (clscan ("pixeltype") != EOF) {
+ call gargwrd (Memc[str], SZ_LINE)
+ call gargwrd (Memc[str], SZ_LINE)
+ if (nscan() == 2) {
+ if (Memc[str] == 'r')
+ CALCTYPE(ccd) = TY_REAL
+ else if (Memc[str] == 's')
+ CALCTYPE(ccd) = TY_SHORT
+ else
+ call error (1, "Invalid calculation datatype")
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# FREE_PROC -- Free the processing structure pointer.
+
+procedure free_proc (ccd)
+
+pointer ccd # CCD structure
+
+begin
+ # Unmap calibration images.
+ if (ZERO_IM(ccd) != NULL)
+ call ccd_unmap (ZERO_IM(ccd))
+ if (DARK_IM(ccd) != NULL)
+ call ccd_unmap (DARK_IM(ccd))
+ if (FLAT_IM(ccd) != NULL)
+ call ccd_unmap (FLAT_IM(ccd))
+ if (ILLUM_IM(ccd) != NULL)
+ call ccd_unmap (ILLUM_IM(ccd))
+ if (FRINGE_IM(ccd) != NULL)
+ call ccd_unmap (FRINGE_IM(ccd))
+
+ # Free memory
+ if (BADCOLS(ccd) != NULL)
+ call mfree (BADCOLS(ccd), TY_SHORT)
+ if (BADLINES(ccd) != NULL)
+ call mfree (BADLINES(ccd), TY_SHORT)
+ if (OVERSCAN_VEC(ccd) != NULL)
+ call mfree (OVERSCAN_VEC(ccd), TY_REAL)
+ call mfree (IN_SEC(ccd), TY_INT)
+ call mfree (OUT_SEC(ccd), TY_INT)
+ call mfree (BIAS_SEC(ccd), TY_INT)
+ call mfree (ccd, TY_STRUCT)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setsections.x b/noao/imred/quadred/src/ccdproc/setsections.x
new file mode 100644
index 00000000..b83a9d13
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setsections.x
@@ -0,0 +1,327 @@
+include <imhdr.h>
+include "ccdred.h"
+
+# SET_SECTIONS -- Set the data section, ccd section, trim section and
+# bias section.
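+#
+# For example (hypothetical header values): on an 832x800 raw frame,
+# DATASEC = "[1:800,1:800]", TRIMSEC = "[2:799,2:799]" and
+# BIASSEC = "[803:830,1:800]" give an 800x800 data section with a
+# 28-column overscan strip.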
+
+procedure set_sections (ccd)
+
+pointer ccd # CCD structure (returned)
+
+pointer sp, str
+int nc, nl, c1, c2, cs, l1, l2, ls
+bool streq()
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+
+ # The default data section is the entire image.
+ c1 = 1
+ c2 = nc
+ cs = 1
+ l1 = 1
+ l2 = nl
+ ls = 1
+ call hdmgstr (IN_IM(ccd), "datasec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1))
+ call error (0, "Error in DATASEC parameter")
+ IN_C1(ccd) = c1
+ IN_C2(ccd) = c2
+ IN_L1(ccd) = l1
+ IN_L2(ccd) = l2
+
+ # The default trim section is the data section.
+ # Defer limit checking until actually used.
+ c1 = IN_C1(ccd)
+ c2 = IN_C2(ccd)
+ l1 = IN_L1(ccd)
+ l2 = IN_L2(ccd)
+ call clgstr ("trimsec", Memc[str], SZ_LINE)
+ if (streq (Memc[str], "image"))
+ call hdmgstr (IN_IM(ccd), "trimsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs!=1)||(ls!=1))
+ call error (0, "Error in TRIMSEC parameter")
+ TRIM_C1(ccd) = c1
+ TRIM_C2(ccd) = c2
+ TRIM_L1(ccd) = l1
+ TRIM_L2(ccd) = l2
+
+ # The default bias section is the whole image.
+ # Defer limit checking until actually used.
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ call clgstr ("biassec", Memc[str], SZ_LINE)
+ if (streq (Memc[str], "image"))
+ call hdmgstr (IN_IM(ccd), "biassec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs!=1)||(ls!=1))
+ call error (0, "Error in BIASSEC parameter")
+ BIAS_C1(ccd) = c1
+ BIAS_C2(ccd) = c2
+ BIAS_L1(ccd) = l1
+ BIAS_L2(ccd) = l2
+
+ # The default ccd section is the size of the data section.
+ c1 = 1
+ c2 = IN_C2(ccd) - IN_C1(ccd) + 1
+ l1 = 1
+ l2 = IN_L2(ccd) - IN_L1(ccd) + 1
+ call hdmgstr (IN_IM(ccd), "ccdsec", Memc[str], SZ_LINE)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs != 1) || (ls != 1))
+ call error (0, "Error in CCDSEC parameter")
+ CCD_C1(ccd) = c1
+ CCD_C2(ccd) = c2
+ CCD_L1(ccd) = l1
+ CCD_L2(ccd) = l2
+ if ((IN_C2(ccd)-IN_C1(ccd) != CCD_C2(ccd)-CCD_C1(ccd)) ||
+ (IN_L2(ccd)-IN_L1(ccd) != CCD_L2(ccd)-CCD_L1(ccd)))
+ call error (0, "Size of DATASEC and CCDSEC do not agree")
+
+ # The default output data section is the input data section.
+ OUT_C1(ccd) = IN_C1(ccd)
+ OUT_C2(ccd) = IN_C2(ccd)
+ OUT_L1(ccd) = IN_L1(ccd)
+ OUT_L2(ccd) = IN_L2(ccd)
+
+ # Set ARCON style sections.
+ call set_arcon (ccd)
+
+ call sfree (sp)
+end
+
+
+# SET_ARCON -- Set the per-amplifier data section, ccd section, trim section
+# and bias section for ARCON style multi-readout (AMPLIST) images.
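+#
+# For a quad readout the header might contain (illustrative values)
+# AMPLIST = "11 12 21 22" together with ASEC11, BSEC11, CSEC11, DSEC11 and
+# TSEC11 (and likewise for the other amplifiers) giving the amplifier,
+# bias, CCD, data and trim sections for each readout.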
+
+procedure set_arcon (ccd)
+
+pointer ccd # CCD structure (returned)
+
+pointer sp, amplist, amp, key, str
+int i, ip, nc, nl, c1, c2, cs, l1, l2, ls, ctowrd()
+int xt1, xt2, yt1, yt2
+bool trim, clgetb()
+
+begin
+ call smark (sp)
+ call salloc (amplist, SZ_LINE, TY_CHAR)
+ call salloc (amp, SZ_LINE, TY_CHAR)
+ call salloc (key, SZ_LINE, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ trim = clgetb ("trim")
+
+ # Get AMPLIST and determine the number of amplifiers.
+ # If there is no AMPLIST or missing BSEC keywords return.
+ call hdmgstr (IN_IM(ccd), "amplist", Memc[amplist], SZ_LINE)
+ if (Memc[amplist] == EOS) {
+ call sfree (sp)
+ return
+ }
+
+ ip = 1
+ for (i=0; ctowrd(Memc[amplist],ip,Memc[amp],SZ_LINE)!=0; i=i+1) {
+ call sprintf (Memc[key], SZ_LINE, "bsec%s")
+ call pargstr (Memc[amp])
+ call hdmgstr (IN_IM(ccd), Memc[key], Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call sfree (sp)
+ return
+ }
+ }
+ if (i == 0) {
+ call sfree (sp)
+ return
+ }
+
+ IN_NSEC(ccd) = i
+ call malloc (IN_SEC(ccd), 4*i, TY_INT)
+ call malloc (OUT_SEC(ccd), 4*i, TY_INT)
+ call malloc (BIAS_SEC(ccd), 4*i, TY_INT)
+
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+
+ ip = 1
+ for (i=1; ctowrd(Memc[amplist],ip,Memc[amp],SZ_LINE)!=0; i=i+1) {
+
+ # Use amp section if no trim and data section if trim.
+ c1 = 1; c2 = nc; cs = 1; l1 = 1; l2 = nl; ls = 1
+ if (trim)
+ call sprintf (Memc[key], SZ_LINE, "dsec%s")
+ else
+ call sprintf (Memc[key], SZ_LINE, "asec%s")
+ call pargstr (Memc[amp])
+ call hdmgstr (IN_IM(ccd), Memc[key], Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call sprintf (Memc[str], SZ_LINE, "Keyword %s not found")
+ call pargstr (Memc[key])
+ call error (0, Memc[str])
+ }
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)) {
+ call sprintf (Memc[str], SZ_LINE, "Error in %s parameter")
+ call pargstr (Memc[key])
+ call error (0, Memc[str])
+ }
+ IN_SC1(ccd,i) = c1
+ IN_SC2(ccd,i) = c2
+ IN_SL1(ccd,i) = l1
+ IN_SL2(ccd,i) = l2
+
+ # If trimming match dsec with csec and then use tsec.
+ if (trim) {
+ c1 = IN_SC1(ccd,i); c2 = IN_SC2(ccd,i); cs = 1
+ l1 = IN_SL1(ccd,i); l2 = IN_SL2(ccd,i); ls = 1
+ call sprintf (Memc[key], SZ_LINE, "tsec%s")
+ call pargstr (Memc[amp])
+ call hdmgstr (IN_IM(ccd), Memc[key], Memc[str], SZ_LINE)
+ if (Memc[str] != EOS)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<IN_SC1(ccd,i))||(c2>IN_SC2(ccd,i))||
+ (l1<IN_SL1(ccd,i))||(l2>IN_SL2(ccd,i))) {
+ call sprintf (Memc[str], SZ_LINE, "Error in %s parameter")
+ call pargstr (Memc[key])
+ call error (0, Memc[str])
+ }
+ xt1 = max (0, c1 - IN_SC1(ccd,i))
+ xt2 = min (0, c2 - IN_SC2(ccd,i))
+ yt1 = max (0, l1 - IN_SL1(ccd,i))
+ yt2 = min (0, l2 - IN_SL2(ccd,i))
+
+ call sprintf (Memc[key], SZ_LINE, "csec%s")
+ call pargstr (Memc[amp])
+ call hdmgstr (IN_IM(ccd), Memc[key], Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call sprintf (Memc[str], SZ_LINE, "Keyword %s not found")
+ call pargstr (Memc[key])
+ call error (0, Memc[str])
+ }
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c2-c1) != (IN_SC2(ccd,i)-IN_SC1(ccd,i)) ||
+ (l2-l1) != (IN_SL2(ccd,i)-IN_SL1(ccd,i)))
+ call error (1, "DSEC and CSEC are different sizes")
+
+ IN_SC1(ccd,i) = IN_SC1(ccd,i) + xt1
+ IN_SC2(ccd,i) = IN_SC2(ccd,i) + xt2
+ IN_SL1(ccd,i) = IN_SL1(ccd,i) + yt1
+ IN_SL2(ccd,i) = IN_SL2(ccd,i) + yt2
+ OUT_SC1(ccd,i) = c1 + xt1
+ OUT_SC2(ccd,i) = c2 + xt2
+ OUT_SL1(ccd,i) = l1 + yt1
+ OUT_SL2(ccd,i) = l2 + yt2
+
+ } else {
+ OUT_SC1(ccd,i) = c1
+ OUT_SC2(ccd,i) = c2
+ OUT_SL1(ccd,i) = l1
+ OUT_SL2(ccd,i) = l2
+ }
+
+ # The default bias section is the whole image.
+ # Defer limit checking until actually used.
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ call sprintf (Memc[key], SZ_LINE, "bsec%s")
+ call pargstr (Memc[amp])
+ call hdmgstr (IN_IM(ccd), Memc[key], Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call sprintf (Memc[str], SZ_LINE, "Keyword %s not found")
+ call pargstr (Memc[key])
+ call error (0, Memc[str])
+ }
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((cs!=1)||(ls!=1))
+ call error (0, "Error in BSEC parameter")
+ BIAS_SC1(ccd,i) = c1
+ BIAS_SC2(ccd,i) = c2
+ BIAS_SL1(ccd,i) = l1
+ BIAS_SL2(ccd,i) = l2
+
+ if (trim) {
+ #iferr (call hdmdelf (OUT_IM(ccd), "amplist"))
+ # ;
+ #call sprintf (Memc[key], SZ_LINE, "asec%s")
+ # call pargstr (Memc[amp])
+ #iferr (call hdmdelf (OUT_IM(ccd), Memc[key]))
+ # ;
+ call sprintf (Memc[key], SZ_LINE, "bsec%s")
+ call pargstr (Memc[amp])
+ iferr (call hdmdelf (OUT_IM(ccd), Memc[key]))
+ ;
+ call sprintf (Memc[key], SZ_LINE, "csec%s")
+ call pargstr (Memc[amp])
+ iferr (call hdmdelf (OUT_IM(ccd), Memc[key]))
+ ;
+ call sprintf (Memc[key], SZ_LINE, "dsec%s")
+ call pargstr (Memc[amp])
+ iferr (call hdmdelf (OUT_IM(ccd), Memc[key]))
+ ;
+ call sprintf (Memc[key], SZ_LINE, "tsec%s")
+ call pargstr (Memc[amp])
+ iferr (call hdmdelf (OUT_IM(ccd), Memc[key]))
+ ;
+ }
+ }
+
+ # Set global sections.
+ IN_C1(ccd) = IN_SC1(ccd,1)
+ IN_C2(ccd) = IN_SC2(ccd,1)
+ IN_L1(ccd) = IN_SL1(ccd,1)
+ IN_L2(ccd) = IN_SL2(ccd,1)
+ CCD_C1(ccd) = OUT_SC1(ccd,1)
+ CCD_C2(ccd) = OUT_SC2(ccd,1)
+ CCD_L1(ccd) = OUT_SL1(ccd,1)
+ CCD_L2(ccd) = OUT_SL2(ccd,1)
+ do i = 2, IN_NSEC(ccd) {
+ IN_C1(ccd) = min (IN_SC1(ccd,i), IN_C1(ccd))
+ IN_C2(ccd) = max (IN_SC2(ccd,i), IN_C2(ccd))
+ IN_L1(ccd) = min (IN_SL1(ccd,i), IN_L1(ccd))
+ IN_L2(ccd) = max (IN_SL2(ccd,i), IN_L2(ccd))
+ CCD_C1(ccd) = min (OUT_SC1(ccd,i), CCD_C1(ccd))
+ CCD_C2(ccd) = max (OUT_SC2(ccd,i), CCD_C2(ccd))
+ CCD_L1(ccd) = min (OUT_SL1(ccd,i), CCD_L1(ccd))
+ CCD_L2(ccd) = max (OUT_SL2(ccd,i), CCD_L2(ccd))
+ }
+ if (trim) {
+ OUT_C1(ccd) = CCD_C1(ccd) - CCD_C1(ccd) + 1
+ OUT_C2(ccd) = CCD_C2(ccd) - CCD_C1(ccd) + 1
+ OUT_L1(ccd) = CCD_L1(ccd) - CCD_L1(ccd) + 1
+ OUT_L2(ccd) = CCD_L2(ccd) - CCD_L1(ccd) + 1
+ ip = 1
+ for (i=1; ctowrd(Memc[amplist],ip,Memc[amp],SZ_LINE)!=0; i=i+1) {
+ OUT_SC1(ccd,i) = OUT_SC1(ccd,i) - CCD_C1(ccd) + 1
+ OUT_SC2(ccd,i) = OUT_SC2(ccd,i) - CCD_C1(ccd) + 1
+ OUT_SL1(ccd,i) = OUT_SL1(ccd,i) - CCD_L1(ccd) + 1
+ OUT_SL2(ccd,i) = OUT_SL2(ccd,i) - CCD_L1(ccd) + 1
+ call sprintf (Memc[key], SZ_LINE, "asec%s")
+ call pargstr (Memc[amp])
+ call sprintf (Memc[str], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (OUT_SC1(ccd,i))
+ call pargi (OUT_SC2(ccd,i))
+ call pargi (OUT_SL1(ccd,i))
+ call pargi (OUT_SL2(ccd,i))
+ call hdmpstr (OUT_IM(ccd), Memc[key], Memc[str])
+ }
+ IM_LEN(OUT_IM(ccd),1) = OUT_C2(ccd)
+ IM_LEN(OUT_IM(ccd),2) = OUT_L2(ccd)
+ } else {
+ OUT_C1(ccd) = IN_C1(ccd)
+ OUT_C2(ccd) = IN_C2(ccd)
+ OUT_L1(ccd) = IN_L1(ccd)
+ OUT_L2(ccd) = IN_L2(ccd)
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/settrim.x b/noao/imred/quadred/src/ccdproc/settrim.x
new file mode 100644
index 00000000..1aef62c3
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/settrim.x
@@ -0,0 +1,115 @@
+include <imhdr.h>
+include <imset.h>
+include "ccdred.h"
+
+# SET_TRIM -- Set the trim parameters.
+#
+# 1. Return immediately if the trim correction is not requested or
+# if the image has been previously corrected.
+# 2. Determine the trim section. This may be specifed directly or
+# indirectly through the image header or symbol table.
+# 3. Parse the trim section and apply it to the output image.
+# 4. If the image is trimmed then log the operation and reset the output
+# image size.
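+#
+# For example (illustrative): with an 832x800 input image and a trim
+# section of [2:799,2:799] the output image is resized to 798x798.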
+
+procedure set_trim (ccd)
+
+pointer ccd # CCD structure
+
+int xt1, xt2, yt1, yt2
+int nc, nl, c1, c2, l1, l2
+pointer sp, str, image
+bool clgetb(), ccdflag()
+define log_ 10
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("trim") || ccdflag (IN_IM(ccd), "trim"))
+ return
+
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ if (IN_SEC(ccd) != NULL)
+ goto log_
+
+ # Check trim section.
+ nc = IM_LEN(IN_IM(ccd),1)
+ nl = IM_LEN(IN_IM(ccd),2)
+ c1 = TRIM_C1(ccd)
+ c2 = TRIM_C2(ccd)
+ l1 = TRIM_L1(ccd)
+ l2 = TRIM_L2(ccd)
+ if ((c1 < 1) || (c2 > nc) || (l1 < 1) || (l2 > nl)) {
+ call salloc (image, SZ_LINE, TY_CHAR)
+ call imstats (IN_IM(ccd), IM_IMAGENAME, Memc[image], SZ_FNAME)
+ call sprintf (Memc[str], SZ_LINE,
+ "Error in trim section: image=%s[%d,%d], trimsec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ # If no processing is desired print trim section and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Trim section is [%d:%d,%d:%d].\n")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call sfree (sp)
+ return
+ }
+
+ xt1 = max (0, c1 - IN_C1(ccd))
+ xt2 = min (0, c2 - IN_C2(ccd))
+ yt1 = max (0, l1 - IN_L1(ccd))
+ yt2 = min (0, l2 - IN_L2(ccd))
+
+ CCD_C1(ccd) = CCD_C1(ccd) + xt1
+ CCD_C2(ccd) = CCD_C2(ccd) + xt2
+ CCD_L1(ccd) = CCD_L1(ccd) + yt1
+ CCD_L2(ccd) = CCD_L2(ccd) + yt2
+ IN_C1(ccd) = IN_C1(ccd) + xt1
+ IN_C2(ccd) = IN_C2(ccd) + xt2
+ IN_L1(ccd) = IN_L1(ccd) + yt1
+ IN_L2(ccd) = IN_L2(ccd) + yt2
+ OUT_C1(ccd) = IN_C1(ccd) - c1 + 1
+ OUT_C2(ccd) = IN_C2(ccd) - c1 + 1
+ OUT_L1(ccd) = IN_L1(ccd) - l1 + 1
+ OUT_L2(ccd) = IN_L2(ccd) - l1 + 1
+ IM_LEN(OUT_IM(ccd),1) = c2 - c1 + 1
+ IM_LEN(OUT_IM(ccd),2) = l2 - l1 + 1
+
+log_
+ if (IN_SEC(ccd) == NULL) {
+ CORS(ccd, TRIM) = YES
+ COR(ccd) = YES
+
+ call sprintf (Memc[str], SZ_LINE,
+ "Trim data section is [%d:%d,%d:%d]")
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "trim", Memc[str])
+ } else {
+ CORS(ccd, TRIM) = NO
+ COR(ccd) = YES
+
+ call sprintf (Memc[str], SZ_LINE,
+ "Trim multiple overscan sections")
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "trim", Memc[str])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/setzero.x b/noao/imred/quadred/src/ccdproc/setzero.x
new file mode 100644
index 00000000..610aeee7
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/setzero.x
@@ -0,0 +1,141 @@
+# Copyright(c) 1986 Association of Universities for Research in Astronomy Inc.
+
+include <imhdr.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+# SET_ZERO -- Set parameters for zero level correction.
+# 1. Return immediately if the zero level correction is not requested or
+# if the image has been previously corrected.
+# 2. Get the zero level correction image. Return an error if not found.
+# 3. If the zero level image has not been processed call ZEROPROC.
+# 4. Set the processing flag.
+# 5. Log the operation (to user, logfile, and output image header).
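+#
+# A one dimensional zero level image (readout correction) is allowed; when
+# its length along an axis is 1 the CCD limits along that axis are taken
+# from the input image (see the nc == 1 and nl == 1 checks below).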
+
+procedure set_zero (ccd)
+
+pointer ccd # CCD structure
+
+int nscan, nc, nl, c1, c2, cs, l1, l2, ls, data_c1, ccd_c1, data_l1, ccd_l1
+pointer sp, str, image, im, ccd_cache()
+bool clgetb(), ccdflag(), ccdcheck()
+int ccdtypei(), ccdnscan()
+errchk cal_image, ccd_cache, ccdproc
+
+begin
+ # Check if the user wants this operation or it has been done.
+ if (!clgetb ("zerocor") || ccdflag (IN_IM(ccd), "zerocor"))
+ return
+
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the zero level correction image.
+ if (clgetb ("scancor"))
+ nscan = ccdnscan (IN_IM(ccd), ccdtypei(IN_IM(ccd)))
+ else
+ nscan = 1
+ call cal_image (IN_IM(ccd), ZERO, nscan, Memc[image], SZ_FNAME)
+
+ # If no processing is desired print zero correction image and return.
+ if (clgetb ("noproc")) {
+ call eprintf (" [TO BE DONE] Zero level correction image is %s.\n")
+ call pargstr (Memc[image])
+ call sfree (sp)
+ return
+ }
+
+ # Map the image and return on an error.
+ # Process the zero image if necessary.
+ # If nscan > 1 then the zero may not yet exist so create it
+ # from the unscanned zero.
+
+ iferr (im = ccd_cache (Memc[image], ZERO)) {
+ call cal_image (IN_IM(ccd), ZERO, 1, Memc[str], SZ_LINE)
+ im = ccd_cache (Memc[str], ZERO)
+ if (ccdcheck (im, ZERO)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[str], ZERO)
+ }
+ call scancor (Memc[str], Memc[image], nscan, INDEF)
+ im = ccd_cache (Memc[image], ZERO)
+ }
+
+ if (ccdcheck (im, ZERO)) {
+ call ccd_flush (im)
+ call ccdproc (Memc[image], ZERO)
+ im = ccd_cache (Memc[image], ZERO)
+ }
+
+ # Set the processing parameters in the CCD structure.
+ nc = IM_LEN(im,1)
+ nl = IM_LEN(im,2)
+ c1 = 1
+ c2 = nc
+ l1 = 1
+ l2 = nl
+ cs = 1
+ ls = 1
+ call hdmgstr (im, "datasec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if ((c1<1)||(c2>nc)||(l1<1)||(l2>nl)||(cs!=1)||(ls!=1)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Data section error: image=%s[%d,%d], datasec=[%d:%d,%d:%d]")
+ call pargstr (Memc[image])
+ call pargi (nc)
+ call pargi (nl)
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+ data_c1 = c1
+ data_l1 = l1
+ call hdmgstr (im, "ccdsec", Memc[str], SZ_FNAME)
+ call ccd_section (Memc[str], c1, c2, cs, l1, l2, ls)
+ if (nc == 1) {
+ c1 = CCD_C1(ccd)
+ c2 = CCD_C2(ccd)
+ }
+ if (nl == 1) {
+ l1 = CCD_L1(ccd)
+ l2 = CCD_L2(ccd)
+ }
+ ccd_c1 = c1
+ ccd_l1 = l1
+ if ((c1 > CCD_C1(ccd)) || (c2 < CCD_C2(ccd)) ||
+ (l1 > CCD_L1(ccd)) || (l2 < CCD_L2(ccd))) {
+ call sprintf (Memc[str], SZ_LINE,
+ "CCD section error: input=[%d:%d,%d:%d], %s=[%d:%d,%d:%d]")
+ call pargi (CCD_C1(ccd))
+ call pargi (CCD_C2(ccd))
+ call pargi (CCD_L1(ccd))
+ call pargi (CCD_L2(ccd))
+ call pargstr (Memc[image])
+ call pargi (c1)
+ call pargi (c2)
+ call pargi (l1)
+ call pargi (l2)
+ call error (0, Memc[str])
+ }
+
+ ZERO_IM(ccd) = im
+ ZERO_C1(ccd) = CCD_C1(ccd) - ccd_c1 + data_c1
+ ZERO_C2(ccd) = CCD_C2(ccd) - ccd_c1 + data_c1
+ ZERO_L1(ccd) = CCD_L1(ccd) - ccd_l1 + data_l1
+ ZERO_L2(ccd) = CCD_L2(ccd) - ccd_l1 + data_l1
+
+ CORS(ccd, ZEROCOR) = Z
+ COR(ccd) = YES
+
+ # Log the operation.
+ call sprintf (Memc[str], SZ_LINE, "Zero level correction image is %s")
+ call pargstr (Memc[image])
+ call timelog (Memc[str], SZ_LINE)
+ call ccdlog (IN_IM(ccd), Memc[str])
+ call hdmpstr (OUT_IM(ccd), "zerocor", Memc[str])
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/t_ccdproc.x b/noao/imred/quadred/src/ccdproc/t_ccdproc.x
new file mode 100644
index 00000000..8d256046
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/t_ccdproc.x
@@ -0,0 +1,155 @@
+include <imhdr.h>
+include <error.h>
+include "ccdred.h"
+include "ccdtypes.h"
+
+define CACHEUNIT 1000000. # Units of max_cache parameter
+
+# T_CCDPROC -- Process CCD images
+#
+# This is the main procedure for processing CCD images. The images are
+# corrected for bad pixels, overscan levels, zero levels, dark counts,
+# flat field response, illumination errors, and fringe response. They
+# may also be trimmed. The input is a list of images to be processed.
+# Each image must match any image type requested. The checking of
+# whether to apply each correction, getting the required parameters, and
+# logging the operations is left to separate procedures, one for each
+# correction. The actual processing is done by a specialized procedure
+# designed to be very efficient. These procedures may also process
+# calibration images if necessary. There are two data type paths; one
+# for short pixel types and one for all other pixel types (usually
+# real).
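+#
+# Typical usage from the cl might look like (illustrative only):
+#
+#	cl> ccdproc obj* ccdtype=object overscan+ trim+ zerocor+ flatcor+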
+
+procedure t_ccdproc ()
+
+int list # List of CCD images to process
+int ccdtype # CCD image type
+int interactive # Fit overscan interactively?
+int max_cache # Maximum image cache size
+
+bool clgetb()
+real clgetr()
+int imtopenp(), imtgetim(), imtlen()
+pointer sp, input, output, str, in, out, ccd
+errchk set_input, set_output, ccddelete, cal_open
+errchk set_fixpix, set_zero, set_dark, set_flat, set_illum, set_fringe
+
+begin
+ call smark (sp)
+ call salloc (input, SZ_FNAME, TY_CHAR)
+ call salloc (output, SZ_FNAME, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the list and instrument translation file. Open the translation
+ # file. Initialize the interactive flag and the calibration images.
+
+ list = imtopenp ("images")
+ call clgstr ("instrument", Memc[input], SZ_FNAME)
+ call hdmopen (Memc[input])
+ call set_interactive ("", interactive)
+ call cal_open (list)
+ if (imtlen (list) < 3)
+ max_cache = 0.
+ else
+ max_cache = CACHEUNIT * clgetr ("max_cache")
+ call ccd_open (max_cache)
+
+ # Process each image.
+ while (imtgetim (list, Memc[input], SZ_FNAME) != EOF) {
+ if (clgetb ("noproc")) {
+ call printf ("%s:\n")
+ call pargstr (Memc[input])
+ }
+ call set_input (Memc[input], in, ccdtype)
+ if (in == NULL)
+ next
+
+ # Use a temporary image for output which will replace the input
+ # image after processing.
+
+ call mktemp ("tmp", Memc[output], SZ_FNAME)
+ call set_output (in, out, Memc[output])
+
+ # Set processing parameters applicable to all images.
+ call set_proc (in, out, ccd)
+ call set_sections (ccd)
+ call set_trim (ccd)
+ call set_fixpix (ccd)
+ call set_overscan (ccd)
+
+ # Set processing parameters for the standard CCD image types.
+ switch (ccdtype) {
+ case ZERO:
+ case DARK:
+ call set_zero (ccd)
+ case FLAT:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ CORS(ccd, FINDMEAN) = YES
+ CORS(ccd, MINREP) = YES
+ case ILLUM:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ case OBJECT, COMP:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ iferr {
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ } then
+ call erract (EA_WARN)
+ default:
+ call set_zero (ccd)
+ call set_dark (ccd)
+ call set_flat (ccd)
+ iferr {
+ call set_illum (ccd)
+ call set_fringe (ccd)
+ } then
+ call erract (EA_WARN)
+ CORS(ccd, FINDMEAN) = YES
+ }
+
+ # Do the processing if the COR flag is set.
+
+ if (COR(ccd) == YES) {
+ call doproc (ccd)
+ call set_header (ccd)
+
+ # Replace the input image by the corrected image.
+ call imunmap (in)
+ call imunmap (out)
+ iferr (call ccddelete (Memc[input])) {
+ call imdelete (Memc[output])
+ call error (1,
+ "Can't delete or make backup of original image")
+ }
+ call imrename (Memc[output], Memc[input])
+ } else {
+ # Delete the temporary output image leaving the input unchanged.
+ call imunmap (in)
+ iferr (call imunmap (out))
+ ;
+ iferr (call imdelete (Memc[output]))
+ ;
+ }
+ call free_proc (ccd)
+
+ # Do special processing on certain image types.
+ switch (ccdtype) {
+ case ZERO:
+ call readcor (Memc[input])
+ case FLAT:
+ call ccdmean (Memc[input])
+ }
+ }
+
+ # Finish up.
+ call hdmclose ()
+ call imtclose (list)
+ call cal_close ()
+ call ccd_close ()
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/timelog.x b/noao/imred/quadred/src/ccdproc/timelog.x
new file mode 100644
index 00000000..7a8d969f
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/timelog.x
@@ -0,0 +1,29 @@
+include <time.h>
+
+
+# TIMELOG -- Prepend a time stamp to the given string.
+#
+# For the purpose of a history logging prepend a short time stamp to the
+# given string. Note that the input string is modified.
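+#
+# For example (illustrative): the string "Trim data section is [2:799,2:799]"
+# becomes "<short date/time stamp> Trim data section is [2:799,2:799]".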
+
+procedure timelog (str, max_char)
+
+char str[max_char] # String to be time stamped
+int max_char # Maximum characters in string
+
+pointer sp, time, temp
+long clktime()
+
+begin
+ call smark (sp)
+ call salloc (time, SZ_DATE, TY_CHAR)
+ call salloc (temp, max_char, TY_CHAR)
+
+ call cnvdate (clktime(0), Memc[time], SZ_DATE)
+ call sprintf (Memc[temp], max_char, "%s %s")
+ call pargstr (Memc[time])
+ call pargstr (str)
+ call strcpy (Memc[temp], str, max_char)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/ccdproc/x_quadred.x b/noao/imred/quadred/src/ccdproc/x_quadred.x
new file mode 100644
index 00000000..a603d0d5
--- /dev/null
+++ b/noao/imred/quadred/src/ccdproc/x_quadred.x
@@ -0,0 +1 @@
+task ccdproc = t_ccdproc
diff --git a/noao/imred/quadred/src/mkpkg b/noao/imred/quadred/src/mkpkg
new file mode 100644
index 00000000..bd2bdbc0
--- /dev/null
+++ b/noao/imred/quadred/src/mkpkg
@@ -0,0 +1,4 @@
+update:
+ $call update@ccdproc
+ $call update@quad
+ ;
diff --git a/noao/imred/quadred/src/quad/Revisions b/noao/imred/quadred/src/quad/Revisions
new file mode 100644
index 00000000..55b490ed
--- /dev/null
+++ b/noao/imred/quadred/src/quad/Revisions
@@ -0,0 +1,92 @@
+---------------------------------------+--------------------------------------
+ Revisions started with version 1.1 of the QUAD package
+ 19/Mar/93
+
+ This package was written by Steve Heathcote!
+---------------------------------------+--------------------------------------
+
+quadgeom.x
+ Modified call to the ccd_section() routine (SH 19/Mar/93).
+
+ccdtypes.h
+ccdtypes.x
+ccdgetparam.x
+ Added routines to retrieve the image type from the image header,
+ and added a special case to translate the image type to the
+ corresponding package name in ccdgetparam() (SH 19/May/93).
+
+quadproc.cl
+ Added check to see if image is of type to be processed (SH 24/May/93).
+
+mkpkg
+quad.cl
+quadscale.x
+x_quad.x
+ Installed QUADSCALE task (SH,PG 24/May/93).
+
+
+________________________________________________________________________________
+ Version 2.0 29 March 94
+
+________________________________________________________________________________
+
+quadproc.cl
+qproc.cl
+ Fitting parameters adjusted interactively were not being saved if
+ quadproc aborted at a later stage. In interactive mode these parameters
+ are now written in qproc after each image has been processed and are
+ updated on disk. (SRH 30/Mar/94)
+
+quadproc.cl
+ When running with noproc+, if the calibration images supplied in the
+ input list had already been processed through [OT], the task would
+ complain that the calibration images were missing when the test for
+ second stage [ZF...] processing was performed. Added the calibration
+ images to the list to be considered in this case. This means that the
+ calibration images may appear twice in the output list but ....
+ (SRH 30/Mar/94)
+
+quadproc.cl
+ Complained about missing calibration images if the process flags
+ were set and no calibrations were specified, even when no images in
+ the list required the calibration step (e.g. all were zeros). Switched
+ off the check for existence in the qpcalimage call. This means the
+ task will not report the absence of required calibration images until
+ they come to be used, but ccdproc does that too. (SRH 30/Mar/94)
+
+ccddb/ctio/instruments.men
+ /Xfccd_f1.dat
+ /Xfccd_f2.dat
+ /Xfccd_both.dat
+ Added specific instrument files for the different filter subsets.
+ (SRH 30/Mar/94)
+
+qghdr2.x, quadsections.x
+ All quadrants in reduced images were being flagged as phantoms
+ causing quadsections to return no sections. (SRH 10/Jun/94)
+
+quad/ccdproc.par
+ Updated for V2.11.2. This should be backwards compatible.
+ (10/8/99, Valdes)
+
+________________________________________________________________________________
+ Version 2.1 29 October 99
+
+________________________________________________________________________________
+
+qnoproc.cl
+qproc.cl
+ Removed explicit dependence on "imh". The image extension is that
+ given by the imtype environment variable. (FV 20/Oct/99)
+
+________________________________________________________________________________
+ Version 2.2 20 June 00
+
+________________________________________________________________________________
+
+qnoproc.cl
+qproc.cl
+ Changed "len" to "i" in the clause that is executed when
+ imtype contains a ','. This caused the error
+ "Attempt to access undefined local variable `len'.
+ (6/20/00, Valdes)
diff --git a/noao/imred/quadred/src/quad/ccd.dat b/noao/imred/quadred/src/quad/ccd.dat
new file mode 100644
index 00000000..9ed9a970
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccd.dat
@@ -0,0 +1,27 @@
+# CCD.DAT -- Instrument file to be used with ccdred when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+#subset filter1
+subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" other
+COMPARISON other
+ZERO zero # New software
+#BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
+FOCUS object
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_both.dat b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_both.dat
new file mode 100644
index 00000000..37991738
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_both.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+subset filters
+#subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f1.dat b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f1.dat
new file mode 100644
index 00000000..68cd2063
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f1.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f2.dat b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f2.dat
new file mode 100644
index 00000000..c4d03cb8
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/cfccd_f2.dat
@@ -0,0 +1,27 @@
+# CFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+#subset filter1
+subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/csccd.dat b/noao/imred/quadred/src/quad/ccddb/ctio/csccd.dat
new file mode 100644
index 00000000..000f8c07
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/csccd.dat
@@ -0,0 +1,23 @@
+# CCD.DAT -- Instrument file to be used with ccdred when reducing spectroscopic
+# data obtained with ArCon.
+
+subset none
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON object
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/echccd.dat b/noao/imred/quadred/src/quad/ccddb/ctio/echccd.dat
new file mode 100644
index 00000000..90d08173
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/echccd.dat
@@ -0,0 +1,23 @@
+# ECHCCD.DAT -- Instrument file to be used with ccdred when reducing echelle
+# spectroscopic data obtained with ArCon.
+
+subset none
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
+FOCUS object
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/instruments.men b/noao/imred/quadred/src/quad/ccddb/ctio/instruments.men
new file mode 100644
index 00000000..144c41d5
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/instruments.men
@@ -0,0 +1,9 @@
+cfccd_f1 - Cassegrain focus CCD direct subset=filter1
+cfccd_f2 - Cassegrain focus CCD direct subset=filter2
+cfccd_both - Cassegrain focus CCD direct subset=filters
+csccd - Cassegrain focus spectroscopy
+echccd - Echelle spectroscopy
+nfccd - Newtonian focus CCD direct (Schmidt)
+pfccd_f1 - Prime focus CCD direct subset=filter1
+pfccd_f2 - Prime focus CCD direct subset=filter2
+pfccd_both - Prime focus CCD direct subset=filters
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/nfccd.dat b/noao/imred/quadred/src/quad/ccddb/ctio/nfccd.dat
new file mode 100644
index 00000000..06a173cf
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/nfccd.dat
@@ -0,0 +1,23 @@
+# NFCCD.DAT -- Instrument file to be used with ccdred when reducing direct
+# imaging data obtained with ArCon.
+
+subset filter1
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_both.dat b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_both.dat
new file mode 100644
index 00000000..ac8e03a6
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_both.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+subset filters
+#subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f1.dat b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f1.dat
new file mode 100644
index 00000000..9893d7f1
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f1.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+subset filter1
+#subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f2.dat b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f2.dat
new file mode 100644
index 00000000..89028468
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddb/ctio/pfccd_f2.dat
@@ -0,0 +1,27 @@
+# PFCCD.DAT -- Instrument file to be used with quad when reducing direct
+# imaging data obtained with ArCon.
+
+# Uncomment ONE of the following 3 lines to select the
+# header keyword to use when grouping images into subsets by filter.
+#subset filters
+#subset filter1
+subset filter2
+
+exptime exptime
+darktime darktime
+imagetyp imagetyp
+biassec biassec
+datasec datasec
+trimsec trimsec
+fixfile fixfile
+
+FOCUS object
+OBJECT object
+DARK dark
+"PROJECTOR FLAT" flat
+"SKY FLAT" flat
+COMPARISON other
+ZERO zero # New software
+BIAS zero # Old software
+"DOME FLAT" flat
+MASK other
diff --git a/noao/imred/quadred/src/quad/ccddelete.par b/noao/imred/quadred/src/quad/ccddelete.par
new file mode 100644
index 00000000..eaa63a22
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddelete.par
@@ -0,0 +1 @@
+image,s,a,"",,,Image to be delete (backed up)
diff --git a/noao/imred/quadred/src/quad/ccddelete.x b/noao/imred/quadred/src/quad/ccddelete.x
new file mode 100644
index 00000000..8a72796d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccddelete.x
@@ -0,0 +1,65 @@
+procedure t_ccddelete ()
+
+char image[SZ_LINE] # Image to delete (backup)
+
+begin
+
+ call clgstr ("image", image, SZ_LINE)
+ call ccddelete (image)
+end
+
+# CCDDELETE -- Delete an image by renaming it to a backup image.
+#
+# 1. Get the backup prefix which may be a path name.
+# 2. If no prefix is specified then delete the image without a backup.
+# 3. If there is a prefix then make a backup image name.
+# Rename the image to the backup image name.
+#
+# The backup image name is formed by prepending the backup prefix to the
+# image name. If a previous backup exists, append integers to the backup
+# prefix until a nonexistent image name is created.
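+#
+# For example (illustrative): with backup prefix "B" the image "obj001" is
+# renamed to "Bobj001"; if that name already exists "B1obj001", "B2obj001",
+# ... are tried in turn.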
+
+procedure ccddelete (image)
+
+char image[ARB] # Image to delete (backup)
+
+int i, imaccess()
+pointer sp, prefix, backup
+errchk imdelete, imrename
+
+begin
+ call smark (sp)
+ call salloc (prefix, SZ_FNAME, TY_CHAR)
+ call salloc (backup, SZ_FNAME, TY_CHAR)
+
+ # Get the backup prefix.
+ call clgstr ("backup", Memc[prefix], SZ_FNAME)
+ call xt_stripwhite (Memc[prefix])
+
+ # If there is no prefix then simply delete the image.
+ if (Memc[prefix] == EOS)
+ call imdelete (image)
+
+ # Otherwise create a backup image name which does not exist and
+ # rename the image to the backup image.
+
+ else {
+ i = 0
+ repeat {
+ if (i == 0) {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%s")
+ call pargstr (Memc[prefix])
+ call pargstr (image)
+ } else {
+ call sprintf (Memc[backup], SZ_FNAME, "%s%d%s")
+ call pargstr (Memc[prefix])
+ call pargi (i)
+ call pargstr (image)
+ }
+ i = i + 1
+ } until (imaccess (Memc[backup], READ_ONLY) == NO)
+ call imrename (image, Memc[backup])
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/quad/ccdgetparam.par b/noao/imred/quadred/src/quad/ccdgetparam.par
new file mode 100644
index 00000000..8647c9a9
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdgetparam.par
@@ -0,0 +1,2 @@
+image,s,a,"",,,Image whose parameter is to be fetched
+parameter,s,a,"",,,Parameter to be listed
diff --git a/noao/imred/quadred/src/quad/ccdgetparam.x b/noao/imred/quadred/src/quad/ccdgetparam.x
new file mode 100644
index 00000000..a032b553
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdgetparam.x
@@ -0,0 +1,48 @@
+procedure ccdgetparam ()
+
+char image[SZ_FNAME] # Image whose parameter is to be fetched
+char parameter[SZ_LINE] # Parameter whose value is required.
+char instrument[SZ_FNAME] # CCD instrument file.
+
+char buffer[SZ_LINE]
+pointer im
+
+pointer immap()
+int hdmaccf()
+bool streq()
+
+begin
+
+ call clgstr ("image", image, SZ_FNAME)
+ im = immap (image, READ_ONLY, 0)
+
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ call clgstr ("parameter", parameter, SZ_LINE)
+
+ # Handle special cases where we must translate the parameter value
+ # to the corresponding package name.
+ if (streq (parameter, "imagetyp")) {
+ call ccdtypes (im, buffer, SZ_LINE)
+ call printf ("%s\n")
+ call pargstr (buffer)
+
+ } else if (streq (parameter, "subset")) {
+ call ccdsubset (im, buffer, SZ_LINE)
+ call printf ("%s\n")
+ call pargstr (buffer)
+
+ } else {
+
+ if (hdmaccf (im, parameter) == NO) {
+ call printf ("UNDEFINED!\n")
+ } else {
+ call hdmgstr (im, parameter, buffer, SZ_LINE)
+ call printf ("%s\n")
+ call pargstr (buffer)
+ }
+ }
+
+ call imunmap (im)
+end
diff --git a/noao/imred/quadred/src/quad/ccdlog.x b/noao/imred/quadred/src/quad/ccdlog.x
new file mode 100644
index 00000000..61bbff10
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdlog.x
@@ -0,0 +1,42 @@
+
+# CCDLOG -- Log information about the processing with the image name.
+#
+# 1. If the package "verbose" parameter is set print the string preceded
+# by the image name.
+# 2. If the package "logfile" parameter is not null append the string,
+# preceded by the image name, to the file.
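+#
+# A typical log line might look like (illustrative):
+#	obj001: <time stamp> Trim data section is [2:799,2:799]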
+
+procedure ccdlog (image, str)
+
+char image[ARB] # Image name
+char str[ARB] # Log string
+
+int fd, open()
+bool clgetb()
+pointer sp, fname
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+
+ # Write to the standard error output if "verbose".
+ if (clgetb ("verbose")) {
+ call eprintf ("%s: %s\n")
+ call pargstr (image)
+ call pargstr (str)
+ }
+
+ # Append to the "logfile" if not null.
+ call clgstr ("logfile", Memc[fname], SZ_FNAME)
+ call xt_stripwhite (Memc[fname])
+ if (Memc[fname] != EOS) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call fprintf (fd, "%s: %s\n")
+ call pargstr (image)
+ call pargstr (str)
+ call close (fd)
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/quad/ccdprcselect.par b/noao/imred/quadred/src/quad/ccdprcselect.par
new file mode 100644
index 00000000..453f4b4d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdprcselect.par
@@ -0,0 +1,4 @@
+input,s,a,"",,,Input image list
+output,s,h,"STDOUT",,,Output image list
+procflag,s,h,"",,,Processing flag for filter action
+ccdtype,s,h,"",,,CCD image type to be listed
diff --git a/noao/imred/quadred/src/quad/ccdprcselect.x b/noao/imred/quadred/src/quad/ccdprcselect.x
new file mode 100644
index 00000000..11b4e2ef
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdprcselect.x
@@ -0,0 +1,90 @@
+# CCD_PRCSELECT -- Filter a list of image names passing on only those that
+# do (or don't) have a specified processing flag set.
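+#
+# For example (illustrative): procflag = "trim" passes only images whose
+# headers contain the (translated) trim keyword, procflag = "!trim" passes
+# only those without it, and an empty procflag passes every image.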
+
+include "ccdtypes.h"
+
+define PROCFLAGS "|fixpix|overscan|trim|zerocor|darkcor|flatcor|illumcor\
+ |fringecor|ccdproc|"
+
+procedure t_ccdprcselect ()
+
+pointer inlist #TI List of input image names.
+char output[SZ_FNAME] #TI List of output image names.
+char instrument[SZ_FNAME] #TI Instrument translation file.
+char procflag[SZ_LINE] #TI List of proc flags.
+char ccdtype[SZ_LINE] #TI ccdtype to select.
+
+int flag, ip, type
+char image[SZ_LINE], buffer[SZ_LINE]
+pointer fdout, im
+
+int strdic(), imtopenp(), imtgetim(), hdmaccf(), ctowrd(), imaccess()
+int ccdtypei()
+
+pointer open(), immap()
+
+begin
+ # Open input and output image lists
+ inlist = imtopenp ("input")
+ call clgstr ("output", output, SZ_LINE)
+ fdout = open (output, APPEND, TEXT_FILE)
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Get processing flag.
+ # If the first character is "!" pass all images for which the specified
+# flag is not set. If the processing flag is "" we pass all images.
+ flag = 0
+ call clgstr ("procflag", buffer, SZ_LINE)
+ ip = 1
+ if (ctowrd (buffer, ip, procflag, SZ_LINE) != 0) {
+ if (procflag[1] == '!') {
+ flag = -1 * strdic (procflag[2], procflag, SZ_LINE, PROCFLAGS)
+ } else {
+ flag = strdic (procflag, procflag, SZ_LINE, PROCFLAGS)
+ }
+ if (flag == 0)
+ call error (0, "Unknown processing flag")
+ }
+
+ # Get ccdtype to select.
+ call clgstr ("ccdtype", ccdtype, SZ_LINE)
+ type = strdic (ccdtype, ccdtype, SZ_LINE, CCDTYPES)
+
+ while (imtgetim (inlist, image, SZ_LINE) != EOF) {
+
+ # Silently skip any non-existent images
+ if (imaccess (image, READ_ONLY) == NO)
+ next
+
+ im = immap (image, READ_ONLY, 0)
+
+ if ((ccdtype[1] != EOS) && (type != ccdtypei (im))) {
+ call imunmap (im)
+ next
+ }
+
+ if (flag < 0) {
+ if (hdmaccf (im, procflag) == NO) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+ }
+ } else if (flag > 0) {
+ if (hdmaccf (im, procflag) == YES) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+ }
+ } else {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+ }
+ call imunmap (im)
+ }
+
+ # Tidy up
+ call close (fdout)
+ call hdmclose ()
+ call imtclose (inlist)
+end
diff --git a/noao/imred/quadred/src/quad/ccdproc.par b/noao/imred/quadred/src/quad/ccdproc.par
new file mode 100644
index 00000000..01065e28
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdproc.par
@@ -0,0 +1,43 @@
+images,s,a,"",,,List of CCD images to correct
+output,s,h,"",,,List of output CCD images
+ccdtype,s,h,"other",,,CCD image type to correct
+max_cache,i,h,0,0,,Maximum image caching memory (in Mbytes)
+noproc,b,h,no,,,"List processing steps only?
+"
+fixpix,b,h,no,,,Fix bad CCD lines and columns?
+overscan,b,h,no,,,Apply overscan strip correction?
+trim,b,h,no,,,Trim the image?
+zerocor,b,h,no,,,Apply zero level correction?
+darkcor,b,h,no,,,Apply dark count correction?
+flatcor,b,h,no,,,Apply flat field correction?
+illumcor,b,h,no,,,Apply illumination correction?
+fringecor,b,h,no,,,Apply fringe correction?
+readcor,b,h,no,,,Convert zero level image to readout correction?
+scancor,b,h,no,,,"Convert flat field image to scan correction?
+"
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+biassec,s,h,"",,,Overscan strip image section
+trimsec,s,h,"",,,Trim data section
+zero,s,h,"",,,Zero level calibration image
+dark,s,h,"",,,Dark count calibration image
+flat,s,h,"",,,Flat field images
+illum,s,h,"",,,Illumination correction images
+fringe,s,h,"",,,Fringe correction images
+minreplace,r,h,1.,,,Minimum flat field value
+scantype,s,h,"shortscan","shortscan|longscan",,Scan type (shortscan|longscan)
+nscan,i,h,1,1,,"Number of short scan lines
+"
+interactive,b,h,no,,,Fit overscan interactively?
+function,s,h,"legendre",,,Fitting function
+order,i,h,1,1,,Number of polynomial terms or spline pieces
+sample,s,h,"*",,,Sample points to fit
+naverage,i,h,1,,,Number of sample points to combine
+niterate,i,h,1,0,,Number of rejection iterations
+low_reject,r,h,3.,0.,,Low sigma rejection factor
+high_reject,r,h,3.,0.,,High sigma rejection factor
+grow,r,h,0.,0.,,"Rejection growing radius
+"
+backup,s,h,"",,,Backup directory or prefix
+logfile,s,h,"",,,Text log file
+verbose,b,h,no,,,Print log information to the standard output?
diff --git a/noao/imred/quadred/src/quad/ccdsection.par b/noao/imred/quadred/src/quad/ccdsection.par
new file mode 100644
index 00000000..ff8ae7ed
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdsection.par
@@ -0,0 +1 @@
+section,s,a,,,,Section to decode
diff --git a/noao/imred/quadred/src/quad/ccdsection.x b/noao/imred/quadred/src/quad/ccdsection.x
new file mode 100644
index 00000000..d6b0d6a7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdsection.x
@@ -0,0 +1,119 @@
+include <ctype.h>
+
+# CCD_SECTION -- Parse a 2D image section into its elements.
+# 1. The default values must be set by the caller.
+# 2. A null image section is OK.
+# 3. The first nonwhitespace character must be '['.
+# 4. The last interpreted character must be ']'.
+#
+# This procedure should be replaced with an IMIO procedure at some
+# point.
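+#
+# For example (illustrative): "[1:800,2:799]" returns x1=1, x2=800, y1=2,
+# y2=799 (the steps keep their default values) while a null string leaves
+# all the defaults unchanged.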
+
+
+# CL callable entry point
+
+procedure t_ccdsection ()
+
+char section[SZ_LINE] #T Section to parse
+
+int x1, x2, y1, y2, xstep, ystep
+
+begin
+ call clgstr ("section", section, SZ_LINE)
+ call ccd_section (section, x1, x2, xstep, y1, y2, ystep)
+ call printf ("%d %d %d %d \n")
+ call pargi (x1)
+ call pargi (x2)
+ call pargi (y1)
+ call pargi (y2)
+end
+
+procedure ccd_section (section, x1, x2, xstep, y1, y2, ystep)
+
+char section[ARB] # Image section
+int x1, x2, xstep # X image section parameters
+int y1, y2, ystep # Y image section parameters
+
+int i, ip, a, b, c, temp, ctoi()
+define error_ 99
+
+begin
+ # Decode the section string.
+ ip = 1
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == '[')
+ ip = ip + 1
+ else if (section[ip] == EOS)
+ return
+ else
+ goto error_
+
+ do i = 1, 2 {
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+
+ # Default values
+ if (i == 1) {
+ a = x1
+ b = x2
+ c = xstep
+ } else {
+ a = y1
+ b = y2
+ c = ystep
+ }
+
+ # Get a:b:c. Allow notation such as "-*:c"
+ # (or even "-:c") where the step is obviously negative.
+
+ if (ctoi (section, ip, temp) > 0) { # a
+ a = temp
+ if (section[ip] == ':') {
+ ip = ip + 1
+ if (ctoi (section, ip, b) == 0) # a:b
+ goto error_
+ } else
+ b = a
+ } else if (section[ip] == '-') { # -*
+ temp = a
+ a = b
+ b = temp
+ ip = ip + 1
+ if (section[ip] == '*')
+ ip = ip + 1
+ } else if (section[ip] == '*') # *
+ ip = ip + 1
+ if (section[ip] == ':') { # ..:step
+ ip = ip + 1
+ if (ctoi (section, ip, c) == 0)
+ goto error_
+ else if (c == 0)
+ goto error_
+ }
+ if (a > b && c > 0)
+ c = -c
+
+ if (i == 1) {
+ x1 = a
+ x2 = b
+ xstep = c
+ } else {
+ y1 = a
+ y2 = b
+ ystep = c
+ }
+
+ while (IS_WHITE(section[ip]))
+ ip = ip + 1
+ if (section[ip] == ',')
+ ip = ip + 1
+ }
+
+ if (section[ip] != ']')
+ goto error_
+
+ return
+error_
+ call error (0, "Error in image section specification")
+end
diff --git a/noao/imred/quadred/src/quad/ccdssselect.par b/noao/imred/quadred/src/quad/ccdssselect.par
new file mode 100644
index 00000000..8499900d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdssselect.par
@@ -0,0 +1,4 @@
+input,s,a,"",,,Input image list
+output,s,h,"STDOUT",,,Output image list
+subset,s,h,"",,,Subset to be listed
+ccdtype,s,h,"",,,CCD image type to be listed
diff --git a/noao/imred/quadred/src/quad/ccdssselect.x b/noao/imred/quadred/src/quad/ccdssselect.x
new file mode 100644
index 00000000..65a7248f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdssselect.x
@@ -0,0 +1,73 @@
+# CCDSUBSETSELECT -- Filter a list of image names passing on only those that
+# belong to a specified subset.
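+#
+# For example (illustrative): subset = "V" and ccdtype = "flat" passes only
+# the flat field images whose subset identifier is "V".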
+
+include "ccdtypes.h"
+
+procedure t_ccdssselect ()
+
+pointer inlist #TI List of input image names.
+char output[SZ_FNAME] #TI List of output image names.
+char instrument[SZ_FNAME] #TI Instrument translation file.
+char subset[SZ_LINE] #TI Subset required.
+char ccdtype[SZ_LINE] #TI ccdtype required.
+
+int type
+char image[SZ_LINE], buffer[SZ_LINE]
+pointer fdout, im
+
+int strdic(), imtopenp(), imtgetim(), ccdtypei(), imaccess()
+pointer open(), immap()
+bool strne()
+
+begin
+ # Open input and output image lists
+ inlist = imtopenp ("input")
+ call clgstr ("output", output, SZ_LINE)
+ fdout = open (output, APPEND, TEXT_FILE)
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Get subset required.
+ call clgstr ("subset", subset, SZ_LINE)
+
+ # Get ccdtype required.
+ call clgstr ("ccdtype", ccdtype, SZ_LINE)
+ type = strdic (ccdtype, ccdtype, SZ_LINE, CCDTYPES)
+
+ while (imtgetim (inlist, image, SZ_LINE) != EOF) {
+
+ # Silently skip non-existent images
+ if (imaccess (image, READ_ONLY) == NO)
+ next
+
+ im = immap (image, READ_ONLY, 0)
+
+ # Skip images of the wrong type
+ if ((ccdtype[1] != EOS) && (type != ccdtypei (im))) {
+ call imunmap (im)
+ next
+ }
+
+ # Skip images of the wrong subset
+ if (subset[1] != EOS) {
+ call ccdsubset (im, buffer, SZ_LINE)
+ if (strne (subset, buffer)) {
+ call imunmap (im)
+ next
+ }
+ }
+
+ # Print the names of any images which pass the test.
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+
+ call imunmap (im)
+ }
+
+ # Tidy up
+ call close (fdout)
+ call hdmclose ()
+ call imtclose (inlist)
+end
diff --git a/noao/imred/quadred/src/quad/ccdsubsets.x b/noao/imred/quadred/src/quad/ccdsubsets.x
new file mode 100644
index 00000000..6152897f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdsubsets.x
@@ -0,0 +1,92 @@
+# CCDSUBSET -- Return the CCD subset identifier.
+#
+# 1. Get the subset string and search the subset record file for the ID string.
+# 2. If the subset string is not in the record file define a default ID string
+# based on the first word of the subset string. If the first word is not
+# unique append a integer to the first word until it is unique.
+# 3. Add the new subset string and identifier to the record file.
+# 4. Since the ID string is used to generate image names replace all
+# nonimage name characters with '_'.
+#
+# It is an error if the record file cannot be created or written when needed.
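+#
+# For example (illustrative): a subset string "V 600nm" yields the ID "V";
+# if "V" is already recorded for a different subset string the IDs "V1",
+# "V2", ... are tried until a unique one is found.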
+
+procedure ccdsubset (im, subset, sz_name)
+
+pointer im # Image
+char subset[sz_name] # CCD subset identifier
+int sz_name # Size of subset string
+
+bool streq()
+int i, fd, ctowrd(), open(), fscan()
+pointer sp, fname, str1, str2, subset1, subset2, subset3
+errchk open
+
+begin
+ call smark (sp)
+ call salloc (fname, SZ_FNAME, TY_CHAR)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+ call salloc (subset1, SZ_LINE, TY_CHAR)
+ call salloc (subset2, SZ_LINE, TY_CHAR)
+ call salloc (subset3, SZ_LINE, TY_CHAR)
+
+ # Get the subset record file and the subset string.
+ call clgstr ("ssfile", Memc[fname], SZ_LINE)
+ call hdmgstr (im, "subset", Memc[str1], SZ_LINE)
+
+ # The default subset identifier is the first word of the subset string.
+ i = 1
+ i = ctowrd (Memc[str1], i, Memc[subset1], SZ_LINE)
+
+ # A null subset string is ok. If not null check for conflict
+ # with previous subset IDs.
+ if (Memc[str1] != EOS) {
+ call strcpy (Memc[subset1], Memc[subset3], SZ_LINE)
+
+ # Search the subset record file for the same subset string.
+ # If found use the ID string. If the subset ID has been
+ # used for another subset string then increment an integer
+ # suffix to the default ID and check the list again.
+
+ i = 1
+ ifnoerr (fd = open (Memc[fname], READ_ONLY, TEXT_FILE)) {
+ while (fscan (fd) != EOF) {
+ call gargwrd (Memc[str2], SZ_LINE)
+ call gargwrd (Memc[subset2], SZ_LINE)
+ if (streq (Memc[str1], Memc[str2])) {
+ i = 0
+ call strcpy (Memc[subset2], Memc[subset1], SZ_LINE)
+ break
+ } if (streq (Memc[subset1], Memc[subset2])) {
+ call sprintf (Memc[subset1], SZ_LINE, "%s%d")
+ call pargstr (Memc[subset3])
+ call pargi (i)
+ i = i + 1
+ call seek (fd, BOF)
+ }
+ }
+ call close (fd)
+ }
+
+ # If the subset is not in the record file add it.
+ if (i > 0) {
+ fd = open (Memc[fname], APPEND, TEXT_FILE)
+ call fprintf (fd, "'%s'\t%s\n")
+ call pargstr (Memc[str1])
+ call pargstr (Memc[subset1])
+ call close (fd)
+ }
+ }
+
+ # Set the subset ID string and replace magic characters by '_'
+ # since the subset ID is used in forming image names.
+
+ call strcpy (Memc[subset1], subset, sz_name)
+ for (i=1; subset[i]!=EOS; i=i+1)
+ switch (subset[i]) {
+ case '-','+','?','*','[',']',' ','\t':
+ subset[i] = '_'
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/quad/ccdtypes.h b/noao/imred/quadred/src/quad/ccdtypes.h
new file mode 100644
index 00000000..0d5d4caf
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdtypes.h
@@ -0,0 +1,14 @@
+# Standard CCD image types.
+
+define CCDTYPES "|object|zero|dark|flat|illum|fringe|other|comp|"
+
+define NONE -1
+define UNKNOWN 0
+define OBJECT 1
+define ZERO 2
+define DARK 3
+define FLAT 4
+define ILLUM 5
+define FRINGE 6
+define OTHER 7
+define COMP 8
diff --git a/noao/imred/quadred/src/quad/ccdtypes.x b/noao/imred/quadred/src/quad/ccdtypes.x
new file mode 100644
index 00000000..bf6d29e2
--- /dev/null
+++ b/noao/imred/quadred/src/quad/ccdtypes.x
@@ -0,0 +1,72 @@
+include "ccdtypes.h"
+
+# CCDTYPES -- Return the CCD type name string.
+# CCDTYPEI -- Return the CCD type code.
+
+
+# CCDTYPES -- Return the CCD type name string.
+
+procedure ccdtypes (im, name, sz_name)
+
+pointer im # Image
+char name[sz_name] # CCD type name
+int sz_name # Size of name string
+
+int strdic()
+pointer sp, str
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ # Get the image type string. If none then return "none".
+ # Otherwise get the corresponding package image type string.
+ # If the image type is unknown return "unknown" otherwise return
+ # the package name.
+
+ call hdmgstr (im, "imagetyp", Memc[str], SZ_LINE)
+ if (Memc[str] == EOS) {
+ call strcpy ("none", name, sz_name)
+ } else {
+ call hdmname (Memc[str], name, sz_name)
+ if (name[1] == EOS)
+ call strcpy (Memc[str], name, sz_name)
+ if (strdic (name, name, sz_name, CCDTYPES) == UNKNOWN)
+ call strcpy ("unknown", name, sz_name)
+ }
+
+ call sfree (sp)
+end
+
+
+# CCDTYPEI -- Return the CCD type code.
+
+int procedure ccdtypei (im)
+
+pointer im # Image
+int ccdtype # CCD type (returned)
+
+pointer sp, str1, str2
+int strdic()
+
+begin
+ call smark (sp)
+ call salloc (str1, SZ_LINE, TY_CHAR)
+ call salloc (str2, SZ_LINE, TY_CHAR)
+
+ # Get the image type and if there is none then return the NONE code.
+ call hdmgstr (im, "imagetyp", Memc[str1], SZ_LINE)
+ if (Memc[str1] == EOS) {
+ ccdtype = NONE
+
+ # Otherwise get the package type and convert to an image type code.
+ } else {
+ call hdmname (Memc[str1], Memc[str2], SZ_LINE)
+ if (Memc[str2] == EOS)
+ call strcpy (Memc[str1], Memc[str2], SZ_LINE)
+ ccdtype = strdic (Memc[str2], Memc[str2], SZ_LINE, CCDTYPES)
+ }
+
+ call sfree (sp)
+ return (ccdtype)
+end
diff --git a/noao/imred/quadred/src/quad/detpars.par b/noao/imred/quadred/src/quad/detpars.par
new file mode 100644
index 00000000..bbb9f8aa
--- /dev/null
+++ b/noao/imred/quadred/src/quad/detpars.par
@@ -0,0 +1,6 @@
+xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+ytrim2,i,h,INDEF,0,,Y pixels to trim at end of data
diff --git a/noao/imred/quadred/src/quad/doc/Geometry.fig b/noao/imred/quadred/src/quad/doc/Geometry.fig
new file mode 100644
index 00000000..429d987f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/Geometry.fig
@@ -0,0 +1,91 @@
+# The following diagrams show the CCD geometry for the different readout modes.
+#
+# Single readout:
+# G
+# <-------------------------------->
+# A F
+# <----------------------><-------->
+# B C D E
+# <-> <--><> <>
+# ^ ^ +-----------------------+--------+ ^ ^
+# | | | | | | e |
+# | c | | |--------| v |
+# | v | +----------------+ || || |
+# | | | | || o || |
+# | | | | || v || |
+# | | | | || e || |
+# a | | | | || r || | f
+# | | | | || s || |
+# | | | | || c || |
+# | | | | || a || |
+# | | +----------------+ || n || |
+# | ^ | |--------| |
+# | b | | | | ^ |
+# | | | | | | d |
+# v V *-----------------------+--------+ v v
+#
+# Quad readout (single frame):
+# G'
+# <--------------------------------->
+# A/2 F/2 F/2 A/2
+# <---------><---><---><----------->
+# B D 2*E D C
+# <-> <> < > <> <-->
+# ^ ^ *----------+----.----+------------* ^ ^
+# | | | | . | | | e |
+# | c | | |----.----| | v |
+# | v | +-------+| |.| |+--------+ | |
+# a/2 | | | ||o |.|o || | | | f/2
+# | | | 3 ||v |.|v || 4 | | |
+# | | | || |.| || | | |
+# | | | ||3 |.|4 || | | |
+# V ................................... v
+# ^ | | ||o |.|o || | | ^
+# | | | 1 ||v |.|v || 2 | | |
+# | | | || |.| || | | |
+# a/2 | | +-------+|1 |.|2 |+--------+ | | f/2
+# | ^ | || |.| || | |
+# | b | | |----.----| | ^ |
+# | | | | . | | | d |
+# v v *----------+----.----+------------* v V
+#
+#
+# Quad readout (four frames):
+#
+# G" G"
+# <--------------> <---------------->
+# A/2 F/2 F/2 A/2
+# <---------><----> <----><---------->
+# B D E E D C
+# <-> <> <> <> <> <-->
+# ^ ^ *----------+----+ +----+------------* ^ ^
+# | | | | | | | | | e |
+# | c | | |----| |----| | v |
+# a/2 | v | +-------+| || || |+--------+ | ^ |
+# | | | ||o || ||o || | | | | f/2
+# | | | 3 ||v || ||v || 4 | | | |
+# | | | || || || || | | | |
+# V | | ||3 || ||4 || | | | |
+# +---------------+ +-----------------+ v v
+#
+#
+# +---------------+ +-----------------+ ^ ^
+# ^ | | ||o || ||o || | | | |
+# | | | 1 ||v || ||v || 2 | | | |
+# | | | || || || || | | | |
+# | | +-------+|1 || ||2 |+--------+ | | | f/2
+# a/2 | ^ | || || || || | v |
+# | b | | |----| |----| | ^ |
+# | | | | | | | | | d |
+# v v *----------+----+ +----+------------* v v
+#
+# Where
+# A = xdata a = ydata
+# B = txskip1 b = tyskip1
+# C = txskip2 c = tyskip2
+# D = bxskip1 d = byskip1
+# E = bxskip2 e = byskip2
+# F = xover f = yover = a
+# G = A + F
+# G' = A + F
+# G" = (A + F) / 2
diff --git a/noao/imred/quadred/src/quad/doc/badpiximage.hlp b/noao/imred/quadred/src/quad/doc/badpiximage.hlp
new file mode 100644
index 00000000..46e13160
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/badpiximage.hlp
@@ -0,0 +1,51 @@
+.help badpiximage Jun87 noao.imred.ccdred
+.ih
+NAME
+badpiximage -- Create a bad pixel mask image from a bad pixel file
+.ih
+USAGE
+badpiximage fixfile template image
+.ih
+PARAMETERS
+.ls fixfile
+Bad pixel file.
+.le
+.ls template
+Template image used to define the size of the bad pixel mask image.
+.le
+.ls image
+Bad pixel mask image to be created.
+.le
+.ls goodvalue = 1
+Integer value assigned to the good pixels.
+.le
+.ls badvalue = 0
+Integer value assigned to the bad pixels.
+.le
+.ih
+DESCRIPTION
+A bad pixel mask image is created from the specified bad pixel file.
+The format of the bad pixel file is that used by \fBccdproc\fR to
+correct CCD defects (see \fBinstruments\fR).  The bad pixel image is of
+pixel type short and
+has the value given by the parameter \fBgoodvalue\fR for the good
+pixels and the value given by the parameter \fBbadvalue\fR for the bad pixels.
+The image size and header parameters are taken from the specified
+template image. The bad pixel mask image may be used to view the
+location of the bad pixels and blink against a data image using an
+image display, to mask or flag bad pixels later by image arithmetic,
+and to propagate the positions of the bad pixels through the
+reductions.
+.ih
+EXAMPLES
+1. To make a bad pixel mask image from the bad pixel file "cryocambp.dat"
+using the image "ccd005" as the template:
+
+ cl> badpiximage cryocambp.dat ccd005 cryocambp
+
+2. To make the bad pixel mask image with good values of 0 and bad values of 1:
+
+	cl> badpiximage cryocambp.dat ccd005 cryocambp good=0 bad=1
+.ih
+SEE ALSO
+ccdproc, instruments
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdgeometry.hlp b/noao/imred/quadred/src/quad/doc/ccdgeometry.hlp
new file mode 100644
index 00000000..c01a09c8
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdgeometry.hlp
@@ -0,0 +1,70 @@
+.help ccdgeometry Sep87 noao.imred.ccdred
+.ih
+NAME
+ccdgeometry - Discussion of CCD geometry and header parameters
+.ih
+DESCRIPTION
+The \fBccdred\fR package maintains and updates certain geometry
+information about the images. This geometry is described by four image
+header parameters which may be present. These are defined below by the
+parameter names used in the package. Note that these names may be
+different in the image header using the image header translation
+feature of the package.
+
+.ls DATASEC
+The section of the image containing the CCD data. If absent the
+entire image is assumed to be data. Only the pixels within the
+data section are modified during processing. Therefore, there may be
+additional calibration or observation information in the image.
+If, after processing, the data section is the entire image, it is
+not recorded in the image header.
+.le
+.ls CCDSEC
+The section of the CCD corresponding to the data section. This
+refers to the physical format, columns and lines, of the detector. This is
+the coordinate system used during processing to relate calibration
+data to the image data; i.e. image data pixels are calibrated by
+calibration pixels at the same CCD coordinates regardless of image pixel
+coordinates. This allows recording only parts of the CCD during data
+taking and calibrating with calibration frames covering some or all of
+the CCD. The CCD section is maintained during trimming operations.
+Note that changing the format of the images by image operators outside
+of the \fBccdred\fR package will invalidate this coordinate system.
+The size of the CCD section must agree with that of the data section.
+If a CCD section is absent then it defaults to the data section such
+that the first pixel of the data section has CCD coordinate (1,1).
+.le
+.ls BIASSEC
+The section of the image containing prescan or overscan bias information.
+It consists of a strip perpendicular to the readout axis. There may be
+both a prescan and overscan but the package currently only uses one.
+This parameter may be overridden during processing by the parameter
+\fIccdproc.biassec\fR.
+.le
+.ls TRIMSEC
+The section of the image extracted during processing when the trim
+operation is selected (\fIccdproc.trim\fR). If absent when the trim
+operation is selected it defaults to the data section; i.e. the processed
+image consists only of the data section. This parameter may be overridden
+during processing by the parameter \fIccdproc.trimsec\fR. After trimming
+this parameter, if present, is removed from the image header. The
+CCD section, data section, and bias section parameters are also modified
+by trimming.
+.le
+
+The geometry is as follows. When a CCD image is recorded it consists
+of a data section corresponding to part or all of the CCD detector.
+Regions outside of the data section may contain additional information
+which is not affected except by trimming. Most commonly this consists
+of prescan and overscan bias data. When recording only part of the
+full CCD detector the package maintains information about that part and
+correctly applies calibrations for that part of the detector. Also any
+trimming operation updates the CCD coordinate information. If the
+images include the data section, bias section, trim section, and ccd
+section the processing may be performed entirely automatically.
+
+The sections are specified using the notation [c1:c2,l1:l2] where c1
+and c2 are the first and last columns and l1 and l2 are the first and
+last lines. Currently c1 and l1 must be less than c2 and l2
+respectively and no subsampling is allowed. This may be added later.
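+
+As an illustration only (the actual values are instrument dependent), a
+hypothetical detector read out with an overscan strip at the right and a
+two pixel border to be trimmed might record keywords such as:
+
+.nf
+	DATASEC = [1:800,1:800]
+	BIASSEC = [803:830,1:800]
+	TRIMSEC = [3:798,3:798]
+	CCDSEC  = [1:800,1:800]
+.fi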
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdgroups.hlp b/noao/imred/quadred/src/quad/doc/ccdgroups.hlp
new file mode 100644
index 00000000..48c29b99
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdgroups.hlp
@@ -0,0 +1,163 @@
+.help ccdgroups Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdgroups -- Group CCD images into image lists
+.ih
+USAGE
+ccdgroups images output
+.ih
+PARAMETERS
+.ls images
+List of CCD images to be grouped.
+.le
+.ls output
+Output root group filename. The image group lists will be put in files
+with this root name followed by a number.
+.le
+.ls group = "ccdtype"
+Group type. There are currently five grouping types:
+.ls ccdtype
+Group by CCD image type.
+.le
+.ls subset
+Group by subset parameter.
+.le
+.ls position
+Group by position in right ascension (in hours) and declination (in degrees).
+The groups are defined by a radius parameter (in arc seconds).
+.le
+.ls title
+Group by identical titles.
+.le
+.ls date
+Group by identical dates.
+.le
+.le
+.ls radius = 60.
+Grouping radius when grouping by positions. This is given in arc seconds.
+.le
+.ls ccdtype = ""
+CCD image types to select from the input image list. If null ("") then
+all image types are used.
+.le
+.ih
+DESCRIPTION
+The input images, possibly restricted to a particular CCD image type,
+are grouped into image lists. The "ccdtype" or "subset" groups
+produce output image lists with the given root name and the CCD type
+or subset as an extension (without a period). For the other group
+types the
+image lists have file names given by
+the root output name and a numeric extension (without a period).
+If the package parameter \fIccdred.verbose\fR is yes then the
+image name and output group list are printed for each image. The image lists can
+be used with the @ list feature for processing separate groups of observations.
+Note that grouping by CCD image type and subset is often not necessary since
+the \fBccdred\fR tasks automatically use this information (see
+\fBccdtypes\fR and \fBsubsets\fR).
+
+Besides CCD image type and subsets there are currently three ways to
+group images. These are by position in the sky, by title, and by
+date. Further groups may be added as suggested. The title grouping is
+useful if consistent titles are used when taking data. The date
+grouping is useful if multiple nights of observations are not organized
+by directories (it is recommended that data from separate nights be
+kept in separate directories). The position grouping finds
+observations within a given radius on the sky of the first member of
+the group (this is not a clustering algorithm). The right ascension
+and declination coordinates must be in standard units, hours and
+degrees respectively. The grouping radius is in arc seconds. This
+grouping type is useful for making sets of data in which separate
+calibration images are taken at each position.
+
+The date, title, and coordinates are accessed through the instrument
+translation file. The standard names used are "date-obs", "title", "ra",
+and "dec".
+.ih
+EXAMPLES
+1. For each object 5 exposures were taken to be combined in order to remove
+cosmic rays. If the titles are the same then (with ccdred.verbose=yes):
+
+.nf
+ cl> ccdgroups *.imh group group=title ccdtype=object
+ ccd005.imh --> group1
+ ccd006.imh --> group1
+ ccd007.imh --> group1
+ ccd008.imh --> group1
+ ccd009.imh --> group1
+ ccd012.imh --> group2
+ ccd013.imh --> group2
+ ccd014.imh --> group2
+ ccd015.imh --> group2
+ ccd016.imh --> group2
+ [... etc ...]
+ cl> combine @group1 obj1 proc+
+ cl> combine @group2 obj2 proc+
+ [... etc ...]
+.fi
+
+Note the numeric suffixes to the output root name "group".
+
+2. CCD observations were made in groups with a flat field, the object, and
+a comparison spectrum at each position. To group and process this data:
+
+.nf
+ cl> ccdgroups *.imh obs group=position >> logfile
+ cl> ccdproc @obs1
+ cl> ccdproc @obs2
+ cl> ccdproc @obs3
+.fi
+
+Since no flat field is specified for the parameter \fIccdproc.flat\fR
+the flat field is taken from the input image list.
+
+3. If for some reason you want to group by date and position it is possible
+to use two steps.
+
+.nf
+	cl> ccdgroups *.imh date group=date
+	cl> ccdgroups @date1 pos1 group=position
+	cl> ccdgroups @date2 pos2 group=position
+.fi
+
+4. To get groups by CCD image type:
+
+.nf
+ cl> ccdgroups *.imh "" group=ccdtype
+ ccd005.imh --> zero
+ ccd006.imh --> zero
+ ccd007.imh --> zero
+ ccd008.imh --> dark
+ ccd009.imh --> flat
+ ccd012.imh --> flat
+ ccd013.imh --> object
+ ccd014.imh --> object
+ ccd015.imh --> object
+ ccd016.imh --> object
+ [... etc ...]
+.fi
+
+Note the use of a null root name; the extensions are the standard
+CCDRED types (not necessarily those used in the image header).
+
+5. To get groups by subset:
+
+.nf
+ cl> ccdgroups *.imh filt group=subset
+ ccd005.imh --> filt
+ ccd006.imh --> filtB
+ ccd007.imh --> filtB
+ ccd008.imh --> filtB
+ ccd009.imh --> filtV
+ ccd012.imh --> filtV
+ ccd013.imh --> filtV
+ ccd014.imh --> filtB
+ ccd015.imh --> filtB
+ ccd016.imh --> filtB
+ [... etc ...]
+.fi
+
+.ih
+SEE ALSO
+ccdlist, ccdtypes, instruments, subsets
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdhedit.hlp b/noao/imred/quadred/src/quad/doc/ccdhedit.hlp
new file mode 100644
index 00000000..1bc27d29
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdhedit.hlp
@@ -0,0 +1,108 @@
+.help ccdhedit Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdhedit -- CCD image header editor
+.ih
+USAGE
+ccdhedit images parameter value
+.ih
+PARAMETERS
+.ls images
+List of CCD images to be edited.
+.le
+.ls parameter
+Image header parameter. The image header parameter will be translated by
+the header translation file for the images.
+.le
+.ls value
+The parameter value. If the null string ("") is specified then the
+parameter is deleted from the image header, otherwise it is added or
+modified. If the parameter is "imagetyp" then the value string giving
+the CCD image type is translated from the package CCD type to the
+instrument specific string.
+.le
+.ls type = "string"
+The parameter type. The parameter types are "string", "real", or "integer".
+.le
+.ih
+DESCRIPTION
+The image headers of the specified CCD images are edited to add, modify,
+or delete a parameter. The parameters may be those used by the \fBccdred\fR
+package. The parameter name is translated to an image header parameter by the
+instrument translation file (see \fBinstruments\fR) if a translation is
+given. Otherwise the parameter is that in the image header. If the parameter
+is "imagetyp" the parameter value for the CCD image type may be that
+used by the package; i.e. dark, object, flat, etc. The value string will be
+translated to the instrument image string in this case. The translation
+facility allows use of this task in an instrument independent way.
+
+The value string is used to determine whether to delete or modify the
+image parameter. If the null string, "", is given the specified parameter
+is deleted. If parameters are added the header type must be specified
+as a string, real, or integer parameter. The numeric types convert the
+value string to a number.
+.ih
+EXAMPLES
+The \fBccdred\fR package is usable even with little image header information.
+However, if desired the header information can be added to images which
+lack it. In all the examples the parameters used are those of the package
+and apply equally well to any image header format provided there is an
+instrument translation file.
+
+.nf
+1. cl> ccdhedit obj* imagetyp object
+2. cl> ccdhedit flat* imagetyp flat
+3. cl> ccdhedit zero* imagetyp zero
+4. cl> ccdhedit obj0![1-3]* subset "V filter"
+5. cl> ccdhedit obj0![45]* subset "R filter"
+6. cl> ccdhedit flat001 subset "R filter"
+7. cl> ccdhedit obj* exptime 500 type=integer
+.fi
+
+8. The following is an example of a CL script which sets the CCD image type,
+the subset, and the exposure time simultaneously. The user may expand
+on this example to include other parameters or other initialization
+operations.
+
+.nf
+ cl> edit ccdheader.cl
+
+ ----------------------------------------------------------------
+ # Program to set CCD header parameters.
+
+ procedure ccdheader (images)
+
+ string images {prompt="CCD images"}
+ string imagetyp {prompt="CCD image type"}
+ string subset {prompt="CCD subset"}
+ string exptime {prompt="CCD exposure time"}
+
+ begin
+ string ims
+
+ ims = images
+ ccdhedit (ims, "imagetyp", imagetyp, type="string")
+ ccdhedit (ims, "subset", subset, type="string")
+ ccdhedit (ims, "exptime", exptime, type="real")
+ end
+ ----------------------------------------------------------------
+
+ cl> task ccdheader=ccdheader.cl
+ cl> ccdheader obj* imagetyp=object subset="V" exptime=500
+.fi
+
+9. The image header may be changed to force processing a calibration image
+as an object. For example to flatten a flat field:
+
+.nf
+ cl> ccdhedit testflat imagetyp other
+ cl> ccdproc testflat
+.fi
+
+10. To delete processing flags:
+
+ cl> ccdhedit obj042 flatcor ""
+.ih
+SEE ALSO
+hedit, instruments, ccdtypes, subsets
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdinst.hlp b/noao/imred/quadred/src/quad/doc/ccdinst.hlp
new file mode 100644
index 00000000..23ebea60
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdinst.hlp
@@ -0,0 +1,389 @@
+.help ccdinstrument Nov90 noao.imred.ccdred
+.ih
+NAME
+ccdinstrument -- Setup and verify CCD instrument translation files
+.ih
+USAGE
+ccdinstrument images
+.ih
+PARAMETERS
+.ls images
+List of images to be verified or used to setup a CCD instrument translation
+file.
+.le
+.ls instrument = ")_.instrument"
+CCD instrument translation file. The default is to use the translation
+file defined in the \fBccdred\fR package parameters. Note that one would
+need write permission to update this file though the task has a write
+command to save any changes to a different file.
+.le
+.ls ssfile = ")_.ssfile"
+Subset translation file. The default is to use the file defined in
+the \fBccdred\fR package parameters.
+.le
+.ls edit = yes
+Edit the instrument translation file? If "yes", an interactive
+mode is entered allowing the translation parameters to be modified; if
+"no" the task simply verifies the translations noninteractively.
+.le
+.ls parameters = "basic"
+Parameters to be displayed. The choices are "basic" to display only the
+most basic parameters (those needed for the simplest automation of
+\fBccdred\fR tasks), "common" to display the common parameters used
+by the package (most of these are keywords to be written to the image
+rather than translated), and "all" to display all the parameters
+referenced by the package including the most obscure. For most uses
+the "basic" set is all that is important and the other options are
+included for completeness.
+.le
+.ih
+DESCRIPTION
+The purpose of this task is to provide an interface to simplify setting
+up CCD instrument translation files and to verify the translations
+for a set of images. Before this task was written users who needed to
+set up translation files for new instruments and observatories had
+to directly create the files with an editor. Many people encountered
+difficulties and were prone to errors. Also there was no task that
+directly verified the translations though \fBccdlist\fR provided some
+clues.
+
+The \fBccdred\fR package was designed to make intelligent use of
+information in image headers for determining things such as image
+calibration or object type and exposure times. While the package may
+be used without this capability it is much more convenient to be
+able to use information from the image. The package was also intended
+to be used with many different instruments, detectors, and observatories.
+The key to providing image header access across different observatories
+is the ability to translate the needs of the package to the appropriate
+keywords in the image header. This is done through a file called
+an "instrument translation file". For a complete description of
+this file and other instrument setup features of the package see
+\fBccdred.instruments\fR.
+
+The instrument translation file translates the parameter names used by
+the \fBccdred\fR package into image specific parameters and also
+supplies default values for parameters. The translation proceeds as
+follows. When a package task needs a parameter for an image, for
+example "imagetyp", it looks in the instrument translation file. If
+the file is not found or none is specified then the image header
+keyword that is requested is assumed to have the same name. If an
+instrument translation file is defined then the requested parameter is
+translated to an image header keyword, provided a translation entry is
+given. If no translation is given the package name is used. For
+example the package parameter "imagetyp" might be translated to
+"data-typ" (the old NOAO CCD keyword). If the parameter is not found
+then the default value specified in the translation file, if present,
+is returned.
+
+For recording parameter information in the header, such
+as processing flags, translation is also used. For example, if the
+flag specifying that the image has been corrected by a flat field is to
+be set then the package parameter name "flatcor" might be translated to
+"ff-flag". If no translation is given then the new image header
+parameter is entered as "flatcor".
+
+The CCD image type requires a second level of translation also defined
+in the translation file. Once the package parameter which identifies the
+type of CCD image, for example a flat field or object, is translated
+to an image keyword, the specific
+string value must be translated to one of the CCD image types used
+by the package. The translation works in the same way: the specific
+string found is translated to the \fBccdred\fR type and returned to
+the task. This translation is tricky in that the exact string,
+including all spaces and capitalization, must be correctly defined
+in the translation file. The \fBccdinstrument\fR task allows doing
+this automatically, thus minimizing typing errors.
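+
+For example, a translation file might contain entries of the following
+form (the keywords and image type strings here are only illustrative and
+are taken from the examples later in this help page):
+
+.nf
+	imagetyp	ccdtype
+	subset		filters
+	exptime		integ
+	darktime	integ
+	BIAS		zero
+	'PROJECTOR FLAT' flat
+.fi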
+
+The basic display format of the task is a table of five columns
+giving the parameter name used by the package, the image keyword
+to which it is translated, the default value (if any), the value
+the task will receive for the current image after translation,
+and the actual keyword value in the image. A "?" is printed if
+a value cannot be determined. The idea of the task is to make sure
+that the value a \fBccdred\fR task sees is the correct one and if not
+to modify the translation appropriately. In verify mode when the
+\fBedit\fR parameter is not set the translation table is simply
+printed for each input image.
+
+In edit mode the user interactively gives commands at the ccdinstrument
+prompt to display or modify keywords. The modifications can then be
+written to the instrument file or saved in a private copy. The
+list of commands is shown below and may be printed using ? or help.
+
+.in 4
+.nf
+ CCDINSTRUMENT COMMANDS
+
+? Print command summary
+help Print command summary
+imheader Page image header
+instrument Print current instrument translation file
+next Next image
+newimage Select a new image
+quit Quit
+read Read instrument translation file
+show Show current translations
+write Write instrument translation file
+
+translate Translate image string selected by the imagetyp
+ parameter to one of the CCDRED types given as an
+ argument or queried:
+ object, zero, dark, flat, comp, illum, fringe, other
+
+.fi
+The following are CCDRED parameters which may be translated. You are
+queried for the image keyword to use or it may be typed after the command.
+An optional default value (returned if the image does not contain the
+keyword) may be typed as the second argument of the command.
+.nf
+
+ BASIC PARAMETERS
+imagetyp Image type parameter (see also translate)
+subset Subset or filter parameter
+exptime Exposure time
+darktime Dark time (may be same as the exposure time)
+.fi
+.in -4
+
+The commands may be followed by values such as file names for some of
+the general commands or the keyword and default value for the parameters
+to be translated. Note this is the only way to specify a default value.
+If no arguments are given the user is prompted with the current value
+which may then be changed.
+
+The parameters shown above are only those considered "basic".
+In order to avoid confusion the task can limit the set of parameters
+displayed. Without going into great detail, it is only the basic
+parameters which are generally required to have valid translations to
+allow the package to work well. However, for completeness, and if someone
+wants to go wild with translations, further parameters may be displayed
+and changed. The set of parameters displayed is controlled by the
+\fIparameters\fR parameter. The additional parameters not shown above are:
+
+.in 4
+.nf
+ USEFUL DEFAULT GEOMETRY PARAMETERS
+biassec Bias section (often has a default value)
+trimsec Trim section (often has a default value)
+
+ COMMON PROCESSING FLAGS
+fixpix Bad pixel replacement flag
+overscan Overscan correction flag
+trim Trim flag
+zerocor Zero level correction flag
+darkcor Dark count correction flag
+flatcor Flat field correction flag
+
+ RARELY TRANSLATED PARAMETERS
+ccdsec CCD section
+datasec Data section
+fixfile Bad pixel file
+
+fringcor Fringe correction flag
+illumcor	Illumination correction flag
+readcor One dimensional zero level read out correction
+scancor Scan mode correction flag
+
+illumflt	Illumination flat image
+mkfringe Fringe image
+mkillum	Illumination image
+skyflat Sky flat image
+
+ccdmean Mean value
+fringscl Fringe scale factor
+ncombine Number of images combined
+date-obs Date of observations
+dec Declination
+ra Right Ascension
+title Image title
+.fi
+.in -4
+.ih
+EXAMPLES
+1. To verify the translations for a set of images using the default
+translation file:
+
+.nf
+ cl> setinst "" review-
+ cl> ccdinst dev$pix edit-
+ Image: dev$pix
+ Instrument file:
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ --------------------------------
+ imagetyp imagetyp none ?
+ subset subset ?
+ exptime exptime ? ?
+ darktime darktime ? ?
+
+ cl> setinst "" site=kpno dir=ccddb$ review-
+ cl> ccdinst dev$pix edit-
+ Image: dev$pix
+
+ Instrument file: ccddb$kpno/camera.dat
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ --------------------------------
+ imagetyp data-typ object OBJECT (0)
+ subset f1pos 2 2
+ exptime otime 600 600
+ darktime ttime 600 600
+.fi
+
+2. Set up an instrument translation file from scratch.
+
+.nf
+ ccdinst ech???.imh instr=myccd edit+
+ Warning: OPEN: File does not exist (myccd)
+ Image: ech001.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp imagetyp none ?
+ subset subset ?
+ exptime exptime ? ?
+ darktime darktime ? ?
+
+ ccdinstrument> imagetyp
+ Image keyword for image type (imagetyp): ccdtype
+ imagetyp ccdtype unknown BIAS
+ ccdinstrument> translate
+ CCDRED image type for 'BIAS' (unknown): zero
+ imagetyp ccdtype zero BIAS
+ ccdinstrument> subset
+ Image keyword for subset parameter (subset): filters
+ subset filters 1 1 0
+ ccdinstrument> exptime integ
+ exptime integ 0. 0.
+ ccdinstrument> darktime integ
+ darktime integ 0. 0.
+ ccdinstrument> show
+ Image: ech001.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype zero BIAS
+ subset filters 1 1 0
+ exptime integ 0. 0.
+ darktime integ 0. 0.
+
+ ccdinstrument> next
+ Image: ech002.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown PROJECTOR FLAT
+ subset filters 1 1 0
+ exptime integ 20. 20.
+ darktime integ 20. 20.
+
+ ccdinstrument> trans
+ CCDRED image type for 'PROJECTOR FLAT' (unknown): flat
+ imagetyp ccdtype flat PROJECTOR FLAT
+ ccdinstrument> next
+ Image: ech003.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown COMPARISON
+ subset filters 1 1 0
+ exptime integ 300 300
+ darktime integ 300 300
+
+ ccdinstrument> translate comp
+ imagetyp ccdtype comp COMPARISON
+ ccdinstrument> next
+ Image: ech004.imh
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype unknown OBJECT
+ subset filters 1 1 0
+ exptime integ 3600 3600
+ darktime integ 3600 3600
+
+ ccdinstrument> translate object
+ imagetyp ccdtype object OBJECT
+ ccdinstrument> inst
+ imagetyp ccdtype
+ BIAS zero
+ subset filters
+ exptime integ
+ darktime integ
+ 'PROJECTOR FLAT' flat
+ COMPARISON comp
+ OBJECT object
+
+ ccdinstrument> next
+ Update instrument file myccd (yes)?
+.fi
+
+3. Set default geometry parameters. Note that to set a default the
+arguments must be on the command line.
+
+.nf
+	cl> ccdinst ech001 instr=myccd param=common edit+
+ Image: ech001
+ Instrument file: myccd
+ Subset file: subsets
+
+ CCDRED IMAGE DEFAULT CCDRED IMAGE
+ PARAM KEYWORD VALUE VALUE VALUE
+ ------------------------------------------------------
+ imagetyp ccdtype zero BIAS
+ subset filters 1 1 0
+ exptime integ 0. 0.
+ darktime integ 0. 0.
+
+ biassec biassec ? ?
+ trimsec trimsec ? ?
+
+ fixpix fixpix no ?
+ overscan overscan no ?
+ trim trim no ?
+ zerocor zerocor no ?
+ darkcor darkcor no ?
+ flatcor flatcor no ?
+
+ ccdinstrument> biassec biassec [803:830,*]
+ biassec biassec [803:830,*] [803:830,*] ?
+ ccdinstrument> trimsec trimsec [2:798,2:798]
+ trimsec trimsec [2:798,2:798] [2:798,2:798] ?
+ ccdinstrument> instr
+ trimsec trimsec [2:798,2:798]
+ biassec biassec [803:830,*]
+ imagetyp ccdtype
+ BIAS zero
+ subset filters
+ exptime integ
+ darktime integ
+ 'PROJECTOR FLAT' flat
+ COMPARISON comp
+ OBJECT object
+
+ ccdinstrument> q
+ Update instrument file myccd (yes)?
+.fi
+.ih
+SEE ALSO
+instruments, setinstrument
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdlist.hlp b/noao/imred/quadred/src/quad/doc/ccdlist.hlp
new file mode 100644
index 00000000..9ce7dfdd
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdlist.hlp
@@ -0,0 +1,133 @@
+.help ccdlist Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdlist -- List CCD processing information
+.ih
+USAGE
+ccdlist images
+.ih
+PARAMETERS
+.ls images
+CCD images to be listed. A subset of these may be selected using the
+CCD image type parameter.
+.le
+.ls ccdtype = ""
+CCD image type to be listed. If no type is specified then all the images
+are listed. If an image type is specified then only images
+of that type are listed. See \fBccdtypes\fR for a list of the package
+image types.
+.le
+.ls names = no
+List the image names only? Used with the CCD image type parameter to make
+a list of the images of the specified type.
+.le
+.ls long = no
+Long format listing? The images are listed in a long format containing some
+image parameters and the processing history.
+.le
+.ls ccdproc (pset)
+CCD processing parameter set.
+.le
+.ih
+DESCRIPTION
+Information from the specified input images is listed on the standard
+output. A specific CCD image type may be selected from the input
+images by the parameter \fIccdtype\fR. There are three list formats;
+the default one line per image format, an image name only format, and a
+multi-line long format. The default one line format consists of the
+image name, image size, image pixel type, CCD image type, subset ID (if
+defined), processing flags, and title. This format contains the same
+information as that produced by \fBimheader\fR as well as CCD specific
+information. The processing flags identifying the processing operations
+performed on the image are given by the following single letter codes.
+
+.nf
+ B - Bad pixel replacement
+ O - Overscan bias subtraction
+ T - Trimming
+ Z - Zero level subtraction
+ D - Dark count subtraction
+ F - Flat field calibration
+	I - Illumination correction
+ Q - Fringe correction
+.fi
+
+The long format has the same first line as the default format plus additional
+instrument information such as the exposure time and the full processing
+history. In addition to listing the completed processing, the operations
+not yet done (as specified by the \fBccdproc\fR parameters) are also
+listed.
+
+The image name only format is intended to be used to generate lists of
+images of the same CCD image type. These lists may be used as "@" file
+lists in IRAF tasks.
+.ih
+EXAMPLES
+1. To list the default format for all images:
+
+.nf
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 6v+blue 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 6v+blue 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 6v+blue 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.fi
+
+These images have not been processed.
+
+2. To restrict the listing to just the object images:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.fi
+
+3. The long list for image "ccd007" is obtained by:
+
+.nf
+ cl> ccdlist ccd007 l+
+	ccd007[544,512][short][object][V]:N2968 V 600s
+ exptime = 200. darktime = 200.
+ [TO BE DONE] Overscan strip is [520:540,*]
+ [TO BE DONE] Trim image section is [3:510,3:510]
+ [TO BE DONE] Flat field correction
+.fi
+
+4. After processing the images have the short listing:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R][OTF]:N4036 R 600s
+.fi
+
+The processing indicated is overscan subtraction, trimming, and flat fielding.
+
+5. The long listing for "ccd007" after processing is:
+
+.nf
+ cl> ccdlist ccd007 l+
+	ccd007[508,508][real][object][V][OTF]:N2968 V 600s
+ exptime = 200. darktime = 200.
+ Jun 2 18:18 Overscan section is [520:540,*] with mean=481.8784
+ Jun 2 18:18 Trim data section is [3:510,3:510]
+ Jun 2 18:19 Flat field image is FlatV.imh with scale=138.2713
+.fi
+
+6. To make a list file containing all the flat field images:
+
+ cl> ccdlist *.imh ccdtype=flat name+ > flats
+
+This file can be used as an @ file for processing.
+.ih
+SEE ALSO
+ccdtypes, ccdgroups
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdproc.hlp b/noao/imred/quadred/src/quad/doc/ccdproc.hlp
new file mode 100644
index 00000000..4be65f73
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdproc.hlp
@@ -0,0 +1,720 @@
+.help ccdproc Oct90 noao.imred.ccdred
+.ih
+NAME
+ccdproc -- Process CCD images
+.ih
+USAGE
+ccdproc images
+.ih
+PARAMETERS
+.ls images
+List of input CCD images to process. The list may include processed
+images and calibration images.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input image list. If no type is given
+then all input images will be selected. The recognized types are described
+in \fBccdtypes\fR.
+.le
+.ls max_cache = 0
+Maximum image caching memory (in Mbytes). If there is sufficient memory
+the calibration images, such as zero level, dark count, and flat fields,
+will be cached in memory when processing many input images. This
+reduces the disk I/O and makes the task run a little faster. If the
+value is zero image caching is not used.
+.le
+.ls noproc = no
+List processing steps only?
+.le
+
+.ce
+PROCESSING SWITCHES
+.ls fixpix = yes
+Fix bad CCD lines and columns by linear interpolation from neighboring
+lines and columns? If yes then a bad pixel file must be specified.
+.le
+.ls overscan = yes
+Apply overscan or prescan bias correction? If yes then the overscan
+image section and the readout axis must be specified.
+.le
+.ls trim = yes
+Trim the image of the overscan region and bad edge lines and columns?
+If yes then the data section must be specified.
+.le
+.ls zerocor = yes
+Apply zero level correction? If yes a zero level image must be specified.
+.le
+.ls darkcor = yes
+Apply dark count correction? If yes a dark count image must be specified.
+.le
+.ls flatcor = yes
+Apply flat field correction? If yes flat field images must be specified.
+.le
+.ls illumcor = no
+Apply illumination correction? If yes illumination images must be specified.
+.le
+.ls fringecor = no
+Apply fringe correction? If yes fringe images must be specified.
+.le
+.ls readcor = no
+Convert zero level images to readout correction images? If yes then
+zero level images are averaged across the readout axis to form one
+dimensional zero level readout correction images.
+.le
+.ls scancor = no
+Convert flat field images to scan mode flat field images? If yes then the
+form of scan mode correction is specified by the parameter \fIscantype\fR.
+.le
+
+.ce
+PROCESSING PARAMETERS
+.ls readaxis = "line"
+Read out axis specified as "line" or "column".
+.le
+.ls fixfile
+File describing the bad lines and columns. If "image" is specified then
+the file is specified in the image header or instrument translation file.
+.le
+.ls biassec
+Overscan bias strip image section. If "image" is specified then the overscan
+bias section is specified in the image header or instrument translation file.
+.le
+.ls trimsec
+Image section for trimming. If "image" is specified then the trim
+image section is specified in the image header or instrument translation file.
+.le
+.ls zero = ""
+Zero level calibration image. The zero level image may be one or two
+dimensional. The CCD image type and subset are not checked for these
+images and they take precedence over any zero level calibration images
+given in the input list.
+.le
+.ls dark = ""
+Dark count calibration image. The CCD image type and subset are not checked
+for these images and they take precedence over any dark count calibration
+images given in the input list.
+.le
+.ls flat = ""
+Flat field calibration images. The flat field images may be one or
+two dimensional. The CCD image type is not checked for these
+images and they take precedence over any flat field calibration images given
+in the input list. The flat field image with the same subset as the
+input image being processed is selected.
+.le
+.ls illum = ""
+Illumination correction images. The CCD image type is not checked for these
+images and they take precedence over any illumination correction images given
+in the input list. The illumination image with the same subset as the
+input image being processed is selected.
+.le
+.ls fringe = ""
+Fringe correction images. The CCD image type is not checked for these
+images and they take precedence over any fringe correction images given
+in the input list. The fringe image with the same subset as the
+input image being processed is selected.
+.le
+.ls minreplace = 1.
+When processing flat fields, pixel values below this value (after
+all other processing such as overscan, zero, and dark corrections) are
+replaced by this value. This ensures that flat fields processed by
+\fBccdproc\fR avoid divide by zero problems when they are applied to object
+images.
+.le
+.ls scantype = "shortscan"
+Type of scan format used in creating the CCD images. The modes are:
+.ls "shortscan"
+The CCD is scanned over a number of lines and then read out as a regular
+two dimensional image. In this mode unscanned flat fields are numerically
+scanned to form scanned flat fields comparable to the observations. If
+the flat field calibration images are taken in scanned mode then
+\fIscancor\fR should be no and the processing performed in the same manner
+as in unscanned mode.
+.le
+.ls "longscan"
+In this mode the CCD is clocked and read out continuously to form a long
+strip. Flat fields are averaged across the readout axis to
+form a one dimensional flat field readout correction image. This assumes
+that all recorded image lines are clocked over the entire active area of the
+CCD.
+.le
+.le
+.ls nscan
+Number of scan readout lines used in short scan mode. This parameter is used
+when the scan type is "shortscan".
+.le
+
+
+.ce
+OVERSCAN FITTING PARAMETERS
+.ls interactive = no
+Fit the overscan vector interactively? If yes the overscan vector is fit
+interactively using the \fBicfit\fR package. If no then the fitting parameters
+given below are used.
+.le
+.ls function = "legendre"
+Overscan fitting function. The function types are "legendre" polynomial,
+"chebyshev" polynomial, "spline1" linear spline, and "spline3" cubic
+spline.
+.le
+.ls order = 1
+Number of polynomial terms or spline pieces in the overscan fit.
+.le
+.ls sample = "*"
+Sample points to use in the overscan fit. The string "*" specified all
+points otherwise an \fBicfit\fR range string is used.
+.le
+.ls naverage = 1
+Number of points to average or median to form fitting points. Positive
+numbers specify averages and negative numbers specify medians.
+.le
+.ls niterate = 1
+Number of rejection iterations to remove deviant points from the overscan fit.
+If 0 then no points are rejected.
+.le
+.ls low_reject = 3., high_reject = 3.
+Low and high sigma rejection factors for rejecting deviant points from the
+overscan fit.
+.le
+.ls grow = 0.
+One dimensional growing radius for rejection of neighbors to deviant points.
+.le
+.ih
+DESCRIPTION
+\fBCcdproc\fR processes CCD images to correct and calibrate for
+detector defects, readout bias, zero level bias, dark counts,
+response, illumination, and fringing. It also trims unwanted
+lines and columns and changes the pixel datatype. It is efficient
+and easy to use; all one has to do is set the parameters and then
+begin processing the images. The task takes care of most of the
+record keeping and automatically does the prerequisite processing
+of calibration images. Beneath this simplicity there is much that
+is going on. In this section a simple description of the usage is
+given. The following sections present more detailed discussions
+on the different operations performed and the order and logic
+of the processing steps. For a user's guide to the \fBccdred\fR
+package see \fBguide\fR. Much of the ease of use derives from using
+information in the image header. If this information is missing
+see section 13.
+
+One begins by setting the task parameters. There are many parameters
+but they may be easily reviewed and modified using the task \fBeparam\fR.
+The input CCD images to be processed are given as an image list.
+Previously processed images are ignored and calibration images are
+recognized, provided the CCD image types are in the image header (see
+\fBinstruments\fR and \fBccdtypes\fR). Therefore it is permissible to
+use simple image templates such as "*.imh". The \fIccdtype\fR parameter
+may be used to select only certain types of CCD images to process
+(see \fBccdtypes\fR).
+
+The processing operations are selected by boolean (yes/no) parameters.
+Because calibration images are recognized and processed appropriately,
+the processing operations for object images should be set.
+Any combination of operations may be specified and the operations are
+performed simultaneously. While it is possible to do operations in
+separate steps this is much less efficient. Two of the operation
+parameters apply only to zero level and flat field images. These
+are used for certain types of CCDs and modes of operation.
+
+The processing steps selected have related parameters which must be
+set. These are things like image sections defining the overscan and
+trim regions and calibration images. There are a number of parameters
+used for fitting the overscan or prescan bias section. These are
+parameters used by the standard IRAF curve fitting package \fBicfit\fR.
+The parameters are described in more detail in the following sections.
+
+In addition to the task parameters there are package parameters
+which affect \fBccdproc\fR. These include the instrument and subset
+files, the text and plot log files, the output pixel datatype,
+the amount of memory available for calibration image caching,
+the verbose parameter for logging to the terminal, and the backup
+prefix. These are described in \fBccdred\fR.
+
+Calibration images are specified by task parameters and/or in the
+input image list. If more than one calibration image is specified
+then the first one encountered is used and a warning is issued for the
+extra images. Calibration images specified by
+task parameters take precedence over calibration images in the input list.
+These images also need not have a CCD image type parameter since the task
+parameter identifies the type of calibration image. This method is
+best if there is only one calibration image for all images
+to be processed. This is almost always true for zero level and dark
+count images. If no calibration image is specified by task parameter
+then calibration images in the input image list are identified and
+used. This requires that the images have CCD image types recognized
+by the package. This method is useful if one may simply say "*.imh"
+as the image list to process all images or if the images are broken
+up into groups, in "@" files for example, each with their own calibration
+frames.
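+
+For example, to process all of the object images with a single zero level
+image, a single dark count image, and a set of flat fields selected by
+subset (the calibration image names here are only illustrative):
+
+.nf
+	cl> ccdproc *.imh ccdtype=object zero=Zero dark=Dark flat=Flat*
+.fi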
+
+When an input image is processed the task first determines the processing
+parameters and calibration images. If a requested operation has been
+done it is skipped and if all requested operations have been completed then
+no processing takes place. When it determines that a calibration image
+is required it checks for the image from the task parameter and then
+for a calibration image of the proper type in the input list.
+
+Having
+selected a calibration image it checks if it has been processed by
+looking for the image header flag CCDPROC. If it is not present then
+the calibration image is processed. When any image has been processed
+the CCDPROC flag is added. For images processed directly by \fBccdproc\fR
+the individual processing flags are checked even if the CCDPROC flag is
+present. However, the automatic processing of the calibration images is
+only done if the CCDPROC flag is absent! This is to make the task more
+efficient by not having to check every flag for every calibration image
+for every input image. Thus, if additional processing
+steps are added after images have been partially reduced then input images
+will be processed for the new steps but calibration images will not be
+processed automatically.
+
+After the calibration images have been identified, and processed if
+necessary, the images may be cached in memory. This is done when there
+are more than two input images (it is actually less efficient to
+cache the calibration images for one or two input images) and the parameter
+\fImax_cache\fR is greater than zero. When caching, as many calibration
+images as allowed by the specified memory are read into memory and
+kept there for all the input images. Cached images are, therefore,
+only read once from disk which reduces the amount of disk I/O. This
+makes a modest decrease in the execution time. It is not dramatic
+because the actual processing is fairly CPU intensive.
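+
+For example, to allow up to 32 Mbytes (an arbitrary illustrative value) of
+memory for caching the calibration images:
+
+.nf
+	cl> ccdproc *.imh max_cache=32
+.fi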
+
+Once the processing parameters and calibration images have been determined
+the input image is processed for all the desired operations in one
+step; i.e. there are no intermediate results or images. This makes
+the task efficient. The corrected image is output as a temporary image
+until the entire image has been processed. When the image has been
+completely processed then the original image is deleted (or renamed
+using the specified backup prefix) and the corrected image replaces
+the original image. Using a temporary image protects the data in the
+event of an abort or computer failure. Keeping the original image name
+eliminates much of the record keeping and the need to generate new
+image names.
+.sh
+1. Fixpix
+Regions of bad lines and columns may be replaced by linear
+interpolation from neighboring lines and columns when the parameter
+\fIfixpix\fR is set. The bad regions are specified in a bad pixel
+file. The file consists of lines with four fields, the starting and
+ending columns and the starting and ending lines. Any number of
+regions may be specified. Comment lines beginning with the character
+'#' may be included. If a comment line preceding the bad regions
+contains the word "untrimmed" then the coordinate system refers to the
+original format of the images; i.e. before trimming. If an image has
+been trimmed previously then the trim region specified in the image
+header is used to convert the coordinates in the bad pixel file to
+those of the trimmed image. If the file does not contain the word
+"untrimmed" then the coordinate system must match that of the image
+being corrected; i.e. untrimmed coordinates if the image has not been
+trimmed and trimmed coordinates if the image has been trimmed.
+Standard bad pixel files should always be specified in terms of the
+original format.
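+
+A minimal bad pixel file might look like the following (the regions listed
+are hypothetical and only illustrate the format):
+
+.nf
+	# Bad pixels in untrimmed coordinates:
+	# a bad column, a bad line, and a small bad region
+	25   25    1  512
+	1   544  301  301
+	178 180  410  412
+.fi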
+
+The bad pixel file may be specified explicitly with the parameter \fIfixfile\fR
+or indirectly if the parameter has the value "image". In the latter case
+the instrument file must contain the name of the file.
+.sh
+2. Overscan
+If an overscan or prescan correction is specified (\fIoverscan\fR
+parameter) then the image section (\fIbiassec\fR parameter) is averaged
+along the readout axis (\fIreadaxis\fR parameter) to form a
+correction vector. A function is fit to this vector and for each readout
+line (image line or column) the function value for that line is
+subtracted from the image line. The fitting function is generally
+either a constant (polynomial of 1 term) or a high order function
+which fits the large scale shape of the overscan vector. Bad pixel
+rejection is also used to eliminate cosmic ray events. The function
+fitting may be done interactively using the standard \fBicfit\fR
+interactive graphical curve fitting tool. Regardless of whether the fit
+is done interactively, the overscan vector and the fit may be recorded
+for later review in a metacode plot file named by the parameter
+\fIccdred.plotfile\fR. The mean value of the bias function is also recorded in
+the image header and log file.
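+
+For example, to subtract a constant overscan level determined from a
+hypothetical bias strip without interactive fitting (the image names and
+section are only illustrative):
+
+.nf
+	cl> ccdproc obj*.imh overscan+ biassec="[520:540,*]" order=1
+.fi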
+.sh
+3. Trim
+When the parameter \fItrim\fR is set the input image will be trimmed to
+the image section given by the parameter \fItrimsec\fR. This trim
+should, of course, be the same as that used for the calibration images.
+.sh
+4. Zerocor
+After the readout bias is subtracted, as defined by the overscan or prescan
+region, there may still be a zero level bias. This level may be two
+dimensional or one dimensional (the same for every readout line). A
+zero level calibration is obtained by taking zero length exposures;
+generally many are taken and combined. To apply this zero
+level calibration the parameter \fIzerocor\fR is set. In addition if
+the zero level bias is only readout dependent then the parameter \fIreadcor\fR
+is set to reduce two dimensional zero level images to one dimensional
+images. The zero level images may be specified by the parameter \fIzero\fR
+or given in the input image list (provided the CCD image type is defined).
+
+When the zero level image is needed to correct an input image it is checked
+to see if it has been processed and, if not, it is processed automatically.
+Processing of zero level images consists of bad pixel replacement,
+overscan correction, trimming, and averaging to one dimension if the
+readout correction is specified.
+.sh
+5. Darkcor
+Dark counts are subtracted by scaling a dark count calibration image to
+the same exposure time as the input image and subtracting. The
+exposure time used is the dark time which may be different than the
+actual integration or exposure time. A dark count calibration image is
+obtained by taking a very long exposure with the shutter closed; i.e.
+an exposure with no light reaching the detector. The dark count
+correction is selected with the parameter \fIdarkcor\fR and the dark
+count calibration image is specified either with the parameter
+\fIdark\fR or as one of the input images. The dark count image is
+automatically processed as needed. Processing of dark count images
+consists of bad pixel replacement, overscan and zero level correction,
+and trimming.
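+
+As a hypothetical illustration of the scaling, if the dark count calibration
+image has a dark time of 3600 seconds and the image being processed has a
+dark time of 600 seconds, then the dark count image is scaled by 600/3600 =
+1/6 before being subtracted.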
+.sh
+6. Flatcor
+The relative detector pixel response is calibrated by dividing by a
+scaled flat field calibration image. A flat field image is obtained by
+exposure to a spatially uniform source of light such as a lamp or
+twilight sky. Flat field images may be corrected for the spectral
+signature in spectroscopic images (see \fBresponse\fR and
+\fBapnormalize\fR), or for illumination effects (see \fBmkillumflat\fR
+or \fBmkskyflat\fR). For more on flat fields and illumination corrections
+see \fBflatfields\fR. The flat field response is dependent on the
+wavelength of light so if different filters or spectroscopic wavelength
+coverage are used a flat field calibration for each one is required.
+The different flat fields are automatically selected by a subset
+parameter (see \fBsubsets\fR).
+
+Flat field calibration is selected with the parameter \fIflatcor\fR
+and the flat field images are specified with the parameter \fIflat\fR
+or as part of the input image list. The appropriate subset is automatically
+selected for each input image processed. The flat field image is
+automatically processed as needed. Processing consists of bad pixel
+replacement, overscan subtraction, zero level subtraction, dark count
+subtraction, and trimming. Also if a scan mode is used and the
+parameter \fIscancor\fR is specified then a scan mode correction is
+applied (see below). The processing also computes the mean of the
+flat field image which is used later to scale the flat field before
+division into the input image. For scan mode flat fields the ramp
+part is included in computing the mean which will affect the level
+of images processed with this flat field. Note that, in the interest of
+efficiency, there is no check for division by zero; if division by zero
+does occur it results in a fatal error.
+replacing small values using a task such as \fBimreplace\fR or
+during processing using the \fIminreplace\fR parameter. Note that the
+\fIminreplace\fR parameter only applies to flat fields processed by
+\fBccdproc\fR.
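+
+For example, small values in an already processed flat field might be
+replaced with a command along the following lines (the image name and
+limits are only illustrative; see the \fBimreplace\fR help for the exact
+parameters):
+
+.nf
+	cl> imreplace FlatV.imh 1. lower=INDEF upper=1.
+.fi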
+.sh
+7. Illumcor
+CCD images processed through the flat field calibration may not be
+completely flat (in the absence of objects). In particular, a blank
+sky image may still show gradients. This residual nonflatness is called
+the illumination pattern. It may be introduced even if the detector is
+uniformly illuminated by the sky because the flat field lamp
+illumination may be nonuniform. The illumination pattern is found from a
+blank sky, or even object image, by heavily smoothing and rejecting
+objects using sigma clipping. The illumination calibration image is
+divided into the data being processed to remove the illumination
+pattern. The illumination pattern is a function of the subset so there
+must be an illumination correction image for each subset to be
+processed. The tasks \fBmkillumcor\fR and \fBmkskycor\fR are used to
+create the illumination correction images. For more on illumination
+corrections see \fBflatfields\fR.
+
+An alternative to treating the illumination correction as a separate
+operation is to combine the flat field and illumination correction
+into a corrected flat field image before processing the object
+images. This will save some processing time but does require creating
+the corrected flat field first rather than correcting the images at the same
+time or later. There are two methods: removing the large scale
+shape of the flat field, or combining a blank sky illumination
+with the flat field. These methods are discussed further in the
+tasks which create them, \fBmkillumcor\fR and \fBmkskycor\fR.
+.sh
+8. Fringecor
+There may be a fringe pattern in the images due to the night sky lines.
+To remove this fringe pattern a blank sky image is heavily smoothed
+to produce an illumination image which is then subtracted from the
+original sky image. The residual fringe pattern is scaled to the
+exposure time of the image to be fringe corrected and then subtracted.
+Because the intensity of the night sky lines varies with time an
+additional scaling factor may be given in the image header.
+The fringe pattern is a function of the subset so there must be
+a fringe correction image for each subset to be processed.
+The task \fBmkfringecor\fR is used to create the fringe correction images.
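+
+The fringe scaling and subtraction can be sketched as follows
+(illustrative Python; not the actual implementation, and the names are
+hypothetical):
+
+.nf
+    def fringecor(image, fringe, exptime_image, exptime_fringe,
+                  header_scale=1.0):
+        # Scale the fringe pattern by the exposure time ratio and any
+        # additional scale factor from the image header, then subtract.
+        scale = header_scale * exptime_image / exptime_fringe
+        return image - scale * fringe
+.fi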
+.sh
+9. Readcor
+If a zero level correction is desired (\fIzerocor\fR parameter)
+and the parameter \fIreadcor\fR is yes then a single zero level
+correction vector is applied to each readout line or column. Use of a
+readout correction rather than a two dimensional zero level image
+depends on the nature of the detector or whether the CCD is operated in
+longscan mode (see below). The readout correction is specified by a
+one dimensional image (\fIzero\fR parameter) and the readout axis
+(\fIreadaxis\fR parameter). If the zero level image is two dimensional
+then it is automatically processed to a one dimensional image by
+averaging across the readout axis. Note that this modifies the zero
+level calibration image.
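+
+The reduction of a two dimensional zero level image to a readout
+correction vector is essentially an average across the readout axis,
+roughly as in this sketch (illustrative Python with NumPy; which array
+axis is collapsed is an assumption here):
+
+.nf
+    def readcor(zero2d):
+        # Collapse a two dimensional zero level image to a one
+        # dimensional readout correction by averaging across the
+        # readout axis (axis 0 is assumed for this sketch).
+        return zero2d.mean(axis=0)
+.fi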
+.sh
+10. Scancor
+CCD detectors may be operated in several modes in astronomical
+applications. The most common is as a direct imager where each pixel
+integrates one point in the sky or spectrum. However, the design of most CCDs
+allows the sky to be scanned across the CCD while shifting the
+accumulating signal at the same rate. \fBCcdproc\fR provides for two
+scanning modes called "shortscan" and "longscan". The type of scan
+mode is set with the parameter \fIscanmode\fR.
+
+In "shortscan" mode the detector is scanned over a specified number of
+lines (not necessarily at sidereal rates). The lines that scroll off
+the detector during the integration are thrown away. At the end of the
+integration the detector is read out in the same way as an unscanned
+observation. The advantage of this mode is that the small scale flat
+field response is averaged in one dimension over the number of lines
+scanned. A flat field may be observed in the same way, in which case
+the processing is no different from that of unscanned imaging and the
+parameter \fIscancor\fR should be set to no. However, one obtains an increase
+in the statistical accuracy of the flat fields if they are not scanned
+during the observation but digitally scanned during the processing. In
+shortscan mode with \fIscancor\fR set to yes, flat field images are
+digitally scanned, if needed, by the specified number of scan lines
+(\fInscan\fR parameter).
+
+In "longscan" mode the detector is continuously read out to produce
+an arbitrarily long strip. Provided data which has not passed over
+the entire detector is thrown away, the flat field corrections will
+be one dimensional. If \fIscancor\fR is specified and the
+scan mode is "longscan" then a one dimensional flat field correction
+will be applied. If the specified flat field (\fIflat\fR parameter)
+is a two dimensional image then when the flat field image is processed
+it will be averaged across the readout axis to form a one dimensional
+correction image.
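+
+As a rough sketch, digital scanning of a flat field in shortscan mode is
+essentially a running average over \fInscan\fR lines, and longscan mode
+reduces the flat field to a one dimensional average (illustrative Python
+with NumPy; this is not the actual algorithm and the edge handling is an
+assumption):
+
+.nf
+    import numpy as np
+
+    def shortscan_flat(flat, nscan):
+        # Average each line with the following nscan-1 lines (clipped
+        # at the image edge) to mimic scanning during the exposure.
+        out = np.empty(flat.shape, dtype=float)
+        for i in range(flat.shape[0]):
+            out[i] = flat[i:i + nscan].mean(axis=0)
+        return out
+
+    def longscan_flat(flat):
+        # Collapse the flat field along the scan direction.
+        return flat.mean(axis=0)
+.fi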
+.sh
+11. Processing Steps
+The following outline describes the steps taken by the task and provides
+the most detailed specification of its operation.
+
+.ls 5 (1)
+An image to be processed is first checked to verify that it is of the
+specified CCD image type. If it is not of the desired type processing
+proceeds to the next image.
+.le
+.ls (2)
+A temporary output image is created of the specified pixel data type
+(\fBccdred.pixeltype\fR). The header parameters are copied from the
+input image.
+.le
+.ls (3)
+If trimming is specified and the image has not been trimmed previously,
+the trim section is determined.
+.le
+.ls (4)
+If bad pixel replacement is specified and this has not been done
+previously, the bad pixel file is determined either from the task
+parameter or the instrument translation file. The bad pixel regions
+are read. If the image has been trimmed previously and the bad pixel
+file contains the word "untrimmed" then the bad pixel coordinates are
+translated to those of the trimmed image.
+.le
+.ls (5)
+If an overscan correction is specified and this correction has not been
+applied, the overscan section is averaged along the readout axis. If
+trimming is to be done the overscan section is trimmed to the same
+limits. A function is fit either interactively or noninteractively to
+the overscan vector. The function is used to produce the overscan
+vector to be subtracted from the image. This is done in real
+arithmetic.
+.le
+.ls (6)
+If the image is a zero level image go to processing step 12.
+If a zero level correction is desired and this correction has not been
+performed, find the zero level calibration image. If the zero level
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (7)
+If the image is a dark count image go to processing step 12.
+If a dark count correction is desired and this correction has not been
+performed, find the dark count calibration image. If the dark count
+calibration image has not been processed it is processed at this point.
+This is done by going to processing step 1 for this image. After the
+calibration image has been processed, processing of the input image
+continues from this point. The ratio of the input image dark time
+to the dark count image dark time is determined; this scale factor
+multiplies each pixel of the dark count image before it is subtracted
+from the input image.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (8)
+If the image is a flat field image go to processing step 12. If a flat
+field correction is desired and this correction has not been performed,
+find the flat field calibration image of the appropriate subset. If
+the flat field calibration image has not been processed it is processed
+at this point. This is done by going to processing step 1 for this
+image. After the calibration image has been processed, processing of
+the input image continues from this point. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used.
+The processed calibration image may be
+cached in memory if it has not been previously and if there is enough memory.
+.le
+.ls (9)
+If the image is an illumination image go to processing step 12. If an
+illumination correction is desired and this correction has not been performed,
+find the illumination calibration image of the appropriate subset.
+The illumination image must have the "mkillum" processing flag or
+\fBccdproc\fR will abort with an error. The mean of the image
+is determined from the image header to be used for scaling. If no
+mean is found then a unit scaling is used. The processed calibration
+image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (10)
+If the image is a fringe image go to processing step 12. If a fringe
+correction is desired and this correction has not been performed,
+find the fringe calibration image of the appropriate subset.
+The fringe image must have the "mkfringe" processing flag or
+\fBccdproc\fR will abort with an error. The ratio of the input
+image exposure time to the fringe image exposure time is determined.
+If there is a fringe scaling in the image header then this factor
+is multiplied by the exposure time ratio. This factor is used
+for scaling. The processed calibration image may be
+cached in memory if it has not been previously and there is enough memory.
+.le
+.ls (11)
+If there are no processing operations flagged, delete the temporary output
+image, which has been opened but not used, and go to step 14.
+.le
+.ls (12)
+The input image is processed line by line with trimmed lines ignored.
+A line of the input image is read. Bad pixel replacement and trimming
+are applied to the image. Image lines from the calibration images
+are read from disk or the image cache. If the calibration is one
+dimensional (such as a readout zero
+level correction or a longscan flat field correction) then the image
+vector is read only once. Note that IRAF image I/O is buffered for
+efficiency and accessing a line at a time does not mean that image
+lines are read from disk a line at a time. Given the input line, the
+calibration images, the overscan vector, and the various scale factors
+a special data path for each combination of corrections is used to
+perform all the processing in the most efficient manner. If the
+image is a flat field any pixels less than the \fIminreplace\fR
+parameter are replaced by that minimum value. Also a mean is
+computed for the flat field and stored as the CCDMEAN keyword.
+.le
+.ls (13)
+The input image is deleted or renamed to a backup image. The temporary
+output image is renamed to the input image name.
+.le
+.ls (14)
+If the image is a zero level image and the readout correction is specified
+then it is averaged to a one dimensional readout correction.
+.le
+.ls (15)
+If the image is a flat field image and the scan mode correction is specified
+then the correction is applied. For shortscan mode a
+modified two dimensional image is produced while for longscan mode a
+one dimensional average image is produced.
+.le
+.ls (16)
+The processing is completed and either the next input image is processed
+beginning at step 1 or, if it is a calibration image which is being
+processed for an input image, control returns to the step which initiated
+the calibration image processing.
+.le
+.sh
+12. Processing Arithmetic
+The \fBccdproc\fR task has two data paths, one for the real pixel datatype
+and one for the short integer pixel datatype. In addition, internal arithmetic
+is based on the rules of FORTRAN. For efficiency there is
+no checking for division by zero in the flat field calibration.
+The following rules describe the processing arithmetic and data paths.
+
+.ls (1)
+If the input, output, or any calibration image is of type real the
+real data path is used. This means all image data is converted to
+real on input. If all the images are of type short all input data
+is kept as short integers. Thus, if all the images are of the same type
+there is no datatype conversion on input resulting in greater
+image I/O efficiency.
+.le
+.ls (2)
+In the real data path the processing arithmetic is always real and,
+if the output image is of short pixel datatype, the result
+is truncated.
+.le
+.ls (3)
+The overscan vector and the scale factors for dark count, flat field,
+illumination, and fringe calibrations are always of type real. Therefore,
+in the short data path any processing which includes these operations
+will be coerced to real arithmetic and the result truncated at the end
+of the computation (see the sketch following this list).
+.le
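+
+For example, rule (3) means that in the short integer data path a flat
+field division is evaluated in real arithmetic and the result truncated,
+roughly as in this sketch (illustrative Python; not the actual code):
+
+.nf
+    def short_path_pixel(pixel, flat, flatscale):
+        # The real scale factor forces floating point arithmetic; the
+        # result is truncated when stored as a short integer.
+        return int(pixel * flatscale / flat)
+.fi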
+.sh
+13. In the Absence of Image Header Information
+The tasks in the \fBccdred\fR package are most convenient to use when
+the CCD image type, subset, and exposure time are contained in the
+image header. The ability to redefine which header parameters contain
+this information makes it possible to use the package at many different
+observatories (see \fBinstruments\fR). However, in the absence of any
+image header information the tasks may still be used effectively.
+There are two ways to proceed. One way is to use \fBccdhedit\fR
+to place the information in the image header.
+
+The second way is to specify the processing operations more explicitly
+than is needed when the header information is present. The parameter
+\fIccdtype\fR is set to "" or to "none". The calibration images are
+specified explicitly by task parameter since they cannot be recognized
+in the input list. Only one subset at a time may be processed.
+
+If dark count and fringe corrections are to be applied the exposure
+times must be added to all the images. Alternatively, the dark count
+and fringe images may be scaled explicitly for each input image. This
+works because the exposure times default to 1 if they are not given in
+the image header.
+.ih
+EXAMPLES
+The user's \fBguide\fR presents a tutorial in the use of this task.
+
+1. In general all that needs to be done is to set the task parameters
+and enter
+
+ cl> ccdproc *.imh &
+
+This will run in the background and process all images which have not
+been processed previously.
+.ih
+TIME REQUIREMENTS
+.nf
+o SUN-3, 15 MHz 68020 with 68881 floating point hardware (no FPA)
+o 8 Mb RAM, 2 Fuji Eagle disks.
+o Input images = 544 x 512 short
+o Output image = 500 x 500 real
+o Operations are overscan subtraction (O), trimming to 500x500 (T),
+ zero level subtraction (Z), dark count scaling and subtraction (D),
+ and flat field scaling and subtraction (F).
+o UNIX statistics
+ (user, system, and clock time, and misc. memory and i/o statistics):
+
+[OTF] One calibration image and 9 object images:
+No caching: 110.6u 25.5s 3:18 68% 28+ 40K 3093+1645io 9pf+0w
+Caching: 111.2u 23.0s 2:59 74% 28+105K 2043+1618io 9pf+0w
+
+[OTZF] Two calibration images and 9 object images:
+No caching: 119.2u 29.0s 3:45 65% 28+ 50K 4310+1660io 9pf+0w
+Caching: 119.3u 23.0s 3:07 75% 28+124K 2179+1601io 9pf+0w
+
+[OTZDF] Three calibration images and 9 object images:
+No caching: 149.4u 31.6s 4:41 64% 28+ 59K 5501+1680io 19pf+0w
+Caching: 151.5u 29.0s 4:14 70% 27+227K 2346+1637io 148pf+0w
+
+[OTZF] 2 calibration images and 20 images processed:
+No caching: 272.7u 63.8s 8:47 63% 28+ 50K 9598+3713io 12pf+0w
+Caching: 271.2u 50.9s 7:00 76% 28+173K 4487+3613io 51pf+0w
+.fi
+.ih
+SEE ALSO
+.nf
+instruments, ccdtypes, flatfields, icfit, ccdred, guide, mkillumcor,
+mkskycor, mkfringecor
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdred.hlp b/noao/imred/quadred/src/quad/doc/ccdred.hlp
new file mode 100644
index 00000000..0300bd38
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdred.hlp
@@ -0,0 +1,98 @@
+.help package Jun87 noao.imred
+.ih
+NAME
+ccdred -- CCD image reduction package
+.ih
+USAGE
+ccdred
+.ih
+PARAMETERS
+.ls pixeltype = "real real"
+Output pixel datatype and calculation datatype. When images are processed
+or created the output pixel datatype is determined by this parameter.
+The allowed types are "short" for short integer, and "real" for real
+floating point. Note that if short input images are processed into
+real images the disk space required will generally increase.
+The calculation datatypes are also short and real with a default of
+real if none is specified.
+.le
+.ls verbose = no
+Print log information to the standard output?
+.le
+.ls logfile = "logfile"
+Text log file. If no filename is specified then no log file is kept.
+.le
+.ls plotfile = ""
+Log metacode plot file for the overscan bias vector fits. If
+no filename is specified then no metacode plot file is kept.
+.le
+.ls backup = ""
+Backup prefix for backup images. If no prefix is specified then no backup
+images are kept when processing. If specified then the backup image
+has the specified prefix.
+.le
+.ls instrument = ""
+CCD instrument translation file. This is usually set with \fBsetinstrument\fR.
+.le
+.ls ssfile = "subsets"
+Subset translation file used to define the subset identifier. See
+\fBsubsets\fR for more.
+.le
+.ls graphics = "stdgraph"
+Interactive graphics output device when fitting the overscan bias vector.
+.le
+.ls cursor = ""
+Graphics cursor input. The default is the standard graphics cursor.
+.le
+.ls version = "June 1987"
+Package version.
+.le
+.ih
+DESCRIPTION
+The CCD reduction package is loaded when this command is entered. The
+package contains parameters which affect the operation of the tasks
+it defines. When images are processed or new images are created the
+output pixel datatype is that specified by the parameter \fBpixeltype\fR.
+Note that CCD processing replaces the original image by the processed
+image so the pixel type of the CCD images may change during processing.
+It is unlikely that real images will be processed to short images but
+the reverse is quite likely. Processing images from short to real
+pixel datatypes will generally increase the amount of disk space
+required (a factor of 2 on most computers).
+
+The tasks produce log output which may be printed on the standard
+output (the terminal unless redirected) and appended to a file. The
+parameter \fIverbose\fR determines whether processing information
+is printed. This may be desirable initially, but when using background
+jobs the verbose output should be turned off. The user may look at
+the end of the log file (for example with \fBtail\fR) to determine
+the status of the processing.
+
+The package was designed to work with data from many different observatories
+and instruments. In order to accomplish this an instrument translation
+file is used to define a mapping between the package parameters and
+the particular image header format. The instrument translation file
+is specified to the package by the parameter \fIinstrument\fR. This
+parameter is generally set by the task \fBsetinstrument\fR. The other
+file used is a subset file. This is generally created and maintained
+by the package and the user need not do anything. For more sophisticated
+users see \fBinstruments\fR and \fBsubsets\fR.
+
+The package has very little graphics
+output. The exception is the overscan bias subtraction. The bias
+vector is logged in the metacode plot file if given. The plot file
+may be examined with the tasks in the \fBplot\fR package such as
+\fBgkimosaic\fR. When interactively fitting the overscan vector
+the graphics input and output devices must be specified. The defaults
+should apply in most cases.
+
+Because processing replaces the input image by the processed image it
+may be desired to save the original image. This may be done by
+specifying a backup prefix with the parameter \fIbackup\fR. For
+example, if the prefix is "orig" and the image is "ccd001", the backup
+image will be "origccd001". The prefix may be a directory but it must
+end with '/' or '$' (for logical directories).
+.ih
+SEE ALSO
+instruments, setinstrument, subsets
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/ccdred.ms b/noao/imred/quadred/src/quad/doc/ccdred.ms
new file mode 100644
index 00000000..645514ec
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdred.ms
@@ -0,0 +1,787 @@
+.RP
+.TL
+The IRAF CCD Reduction Package -- CCDRED
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+P.O. Box 26732, Tucson, Arizona 85726
+September 1987
+.AB
+The IRAF\(dg CCD reduction package, \fBccdred\fR, provides tools
+for the easy and efficient reduction of CCD images. The standard
+reduction operations are replacement of bad pixels, subtraction of an
+overscan or prescan bias, subtraction of a zero level image,
+subtraction of a dark count image, division by a flat field calibration
+image, division by an illumination correction, subtraction of a fringe
+image, and trimming unwanted lines or columns. Another common
+operation provided by the package is scaling and combining images with
+a number of algorithms for rejecting cosmic rays. Data in the image
+header is used to make the reductions largely automated and
+self-documenting though the package may still be used in the absence of
+this data. Also a translation mechanism is used to relate image header
+parameters to those used by the package to allow data from a variety of
+observatories and instruments to be processed. This paper describes
+the design goals for the package and the main tasks and algorithms
+which satisfy these goals.
+.PP
+This paper is to be published as part of the proceedings of the
+Santa Cruz Summer Workshop in Astronomy and Astrophysics,
+\fIInstrumentation for Ground-Based Optical Astronomy: Present and
+Future\fR, edited by Lloyd B. Robinson and published by
+Springer-Verlag.
+.LP
+\(dgImage Reduction and Analysis Facility (IRAF), a software system
+distributed by the National Optical Astronomy Observatories (NOAO).
+.AE
+.NH
+Introduction
+.PP
+The IRAF CCD reduction package, \fBccdred\fR, provides tools
+for performing the standard instrumental corrections and calibrations
+to CCD images. The major design goals were:
+.IP
+.nf
+\(bu To be easy to use
+\(bu To be largely automated
+\(bu To be image header driven if the data allows
+\(bu To be usable for a variety of instruments and observatories
+\(bu To be efficient and capable of processing large volumes of data
+.fi
+.LP
+This paper describes the important tasks and algorithms and shows how
+these design goals were met. It is not intended to describe every
+task, parameter, and usage in detail; the package has full
+documentation on each task plus a user's guide.
+.PP
+The standard CCD correction and calibration operations performed are
+replacement of bad columns and lines by interpolation from neighboring
+columns and lines, subtraction of a bias level determined from overscan
+or prescan columns or lines, subtraction of a zero level using a zero
+length exposure calibration image, subtraction of a dark count
+calibration image appropriately scaled to the dark time exposure of the
+image, division by a scaled flat field calibration image, division by
+an illumination image (derived from a blank sky image), subtraction of
+a scaled fringe image (also derived from a blank sky image), and
+trimming the image of unwanted lines or columns such as the overscan
+strip. The processing may change the pixel datatype on disk (IRAF allows
+seven image datatypes); usually from 16 bit integer to real format.
+Two special operations are also supported for scan mode and one
+dimensional zero level and flat field calibrations; i.e. the same
+calibration is applied to each CCD readout line. Any set of operations
+may be done simultaneously over a list of images in a highly efficient
+manner. The reduction operations are recorded in the image header and
+may also be logged on the terminal and in a log file.
+.PP
+The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+.PP
+Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+.PP
+This paper is organized as follows. There is a section giving an
+overview of how the package is used to reduce CCD data. This gives the
+user's perspective and illustrates the general ease of use. The next
+section describes many of the features of the package contributing to
+its ease of use, automation, and generality. The next two sections
+describe the major tools and algorithms in some detail. This includes
+discussions about achieving high efficiency. Finally the status of the
+package and its use at NOAO is given. References to additional
+documentation about IRAF and the CCD reduction package and an appendix
+listing the individual tasks in the package are found at the end of
+this paper.
+.NH
+A User's Overview
+.PP
+This section provides an overview of reducing data with the IRAF CCD
+reduction package. There are many variations in usage depending on the
+type of data, whether the image headers contain information about the
+data which may be used by the tasks, and the scientific goal. Only a
+brief example is given. A more complete discussion of usage and
+examples is given in \fIA User's Guide to the IRAF CCDRED Package\fR.
+The package was developed within the IRAF system and so makes use of
+all the sophisticated features provided. These features are also
+summarized here for those not familiar with IRAF since they are an
+important part of using the package.
+.PP
+Since the IRAF system is widely distributed and runs on a wide variety
+of computers, the site of the CCD reductions might be at the telescope,
+a system at the observatory provided for this purpose, or at the
+user's home computer. The CCD images to be processed are either
+available immediately as the data is taken, transferred from the data taking
+computer via a network link (the method adopted at NOAO), or transferred
+to the reduction computer via a medium such as magnetic tape in FITS
+format. The flexibility in reduction sites and hardware is one of the
+virtues of the IRAF-based CCD reduction package.
+.PP
+IRAF tasks typically have a number of parameters which give the user
+control over most aspects of the program. This is possible since the
+parameters are kept in parameter files so that the user need not enter
+a large number of parameters every time the task is run. The user may
+change any of these parameters as desired in several ways, such as by
+explicit assignment or by using an easy-to-learn,
+fill-in-the-value screen editor. The parameter values are
+\fIlearned\fR so that once a user sets the values they are maintained
+until the user changes them again; even between login sessions.
+.PP
+The first step in using the CCD reduction package is to set the default
+processing parameters for the data to be reduced. These parameters include
+a database file describing the image header keyword translations and
+default values, the processing operations desired (operations
+required vary with instrument and observer), the calibration image names,
+and certain special parameters for special types of observations such
+as scan mode. A special script task (a command procedure) is available
+to automatically set the default values, given the instrument name, to standard
+values defined by the support staff. Identifying the instrument in this
+way may be all the novice user need do though most people quickly learn
+to adjust parameters at will.
+.PP
+As an example suppose there is an instrument identified as \fLrca4m\fR
+for an RCA CCD at the NOAO 4 meter telescope. The user gives the command
+
+.ft L
+ cl> setinstrument rca4m
+.ft R
+
+which sets the default parameters to values suggested by the support staff
+for this instrument. The user may then change these suggested values if
+desired. In this example the processing switches are set to perform
+overscan bias subtraction, zero level image subtraction, flat fielding,
+and trimming.
+.PP
+The NOAO image headers contain information identifying the type of
+image, such as object, zero level, and flat field, the filter used to
+match flat fields with object images, the location of the overscan bias
+data, the trim size for the data, and whether the image has been
+processed. With this information the user need not worry about
+selecting images, pairing object images with calibration images, or
+inadvertently reprocessing an image.
+.PP
+The first step is to combine multiple zero level and flat field observations
+to reduce the effects of statistical noise. This is done by the
+commands
+
+.nf
+.ft L
+ cl> zerocombine *.imh
+ cl> flatcombine *.imh
+.ft R
+.fi
+
+The "cl> " is the IRAF command language prompt. The first command says
+look through all the images and combine the zero level images. The
+second command says look through all the images and combine the flat
+field images by filter. What could be simpler? Some \fIhidden\fR (default)
+parameters the user may modify are the combined image name, whether to
+process the images first, and the type of combining algorithm to use.
+.PP
+The next step is to process the images using the combined calibration
+images. The command is
+
+.ft L
+ cl> ccdproc *.imh
+.ft R
+
+This command says look through all the images, find the object images,
+find the overscan data based on the image header and subtract the
+bias, subtract the zero level calibration image, divide by the flat field
+calibration image, and trim the bias data and edge lines and columns.
+During this operation the task recognizes that the
+zero level and flat field calibration images have not been processed
+and automatically processes them when they are needed. The log output
+of this task, which may be to the terminal, to a file, or both, shows
+how this works.
+
+.nf
+.ft L
+ ccd003: Jun 1 15:12 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:12 Overscan section is [520:540,*], mean=485.0
+ Dark: Jun 1 15:12 Trim data section is [3:510,3:510]
+ Dark: Jun 1 15:13 Overscan section is [520:540,*], mean=484.6
+ ccd003: Jun 1 15:13 Dark count image is Dark.imh
+ FlatV: Jun 1 15:13 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:14 Overscan section is [520:540,*], mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh, scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*], mean=485.2
+ ccd004: Jun 1 15:16 Dark count image is Dark.imh
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh, scale=138.2
+ \fI<... more ...>\fL
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*], mean=482.4
+ ccd013: Jun 1 15:23 Dark count image is Dark.imh
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*], mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh, scale=132.3
+ \fI<... more ...>\fL
+.ft R
+.fi
+
+.PP
+The log gives the name of the image and a time stamp for each entry.
+The first image is ccd003. It is to be trimmed to the specified
+size given as an \fIimage section\fR, an array notation used commonly
+in IRAF to specify subsections of images. The location of the
+overscan data is also given by an image section which, in this case,
+was found in the image header. The mean bias level of the overscan
+is also logged though the overscan is actually a function of the
+readout line with the order of the function selected by the user.
+.PP
+When the task comes to subtracting the zero level image it first
+notes that the calibration image has not been processed and switches
+to processing the zero level image. Since it knows it is a zero level
+image the task does not attempt to zero level or flat field correct
+this image. After the zero level image has been processed the task
+returns to the object image only to find that the flat field image
+also has not been processed. It determines that the object image was
+obtained with a V filter and selects the flat field image having the same
+filter. The flat field image is processed through the zero level correction
+and then the task again returns to the object image, ccd003, which it
+finishes processing.
+.PP
+The next image, ccd004, is also a V filter
+observation. Since the zero level and V filter flat field have been
+processed the object image is processed directly. This continues
+for all the object images except for a detour to process the B filter flat
+field when the task first encounters a B filter object image.
+.PP
+In summary, the basic usage of the CCD reduction package is quite simple.
+First, the instrument is identified and some parameters for the data
+are set. Calibration images are then combined if needed. Finally,
+the processing is done with the simple command
+
+.ft L
+ cl> ccdproc *.imh&
+.ft R
+
+where the processing is performed as a \fIbackground job\fR in this example.
+This simplicity was a major goal of the package.
+.NH
+Features of the Package
+.PP
+This section describes some of the special features of the package
+which contribute to its ease of use, generality, and efficiency.
+The major criteria for ease of use are to minimize the user's record keeping
+involving input and output image names, the types of images, subset
+parameters such as filters which must be kept separate, and the state
+of processing of each image. The goal is to allow input images to
+be specified using simple wildcards, such as "*.imh" to specify all
+images, with the knowledge that the task will only operate on images
+for which it makes sense. To accomplish this the tasks must be able to
+determine the type of image, subset, and the state of processing from
+the image itself. This is done by making use of image header parameters.
+.PP
+For generality the package does not require any image header information
+except the exposure time. It is really not very much more difficult to
+reduce such data. Mainly, the user must be more explicit about specifying
+images and setting task parameters or add the information to the image
+headers. Some default header information may also be set in the image
+header translation file (discussed below).
+.PP
+One important image header parameter is the image type. This
+discriminates between object images and various types of calibration
+images such as flat field, zero level, dark count, comparison arcs,
+illumination, and fringe images. This information is used in two
+ways. For most of the tasks the user may select that only one type of
+image be considered. Thus, all the flat field images may be selected
+for combining or only the processing status of the object images be
+listed. The second usage is to allow the processing tasks to identify
+the standard calibration images and apply only those operations which
+make sense. For example, flat field images are not divided by a
+flat field. This allows the user to set the processing operations
+desired for the object images without fear of misprocessing the
+calibration images. The image type is also used to automatically
+select calibration images from a list of images to be processed instead
+of explicitly identifying them.
+.PP
+A related parameter specifies the subset. For certain operations the
+images must have a common value for this parameter. This parameter is
+often the filter but it may also apply to a grating or aperture, for example.
+The subset parameter is used to identify the appropriate flat field
+image to apply to an image or to select common flat fields to be combined
+into a higher quality flat field. This is automatic and the user need not
+keep track of which image was taken with which filter or grating.
+.PP
+The other important image header parameters are the processing flags.
+These identify when an image has been processed and also act as a history
+of the operation including calibration images used and other parameter
+information. The usage of these parameters is obvious; it allows the
+user to include processed images in a wildcard list knowing that the
+processing will not be repeated and to quickly determine the processing
+status of the image.
+.PP
+Use of image header parameters often ties the software to a
+particular observatory. To maintain generality and usefulness for data
+other than that at NOAO, the CCD reduction package was designed to
+provide a translation between parameters requested by the package and
+those actually found in the image header. This translation is defined
+in a simple text file which maps one keyword to another and also gives
+a default value to be used if the image header does not include a
+value. In addition the translation file maps the arbitrary strings
+which may identify image types to the standard types which the package
+recognizes. This is a relatively simple scheme and does not allow for
+forming combinations or for interpreting values which are not simple
+such as embedding an exposure time as part of a string. A more complex
+translation scheme may prove desirable as experience is gained with
+other types of image header formats, but by then a general header translation
+ability and/or new image database structure may be a standard IRAF
+feature.
+.PP
+This feature has proven useful at NOAO. During the course of
+developing the package the data taking system was modernized by
+updating keywords and adding new information in the image headers,
+generally following the lines laid out by the \fBccdred\fR package.
+However, there is a period of transition and it is also desirable to
+reduce preexisting data. There are several different formats for this
+data. The header translation files make coping with these different
+formats relatively easy.
+.PP
+A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows the user to abort the task without leaving the image data in a
+partially processed state and protects data if the computer
+crashes. The second feature is that there is a parameter which may be
+set to make a backup of the input data with a particular prefix; for
+example "b", "orig", or "imdir$" (a logical directory prefix). This
+backup feature may be used when there is sufficient disk space, when
+learning to use the package, or just to be cautious.
+.PP
+In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image, there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
+.PP
+The goal of generality for many instruments at
+different observatories inherently conflicts with the goal of ease of
+use. Generality requires many parameters and options. This is
+feasible in the CCD reduction package, as well as the other IRAF packages,
+because of the IRAF parameter handling mechanism. In \fBccdred\fR
+there still remains the problem of setting the parameters appropriately
+for a particular instrument, image header format, and observatory.
+.PP
+To make this convenient there is a task, \fBsetinstrument\fR, that,
+based on an instrument name, runs a setup script for the instrument.
+An example of this task was given in the previous section.
+The script may do any type of operation but mainly it sets default
+parameters. The setup scripts are generally created by the support staff
+for the instrument. The combination of the setup script and the
+instrument translation file make the package, in a sense, programmable
+and achieves the desired instrument/observatory generality with ease of use.
+.NH
+CCD Processing
+.PP
+This section describes in some detail how the CCD processing is performed.
+The task which does the basic CCD processing is called \fBccdproc\fR.
+From the point of view of usage the task is very simple but a great deal
+is required to achieve this simplicity. The approach we take in describing
+the task is to follow the flow of control as the task runs with digressions
+as appropriate.
+.PP
+The highest level of control is a loop over the input images; all the
+operations are performed successively on each image. It is common for
+IRAF tasks which operate on individual images to allow the operation to
+be repeated automatically over a list of input images. This is important
+in the \fBccdred\fR package because data sets are often large and the
+processing is generally the same for each image. It would be tedious
+to have to give the processing command for each image to be processed.
+If an error occurs while processing an image the error is
+printed as a warning and processing continues with the next image.
+This provides protection primarily against mistyped or nonexistent images.
+.PP
+Before the first image is processed the calibration images are
+identified. There are two ways to specify calibration images;
+explicitly via task parameters or implicitly as part of the list of
+images to be processed. Explicitly identifying calibration images
+takes precedence over calibration images in the input list. Specifying
+calibration images as part of the input image list requires that the
+image types can be determined from the image header. Using the input
+list provides a mechanism for breaking processing up into sets of
+images (possibly using files containing the image names for each set)
+each having their own calibration images. One can, of course,
+selectively specify input and calibration images, but whenever possible
+one would like to avoid having to specify explicit images to process
+since this requires record keeping by the user.
+.PP
+The first step in processing an image is to check that it is of the
+appropriate image type. The user may select to process images of only
+one type. Generally this is object images since calibration images are
+automatically processed as needed. Images which are not of the desired
+type are skipped and the next image is considered.
+.PP
+A temporary output image is created next. The output pixel datatype on
+disk may be changed at this point as selected by the user.
+For example it is common for the raw CCD images to be digitized as 16
+bit integers but after calibration it is sometimes desirable to have
+real format pixels. If no output pixel datatype is specified the
+output image takes the same pixel datatype as the input image. The
+processing is done by operating on the input image and writing the
+results to a temporary output image. When the processing is complete
+the output image replaces the input image. This gives the effect of
+processing the images in place but with certain safeguards. If the
+computer crashes or the processing is interrupted the integrity of the
+input image is maintained. The reasons for choosing to process the
+images in this way are to avoid having to generate new image names (a
+tiresome record keeping process for the user), to minimize disk
+usage, and generally the unprocessed images are not used once they have
+been processed. When dealing with large volumes of data these reasons
+become fairly important. However, the user may specify a backup prefix
+for the images in which case, once the processing is completed, the
+original input image is renamed by appending it to the prefix (or with
+an added digit if a previous backup image of the same name exists)
+before the processed output image takes the original input name.
+.PP
+The next step is to determine the image geometry. Only a subsection of
+the raw image may contain the CCD data. If this region is specified by
+a header parameter then the processing will affect only this region.
+This allows calibration and other data to be part of the image.
+Normally, the only other data in an image is overscan or prescan data.
+The location of this bias data is determined from the image header or
+from a task parameter (which overrides the image header value). To
+relate calibration images of different sizes and to allow for readout
+of only a portion of the CCD detector, a header parameter may relate
+the image data coordinates to the full CCD coordinates. Application of
+calibration image data and identifying bad pixel regions via a bad
+pixel file is done in this CCD coordinate system. The final
+geometrical information is the region of the input image to be output
+after processing; an operation called trimming. This is defined by an
+image header parameter or a task parameter. Trimming of the image is
+selected by the user. Any or all of this geometry information may be
+absent from the image and appropriate defaults are used.
+.PP
+Each selected operation which is appropriate for the image type is then
+considered. If the operation has been performed previously it will not
+be repeated. If all selected operations have been performed then the
+temporary output image is deleted and the input image is left
+unchanged. The next image is then processed.
+.PP
+For each selected operation to be performed the pertinent data is
+determined. This consists of such things as the name of the
+calibration image, scaling factors, the overscan bias function, etc.
+Note that at this point only the parameters are determined, the
+operation is not yet performed. This is because operations are not
+performed sequentially but simultaneously as described below. Consider
+flat fielding as an example. First the input image is checked to see
+if it has been flat fielded. Then the flat field calibration image is
+determined. The flat field image is checked to see if it has been
+processed. If it has not been processed then it is processed by
+calling a procedure which is essentially a copy of the main processing
+program. After the flat field image has been processed, parameters
+affecting the processing, such as the flat field scale factor
+(essentially the mean of the flat field image), are determined. A log
+of the operation is then printed if desired.
+.PP
+Once all the processing operations and parameters have been defined the
+actual processing begins. One of the key design goals was that the
+processing be efficient. There are two primary methods used to achieve
+this goal; separate processing paths for 16 bit integer data and
+floating point data and simultaneous operations. If the image, the
+calibration images, and the output image (as selected by the user) are
+16 bit integer pixel datatypes then the image data is read and written
+as integer data. This eliminates internal datatype conversions both
+during I/O and during computations. However, many operations include
+use of real factors such as the overscan bias, dark count exposure
+scaling, and flat field scaling which causes the computation to be done
+in real arithmetic before the result is stored again as an integer
+value. In any case there is never any loss of precision except when
+converting the output pixel to short integer. If any of the images are
+not integer then a real internal data path is used in which input and
+output image data are converted to real as necessary.
+.PP
+For each data path the processing proceeds line-by-line. For each line
+in the output image data region (ignoring pixels outside the data area
+and pixels which are trimmed) the appropriate input data and
+calibration data are obtained. The calibration data is determined from
+the CCD coordinates of the output image and are not necessarily from
+the same image line or columns. The input data is copied to the output
+array while applying bad pixel corrections and trimming. The line is
+then processed using a specially optimized procedure. This procedure
+applies all operations simultaneously for all combinations of
+operations. As an example, consider subtracting an overscan bias,
+subtracting a zero level, and dividing by a flat field. The basic
+kernel of the task, where the bulk of the CPU time is used, is
+
+.nf
+.ft L
+ do i = 1, n
+ out[i] = (out[i] - overscan - zero[i]) * flatscale / flat[i]
+.ft R
+.fi
+
+Here, \fIn\fR is the number of pixels in the line, \fIoverscan\fR is
+the overscan bias value for the line, \fIzero\fR is the zero level data
+from the zero level image, \fIflatscale\fR is the mean of the flat
+field image, and \fIflat\fR is the flat field data from the flat
+field image. Note the operations are not applied sequentially but
+in a single statement. This is the most efficient method and there is
+no need for intermediate images.
+.PP
+Though the processing is logically performed line-by-line in the program,
+the image I/O from the disk is not done this way. The IRAF virtual
+operating system image interface automatically provides multi-line
+buffering for maximal I/O efficiency.
+.PP
+In many image processing systems it has been standard to apply operations
+sequentially over an image. This requires producing intermediate images.
+Since this is clearly inefficient in terms of I/O it has been the practice
+to copy the images into main memory and operate upon them there until
+the final image is ready to be saved. This has led to the perception
+that in order to be efficient an image processing system \fImust\fR
+store images in memory. This is not true and the IRAF CCD reduction
+package illustrates this. The CCD processing does not use intermediate
+images and does not need to keep the entire image in main memory.
+Furthermore, though of lesser importance than I/O, the single statement method
+illustrated above is more efficient than multiple passes through the
+images even when the images are kept in main memory. Finally, as CCD
+detectors increase in size and small, fast, and cheap processors become
+common it is a distinct advantage to not require the large amounts of
+memory needed to keep entire images in memory.
+.PP
+There is one area in which use of main memory can improve performance
+and \fBccdproc\fR does take advantage of it if desired. The calibration
+images usually are the same for many input images. By specifying the
+maximum amount of memory available for storing images in memory
+the calibration images may be stored in memory up to that amount.
+By parameterizing the memory requirement there is no builtin dependence
+on large memory!
+.PP
+After processing the input image the last steps are to log the operations
+in the image header using processing keywords and replace the input
+image by the output image as described earlier. The CCD coordinates
+of the data are recorded in the header, even if not there previously, to
+allow further processing on the image after the image has been trimmed.
+.NH
+Combining Images
+.PP
+The second important tool in the CCD reduction package is a task to combine
+many images into a single, higher quality image. While this may also be
+done with more general image processing tools (the IRAF task \fBimsum\fR
+for example) the \fBccdred\fR tasks include special CCD dependent features such
+as recognizing the image types and using the image header translation
+file. Combining images is often done
+with calibration images, which are easy to obtain in number, where it
+is important to minimize the statistical noise so as to not affect the
+object images. Sometimes object images also are combined.
+The task is called \fBcombine\fR and there are special versions of
+this task called \fBzerocombine, darkcombine\fR, and \fBflatcombine\fR
+for the standard calibration images.
+.PP
+The task takes a list of input images to be combined. As output there
+is the combined image, an optional sigma image, and optional log output either
+to the terminal, to a log file, or both. A subset or subsets
+of the input images may be selected based on the image type and a
+subset parameter such as the filter. As with the processing task,
+this allows selecting images without having to explicitly list each
+image from a large data set. When combining based on a subset parameter
+there is an output image, and possibly a sigma image, for each separate subset.
+The output image pixel datatype may also be changed during combining;
+usually from 16 bit integer input to real output.
+The sigma image is the standard deviation of the input images about the
+output image.
+.PP
+Except for summing the images together,
+combining images may require correcting for variations between the images
+due to differing exposure times, sky background, extinctions, and
+positions. Currently, extinction corrections and registration are
+not included but scaling and shifting corrections are included.
+The scaling corrections may be done by exposure times or by computing
+the mode in each image. Additive shifting is also done by computing
+the mode in the images. The region of the image in which the mode
+is computed can be specified but by default the whole image is used.
+A scaling correction is used when the flux level or sensitivity is varying.
+The offset correction is used when the sky brightness is varying independently
+of the object brightness. If the images are not scaled then special
+data paths combine the images more efficiently.
+.PP
+Except for medianing and summing, the images are combined by averaging.
+The average may be weighted by
+
+.nf
+.ft L
+ weight = (N * scale / mode) ** 2
+.ft R
+.fi
+
+where \fIN\fR is the number of images previously combined (the task
+records the number of images combined in the image header), \fIscale\fR
+is the relative scale (applied by dividing) from the exposure time or
+mode, and \fImode\fR is the background mode estimate used when adding a
+variable offset.
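+For example, the weights for a set of input images might be computed as
+in the following sketch (illustrative Python; this is not the actual
+task code and the variable names are hypothetical):
+
+.nf
+.ft L
+    def combine_weights(ncombine, scales, modes):
+        # weight = (N * scale / mode) ** 2 for each input image
+        return [(n * s / m) ** 2
+                for n, s, m in zip(ncombine, scales, modes)]
+.ft R
+.fi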
+.PP
+The combining operation is the heart of the task. There are a number of
+algorithms which may be used, as well as statistical weighting.
+The algorithms are used to detect and reject deviant pixels, such as
+cosmic rays.
+The choice of algorithm depends on the data, the number of images,
+and the importance of rejecting cosmic rays. The more complex the
+algorithm the more time consuming the operation.
+The list below summarizes the algorithms.
+Further algorithms may be added in time.
+
+.IP "Sum - sum the input images"
+.br
+The input images are combined by summing. Care must be taken
+not to exceed the range of the 16 bit integer datatype when summing if the
+output datatype is of this type. Summing is the only algorithm in which
+scaling and weighting are not used. Also no sigma image is produced.
+.IP "Average - average the input images"
+.br
+The input images are combined by averaging. The images may be scaled
+and weighted. There is no pixel rejection. A sigma image is produced
+if more than one image is combined.
+.IP "Median - median the input images"
+.br
+The input images are combined by medianing each pixel. Unless the images
+are at the same exposure level they should be scaled. The sigma image
+is based on all the input images and is only an approximation to the
+uncertainty in the median estimates.
+.IP "Minreject, maxreject, minmaxreject - reject extreme pixels"
+.br
+At each pixel the minimum, maximum, or both are excluded from the
+average. The images should be scaled and the average may be
+weighted. The sigma image requires at least two pixels after rejection
+of the extreme values. These are relatively fast algorithms and are
+a good choice if there are many images (>15).
+.IP "Threshold - reject pixels above and below specified thresholds"
+.br
+The input images are combined with pixels above and below specified
+threshold values (before scaling) excluded. The images may be scaled
+and the average weighted. The sigma image also has the rejected
+pixels excluded.
+.IP "Sigclip - apply a sigma clipping algorithm to each pixel"
+.br
+The input images are combined by applying a sigma clipping algorithm
+at each pixel. The images should be scaled. This only rejects highly
+deviant points and so
+includes more of the data than the median or minimum and maximum
+algorithms. It requires many images (>10-15) to work effectively.
+Otherwise the bad pixels bias the sigma significantly. The mean
+used to determine the sigmas is based on the "minmaxrej" algorithm
+to eliminate the effects of bad pixels on the mean. Only one
+iteration is performed and at most one pixel is rejected at each
+point in the output image. After the deviant pixels are rejected the final
+mean is computed from all the data. The sigma image excludes the
+rejected pixels.
+.IP "Avsigclip - apply a sigma clipping algorithm to each pixel"
+.br
+The input images are combined with a variant of the sigma clipping
+algorithm which works well with only a few images. The images should
+be scaled. For each line the mean is first estimated using the
+"minmaxrej" algorithm. The sigmas at each point in the line are scaled
+by the square root of the mean, that is a Poisson scaling of the noise
+is assumed. These sigmas are averaged to get a line estimate of the
+sigma. Then the sigma at each point in the line is estimated by
+multiplying the line sigma by the square root of the mean at that point. As
+with the sigma clipping algorithm only one iteration is performed and
+at most one pixel is rejected at each point. After the deviant pixels
+are rejected the final mean is computed from all the data.  The sigma
+image excludes the rejected pixels.
+.RE
+.PP
+The "avsigclip" algorithm is the best algorithm for rejecting cosmic
+rays, especially with a small number of images, but it is also the most
+time consuming. With many images (>10-15) it might be advisable to use
+one of the other algorithms ("maxreject", "median", "minmaxrej") because
+of their greater speed.
+.PP
+This task also has several design features to make it efficient and
+versatile. There are separate data paths for integer data and real
+data; as with processing, if all input images and the output image are
+of the same datatype then the I/O is done with no internal conversions.
+With mixed datatypes the operations are done as real. Even in the
+integer path the operations requiring real arithmetic to preserve the
+accuracy of the calculation are performed in that mode. There is
+effectively no limit to the number of images which may be combined.
+Also, the task determines the amount of memory available and buffers
+the I/O as much as possible. This is a case where operating on images
+from disk rather than in memory is essential.
+.NH
+Status and Conclusion
+.PP
+The initial implementation of the IRAF \fBccdred\fR package was
+completed in June 1987. It has been in use at the National Optical
+Astronomy Observatories since April 1987. The package was not
+distributed with Version 2.5 of IRAF (released in August 1987) but is
+available as a separate installation upon request. It will be part of
+future releases of IRAF.
+.PP
+At NOAO the CCD reduction package is available at the telescopes as the
+data is obtained. This is accomplished by transferring the images from
+the data taking computer to a Sun workstation (Sun Microsystems, Inc.)
+initially via tape and later by a direct link. There are several
+reasons for adopting this architecture. First, the data acquisition
+system is well established and is dedicated to its real-time function.
+The second computer was phased in without disrupting the essential
+operation of the telescopes and if it fails data taking may continue
+with data being stored on tape. The role of the second computer is to
+provide faster and more powerful reduction and analysis capability not
+required in a data acquisition system. In the future it can be more
+easily updated to follow the state of the art in small computers. As
+CCD detectors get larger the higher processing speeds will be essential
+to keep up with the data flow.
+.PP
+By writing the reduction software in the high level, portable, IRAF
+system the users have the capability to process their data from the
+basic CCD reductions to a full analysis at the telescope. Furthermore,
+the same software is widely available on a variety of computers if
+later processing or reprocessing is desired; staff and visitors at NOAO
+may also reduce their data at the headquarters facilities. The use of
+a high level system was also essential in achieving the design goals;
+it would be difficult to duplicate this complex package without
+the rich programming environment provided by the IRAF system.
+.NH
+References
+.PP
+The following documentation is distributed by the National Optical
+Astronomy Observatories, Central Computer Services, P.O. Box 26732,
+Tucson, Arizona, 85726. A comprehensive description of the IRAF system
+is given in \fIThe IRAF Data Reduction and Analysis System\fR by Doug
+Tody (also appearing in \fIProceedings of the SPIE - Instrumentation in
+Astronomy VI\fR, Vol. 627, 1986). A general guide to using IRAF is \fIA
+User's Introduction to the IRAF Command Language\fR by Peter Shames
+and Doug Tody. Both these documents are also part of the IRAF
+documentation distributed with the system.
+.PP
+A somewhat more tutorial description of the \fBccdred\fR package is
+\fIA User's Guide to the IRAF CCDRED Package\fR by the author.
+Detailed task descriptions and supplementary documentation are
+given in the on-line help library and are part of the user's guide.
+.NH
+Appendix
+.PP
+The current set of tasks making up the IRAF CCD Reduction Package,
+\fBccdred\fR, are summarized below.
+
+.nf
+.ft L
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdlist - List CCD processing information
+ ccdproc - Process CCD images
+ combine - Combine CCD images
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+.fi
+.ft R
diff --git a/noao/imred/quadred/src/quad/doc/ccdtypes.hlp b/noao/imred/quadred/src/quad/doc/ccdtypes.hlp
new file mode 100644
index 00000000..2cec33ea
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/ccdtypes.hlp
@@ -0,0 +1,124 @@
+.help ccdtypes Jun87 noao.imred.ccdred
+.ih
+NAME
+ccdtypes -- Description of the CCD image types
+.ih
+CCDTYPES
+The following CCD image types may be specified as the value of the parameter
+\fIccdtype\fR:
+
+.nf
+ "" - (the null string) all image types
+ object - object images
+ zero - zero level images such as a bias or preflash
+ dark - dark count images
+ flat - flat field images
+    illum - illumination images
+ fringe - fringe correction images
+ other - other image types defined in the translation file
+ none - images without an image type parameter
+ unknown - image types not defined in the translation file
+.fi
+.ih
+DESCRIPTION
+The \fBccdred\fR package recognizes certain standard CCD image types
+identified in the image header. The tasks may select images of a
+particular CCD image type from image lists with the parameter
+\fIccdtype\fR and also recognize and take special actions for
+calibration images.
+
+In order to make use of CCD image type information the header keyword
+identifying the image type must be specified in the instrument
+translation file. This entry has the form
+
+ imagetyp keyword
+
+where keyword is the image header keyword. This allows the package to
+access the image type string. There must also be a translation between
+the image type strings and the CCD types as recognized by the package.
+This information consists of lines in the instrument translation file
+of the form
+
+ header package
+
+where header is the exact string given in the image header and package
+is one of the types recognized by the package. The image header string
+can be virtually anything and if it contains blanks it must be
+quoted. The package image types are those given above except for
+the null string, "none", and "unknown". That is, these types may
+be specified as a CCD image type in selecting images but not as translations
+of image type strings.
+
+There may be more than one image type that maps to the same package
+type. In particular other standard CCD image types, such as comparison
+spectra, multiple exposure, standard star, etc., should be mapped to
+object or other.  There may also be more than one type of flat field, e.g. dome
+flat, sky flat, and lamp flat. For more on the instrument translation
+file see the help for \fBinstruments\fR.
+.ih
+EXAMPLES
+1. The example entries in the instrument translation file are from the 1986
+NOAO CCD image header format produced by the CAMERA format tape writer.
+
+.nf
+ imagetyp data-typ
+
+ 'OBJECT (0)' object
+ 'DARK (1)' dark
+ 'PROJECTOR FLAT (2)' flat
+ 'SKY FLAT (3)' other
+ 'COMPARISON LAMP (4)' other
+ 'BIAS (5)' zero
+ 'DOME FLAT (6)' flat
+.fi
+
+The image header keyword describing the image type is "data-typ".
+The values of the image type strings in the header contain blanks so they
+are quoted. Also the case of the strings is important. Note that there
+are two types of flat field images and two types of other images.
+
+2. One way to check the image types is with the task \fBccdlist\fR.
+
+.nf
+ cl> ccdlist *.imh
+ Zero.imh[504,1][real][zero][1][OT]:FOCUS L98-193
+ Flat1.imh[504,1][real][flat][1][OTZ]:dflat 6v+blue 5s
+ ccd002.imh[504,504][real][unknown][1][OTZF]:FOCUS L98-193
+ ccd003.imh[544,512][short][object][1]:L98-193
+ ccd004.imh[544,512][short][object][1]:L98-193
+ ccd005.imh[544,512][short][object][1]:L98-193
+ oldformat.imh[544,512][short][none][1]:M31 V
+.fi
+
+The unknown type has a header image type of "MUL (8)". The old format
+image does not have any header type.
+
+3. To select only images of a particular type:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd003.imh[544,512][short][object][1]:L98-193
+ ccd004.imh[544,512][short][object][1]:L98-193
+ ccd005.imh[544,512][short][object][1]:L98-193
+ cl> ccdlist *.imh ccdtype=unknown
+ ccd002.imh[504,504][real][unknown][1][OTZF]:FOCUS L98-193
+ cl> ccdlist *.imh ccdtype=none
+ oldformat.imh[544,512][short][none][1]:M31 V
+.fi
+
+4. To process images with \fBccdproc\fR:
+
+.nf
+ cl> ccdproc *.imh
+ cl> ccdproc *.imh ccdtype=object
+.fi
+
+In the first case all the images will be processed (the default value of
+\fIccdtype\fR is ""). However, the task recognizes the calibration
+images, such as zero level and flat fields, and processes them appropriately.
+In the second case only object images are processed and all other images
+are ignored (except if needed as a calibration image).
+.ih
+SEE ALSO
+instruments
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/combine.hlp b/noao/imred/quadred/src/quad/doc/combine.hlp
new file mode 100644
index 00000000..b3c0848e
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/combine.hlp
@@ -0,0 +1,1030 @@
+.help combine Sep92 noao.imred.ccdred
+.ih
+NAME
+combine -- Combine CCD images using various algorithms
+.ih
+USAGE
+combine input output
+.ih
+PARAMETERS
+.ls input
+List of CCD images to combine. Images of a particular CCD image type may be
+selected with the parameter \fIccdtype\fR with the remaining images ignored.
+.le
+.ls output
+Output combined image or list of images. If the \fIproject\fR parameter is
+no (the typical case for CCD acquisition) then there will be one output image
+or, if the \fIsubsets\fR parameter is selected, one output image per subset.
+If the images consist of stacks then the \fIproject\fR option allows combining
+each input stack into separate output images as given by the image list.
+.le
+.ls plfile = "" (optional)
+Output pixel list file or list of files. If no name is given or the
+list ends prematurely then no file is produced. The pixel list file
+is a map of the number of pixels rejected or, equivalently,
+the total number of input images minus the number of pixels actually used.
+The file name is also added to the output image header under the
+keyword BPM.
+.le
+.ls sigma = "" (optional)
+Output sigma image or list of images. If no name is given or the list ends
+prematurely then no image is produced.  The sigma is the standard deviation,
+corrected for a finite population, of the input pixel values (excluding
+rejected pixels) about the output combined pixel values.
+.le
+
+.ls ccdtype = ""
+CCD image type to combine. If specified only input images of the specified
+type are combined. See \fBccdtypes\fR for the possible image types.
+.le
+.ls subsets = no
+Combine images by subset parameter? If yes then the input images are
+grouped by subset parameter and each group combined into a separate output
+image. The subset identifier is appended to the output image
+name(s). See \fBsubsets\fR for more on the subset parameter.
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+offsetting, masking, thresholding, and rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation performed on the pixels remaining after offsetting,
+masking and thresholding. The algorithms are discussed in the
+DESCRIPTION section. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the nlow and nhigh pixels
+ ccdclip - Reject pixels using CCD noise parameters
+ crreject - Reject only positive pixels using CCD noise parameters
+ sigclip - Reject pixels using a sigma clipping algorithm
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+ pclip - Reject pixels using sigma based on percentiles
+.fi
+
+.le
+.ls project = no
+Project (combine) across the highest dimension of the input images? If
+no then all the input images are combined to a single output image. If
+yes then the highest dimension elements of each input image are combined to
+an output image and optional pixel list and sigma images. Each element of
+the highest dimension may have a separate offset but there can only be one
+mask image.
+.le
+.ls outtype = "real" (short|integer|long|real|double)
+Output image pixel datatype. The pixel datatypes are "double",
+"real", "long", "integer", and "short" with highest precedence first.
+If none is specified then the highest precedence datatype of the input
+images is used. The datatypes may be abbreviated to a single character.
+.le
+.ls offsets = "none" (none|grid|<filename>)
+Input image offsets. The offsets may be specified in a file consisting
+of one line per image with the offsets in each dimension forming the
+columns. The special case of a grid may be specified by the string:
+
+.nf
+ grid [n1] [s1] [n2] [s2] ...
+.fi
+
+where ni is the number of images in dimension i and si is the step
+in dimension i. For example "grid 5 100 5 100" specifies a 5x5
+grid with origins offset by 100 pixels.
+.le
+.ls masktype = "none" (none|goodvalue|badvalue|goodbits|badbits)
+Type of pixel masking to use. If "none" then no pixel masking is done
+even if an image has an associated pixel mask. The other choices
+are to select the value in the pixel mask to be treated as good
+(goodvalue) or bad (badvalue) or the bits (specified as a value)
+to be treated as good (goodbits) or bad (badbits). The pixel mask
+file name comes from the image header keyword BPM.
+.le
+.ls maskvalue = 0
+Mask value used with the \fImasktype\fR parameter. If the mask type
+selects good or bad bits the value may be specified using IRAF notation
+for decimal, octal, or hexadecimal; i.e. 12, 14b, or 0cx to select bits 3
+and 4.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+
+.ls scale = "none" (none|mode|median|mean|exposure|@<file>|!<keyword>)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, scale
+by the exposure time in the image header, scale by the values in a specified
+file, or scale by a specified image header keyword. When specified in
+a file the scales must be one per line in the order of the input
+images.
+.le
+.ls zero = "none" (none|mode|median|mean|@<file>|!<keyword>)
+Additive zero level image shifts to be applied. The choices are none or
+shift by the mode, median, or mean of the specified statistics section,
+shift by values given in a file, or shift by values given by an image
+header keyword. When specified in a file the zero values must be one
+per line in the order of the input images.
+.le
+.ls weight = "none" (none|mode|median|mean|exposure|@<file>|!<keyword>)
+Weights to be applied during the final averaging. The choices are none,
+the mode, median, or mean of the specified statistics section, the exposure
+time, values given in a file, or values given by an image header keyword.
+When specified in a file the weights must be one per line in the order of
+the input images.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling and
+weighting. If no section is given then the entire region of the input is
+sampled (for efficiency the images are sampled if they are big enough).
+When the images are offset relative to each other one can precede the image
+section with one of the modifiers "input", "output", "overlap". The first
+interprets the section relative to the input image (which is equivalent to
+not specifying a modifier), the second interprets the section relative to
+the output image, and the last selects the common overlap and any following
+section is ignored.
+.le
+
+.ce
+Algorithm Parameters
+.ls lthreshold = INDEF, hthreshold = INDEF
+Low and high thresholds to be applied to the input pixels. This is done
+before any scaling, rejection, and combining. If INDEF the thresholds
+are not used.
+.le
+.ls nlow = 1, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+These numbers are converted to fractions of the total number of input images
+so that if no rejections have taken place the specified number of pixels
+are rejected while if pixels have been rejected by masking, thresholding,
+or nonoverlap, then the fraction of the remaining pixels, truncated
+to an integer, is used.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls sigscale = 0.1 (ccdclip, crreject, sigclip, avsigclip)
+This parameter determines when Poisson corrections are made to the
+computation of a sigma for images with different scale factors. If all
+relative scales are within this value of unity and all relative zero level
+offsets are within this fraction of the mean then no correction is made.
+The idea is that if the images are all similarly though not identically
+scaled, the extra computations involved in making Poisson corrections for
+variations in the sigmas can be skipped. A value of zero will apply the
+corrections except in the case of equal images and a large value can be
+used if the sigmas of pixels in the images are independent of scale and
+zero level.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See the DESCRIPTION section for further details.
+.le
+.ls grow = 0
+Number of pixels to either side of a rejected pixel along image lines
+to also be rejected. This applies only to pixels rejected by one of
+the rejection algorithms and not the masked or threshold rejected pixels.
+.le
+
+PACKAGE PARAMETERS
+
+The package parameters are used to specify verbose and log output and the
+instrument and header definitions.
+.ih
+DESCRIPTION
+A set of CCD images are combined by weighted averaging or medianing. Pixels
+may be rejected from the combining by using pixel masks, threshold levels,
+and rejection algorithms. The images may be scaled multiplicatively or
+additively based on image statistics, image header keywords, or text files
+before rejection. The images may be combined with integer pixel coordinate
+offsets to produce an image bigger than any of the input images.
+This task is a variant of the \fBimages.imcombine\fR task specialized
+for CCD images.
+
+The input images to be combined are specified by a list. A subset or
+subsets of the input list may be selected using the parameters
+\fIccdtype\fR and \fIsubsets\fR. The \fIccdtype\fR parameter selects only
+images of a specified standard CCD image type. The \fIsubsets\fR parameter
+breaks up the input list into sublists of common subset parameter (filter,
+grating, etc.). For more information see \fBccdtypes\fR and
+\fBsubsets\fR. This selection process is useful with wildcard templates to
+combine, for example, the flat field images for each filter in one step
+(see \fBflatcombine\fR). When subsets of the input list are used the
+output image and optional pixel file and sigma image are given by root names
+with a subset identifier appended by the task.
+
+If the \fIproject\fR parameter is yes then the highest dimension elements
+of each input image are combined to make an output image of one lower
+dimension.  There is no limit to the number of elements combined in this
+case.  If the \fIproject\fR parameter is no then the entire input list
+is combined to form a single output image per subset. In this case the
+images must all have the same dimensionality but they may have different
+sizes. There is a software limit of approximately 100 images in this
+case.
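+
+For example (the image name is hypothetical), the planes of a three
+dimensional image may be combined into a single two dimensional image
+with
+
+.nf
+    cl> combine cube001 avg project+
+.fi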
+
+The output image header is a copy of the first image in the combined set.
+In addition, the number of images combined is recorded under the keyword
+NCOMBINE, the exposure time is updated as the weighted average of the input
+exposure times, and any pixel list file created is recorded under the
+keyword BPM. The output pixel type is set by the parameter \fIouttype\fR.
+If left blank then the input datatype of highest precision is used.
+
+In addition to one or more output combined images there may also be a pixel
+list image containing the number of pixels rejected at each point in the
+output image, an image containing the sigmas of the pixels combined about
+the final output combined pixels, and a log file. The pixel list image is
+in the compact pixel list format which can be used as an image in other
+programs. The sigma computation is the standard deviation corrected for a
+finite population (the n/(n-1) factor) including weights if a weighted
+average is used.
+
+Other input/output parameters are \fIdelete\fR and \fIclobber\fR. The
+\fIdelete\fR parameter may be set to "yes" to delete the input images
+used in producing an output image after it has been created. This is
+useful for minimizing disk space, particularly with large
+sets of calibration images needed to achieve high statistical accuracy
+in the final calibration image.  The \fIclobber\fR parameter allows
+the output image names to be existing images which are overwritten (at
+the end of the operation).
+
+An outline of the steps taken by the program is given below and the
+following sections elaborate on the steps.
+
+.nf
+o Set the input image offsets and the final output image size.
+o Set the input image scales and weights
+o Write the log file output
+.fi
+
+For each output image line:
+
+.nf
+o Get input image lines that overlap the output image line
+o Reject masked pixels
+o Reject pixels outside the threshold limits
+o Reject pixels using the specified algorithm
+o Reject neighboring pixels along each line
+o Combine remaining pixels using the weighted average or median
+o Compute sigmas of remaining pixels about the combined values
+o Write the output image line, rejected pixel list, and sigmas
+.fi
+
+
+OFFSETS
+
+The images to be combined need not be of the same size or overlap. They
+do have to have the same dimensionality which will also be the dimensionality
+of the output image.  Images of any dimension supported by IRAF may be
+used. Note that if the \fIproject\fR flag is yes then the input images
+are the elements of the highest dimension; for example the planes of a
+three dimensional image.
+
+The overlap of the images is determined by a set of integer pixel offsets
+with an offset for each dimension of each input image. For example
+offsets of 0, 10, and 20 in the first dimension of three images will
+result in combining the three images with only the first image in the
+first 10 columns, the first two images in the next 10 columns, and
+all three images starting in the 21st column.  At the 21st output column
+the 21st column of the first image will be combined with the 11th column
+of the second image and the 1st column of the third image.
+
+The output image size is set by the maximum extent in each dimension
+of any input image after applying the offsets. In the above example if
+all the images have 100 columns then the output image will have 120
+columns corresponding to the 20 column offset in the third image.
+
+The input image offsets are set using the \fIoffsets\fR parameter.  There
+are three ways to specify the offsets. If the word "none" or the empty
+string "" are used then all offsets will be zero and all pixels with the
+same coordinates will be combined. The output image size will be equal to
+the biggest dimensions of the input images.
+
+If the input images have offsets in a regular grid or one wants to make
+an output image in which the input images are "mosaiced" together in
+a grid then the special offset string beginning with the word "grid"
+is used. The format is
+
+.nf
+ grid [n1] [s1] [n2] [s2] ...
+.fi
+
+where ni is the number of images in dimension i and si is the step in
+dimension i. For example "grid 5 100 5 100" specifies a 5x5 grid with
+origins offset by 100 pixels.  Note that one must ensure that the input
+images are specified in the correct order. This may best be accomplished
+using a "@" list. One useful application of the grid is to make a
+nonoverlapping mosaic of a number of images for display purposes. Suppose
+there are 16 images which are 100x100. The offset string "grid 4 101 4
+101" will produce a mosaic with a one pixel border having the value set
+by the \fIblank\fR parameter between the images.
+
+The offsets may be defined in a file by specifying the file name
+in the \fIoffsets\fR parameter.  (Note that the special file name STDIN
+may be used to type in the values terminated by the end-of-file
+character). The file consists of a line for each input image. The lines
+must be in the same order as the input images and so an "@" list may
+be useful. The lines consist of whitespace separated offsets one for
+each dimension of the images. In the first example cited above the
+offset file might contain:
+
+.nf
+ 0 0
+ 10 0
+ 20 0
+.fi
+
+where we assume the second dimension has zero offsets.
+
+The offsets need not have zero for one of the images. The offsets may
+include negative values or refer to some arbitrary common point.
+When the offsets are read by the program it will find the minimum
+value in each dimension and subtract it from all the other offsets
+in that dimension. The above example could also be specified as:
+
+.nf
+ 225 15
+ 235 15
+ 245 15
+.fi
+
+There may be cases where one doesn't want the minimum offsets reset
+to zero. If all the offsets are positive and the comment "# Absolute"
+appears in the offset file then the images will be combined with
+blank values between the first output pixel and the first overlapping
+input pixel. Continuing with the above example, the file
+
+.nf
+ # Absolute
+ 10 10
+ 20 10
+ 30 10
+.fi
+
+will have the first pixel of the first image in the 11th pixel of the
+output image. Note that there is no way to "pad" the other side of
+the output image.
+
+
+SCALES AND WEIGHTS
+
+In order to combine images with rejection of pixels based on deviations
+from some average or median they must be scaled to a common level. There
+are two types of scaling available, a multiplicative intensity scale and an
+additive zero point shift. The intensity scaling is defined by the
+\fIscale\fR parameter and the zero point shift by the \fIzero\fR
+parameter. These parameters may take the values "none" for no scaling,
+"mode", "median", or "mean" to scale by statistics of the image pixels,
+"exposure" (for intensity scaling only) to scale by the exposure time
+keyword in the image header, any other image header keyword specified by
+the keyword name prefixed by the character '!', and the name of a file
+containing the scale factors for the input image prefixed by the
+character '@'.
+
+Examples of the possible parameter values are shown below where
+"myval" is the name of an image header keyword and "scales.dat" is
+a text file containing a list of scale factors.
+
+.nf
+ scale = none No scaling
+ zero = mean Intensity offset by the mean
+ scale = exposure Scale by the exposure time
+ zero = !myval Intensity offset by an image keyword
+ scale = @scales.dat Scales specified in a file
+.fi
+
+The image statistics factors are computed by sampling a uniform grid
+of points with the smallest grid step that yields less than 10000
+pixels; sampling is used to reduce the time needed to compute the statistics.
+If one wants to restrict the sampling to a region of the image the
+\fIstatsec\fR parameter is used. This parameter has the following
+syntax:
+
+.nf
+ [input|output|overlap] [image section]
+.fi
+
+The initial modifier defaults to "input" if absent. The modifiers are useful
+if the input images have offsets. In that case "input" specifies
+that the image section refers to each input image, "output" specifies
+that the image section refers to the output image coordinates, and
+"overlap" specifies the mutually overlapping region of the input images.
+In the latter case an image section is ignored.
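+
+For example (the section is illustrative), the statistics may be
+restricted to a central region of each input image or to the region
+common to all of the offset images with
+
+.nf
+    statsec = "[101:400,101:400]"
+    statsec = "overlap"
+.fi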
+
+The statistics are as indicated by their names. In particular, the
+mode is a true mode using a bin size which is a fraction of the
+range of the pixels and is not based on a relationship between the
+mode, median, and mean. Also masked pixels are excluded from the
+computations as well as during the rejection and combining operations.
+
+The "exposure" option in the intensity scaling uses the exposure time
+from the image header. If one wants to use a nonexposure time image
+header keyword the !<keyword> syntax is available.
+
+If both an intensity scaling and zero point shift are selected the
+multiplicative scaling is done first. Use of both makes sense
+if the intensity scaling is the exposure time to correct for
+different exposure times and then the zero point shift allows for
+sky brightness changes.
+
+The image statistics and scale factors are recorded in the log file
+unless they are all equal, which is equivalent to no scaling. The
+intensity scale factors are normalized to a unit mean and the zero
+point shifts are adjusted to a zero mean.
+
+Scaling affects not only the mean values between images but also the
+relative pixel uncertainties. For example scaling an image by a
+factor of 0.5 will reduce the effective noise sigma of the image
+at each pixel by the square root of 0.5. Changes in the zero
+point also change the noise sigma if the image noise characteristics
+are Poissonian. In the various rejection algorithms based on
+identifying a noise sigma and clipping large deviations relative to
+the scaled median or mean, one may need to account for the scaling induced
+changes in the image noise characteristics.
+
+In those algorithms it is possible to eliminate the "sigma correction"
+while still using scaling. The reasons this might be desirable are 1) if
+the scalings are similar the corrections in computing the mean or median
+are important but the sigma corrections may not be important and 2) the
+image statistics may not be Poissonian, either inherently or because the
+images have been processed in some way that changes the statistics. In the
+first case, because computing square roots and making corrections to every
+pixel during the iterative rejection operation may be a significant
+computational speed limit, the parameter \fIsigscale\fR selects how
+dissimilar the scalings must be to require the sigma corrections. This
+parameter is a fractional deviation which, since the scale factors are
+normalized to unity, is the actual minimum deviation in the scale factors.
+For the zero point shifts the shifts are normalized by the mean shift
+before adjusting the shifts to a zero mean. To always use sigma scaling
+corrections the parameter is set to zero and to eliminate the correction in
+all cases it is set to a very large number.
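+
+For example (the values are only illustrative), the corrections may be
+forced on or effectively disabled with
+
+.nf
+    sigscale = 0.	# always make the sigma corrections
+    sigscale = 100.	# skip the corrections in nearly all cases
+.fi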
+
+If the final combining operation is "average" then the images may be
+weighted during the averaging. The weights are specified in the
+same way as the scale factors. In addition
+the NCOMBINE keyword, if present, will be used in the weights.
+The weights, scaled to a unit sum, are printed in the log output.
+
+The weights are only used for the final weighted average and sigma image
+output. They are not used to form averages in the various rejection
+algorithms.
+
+
+PIXEL MASKS
+
+A pixel mask is a type of IRAF file having the extension ".pl" which
+identifies an integer value with each pixel of the images to which it is
+applied. The integer values may denote regions, a weight, a good or bad
+flag, or some other type of integer or integer bit flag. In the common
+case where many values are the same this file is compacted to be small and
+efficient to use. It is also most compact and efficient if the majority of
+the pixels have a zero mask value so frequently zero is the value for good
+pixels. Note that these files, while not stored as a strict pixel array,
+may be treated as images in programs. This means they may be created by
+programs such as \fBmkpattern\fR, edited by \fBimedit\fR, examined by
+\fBimexamine\fR, operated upon by \fBimarith\fR, graphed by \fBimplot\fR,
+and displayed by \fBdisplay\fR.
+
+At the time of introducing this task, generic tools for creating
+pixel masks have yet to be written. There are two ways to create a
+mask in V2.10. First if a regular integer image can be created
+then it can be converted to pixel list format with \fBimcopy\fR:
+
+.nf
+ cl> imcopy template plfile.pl
+.fi
+
+by specifically using the .pl extension on output. Other programs that
+can create integer images (such as \fBmkpattern\fR or \fBccdred.badpiximage\fR)
+can create the pixel list file directly by simply using the ".pl"
+extension in the output image name.
+
+To use pixel masks with \fBcombine\fR one must associate a pixel
+mask file with an image by entering the pixel list file name in the
+image header under the keyword BPM (bad pixel mask). This can be
+done with \fBhedit\fR. Note that the same pixel mask may be associated
+with more than one image as might be the case if the mask represents
+defects in the detector used to obtain the images.
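+
+For example (the mask and image names are hypothetical), a mask may be
+associated with a set of images by
+
+.nf
+    cl> hedit obj*.imh BPM ccdmask.pl add+ ver-
+.fi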
+
+If a pixel mask is associated with an image the mask is used when the
+\fImasktype\fR parameter is set to a value other than "none". Note that
+when it is set to "none" mask information is not used even if it exists for
+the image. The values of \fImasktype\fR which apply masks are "goodvalue",
+"badvalue", "goodbits", and "badbits". They are used in conjunction with
+the \fImaskvalue\fR parameter. When the mask type is "goodvalue" the
+pixels with mask values matching the specified value are included in
+combining and all others are rejected. Similarly, for a mask type of
+"badvalue" the pixels with mask values matching the specified value are
+rejected and all others are accepted. The bit types are useful for
+selecting a combination of attributes in a mask consisting of bit flags.
+The mask value is still an integer but is interpreted by bitwise comparison
+with the values in the mask file.
+
+If a mask operation is specified and an image has no mask image associated
+with it then the mask values are taken as all zeros. In those cases be
+careful that zero is an accepted value; otherwise the entire image will be
+rejected.
+
+
+THRESHOLD REJECTION
+
+In addition to rejecting masked pixels, pixels in the unscaled input
+images which are below or above the thresholds given by the parameters
+\fIlthreshold\fR and \fIhthreshold\fR are rejected. Values of INDEF
+mean that no threshold value is applied. Threshold rejection may be used
+to exclude very bad pixel values or as an alternative way of masking
+images. In the latter case one can use a task like \fBimedit\fR
+or \fBimreplace\fR to set parts of the images to be excluded to some
+very low or high magic value.
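+
+For example (the magic value and image section are illustrative), a bad
+region might be set to a very low value with \fBimreplace\fR and then
+excluded with a low threshold:
+
+.nf
+    cl> imreplace obj010[*,100:102] -9999.
+    cl> combine obj* avg lthreshold=-1000.
+.fi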
+
+
+REJECTION ALGORITHMS
+
+The \fIreject\fR parameter selects a type of rejection operation to
+be applied to pixels not masked or thresholded. If no rejection
+operation is desired the value "none" is specified.
+
+MINMAX
+.in 4
+A specified fraction of the highest and lowest pixels are rejected.
+The fraction is specified as the number of high and low pixels, the
+\fInhigh\fR and \fInlow\fR parameters, when data from all the input images
+are used.  If pixels have been rejected by offsetting, masking, or
+thresholding then a matching fraction of the remaining pixels, truncated
+to an integer, are used. Thus,
+
+.nf
+ nl = n * nlow/nimages + 0.001
+ nh = n * nhigh/nimages + 0.001
+.fi
+
+where n is the number of pixels surviving offsetting, masking, and
+thresholding, nimages is the number of input images, nlow and nhigh
+are task parameters and nl and nh are the final number of low and
+high pixels rejected by the algorithm. The factor of 0.001 is to
+adjust for rounding of the ratio.
+
+As an example with 10 input images and specifying one low and two high
+pixels to be rejected the fractions to be rejected are 0.1 and 0.2
+and the number rejected as a function of n is:
+
+.nf
+      n    0  1  2  3  4  5  6  7  8  9  10
+      nl   0  0  0  0  0  0  0  0  0  0   1
+      nh   0  0  0  0  0  1  1  1  1  1   2
+.fi
+
+.in -4
+CCDCLIP
+.in 4
+If the images are obtained using a CCD with known read out noise, gain, and
+sensitivity noise parameters and they have been processed to preserve the
+relation between data values and photons or electrons then the noise
+characteristics of the images are well defined. In this model the sigma in
+data values at a pixel with true value <I>, as approximated by the median
+or average with the lowest and highest value excluded, is given by:
+
+.nf
+ sigma = ((rn / g) ** 2 + <I> / g + (s * <I>) ** 2) ** 1/2
+.fi
+
+where rn is the read out noise in electrons, g is the gain in
+electrons per data value, s is a sensitivity noise given as a fraction,
+and ** is the exponentiation operator. Often the sensitivity noise,
+due to uncertainties in the pixel sensitivities (for example from the
+flat field), is not known in which case a value of zero can be used.
+See the task \fBstsdas.wfpc.noisemodel\fR for a way to determine
+these values (though that task expresses the read out noise in data
+numbers and the sensitivity noise parameter as a percentage).
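+
+As a purely illustrative example, for rn = 5 electrons, g = 2.5
+electrons per data number, s = 0.01, and a true value <I> = 200 data
+numbers the expected sigma is
+
+.nf
+    sigma = ((5 / 2.5) ** 2 + 200 / 2.5 + (0.01 * 200) ** 2) ** 1/2
+          = (4 + 80 + 4) ** 1/2 = 9.4 data numbers
+.fi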
+
+The read out noise is specified by the \fIrdnoise\fR parameter. The value
+may be a numeric value to be applied to all the input images or an image
+header keyword containing the value for each image. Similarly, the
+parameter \fIgain\fR specifies the gain as either a value or image header
+keyword and the parameter \fIsnoise\fR specifies the sensitivity
+noise parameter as either a value or image header keyword.
+
+The algorithm operates on each output pixel independently. It starts by
+taking the median or unweighted average (excluding the minimum and maximum)
+of the unrejected pixels provided there are at least two input pixels. The
+expected sigma is computed from the CCD noise parameters and pixels more
+than \fIlsigma\fR times this sigma below or \fIhsigma\fR times this sigma
+above the median or average are rejected. The process is then iterated
+until no further pixels are rejected. If the average is used as the
+estimator of the true value then after the first round of rejections the
+highest and lowest values are no longer excluded. Note that it is possible
+to reject all pixels if the average is used and is sufficiently skewed by
+bad pixels such as cosmic rays.
+
+If there are different CCD noise parameters for the input images
+(as might occur using the image header keyword specification) then
+the sigmas are computed for each pixel from each image using the
+same estimated true value.
+
+If the images are scaled and shifted and the \fIsigscale\fR threshold
+is exceeded then a sigma is computed for each pixel based on the
+image scale parameters; i.e. the median or average is scaled to that of the
+original image before computing the sigma and residuals.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+This is the best clipping algorithm to use if the CCD noise parameters are
+adequately known. The parameters affecting this algorithm are \fIreject\fR
+to select this algorithm, \fImclip\fR to select the median or average for
+the center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, the CCD noise parameters \fIrdnoise, gain\fR and \fIsnoise\fR,
+\fIlsigma\fR and \fIhsigma\fR to select the clipping thresholds,
+and \fIsigscale\fR to set the threshold for making corrections to the sigma
+calculation for different image scale factors.
+
+.in -4
+CRREJECT
+.in 4
+This algorithm is identical to "ccdclip" except that only pixels above
+the average are rejected based on the \fIhsigma\fR parameter. This
+is appropriate for rejecting cosmic ray events and works even with
+two images.
+
+.in -4
+SIGCLIP
+.in 4
+The sigma clipping algorithm computes at each output pixel the median or
+average excluding the high and low values and the sigma about this
+estimate. There must be at least three input pixels, though for this method
+to work well there should be at least 10 pixels. Values deviating by more
+than the specified sigma threshold factors are rejected. These steps are
+repeated, except that after the first time the average includes all values,
+until no further pixels are rejected or there are fewer than three pixels.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+The parameters affecting this algorithm are \fIreject\fR to select
+this algorithm, \fImclip\fR to select the median or average for the
+center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, \fIlsigma\fR and \fIhsigma\fR to select the
+clipping thresholds, and \fIsigscale\fR to set the threshold for
+making corrections to the sigma calculation for different image scale
+factors.
+
+.in -4
+AVSIGCLIP
+.in 4
+The averaged sigma clipping algorithm assumes that the sigma about the
+median or mean (average excluding the low and high values) is proportional
+to the square root of the median or mean at each point. This is
+described by the equation:
+
+.nf
+ sigma(column,line) = sqrt (gain(line) * signal(column,line))
+.fi
+
+where the \fIestimated\fR signal is the mean or median (hopefully excluding
+any bad pixels) and the gain is the \fIestimated\fR proportionality
+constant having units of photons/data number.
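+
+As an illustration (the numbers are not taken from real data), an
+estimated proportionality constant of 2 photons per data number at a
+point where the median is 450 data numbers gives
+
+.nf
+    sigma = sqrt (2 * 450) = 30 data numbers
+.fi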
+
+This noise model is valid for images whose values are proportional to the
+number of photons recorded. In effect this algorithm estimates a
+detector gain for each line with no read out noise component when
+information about the detector noise parameters are not known or
+available. The gain proportionality factor is computed
+independently for each output line by averaging the square of the residuals
+(at points having three or more input values) scaled by the median or
+mean. In theory the proportionality should be the same for all rows but
+because of the estimation process it will vary somewhat.
+
+Once the proportionality factor is determined, deviant pixels exceeding the
+specified thresholds are rejected at each point by estimating the sigma
+from the median or mean. If any values are rejected the median or mean
+(this time not excluding the extreme values) is recomputed and further
+values rejected. This is repeated until there are no further pixels
+rejected or the number of remaining input values falls below three. Note
+that the proportionality factor is not recomputed after rejections.
+
+If the images are scaled differently and the sigma scaling correction
+threshold is exceeded then a correction is made in the sigma
+calculations for these differences, again under the assumption that
+the noise in an image scales as the square root of the mean intensity.
+
+After rejection the number of retained pixels is checked against the
+\fInkeep\fR parameter. If there are fewer pixels retained than specified
+by this parameter the pixels with the smallest residuals in absolute
+value are added back. If there is more than one pixel with the same
+absolute residual (for example the two pixels about an average
+or median of two will have the same residuals) they are all added
+back even if this means more than \fInkeep\fR pixels are retained.
+Note that the \fInkeep\fR parameter only applies to the pixels used
+by the clipping rejection algorithm and does not apply to threshold
+or bad pixel mask rejection.
+
+This algorithm works well for even a few input images. It works better if
+the median is used though this is slower than using the average. Note that
+if the images have a known read out noise and gain (the proportionality
+factor above) then the "ccdclip" algorithm is superior. The two algorithms
+are related in that the average sigma proportionality factor is an estimate
+of the gain.
+
+The parameters affecting this algorithm are \fIreject\fR to select
+this algorithm, \fImclip\fR to select the median or average for the
+center of the clipping, \fInkeep\fR to limit the number of pixels
+rejected, \fIlsigma\fR and \fIhsigma\fR to select the
+clipping thresholds, and \fIsigscale\fR to set the threshold for
+making corrections to the sigma calculation for different image scale
+factors.
+
+.in -4
+PCLIP
+.in 4
+The percentile clipping algorithm is similar to sigma clipping using the
+median as the center of the distribution except that, instead of computing
+the sigma of the pixels from the CCD noise parameters or from the data
+values, the width of the distribution is characterized by the difference
+between the median value and a specified "percentile" pixel value. This
+width is then multiplied by the scale factors \fIlsigma\fR and \fIhsigma\fR
+to define the clipping thresholds above and below the median. The clipping
+is not iterated.
+
+The pixel values at each output point are ordered in magnitude and the
+median is determined. In the case of an even number of pixels the average
+of the two middle values is used as the median value and the lower or upper
+of the two is taken as the median pixel when counting from the median to
+the percentile pixel.  The parameter \fIpclip\fR selects the
+percentile pixel as the number (if the absolute value is greater
+than unity) or fraction of the pixels from the median in the ordered set.
+The direction of the percentile pixel from the median is set by the sign of
+the \fIpclip\fR parameter with a negative value signifying pixels with
+values less than the median. Fractional values are internally converted to
+the appropriate number of pixels for the number of input images. A minimum
+of one pixel and a maximum corresponding to the extreme pixels from the
+median are enforced. The value used is reported in the log output. Note
+that the same percentile pixel is used even if pixels have been rejected by
+offsetting, masking, or thresholding; for example, if the 3rd pixel below
+the median is specified then the 3rd pixel will be used whether there are
+10 pixels or 5 pixels remaining after the preliminary steps.
+
+Some examples help clarify the definition of the percentile pixel. In the
+examples assume 10 pixels. The median is then the average of the
+5th and 6th pixels. A \fIpclip\fR value of 2 selects the 2nd pixel
+above the median (6th) pixel which is the 8th pixel. A \fIpclip\fR
+value of -0.5 selects the point halfway between the median and the
+lowest pixel. In this case there are 4 pixels below the median,
+half of that is 2 pixels which makes the percentile pixel the 3rd pixel.
+
+The percentile clipping algorithm is most useful for clipping small
+excursions, such as the wings of bright objects when combining
+disregistered observations for a sky flat field, that are missed when using
+the pixel values to compute a sigma. It is not as powerful, however, as
+using the CCD noise parameters (provided they are accurately known) to clip
+about the median.
+
+The parameters affecting this algorithm are \fIreject\fR to select this
+algorithm, \fIpclip\fR to select the percentile pixel, \fInkeep\fR to limit
+the number of pixels rejected, and \fIlsigma\fR and \fIhsigma\fR to select
+the clipping thresholds.
+
+.in -4
+GROW REJECTION
+
+Neighbors of pixels rejected by the rejection algorithms along image lines
+may also be rejected. The number of neighbors to be rejected on either
+side is specified by the \fIgrow\fR parameter. The rejection only
+applies to neighbors along each image line. This is because the
+task operates independently on each image line and does not have the
+ability to go back to previous lines or maintain a list of rejected
+pixels to later lines.
+
+This rejection step is also checked against the \fInkeep\fR parameter
+and only as many pixels as would not violate this parameter are
+rejected.  Unlike its application in the rejection algorithms at
+this stage there is no checking on the magnitude of the residuals
+and the pixels retained which would otherwise be rejected are randomly
+selected.
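+
+For example (the parameter values are chosen only for illustration), to
+also reject one pixel on either side of each clipped pixel along the
+image lines:
+
+.nf
+    cl> combine obj* avg reject=sigclip grow=1
+.fi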
+
+
+COMBINING
+
+After all the steps of offsetting the input images, masking pixels,
+threshold rejection, scaling, and applying a rejection algorithm the
+remaining pixels are combined and output. The pixels may be combined
+by computing the median or by computing a weighted average.
+
+
+SIGMA OUTPUT
+
+In addition to the combined image an optional sigma image may be
+produced. The sigma computed is the standard deviation, corrected for a
+finite population by a factor of n/(n-1), of the unrejected input pixel
+values about the output combined pixel values.
+.ih
+EXAMPLES
+1. To average and median images without any other features:
+
+.nf
+ cl> combine obj* avg combine=average reject=none
+ cl> combine obj* med combine=median reject=none
+.fi
+
+2. To reject cosmic rays:
+
+.nf
+    cl> combine obs1,obs2 Obs reject=crreject rdnoise=5.1 gain=4.3
+.fi
+
+3. To make a grid for display purposes with 21 64x64 images:
+
+.nf
+ cl> combine @list grid offset="grid 5 65 5 65"
+.fi
+
+4. To apply a mask image with good pixels marked with a zero value and
+ bad pixels marked with a value of one:
+
+.nf
+ cl> hedit ims* bpm badpix.pl add+ ver-
+ cl> combine ims* final combine=median masktype=goodval
+.fi
+
+5. To scale image by the exposure time and then adjust for varying
+ sky brightness and make a weighted average:
+
+.nf
+ cl> combine obj* avsig combine=average reject=avsig \
+ >>> scale=exp zero=mode weight=exp expname=exptime
+.fi
+.ih
+TIME REQUIREMENTS
+The following times were obtained with a Sun 4/470.  The tests combine
+1000x200 images consisting of Poisson noise and cosmic rays generated
+with the \fBartdata\fR package. The times, especially the total time,
+are approximate and depend on user loads.
+
+.nf
+IMAGES: Number of images (1000x200) and datatype (R=real, S=short)
+COMBINE: Combine option
+REJECT: Rejection option with grow = 0
+ minmax: nlow = 1, nhigh = 1
+ ccdclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ sigclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ avsigclip: lsigma = 3., hsigma = 3, sigscale = 0.
+ pclip: lsigma = 3., hsigma = 3, pclip = -0.5
+ /a: mclip = no (clip about the average)
+ /m: mclip = yes (clip about the median)
+O M T S: Features used (Y=yes, N=no)
+O: offset = "grid 5 10 2 10"
+M: masktype = goodval, maskval = 0
+ Pixel mask has 2 bad lines and 20 bad columns
+T: lthreshold = INDEF, hthreshold = 1100.
+S: scale = mode, zero = none, weight = mode
+TIME: cpu time in seconds, total time in minutes and seconds
+
+
+IMAGES COMBINE REJECT O M T S TIME
+
+ 10R average none N N N N 1.3 0:08
+ 10R average minmax N N N N 4.3 0:10
+ 10R average pclip N N N N 17.9 0:32
+ 10R average ccdclip/a N N N N 11.6 0:21
+ 10R average crreject/a N N N N 11.4 0:21
+ 10R average sigclip/a N N N N 13.6 0:29
+ 10R average avsigclip/a N N N N 15.9 0:35
+ 10R average ccdclip/m N N N N 16.9 0:32
+ 10R average crreject/m N N N N 17.0 0:28
+ 10R average sigclip/m N N N N 19.6 0:42
+ 10R average avsigclip/m N N N N 20.6 0:43
+
+ 10R median none N N N N 6.8 0:17
+ 10R median minmax N N N N 7.8 0:15
+ 10R median pclip N N N N 16.9 1:00
+ 10R median ccdclip/a N N N N 18.0 0:34
+ 10R median crreject/a N N N N 17.7 0:30
+ 10R median sigclip/a N N N N 21.1 1:13
+ 10R median avsigclip/a N N N N 23.1 0:41
+ 10R median ccdclip/m N N N N 16.1 0:27
+ 10R median crreject/m N N N N 16.0 0:27
+ 10R median sigclip/m N N N N 18.1 0:29
+ 10R median avsigclip/m N N N N 19.6 0:32
+
+ 10R average none N N N Y 6.1 0:36
+ 10R median none N N N Y 10.4 0:49
+ 10R median pclip N N N Y 20.4 1:10
+ 10R median ccdclip/m N N N Y 19.5 0:36
+ 10R median avsigclip/m N N N Y 23.0 1:06
+
+ 10R average none N Y N N 3.5 0:12
+ 10R median none N Y N N 8.9 0:21
+ 10R median pclip N Y N N 19.9 0:45
+ 10R median ccdclip/m N Y N N 18.0 0:44
+ 10R median avsigclip/m N Y N N 20.9 0:28
+
+ 10R average none Y N N N 4.3 0:13
+ 10R median none Y N N N 9.6 0:21
+ 10R median pclip Y N N N 21.8 0:54
+ 10R median ccdclip/m Y N N N 19.3 0:44
+ 10R median avsigclip/m Y N N N 22.8 0:51
+
+ 10R average none Y Y Y Y 10.8 0:22
+ 10R median none Y Y Y Y 16.1 0:28
+ 10R median pclip Y Y Y Y 27.4 0:42
+ 10R median ccdclip/m Y Y Y Y 25.5 0:39
+ 10R median avsigclip/m Y Y Y Y 28.9 0:44
+
+ 10S average none N N N N 2.2 0:06
+ 10S average minmax N N N N 4.6 0:12
+ 10S average pclip N N N N 18.1 0:33
+.fi
+.ih
+REVISIONS
+.ls COMBINE V2.10.2
+The weighting was changed from using the square root of the exposure time
+or image statistics to using the values directly. This corresponds
+to variance weighting. Other options for specifying the scaling and
+weighting factors were added; namely from a file or from a different
+image header keyword. The \fInkeep\fR parameter was added to allow
+controlling the maximum number of pixels to be rejected by the clipping
+algorithms. The \fIsnoise\fR parameter was added to include a sensitivity
+or scale noise component to the noise model. Errors will now delete
+the output images.
+.le
+.ls COMBINE V2.10
+This task was greatly revised to provide many new features. These features
+are:
+
+.nf
+ o Bad pixel masks
+ o Combining offset and different size images
+ o Blank value for missing data
+ o Combining across the highest dimension (the project option)
+ o Separating threshold rejection, the rejection algorithms,
+ and the final combining statistic
+ o New CCDCLIP, CRREJECT, and PCLIP algorithms
+ o Rejection now may reject more than one pixel per output pixel
+ o Choice of a central median or average for clipping
+ o Choice of final combining operation
+ o Simultaneous multiplicative and zero point scaling
+.fi
+.le
+.ih
+LIMITATIONS
+Though this task is essentially not limited by the physical
+limits of the host (number of open files, amount of memory), there is a
+software limit in the IRAF virtual operating system of about 120 separate
+images which may be combined. To combine more images one may combine smaller
+groups and then combine those or one may stack the images into a higher
+dimensional image using \fBimstack\fR and use the \fIproject\fR option.
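+
+As an illustrative sketch only (the image names are hypothetical and the
+usual \fBimstack\fR input/output calling sequence is assumed), a large set
+of images might be stacked and then combined along the stacking dimension
+with the \fIproject\fR option:
+
+.nf
+    cl> imstack ims* istack
+    cl> combine istack final combine=median project+
+.fi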
+.ih
+SEE ALSO
+image.imcombine, instruments, ccdtypes, icfit, ccdred, guide, darkcombine,
+flatcombine, zerocombine, onedspec.scombine, wfpc.noisemodel
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/contents.ms b/noao/imred/quadred/src/quad/doc/contents.ms
new file mode 100644
index 00000000..8ba2624a
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/contents.ms
@@ -0,0 +1,34 @@
+.sp 1i
+.ps +2
+.ft B
+.ce
+Contents
+.sp 3
+.ps -2
+.ft R
+.sp
+1.\h'|0.4i'\fBIntroduction\fP\l'|5.6i.'\0\01
+.sp
+2.\h'|0.4i'\fBGetting Started\fP\l'|5.6i.'\0\02
+.sp
+3.\h'|0.4i'\fBProcessing Your Data\fP\l'|5.6i.'\0\05
+.br
+\h'|0.4i'3.1.\h'|0.9i'Combining Calibration Images\l'|5.6i.'\0\06
+.br
+\h'|0.4i'3.2.\h'|0.9i'Calibrations and Corrections\l'|5.6i.'\0\07
+.sp
+4.\h'|0.4i'\fBSpecial Processing Operations\fP\l'|5.6i.'\0\08
+.br
+\h'|0.4i'4.1.\h'|0.9i'Spectroscopic Flat Fields\l'|5.6i.'\0\08
+.br
+\h'|0.4i'4.2.\h'|0.9i'Illumination Corrections\l'|5.6i.'\0\09
+.br
+\h'|0.4i'4.3.\h'|0.9i'Sky Flat Fields\l'|5.6i.'\010
+.br
+\h'|0.4i'4.4.\h'|0.9i'Illumination Corrected Flat Fields\l'|5.6i.'\010
+.br
+\h'|0.4i'4.5.\h'|0.9i'Fringe Corrections\l'|5.6i.'\010
+.sp
+5.\h'|0.4i'\fBSummary\fP\l'|5.6i.'\011
+.sp
+\h'|0.4i'\fBReferences\fP\l'|5.6i.'\011
diff --git a/noao/imred/quadred/src/quad/doc/cosmicrays.hlp b/noao/imred/quadred/src/quad/doc/cosmicrays.hlp
new file mode 100644
index 00000000..c50a129f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/cosmicrays.hlp
@@ -0,0 +1,220 @@
+.help cosmicrays Dec87 noao.imred.ccdred
+.ih
+NAME
+cosmicrays -- Detect and replace cosmic rays
+.ih
+USAGE
+cosmicrays input output
+.ih
+PARAMETERS
+.ls input
+List of input images in which to detect cosmic rays.
+.le
+.ls output
+List of output images in which the detected cosmic rays will be replaced
+by an average of neighboring pixels. If the output image name differs
+from the input image name then a copy of the input image is made with
+the detected cosmic rays replaced. If no output images are specified
+then the input images are modified in place. In place modification of
+an input image also occurs when the output image name is the same as
+the input image name.
+.le
+.ls badpix = ""
+List of bad pixel files to be created, one for each input image. If no
+file names are given then no bad pixel file is created. The bad pixel
+file is a simple list of pixel coordinates for each replaced cosmic ray.
+This file may be used in conjunction with \fBbadpiximage\fR to create
+a mask image.
+.le
+.ls ccdtype = ""
+If specified only the input images of the desired CCD image type will be
+selected.
+.le
+.ls threshold = 25.
+Detection threshold above the mean of the surrounding pixels for cosmic
+rays. The threshold will depend on the noise characteristics of the
+image and how weak the cosmic rays may be for detection. A typical value
+is 5 or more times the sigma of the background.
+.le
+.ls fluxratio = 2.
+The ratio (as a percent) of the mean neighboring pixel flux to the candidate
+cosmic ray pixel for rejection. The value depends on the seeing and the
+characteristics of the cosmic rays. Typical values are in the range
+2 to 10 percent.
+.le
+.ls npasses = 5
+Number of cosmic ray detection passes. Since only the locally strongest
+pixel is considered a cosmic ray, multiple detection passes are needed to
+detect and replace multiple pixel cosmic ray events.
+.le
+.ls window = 5
+Size of cosmic ray detection window. A square window of either 5 by 5 or
+7 by 7 is used to detect cosmic rays. The smaller window allows detection
+in the presence of greater background gradients but is less sensitive at
+discriminating multiple event cosmic rays from stars. It is also marginally
+faster.
+.le
+.ls interactive = yes
+Examine parameters interactively? A plot of the mean flux within the
+detection window (x100) vs the flux ratio (x100) is plotted and the user may
+set the flux ratio threshold, delete and undelete specific events, and
+examine specific events. This is useful for new data in which one is
+uncertain of an appropriate flux ratio threshold. Once determined the
+task need not be used interactively.
+.le
+.ls answer
+This parameter is used for interactive queries when processing a list of
+images. The responses may be "no", "yes", "NO", or "YES". The upper case
+responses permanently enable or disable the interactive review while
+the lower case responses allow selective examination of certain input
+images.
+.le
+.ih
+OTHER PARAMETERS
+There are other parameters which may be defined by the package, as is the
+case with \fBccdred\fR, or as part of the task, as is the case with the
+standalone version in the \fBgeneric\fR package.
+
+.ls verbose
+If yes then a time stamped log of the operation is printed on the standard
+output.
+.le
+.ls logfile
+If a log file is specified then a time stamped log of the operation is
+recorded.
+.le
+.ls plotfile
+If a plot file is specified then the graph of the flux ratio (x100) vs
+the mean flux (x100) is recorded as metacode. This may be spooled or examined
+later.
+.le
+.ls graphics = "stdgraph"
+Interactive graphic output device for interactive examination of the
+detection parameters.
+.le
+.ls cursor = ""
+Interactive graphics cursor input. If null the graphics display cursor
+is used, otherwise a file containing cursor input may be specified.
+.le
+.ls instrument
+The \fBccdred\fR instrument file is used for mapping header keywords and
+CCD image types.
+.le
+.ih
+CURSOR COMMANDS
+
+.nf
+d Mark candidate for replacement (applies to '+' points)
+q Quit and replace the selected pixels
+r Redraw the graph
+s Make a surface plot for the candidate nearest the cursor
+t Set the flux ratio threshold at the y cursor position
+u Mark candidate to not be replaced (applies to 'x' points)
+w Adjust the graph window (see \fBgtools\fR)
+.fi
+
+There are no colon commands except those for the windowing options (type
+:/help or see \fBgtools\fR).
+.ih
+DESCRIPTION
+Cosmic ray events in each input image are detected and replaced by the
+average of the four neighbors. The replacement may be performed
+directly on the input image if no output image is specified or if the
+output image name is the same as the input image name. If a new image
+is created it is a copy of the input image except for the replaced
+pixels. The processing keyword CRCOR is added to the output image
+header. Optional output includes a log file to which a processing log
+is appended, a verbose log output to the standard output (the same as
+that in the log file), a plot file showing the parameters of the
+detected cosmic ray candidates and the flux ratio threshold used, and a
+bad pixel file containing the coordinates of the replaced pixels. The
+bad pixel file may be used for plotting purposes or to create a mask
+image for display and analysis using the task \fBbadpiximage\fR. This
+bad pixel file will be replaced by the IRAF bad pixel facility when it
+becomes available. If one wants more than a simple mask image then by
+creating a different output image a difference image between the
+original and the modified image may be made using \fBimarith\fR.
+
+This task may be applied to an image previously processed to detect
+additional cosmic rays. A warning will be given (because of the
+CRCOR header parameter) and the previous processing header keyword will
+be overwritten.
+
+The cosmic ray detection algorithm consists of the following steps.
+First a pixel must be the brightest pixel within the specified
+detection window (either 5x5 or 7x7). The mean flux in the surrounding
+pixels with the second brightest pixel excluded (which may also be a
+cosmic ray event) is computed and the candidate pixel must exceed this
+mean by the amount specified by the parameter \fIthreshold\fR. A plane
+is fit to the border pixels of the window and the fitted background is
+subtracted. The mean flux (now background subtracted) and the ratio of
+this mean to the cosmic ray candidate (the brightest pixel) are
+computed. The mean flux (x100) and the ratio (x100) are recorded for
+interactive examination if desired.
+
+Once the list of cosmic ray candidates has been created and a threshold
+for the flux ratio established (either by the parameter \fIfluxratio\fR
+or modified interactively) the pixels with ratios below the threshold
+are replaced in the output by the average of the four neighboring pixels
+(with the second strongest pixel in the detection window excluded if it is
+one of these pixels).  Additional pixels may then be detected and replaced
+in further passes as specified by the parameter \fInpasses\fR. Note that
+only pixels in the vicinity of replaced pixels need be considered in
+further passes.
+
+The division between the peaks of real objects and cosmic rays is made
+based on the flux ratio between the mean flux (excluding the center
+pixel and the second strongest pixel) and the candidate pixel. This
+threshold depends on the point spread function and the distribution of
+multiple cosmic ray events and any additional neighboring light caused
+by the events. This threshold is not strongly coupled to small changes
+in the data so that once it is set for a new type of image data it may
+be used for similar images. To set it initially one may examine the
+scatter plot of the flux ratio as a function of the mean flux. This
+may be done interactively or from the optional plot file produced.
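+
+Once a suitable threshold is known the task need not be run interactively.
+As an illustrative sketch only (the parameter values shown are arbitrary,
+not recommended defaults):
+
+.nf
+    cl> cosmicrays obj* new//obj* threshold=30. fluxratio=4. \
+    >>> npasses=7 interactive-
+.fi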
+
+When the interactive flag is set the user is queried for each image.
+Responses may be made for specific images or for all images by using
+lower or upper case answers respectively. When the parameters are
+examined interactively the user may change the flux ratio threshold
+('t' key). Changes made are stored in the parameter file and, thus,
+learned for further images. Pixels to be deleted are marked by crosses
+and pixels which are peaks of objects are marked by pluses. The user
+may explicitly delete or undelete any point if desired but this is only
+for special cases near the threshold. In the future keys for
+interactive display of the specific detections will be added.
+Currently a surface plot of any candidate may be displayed graphically
+in four 90 degree rotated views using the 's' key. Note that the
+initial graph does not show all the points, some of which are clearly
+cosmic rays, because they have negative mean flux or flux ratio.  To
+view all data one must rewindow the graph with the 'w' key or ":/"
+commands (see \fBgtools\fR).
+.ih
+EXAMPLES
+1. To replace cosmic rays in a set of images ccd*:
+
+.nf
+ cl> cosmicrays ccd* new//ccd*
+ ccd001: Examine parameters interactively? (yes):
+ [A scatter plot graph is made. One can adjust the threshold.]
+ [Looking at a few points using the 's' key can be instructive.]
+ [When done type 'q'.]
+ ccd002: Examine parameters interactively? (yes): NO
+ [No further interactive examination is done.]
+.fi
+
+After cleaning one typically displays the images and possibly blinks them.
+A difference image or mask image may also be created.
+
+2. To create a mask image a bad pixel file must be specified. In the
+following we replace the cosmic rays in place and create a bad pixel
+file and mask image:
+
+.nf
+ cl> cosmicrays ccd001 ccd001 badpix=ccd001.bp
+ cl> badpiximage ccd001.bp ccd001 ccd001bp
+.fi
+.ih
+SEE ALSO
+badpiximage, gtools
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/darkcombine.hlp b/noao/imred/quadred/src/quad/doc/darkcombine.hlp
new file mode 100644
index 00000000..ef9eb81c
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/darkcombine.hlp
@@ -0,0 +1,125 @@
+.help darkcombine Sept93 arcon.quad
+.ih
+NAME
+darkcombine -- Combine and process dark count images
+.ih
+USAGE
+darkcombine input
+.ih
+PARAMETERS
+.ls input
+List of dark count images to combine. The \fIccdtype\fR parameter
+may be used to select the dark count images from a list containing all
+types of data.
+.le
+.ls output = "Dark"
+Output dark count root image name.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "avsigclip" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "dark"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = no
+Process the input images before combining?
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "none" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire region of the image is
+sampled (for efficiency the images are sampled if they are big enough).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 1, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The dark count images in the input image list are combined.
+The input images may be processed first if desired.
+The original images may be deleted automatically if desired.
+
+This task is a script which applies \fBquadproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+dark count images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+
+The version of \fBdarkcombine\fR in the \fBquad\fR package differs from that
+in \fBccdred\fR in that \fBquadproc\fR rather than \fBccdproc\fR is used to
+process the images if this is requested. The \fBquad\fR version MUST be
+used if process=yes and the input list contains any multi-readout images which
+have not been overscan corrected and trimmed.
+.ih
+EXAMPLES
+1. The image data contains four dark count images. To automatically select
+them and combine them as a background job using the default combining algorithm:
+
+ cl> darkcombine ccd*.imh&
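+
+2. As an illustrative variant (the parameter values are only an example), to
+combine the dark count frames with the median after first processing them
+with \fBquadproc\fR:
+
+.nf
+    cl> darkcombine ccd*.imh combine=median process=yes
+.fi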
+.ih
+SEE ALSO
+quadproc, combine
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/flatcombine.hlp b/noao/imred/quadred/src/quad/doc/flatcombine.hlp
new file mode 100644
index 00000000..c463655d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/flatcombine.hlp
@@ -0,0 +1,139 @@
+.help flatcombine Sept93 arcon.quad
+.ih
+NAME
+flatcombine -- Combine and process flat field images
+.ih
+USAGE
+flatcombine input
+.ih
+PARAMETERS
+.ls input
+List of flat field images to combine. The \fIccdtype\fR parameter
+may be used to select the flat field images from a list containing all
+types of data.
+.le
+.ls output = "Flat"
+Output flat field root image name. The subset ID is appended.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "avsigclip" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "flat"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = no
+Process the input images before combining?
+.le
+.ls subsets = yes
+Combine images by subset parameter? If yes then the input images are
+grouped by subset parameter and each group combined into a separate output
+image. The subset identifier is appended to the output and sigma image
+names. See \fBsubsets\fR for more on the subset parameter. This is generally
+used with flat field images.
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "none" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire region of the image is
+sampled (for efficiency the images are sampled if they are big enough).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 1, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+in the presence of data which one wants to reject than the average.
+However, computing the median is slower than the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 1.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The flat field images in the input image list are combined. If there
+is more than one subset (such as a filter or grating) then the input
+flat field images are grouped by subset and combined separately.
+The input images may be processed first if desired. However if all
+zero level bias effects are linear then this is not necessary and some
+processing time may be saved. The original images may be deleted
+automatically if desired.
+
+This task is a script which applies \fBquadproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+flat field images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+
+The version of \fBflatcombine\fR in the \fBquad\fR package differs from that
+in \fBccdred\fR in that \fBquadproc\fR rather than \fBccdproc\fR is used to
+process the images if this is requested. The \fBquad\fR version MUST be
+used if process=yes and the input list contains any multi-readout images which
+have not been overscan corrected and trimmed.
+.ih
+EXAMPLES
+1. The image data contains four flat field images for three filters.
+To automatically select them and combine them as a background job
+using the default combining algorithm:
+
+ cl> flatcombine ccd*.imh&
+
+The final images are "FlatV", "FlatB", and "FlatR".
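+
+2. As an illustrative variant (the parameter values are only an example), to
+process the flat fields first and use "minmax" rejection instead of the
+default:
+
+.nf
+    cl> flatcombine ccd*.imh process=yes reject=minmax
+.fi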
+.ih
+SEE ALSO
+quadproc, combine, subsets
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/flatfields.hlp b/noao/imred/quadred/src/quad/doc/flatfields.hlp
new file mode 100644
index 00000000..94766960
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/flatfields.hlp
@@ -0,0 +1,177 @@
+.help flatfields Jun87 noao.imred.ccdred
+
+.ih
+NAME
+flatfields -- Discussion of CCD flat field calibrations
+.ih
+DESCRIPTION
+This topic describes the different types of CCD flat fields and
+the tasks available in the \fBccdred\fR and spectroscopy packages for
+creating them. Flat field calibration is the most important operation
+performed on CCD data. This operation calibrates the relative response
+of the detector at each pixel. In some cases this is as simple as
+taking a special type of observation called a flat field. However, in
+many cases this calibration observation must be corrected for
+illumination, scanning, wavelength, and aperture effects.
+
+The discussion is in three sections; direct imaging, scan mode,
+and spectroscopy. Though there are many similarities between these
+modes of operation there are important differences in how corrections
+are applied to the basic flat field observations. The application of
+the flat field calibrations to the observations using \fBccdproc\fR is
+the same in all cases, however.
+.sh
+1. Direct Imaging
+The starting point for determining the flat field calibration is an
+observation of something which should have uniform response at all
+points on the detector. In addition the color of the light falling at
+each pixel should be the same as that in an observation so the same
+filter must be used when determining the flat field (the issue of the
+matching the color of the objects observed at the appropriate pixels is
+ignored here). The best calibration observation is of a blank sky. If
+an accurate blank sky observation can be obtained then this is all that
+is needed for a flat field calibration. This type of flat field might
+be called a \fIsky flat\fR, though this term is more often used for a
+type of flat field described below. There are two difficulties with
+this type of calibration; finding a really blank sky and getting a
+sufficiently accurate measurement without using all the observing
+time.
+
+It is usually not possible to get a blank sky observation accurate
+enough to calibrate the individual pixels without introducing
+undesirable noise. What is generally done is to use a lamp to either
+uniformly illuminate a part of the dome or directly illuminate the
+field of view. The first type of observation is called a \fIdome
+flat\fR and the second is called a \fIprojection flat\fR. We shall call
+both of these types of observations \fBlamp flat fields\fR. If the
+illumination is truly uniform then these types of observations are
+sufficient for flat field calibration. To get a very accurate flat
+field many observations are made and then combined (see
+\fBflatcombine\fR).
+
+Unfortunately, it is sometimes the case that the lamp flat fields
+do not illuminate the telescope/detector in the same way as the actual
+observations. Calibrating with these flat fields will introduce a
+residual large scale illumination pattern, though it will correctly
+calibrate the relative pixel responses locally. There are two ways to
+correct for this effect. The first is to correct the flat field
+observation. The second is to apply the uncorrected flat field to the
+observations and then apply an \fIillumination\fR correction as a
+separate operation. The first is more efficient since it consists of a
+single correction applied to each observation but in some cases the
+approximate correction is desired immediately, the observation needed
+to make the correction has not been taken yet, or the residual
+illumination error is not discovered until later.
+
+For the two methods there are two types of correction. One is to
+use a blank sky observation to correct for the residual illumination
+pattern.  This is different from using the sky observation directly as
+a flat field calibration in that only the large scale pattern is
+needed.  Determining the large scale illumination does not require high
+signal-to-noise at each pixel and faint objects in the image can be
+either eliminated or ignored. The second method is to remove the large
+scale shape from the lamp flat field. This is not as good as using a
+blank sky observation but, if there is no such observation and the
+illumination pattern is essentially only in the lamp flat field, this
+may be sufficient.
+
+From the above two paragraphs one sees there are four options.
+There is a task in the \fBccdred\fR package for each of these options.
+To correct a lamp flat field observation by a blank sky observation,
+called a \fIsky flat\fR, the task is \fBmkskyflat\fR. To correct the
+flat field for its own large scale gradients, called an \fIillumination
+flat\fR, the task is \fBmkillumflat\fR. To create a secondary
+correction to be applied to data processed with the lamp flat field
+image the tasks are \fBmkskycor\fR and \fBmkillumcor\fR which are,
+respectively, based on a blank sky observation and the lamp flat field
+illumination pattern.
+
+With this introduction turn to the individual documentation for these
+four tasks for further details.
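+
+As a rough sketch only (the image names are hypothetical and only the basic
+input and output arguments are shown; see the individual help pages for the
+full calling sequences), the first two options might look like:
+
+.nf
+    cl> mkskyflat Sky010 SkyflatV
+    cl> mkillumflat FlatV FlatVillum
+.fi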
+.sh
+2. Scan Mode
+There are two types of scan modes supported by the \fBccdred\fR
+package; \fIshortscan\fR and \fIlongscan\fR (see \fBccdproc\fR for
+further details). They both affect the manner in which flat field
+calibrations are handled. The shortscan mode produces images which are
+the same as direct images except that the light recorded at each pixel
+was collected by a number of different pixels. This improves the flat
+field calibration. If the flat field images, of the same types
+described in the direct imaging section, are observed in the same way
+as all other observations, i.e. in scan mode, then there is no
+difference from direct imaging (except in the quality of the flat
+fields). There is a statistical advantage to observing the lamp or sky
+flat field without scanning and then numerically averaging to simulate
+the result of the scanning. This improves the accuracy of
+the flat fields and might possibly allow direct blank sky observations
+to be used for flat fields. The numerical scanning is done in
+\fBccdproc\fR by setting the appropriate scanning parameters.
+
+In longscan mode the CCD detector is read out in such a way that
+each output image pixel is the sum of the light falling on all pixels
+along the direction of the scan. This reduces the flat field calibration
+to one dimension, one response value for each point across the scan.
+The one dimensional calibration is obtained from a longscan observation
+by averaging all the readout lines.
+This is done automatically in \fBccdproc\fR by setting the appropriate
+parameters. In this case very good flat fields can be obtained from
+one or more blank sky observations or an unscanned lamp observation. Other
+corrections are not generally used.
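+
+As an illustrative sketch only (it assumes the \fBccdproc\fR scan parameters
+are named \fIscancor\fR, \fIscantype\fR, and \fInscan\fR, and the number of
+scan lines shown is arbitrary), a shortscan correction for unscanned lamp
+flats might be requested with:
+
+.nf
+    cl> ccdproc flat*.imh ccdtype=flat scancor+ scantype=shortscan nscan=16
+.fi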
+.sh
+3. Spectroscopy
+Spectroscopic flat fields differ from direct imaging in that the
+spectrum of the sky or lamp and transmission variations with wavelength
+are part of the observation. Application of such images will introduce
+the inverse of the spectrum and transmission into the observation. It
+also distorts the observed counts making signal-to-noise estimates
+invalid. This, and the low signal in the dispersed light, makes it
+difficult to use blank sky observations directly as flat fields. As
+with direct imaging, sky observations may be used to correct for
+illumination errors if necessary.  At sufficiently high dispersion the
+continuous lamp spectrum may be flat enough that the spectral signature
+of the lamp is not a problem. Alternatively, flux calibrating the
+spectra will also remove the flat field spectral signature. The
+spectroscopic flat fields also have to be corrected for regions outside
+of the slit or apertures to avoid bad response effects when applying
+the flat field calibration to the observations.
+
+The basic scheme for removing the spectral signature is to average
+all the lines or columns across the dispersion and within the aperture
+to form an estimate of the spectrum. In addition to the averaging, a
+smooth curve is fit to the lamp spectrum to remove noise. This smooth
+shape is then divided back into each line or column to eliminate the
+shape of the spectrum without changing the shape of the spectrum
+in the spatial direction or the small scale response variations.
+Regions outside of the apertures are replaced by unity.
+This method requires that the dispersion be aligned fairly close to
+either the CCD lines or columns.
+
+This scheme is used in both longslit and multiaperture spectra.
+The latter includes echelle, slitlets, aperture masks, and fiber feeds.
+For narrow apertures which do not have wider slits for the lamp
+exposures there may be problems with flexure and defining a good
+composite spectrum. The algorithm for longslit spectra is simpler and
+is available in the task \fBresponse\fR in the \fBlongslit\fR package.
+For multiaperture data there are problems of defining where the spectra
+lie and avoiding regions off of the aperture where there is no signal.
+The task which does this is \fBapnormalize\fR in the \fBapextract\fR
+package. Note that the lamp observations must first be processed
+explicitly for bias and dark count corrections.
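+
+As a sketch only (the image names are hypothetical and only the basic
+positional arguments are shown), a longslit lamp flat might be normalized
+with \fBresponse\fR by:
+
+.nf
+    cl> response Flat Flat nFlat
+.fi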
+
+ Longslit spectra may also suffer the same types of illumination
+problems found in direct imaging.  However, in this case the illumination
+pattern is determined from sky observations (or the flat field itself)
+by finding the large scale pattern across the dispersion and at a number
+of wavelengths while avoiding the effects of night sky spectrum. The
+task which makes this type of correction in the \fBlongslit\fR package
+is \fBillumination\fR.  This produces an illumination correction.
+To make sky flats or the other types of corrections image arithmetic
+is used. Note also that the sky observations must be explicitly
+processed through the flat field stage before computing the illumination.
+.ih
+SEE ALSO
+.nf
+ccdproc, guide, mkillumcor, mkillumflat, mkskycor, mkskyflat
+apextract.apnormalize, longslit.response, longslit.illumination
+.fi
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/guide.hlp b/noao/imred/quadred/src/quad/doc/guide.hlp
new file mode 100644
index 00000000..d7639a6f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/guide.hlp
@@ -0,0 +1,715 @@
+.help guide Sept93 arcon.quad
+.ce
+User's Guide to the QUAD Package
+.sh
+1. Introduction
+
+ This guide provides a brief description of the \fBquad\fR package including
+examples of its use for reducing simple CCD data. The \fBquad\fR package
+contains all the basic tasks necessary for the reduction of CCD data obtained
+using the CTIO array controller Arcon. It is based on the IRAF CCD reduction
+package \fBccdred\fR written by Frank Valdes. It incorporates a few special
+tasks needed to deal with the peculiarities of Arcon data but most of the
+routines are taken directly from the standard package. The way in which one
+uses the routines and the basic reduction recipe is unchanged.
+
+This guide is generic in that it is not tied to any particular type of data.
+There may be more specific guides (or "cookbooks") for your data. Detailed
+descriptions of the tasks and features of the package are provided in the help
+documentation for the package.
+
+With Arcon the CCD is often read out using four ("quad") or two ("dual")
+amplifiers in order to reduce readout time. A feature of such images is that
+each readout typically has a slightly different DC bias level, gain, and
+readout noise. As a result both zero frames and uniformly illuminated
+exposures show a characteristic chequer board pattern, the sections of the
+image read through each amplifier having different levels. In addition, there
+will be a separate overscan strip, used to monitor the zero level, for each
+readout. The location of these overscan strips in the raw frame depends on
+which amplifiers are used (for more on this see \fBquadreadout\fR). Because
+of these peculiarities the \fBquad\fR package must be used for the first
+reduction steps, overscan correction and trimming, of multi-readout images;
+subsequent steps can be performed using \fBquad\fR or \fBccdred\fR. Either
+package can be used for the complete reduction of conventional single readout
+CCD images.
+
+ The purpose of the \fBquad\fR package is to provide tools for the removal of
+all the "instrumental signatures" from CCD data taken with Arcon. The standard
+reduction operations are: replacement of bad columns and lines by interpolation
+from neighboring columns and lines; subtraction of a bias level
+determined from overscan or prescan columns or lines; subtraction of a
+zero level using a zero exposure time calibration image; subtraction
+of a dark count calibration image appropriately scaled to the dark time
+exposure; division by a scaled flat field calibration image; division
+by an illumination image (derived from a blank sky image); subtraction
+of a scaled fringe image (also derived from a blank sky image); and
+trimming the image of unwanted lines or columns such as the overscan
+strip. Any set of these operations may be done simultaneously over a list of
+images in a highly efficient manner. The reduction operations are
+recorded in the image header and may also be logged on the terminal and
+in a log file.
+
+ The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+
+ Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+
+The \fBquad\fR package also includes some tasks which can be used for the
+examination of multi-readout images prior to reducing them, for instance by
+calculating simple image statistics, generating histograms, etc.
+
+ There are several important features provided by the \fBccdred\fR package
+underpinning \fBquad\fR, which make the reduction of CCD images convenient,
+particularly by minimizing record keeping. One of these is the ability to
+recognize the different types of CCD images. This ability allows the user to
+select a certain class of images to be processed or listed and allows the
+processing tasks to identify calibration images and process them differently
+from object images. The standard CCD image types are \fIobject\fR,
+\fIzero\fR level, \fIdark\fR count, and \fIflat\fR field. For more on
+the image types see \fBccdtypes\fR.
+
+ The tasks can also identify the different filters (or other subset
+parameter) which require different flat field images. This means you don't
+have to separate the images by filter and process each set separately.
+This feature is discussed further in \fBsubsets\fR.
+
+ The tasks keep track of the reduction steps completed on each
+image and ignore images which have been processed. This feature,
+along with recognizing the image types and subsets, makes it possible to
+specify all the images to a task with a wildcard template, such as
+"*.imh", rather than indicating each image by name. You will find this
+extremely important with large sets of observations.
+
+ A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows you to abort the task without messing up the image data and
+protects data if the computer crashes. The second feature is that
+there is a package parameter which may be set to make a backup of the
+input data with a particular prefix such as "orig" or "imdir$". This
+backup feature may be used when there is sufficient disk space, when learning
+to use the package, or just to be cautious.
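+
+For example, assuming the package parameter is called \fIbackup\fR as in
+\fBccdred\fR (check with "lparam quad" if in doubt), backups with the
+prefix "orig" would be enabled by:
+
+.nf
+    cl> quad.backup = "orig"
+.fi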
+
+ In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
+
+ The following sections guide you through the basic use of the
+\fBquad\fR package. Only the important parameters which you might
+want to change are described. It is assumed that the support personnel
+have created the necessary instrument files (see \fBinstruments\fR)
+which will set the default parameters for the data you will be
+reducing. If this is not the case you may need to delve more deeply
+into the details of the tasks. Information about all the parameters
+and how the various tasks operate are given in the help documentation
+for the tasks and in additional special help topics. Some useful help
+documentation is indicated in the discussion and also in the
+\fBReferences\fR section.
+.sh
+2. Getting Started
+
+ The first step is to load \fBquad\fR. This is done by loading
+the \fBarcon\fR package, and then the \fBquad\fR package. Loading a
+package consists of typing its name.
+
+ When you load the \fBquad\fR package the menu of tasks or commands
+is listed. This appears as follows:
+
+.nf
+ cl> quad
+ badpiximage combine mkillumcor qstatistics
+ ccdgroups cosmicrays mkillumflat quadproc
+ ccdhedit darkcombine mkskycor quadscale
+ ccdinstrument flatcombine mkskyflat setinstrument
+ ccdlist mkfringecor qhistogram zerocombine
+.fi
+
+A summary of the tasks and additional help topics is obtained by typing:
+
+ cl> help
+
+This list and how to get additional help on specific topics is described
+in the \fBReferences\fR section at the end of this guide.
+
+ The first command to use is \fBsetinstrument\fR, which sets the package
+appropriately for the CCD images to be reduced. The support personnel
+should tell you the instrument identification, but if not a list
+of known instruments may be listed by using '?' for the instrument name.
+
+.nf
+ cl> setinstrument
+ Instrument ID (type ? for a list) \fI<enter instrument id or ?>\fR
+ <Set quad package parameters using eparam>
+ <Set quadproc task parameters using eparam>
+.fi
+
+This task sets the default parameters and then allows you to modify the
+package parameters and the processing parameters using the parameter
+editor \fBeparam\fR. If you are not familiar with \fBeparam\fR see the
+help or CL introduction documentation. For most terminals you move up
+and down through the parameters with the terminal arrow keys, you
+change the parameters by simply typing the desired value, and you exit
+with control Z or control D. Note that you can change parameters for
+any task at any time with \fBeparam\fR and you do not have to run
+\fBsetinstrument\fR again, even if you logout, until you need to reduce
+data from a different instrument.
+
+ The \fBquad\fR package parameters control general I/O functions of
+the tasks in the package. The parameters you might wish to change are
+the output pixel type and the verbose option. Except when the input
+images are short integers, the noise is significantly greater than one
+digital unit, and disk space is critical, it is probably better to
+allow the processing to convert the images to real pixel datatype. The
+verbose parameter simply prints the information written to the log file
+on the terminal. This can be useful when little else is being done and
+you are just beginning. However, when doing background processing and
+other IRAF reduction tasks it is enough to simply look at the end of
+the logfile with the task \fBtail\fR to see the current state of the
+processing.
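+
+For example, assuming the package parameters are named \fIpixeltype\fR and
+\fIverbose\fR as in \fBccdred\fR, they might be set directly from the CL
+with:
+
+.nf
+    cl> quad.pixeltype = "real"
+    cl> quad.verbose = yes
+.fi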
+
+ The \fBquadproc\fR parameters control the CCD processing. There are
+many parameters but they all may be conveniently set at this point.
+Many of the parameters have default values set appropriately for the
+instrument you specified. The images to be processed can be specified
+later. What needs to be set are the processing operations that you
+want done and the parameters required for each operation. The
+processing operations are selected by entering yes or no for each one.
+The following items briefly describe each of the possible processing
+operations and the additional parameters required.
+
+.ls \fIfixpix\fR - Fix bad CCD lines and columns?
+The bad pixels (cosmetic defects) in the detector are given in a file
+specified by the parameter \fIfixfile\fR. This information is used
+to replace the pixels by interpolating from the neighboring pixels.
+A standard file for your instrument may be set by \fBsetinstrument\fR
+or if the word "image" is given then the file is defined in the instrument
+data file. For more on the bad pixel file see \fBinstruments\fR.
+.le
+.ls \fIoverscan\fR - Apply overscan strip correction?
+The overscan or prescan region is specified by the parameter
+\fIbiassec\fR. This is given as an IRAF image section. For guidance on
+setting this parameter see the help page for \fBquadproc\fR. The overscan
+region is averaged along the readout axis, specified by the parameter
+\fIreadaxis\fR, to create a one dimensional bias vector. This bias is
+fit by a function to remove cosmic rays and noise. There are a number
+of parameters at the end of the parameter list which control the
+fitting.
+.le
+.ls \fItrim\fR - Trim the image?
+The image is trimmed to the image section given by the parameter
+\fItrimsec\fR. For guidance on setting this parameter see the help page
+for \fBquadproc\fR.
+.le
+.ls \fIzerocor\fR - Apply zero level correction?
+The zero level image to be subtracted is specified by the parameter
+\fIzero\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIdarkcor\fR - Apply dark count correction?
+The dark count image to be subtracted is specified by the parameter
+\fIdark\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIflatcor\fR - Apply flat field correction?
+The flat field images to be used are specified by the parameter
+\fIflat\fR. There must be one flat field image for each filter
+or subset (see \fBsubsets\fR) to be processed. If a flat field
+image is not given then the calibration image will be sought
+in the list of images to be processed.
+.le
+.ls \fIreadcor\fR - Convert zero level image to readout correction?
+If a one dimensional zero level readout correction vector is to be subtracted
+instead of a two dimensional zero level image then, when this parameter is set,
+the zero level images will be averaged to one dimension. The readout axis
+must be specified by the parameter \fIreadaxis\fR. The default for your
+instrument is set by \fBsetinstrument\fR.
+.le
+.ls \fIscancor\fR - Convert flat field image to scan correction?
+If the instrument is operated in a scan mode then a correction to the
+flat field may be required. There are two types of scan modes, "shortscan"
+and "longscan". In longscan mode flat field images will be averaged
+to one dimension and the readout axis must be specified. Shortscan mode
+is a little more complicated. The scan correction is used if the flat
+field images are not observed in scan mode. The number of scan lines
+must be specified by the parameter \fInscan\fR. If they are observed in
+scan mode, like the object observations, then the scan correction
+operations should \fInot\fR be specified. For details of scan mode operations
+see \fBquadproc\fR. The scan parameters
+should be set by \fBsetinstrument\fR. If in doubt consult someone
+familiar with the instrument and mode of operation.
+.le
+
+ This description of the parameters is longer than the actual operation of
+setting the parameters. The only parameters likely to change during processing
+are the calibration image parameters.
+
+ When processing many images using the same calibration files a modest
+performance improvement can be achieved by keeping (caching) the
+calibration images in memory to avoid disk accesses. This option
+is available by specifying the amount of memory available for image
+caching with the parameter \fImax_cache\fR. If the value is zero then
+the images are accessed from disk as needed while if there is
+sufficient memory the calibration images may be kept in memory during
+the task execution.
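+
+For example, assuming as in \fBccdproc\fR that \fImax_cache\fR is given in
+megabytes (the value shown is arbitrary):
+
+.nf
+    cl> quadproc.max_cache = 20
+.fi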
+.sh
+3. Processing Your Data
+
+ The processing path depends on the type of data, the type of
+instrument, types of calibration images, and the observing
+sequence. In this section we describe two types of operations common
+in reducing most data; combining calibration images and performing the
+standard calibration and correction operations. Some additional special
+operations are described in the following section.
+
+ However, the first thing you might want to try before any
+processing is to get a listing of the CCD images showing the CCD image
+types, subsets, and processing flags. The task for this is
+\fBccdlist\fR. It has three types of output; a short one line per
+image format, a longer format which shows the state of the processing,
+and a format which prints the image names only (used to create files
+containing lists of images of a particular CCD image type). To get a
+quick listing type:
+
+.nf
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.fi
+
+ The example shows only a sample of the images. The short format
+listing tells you the name of the image, its size and pixel type, the
+CCD image type as seen by the package, the subset identifier (in this
+case the filter), and the title. If the data had been processed then
+there would also be processing flags. If the CCD image types do not
+seem right then there may be a problem with the instrument
+specification.
+
+ Many of the tasks in the \fBquad\fR package have the parameter
+\fIccdtype\fR which selects a particular type of image. To list
+only the object images from the previous example:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.fi
+
+If no CCD image type is specified (by using the null string "")
+then all image types are selected. This may be
+necessary if your instrument data does not contain image type identifications.
+.sh
+3.1 Combining Calibration Images
+
+ If you do not need to combine calibration images because you only
+have one image of each type, you can skip this section. Calibration
+images, particularly zero level and flat field images, are combined in
+order to minimize the effects of noise and reject cosmic ray hits in the
+calibrations. The basic tool for combining images is the task
+\fBcombine\fR. There are simple variants of this task whose default
+parameters are set appropriately for each type of calibration image.
+These are the ones you will use for calibration images leaving
+\fBcombine\fR for combining object images. Zero level images are
+combined with \fBzerocombine\fR, dark count images with
+\fBdarkcombine\fR, and flat field images with \fBflatcombine\fR.
+
+ For example, to combine flat field images the command is:
+
+.nf
+ cl> flatcombine *.imh
+ Jun 1 14:26 combine: maxreject
+ Images N Exp Mode Scale Offset Weight
+ ccd045.imh 1 5.0 INDEF 1.000 0. 0.048
+ ccd046.imh 1 5.0 INDEF 1.000 0. 0.048
+ <... list of files ...>
+ ccd065.imh 1 5.0 INDEF 1.000 0. 0.048
+ ----------- ------ ------
+ FlatV.imh 21 5.0
+.fi
+
+This output is printed when verbose mode is set. The same information
+is recorded in the log file. In this case the flat fields are combined
+by rejecting the maximum value at each point in the image (the
+"maxreject" algorithm). The images are scaled by the exposure times,
+which are all the same in this example. The mode is not evaluated for
+exposure scaling and the relative weights are the same because the
+exposure times are the same. The example only shows part of the
+output; \fBflatcombine\fR automatically groups the flat field images by
+filter to produce the calibration images "FlatV", "FlatB", and
+"FlatR".
+.sh
+3.2 Calibrations and Corrections
+
+ Processing the CCD data is easy and largely automated.
+First, set the task parameters with the following command:
+
+ cl> eparam quadproc
+
+You may have already set the parameters when you ran
+\fBsetinstrument\fR, though the calibration image parameters
+\fIzero\fR, \fIdark\fR, and \fIflat\fR may still need to be set or
+changed. Once this is done simply give the command
+
+.nf
+ cl> quadproc *.imh
+ ccd003: Jun 1 15:13 Overscan section is [520:540,*] with mean=485.0
+ ccd003: Jun 1 15:14 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:14 Overscan section is [520:540,*] with mean=485.0
+ FlatV: Jun 1 15:14 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:15 Overscan section is [520:540,*] with mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh with scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*] with mean=485.2
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh with scale=138.2
+ <... more ...>
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*] with mean=482.4
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*] with mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh with scale=132.3
+ <... more ...>
+.fi
+
+ The output shown is with verbose mode set. It is the same as
+recorded in the log file. It illustrates the principle of automatic
+calibration image processing. The first object image, "ccd003", was
+being processed when the flat field image was required. Since the
+image was taken with the V filter the appropriate flat field was
+determined to be "FlatV". Since it had not been processed, the
+processing of "ccd003" was interrupted to process "FlatV". The
+processed calibration image may have been cached if there was enough
+memory. Once "FlatV" was processed (note that the flat field was not
+flattened because the task knows this image is a flat field) the
+processing of "ccd003" was completed. The next image, "ccd004", is
+also a V filter image so the already processed, and possibly cached,
+flat field "FlatV" is used again. The first B band image is "ccd013"
+and, as before, the B filter flat field calibration image is processed
+automatically. The same automatic calibration processing and image
+caching occurs when using zero level and dark count calibration
+images.
+
+ Commonly the processing is done with the verbose mode turned off
+and the task run as a background job. This is done with the commands
+
+.nf
+ cl> quad.verbose=no
+ cl> quadproc *.imh &
+.fi
+
+The already processed images in the input list are recognized as having been
+processed and are not affected. To check the status of the processing we
+can look at the end of the log file with:
+
+ cl> tail logfile
+
+After processing we can repeat the \fBccdlist\fR command to find:
+
+.nf
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+	ccd024.imh[508,508][real][object][R][OTF]:N4036 R 600s
+.fi
+
+The processing flags indicate the images have been overscan corrected,
+trimmed, and flat fielded.
+
+    As you can see, processing images is very easy. One source of
+minor confusion for beginning users is dealing with calibration
+images. First, there is no reason that calibration images may not be
+processed explicitly with \fBquadproc\fR; just remember to set
+\fIccdtype\fR to the calibration image type or to "". When processing
+object images the calibration images to be used may be specified either
+with the task parameter for the particular calibration image or by
+including the calibration image in the list of input images. Calibration
+images specified by parameter value take precedence and the task
+does not check their CCD image types. Calibration images given in the
+input list must have a valid CCD image type. If too many
+calibration images are specified, say because the calibration images
+combined to make the master calibration images were not deleted and
+so are part of the image list "*.imh", only the first one will be used.
+Finally, note that flat field, illumination, and fringe images
+are subset (filter) dependent, so a calibration image for each filter
+must be specified.
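+
+For example, either of the following commands (the image name patterns
+here are only illustrative) processes the object images using the
+master flat fields created earlier; the first names them explicitly
+with the \fIflat\fR parameter and the second simply includes them in
+the input list:
+
+.nf
+	cl> quadproc obj*.imh flat="FlatV,FlatB,FlatR"
+	cl> quadproc obj*.imh,Flat*.imh
+.fi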
+.sh
+4. Special Processing Operations
+
+ The special processing operations are mostly concerned with the
+flat field response correction. There are also special processing
+operations available in \fBquadproc\fR for one dimensional readout
+corrections in the zero level and flat field calibrations. These
+were described briefly above and in more detail in \fBquadproc\fR
+and are not discussed further in this guide. The processing
+operations described in this section are for preparing flat fields
+for two dimensional spectroscopic data, for correcting flat fields
+for illumination effects, for making a separate illumination correction,
+and for applying corrections for fringe effects. For additional
+discussion about flat fields and illumination corrections see the
+help topic \fBflatfields\fR.
+.sh
+4.1 Spectroscopic Flat Fields
+
+ For spectroscopic data the flat fields may have to be processed to
+remove the general shape of the lamp spectrum and to replace regions outside
+of the aperture where there is no flat field information with values that
+will not cause bad response effects when the flat field is applied to the
+data. If the shape of the lamp spectrum is not important and if the
+longslit spectra have the regions outside of the slit either off the
+detector or trimmed then you may use the flat field without special
+processing.
+
+ First you must process the flat field images explicitly with
+
+ cl> quadproc *.imh ccdtype=flat
+
+where "*.imh" may be replaced with any list containing the flat fields.
+If zero level and dark count corrections are required these calibration
+images must be available at this time.
+
+ Load the \fBtwodspec\fR package and then either the \fBlongslit\fR
+package, for longslit data, or the \fBapextract\fR package, for
+multiaperture data such as echelles, multifiber, or aperture mask
+spectra. The task for removing the longslit quartz spectrum is
+\fBresponse\fR. There is also a task for removing illumination
+effects, including the slit profile, from longslit spectra called
+\fBillumination\fR. For more about processing longslit spectra see the
+help for these tasks and the paper \fIReduction of Longslit Spectra
+with IRAF\fR. The cookbook \fIReduction of Longslit Spectroscopic
+Data Using IRAF (KPNO ICCD and Cryogenic Camera Data)\fR also provides
+a very good discussion even if your data is from a different instrument.
+
+ For multiaperture data the task for removing the relative shapes of
+the spectra is called \fBapnormalize\fR. Again, consult the help documentation
+for this task for further details. Since you will probably also be
+using the package for extracting the spectra you may be interested
+in the document \fIThe IRAF APEXTRACT Package\fR.
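+
+As a minimal sketch of the two routes (the task parameters are omitted
+here and will be queried, or may be set first with \fBeparam\fR):
+
+.nf
+	cl> twodspec
+	cl> longslit
+	cl> response
+
+	cl> twodspec
+	cl> apextract
+	cl> apnormalize
+.fi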
+.sh
+4.2 Illumination Corrections
+
+    The flat field calibration images may not have the same illumination
+pattern as the observations of the sky due to the way the lamp illuminates the
+optical system. In this case when the flat field correction is applied
+to the data there will be gradients in the sky background. To remove
+these gradients a blank sky calibration image is heavily smoothed
+to produce an illumination image. The illumination image
+is then divided into the images during processing to correct for the
+illumination difference between the flat field and the objects.
+Like the flat fields, the illumination correction images may be subset
+dependent so there should be an illumination image for each subset.
+
+The task which makes illumination correction images is \fBmkskycor\fR.
+Some examples are
+
+.nf
+ cl> mkskycor sky004 Illum004
+ cl> mkskycor sky*.imh ""
+.fi
+
+In the first example the sky image "sky004" is used to make the illumination
+correction image "Illum004". In the second example the sky images are
+converted to illumination correction images by specifying no output image
+names. As with \fBquadproc\fR, if the input images have not been processed they
+are first processed automatically.
+
+To apply the illumination correction
+
+.nf
+ cl> quadproc *.imh ccdtype=object illumcor+ illum=Illum004
+ cl> quadproc *.imh ccdtype=object illumcor+ illum=sky*.imh
+.fi
+
+The illumination images could also be set using \fBeparam\fR or given
+on the command line.
+.sh
+4.3 Sky Flat Fields
+
+    You will notice that when you process images with an illumination
+correction you are dividing each image by a flat field calibration and
+an illumination correction. If the illumination corrections are not
+done as a later step but at the same time as the rest of the processing,
+one will get the same calibration by multiplying the flat field by
+the illumination correction and using this product alone as the
+flat field. Such an image is called a \fIsky flat\fR since it is
+a flat field which has been corrected to yield a flat sky when applied
+to the observations. This approach has the advantage of one fewer
+calibration image and two fewer computations (scaling and dividing the
+illumination correction). As an added shortcut, rather than computing
+the illumination image with \fBmkskycor\fR and then multiplying, the
+task \fBmkskyflat\fR does all this in one step. Thus, \fBmkskyflat\fR
+takes an input blank sky image, processes it if needed, determines the
+appropriate flat field (sky flats are also subset dependent) from the
+\fBquadproc\fR parameters or the input image list, and produces an
+output sky flat. Further, if no output image is specified the task
+converts the input blank sky calibration image into a sky flat.
+
+ Two examples in which a new image is created and in which the
+input images are converted to sky flats are
+
+.nf
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky*.imh ""
+.fi
+.sh
+4.4 Illumination Corrected Flat Fields
+
+    A third method to account for illumination problems in the flat fields
+is to remove the large scale pattern from the flat field itself. This is
+useful if there are no reasonable blank sky calibration images and the
+astronomical exposures are evenly illuminated but the flat fields are not.
+This is done by smoothing the flat field images instead of blank sky
+images. As with using the sky images there are two methods: creating
+an illumination correction to be applied as a separate step or fixing
+the original flat field. The smoothing algorithm is
+the same as that used in the other tasks. The tasks to make these types
+of corrections are \fBmkillumcor\fR and \fBmkillumflat\fR. The usage
+is much the same as for the other illumination correction tasks
+except that it is more reasonable to replace the original flat fields
+by the corrected flat fields when fixing the flat field. Examples
+of an illumination correction and removing the illumination pattern
+from the flat field are
+
+.nf
+ cl> mkillumcor flat025 Illum025
+ cl> mkillumflat flat*.imh ""
+.fi
+
+As with the other tasks, the input images are processed if necessary.
+.sh
+4.5 Fringe Corrections
+
+ Some CCD detectors suffer from fringing effects due to the night
+sky emission lines which are not removed by the other calibration
+and correction operations. To correct for the fringing you need a
+really blank sky image. There is not yet a task to remove objects from
+sky images because this is often done with an interactive image display
+tool (which will soon be added). The blank sky image is heavily smoothed
+to determine the mean sky background and then this is subtracted from the
+original image. The image should then be essentially zero except for the
+fringe pattern. This fringe correction image is scaled to the same
+exposure time as the image to be corrected and then subtracted to remove
+the fringing. Note that since the night sky lines are variable there
+may need to be an additional scaling applied. Determining this scaling
+requires either an interactive display tool or a very clever task.
+Such tasks will also be added in the future.
+
+ The task to make a fringe correction image is \fBmkfringecor\fR.
+The sky background is determined in exactly the same way as the illumination
+pattern; in fact, the same sky image may be used for both the sky
+illumination and for the fringe correction. The task works consistently
+with the "mk" tasks in that the input images are processed first if needed
+and then the output correction image is produced with the specified name
+or replaces the input image if no output image is specified.
+As examples,
+
+.nf
+ cl> mkfringecor sky004 Fringe
+ cl> mkfringecor sky*.imh ""
+.fi
+.sh
+5. Summary
+
+ The \fBquad\fR package is very easy to use. First load the package;
+it is in the \fBarcon\fR package. If this is your first time reducing data
+from a particular instrument or if you have changed instruments then run
+\fBsetinstrument\fR. Set the processing parameters for the operations you want
+performed. If you need to combine calibration images to form a master
+calibration image use one of the combine tasks. Spectroscopic flat fields may
+need to be processed first in order to remove the lamp spectrum.
+Finally, just type
+
+ cl> quadproc *.imh&
+.sh
+6. References
+
+ A general guide to using IRAF is \fIA User's Introduction to the IRAF
+Command Language\fR. This document may be found in the IRAF documentation
+sets and is available from the National Optical Astronomy Observatories,
+Central Computer Services (NOAO-CCS).
+
+    For a more detailed description of the \fBccdred\fR package, on which
+\fBquad\fR is based, including a discussion of the design and some of the
+algorithms, see \fIThe IRAF CCD Reduction Package -- CCDRED\fR by F. Valdes.
+This paper is available from NOAO-CCS and appears in the proceedings of the
+Santa Cruz Summer Workshop in Astronomy and Astrophysics, \fIInstrumentation
+for Ground-Based Optical Astronomy: Present and Future\fR, edited by
+Lloyd B. Robinson and published by Springer-Verlag.
+
+ The task descriptions and supplementary documentation are available
+on-line through the help task by typing
+
+ cl> help \fItopic\fR
+
+where \fItopic\fR is one of the following.
+
+.nf
+ SPECIAL TASKS FOR MULTI-READOUT CCD IMAGES
+
+ quadproc - Process multi-readout CCD images
+ quadscale - Apply correction for amplifier dependent gain differences
+ qstatistics - Calculate image statistics for multi-readout CCD images
+ qhistogram - Make histogram of multi-readout CCD image
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ zerocombine - Combine and process zero level images
+
+
+ STANDARD CCDRED TASKS
+
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdinstrument - Review and edit instrument translation files
+ ccdlist - List CCD processing information
+ combine - Combine CCD images
+ cosmicrays - Detect and replace cosmic rays
+ mkfringecor - Make fringe correction images from sky images
+       mkillumcor - Make flat field illumination correction images
+      mkillumflat - Make illumination corrected flat fields
+         mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+ setinstrument - Set instrument parameters
+
+ ADDITIONAL HELP TOPICS
+
+ ccdgeometry - Discussion of CCD coordinate/geometry keywords
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ package - CCD image reduction package
+ quadreadout - Description of multi-readout CCD data
+ subsets - Description of CCD subsets
+.fi
+
+Printed copies of the on-line help documentation may be made with the
+command
+
+ cl> help topic | lprint
+
+ In addition to the package documentation for \fBquad\fR,
+\fBlongslit\fR, and \fBapextract\fR there may be specific guides for
+certain instruments. These specific guides, called "cookbooks", give
+specific examples and parameter values for the CCD data.
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/guide.ms b/noao/imred/quadred/src/quad/doc/guide.ms
new file mode 100644
index 00000000..62d87bb9
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/guide.ms
@@ -0,0 +1,794 @@
+.RP
+.TL
+User's Guide to the CCDRED Package
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+P.O. Box 26732, Tucson, Arizona 85726
+June 1987
+Revised February 1988
+.AB
+The IRAF CCD reduction package, \fBccdred\fR, provides tools
+for the easy and efficient reduction of CCD images. The standard
+reduction operations are replacement of bad pixels, subtraction of an
+overscan or prescan bias, subtraction of a zero level image,
+subtraction of a dark count image, division by a flat field calibration
+image, division by an illumination correction, subtraction of a fringe
+image, and trimming unwanted lines or columns. Another common
+operation provided by the package is scaling and combining images with
+a number of algorithms for rejecting cosmic rays. Data in the image
+header is used to make the reductions largely automated and
+self-documenting though the package may still be used in the absence of
+this data. Also a translation mechanism is used to relate image header
+parameters to those used by the package to allow data from a variety of
+observatories and instruments to be processed. This guide provides a brief
+description of the IRAF CCD reduction package and examples of reducing
+simple CCD data.
+.AE
+.NH
+Introduction
+.LP
+ This guide provides a brief description of the IRAF CCD reduction
+package \fBccdred\fR and examples of reducing simple CCD data. It is a
+generic guide in that it is not tied to any particular type of data.
+There may be more specific guides (or "cookbooks") for your data.
+Detailed descriptions of the tasks and features of the package are
+provided in the help documentation for the package.
+
+ The purpose of the CCDRED package is to provide tools for the easy
+and efficient reduction of CCD images. The standard reduction
+operations are replacement of bad columns and lines by interpolation
+from neighboring columns and lines, subtraction of a bias level
+determined from overscan or prescan columns or lines, subtraction of a
+zero level using a zero length exposure calibration image, subtraction
+of a dark count calibration image appropriately scaled to the dark time
+exposure, division by a scaled flat field calibration image, division
+by an illumination image (derived from a blank sky image), subtraction
+of a scaled fringe image (also derived from a blank sky image), and
+trimming the image of unwanted lines or columns such as the overscan
+strip. Any set of operations may be done simultaneously over a list of
+images in a highly efficient manner. The reduction operations are
+recorded in the image header and may also be logged on the terminal and
+in a log file.
+
+ The package also provides tools for combining multiple exposures
+of object and calibration images to improve the statistical accuracy of
+the observations and to remove transient bad pixels. The combining
+operation scales images of different exposure times, adjusts for
+variable sky background, statistically weights the images by their
+signal-to-noise, and provides a number of useful algorithms for
+detecting and rejecting transient bad pixels.
+
+ Other tasks are provided for listing reduction information about
+the images, deriving secondary calibration images (such as sky
+corrected flat fields or illumination correction images), and easily
+setting the package parameters for different instruments.
+
+ There are several important features provided by the package to
+make the reduction of CCD images convenient, particularly to minimize
+record keeping. One of these is the ability to recognize the different
+types of CCD images. This ability allows the user to select a certain
+class of images to be processed or listed and allows the processing
+tasks to identify calibration images and process them differently from
+object images. The standard CCD image types are \fIobject\fR,
+\fIzero\fR level, \fIdark\fR count, and \fIflat\fR field. For more on
+the image types see \fBccdtypes\fR.
+
+ The tasks can also identify the different filters (or other subset
+parameter) which require different flat field images. This means you don't
+have to separate the images by filter and process each set separately.
+This feature is discussed further in \fBsubsets\fR.
+
+ The tasks keep track of the reduction steps completed on each
+image and ignore images which have been processed. This feature,
+along with recognizing the image types and subsets, makes it possible to
+specify all the images to a task with a wildcard template, such as
+"*.imh", rather than indicating each image by name. You will find this
+extremely important with large sets of observations.
+
+ A fundamental aspect of the package is that the processing
+modifies the images. In other words, the reduction operations are
+performed directly on the image. This "feature" further simplifies
+record keeping, frees the user from having to form unique output image
+names, and minimizes the amount of disk space required. There
+are two safety features in this process. First, the modifications do
+not take effect until the operation is completed on the image. This
+allows you to abort the task without messing up the image data and
+protects data if the computer crashes. The second feature is that
+there is a package parameter which may be set to make a backup of the
+input data with a particular prefix such as "orig" or "imdir$". This
+backup feature may be used when there is sufficient disk space, when learning
+to use the package, or just to be cautious.
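+
+For example, assuming the package parameter is named \fIbackup\fR
+(check with \fBeparam ccdred\fR on your system), the backup prefix
+might be set with:
+
+.ft L
+	cl> ccdred.backup = "orig"
+.ft R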
+
+ In a similar effort to efficiently manage disk space, when combining
+images into a master object or calibration image there is an option to
+delete the input images upon completion of the combining operation.
+Generally this is desirable when there are many calibration exposures,
+such as zero level or flat field images, which are not used after they
+are combined into a final calibration image.
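+
+As a sketch, assuming this option is the \fIdelete\fR parameter of the
+combining tasks (verify with \fBeparam\fR before relying on it), the
+individual flat field exposures could be removed after combining with:
+
+.ft L
+	cl> flatcombine *.imh delete+
+.ft R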
+
+ The following sections guide you through the basic use of the
+\fBccdred\fR package. Only the important parameters which you might
+want to change are described. It is assumed that the support personnel
+have created the necessary instrument files (see \fBinstruments\fR)
+which will set the default parameters for the data you will be
+reducing. If this is not the case you may need to delve more deeply
+into the details of the tasks. Information about all the parameters
+and how the various tasks operate are given in the help documentation
+for the tasks and in additional special help topics. Some useful help
+documentation is indicated in the discussion and also in the
+\fBReferences\fR section.
+.NH
+Getting Started
+.LP
+ The first step is to load \fBccdred\fR. This is done by loading
+the \fBnoao\fR package, followed by the image reduction package
+\fBimred\fR, and finally the \fBccdred\fR package. Loading a
+package consists of typing its name. Note that some of these packages may be
+loaded automatically when you log on to IRAF.
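+
+For example, the full sequence of commands is simply (the prompt may
+change to reflect the most recently loaded package):
+
+.nf
+.ft L
+	cl> noao
+	cl> imred
+	cl> ccdred
+.ft R
+.fi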
+
+ When you load the \fBccdred\fR package the menu of tasks or commands
+is listed. This appears as follows:
+
+.nf
+.KS
+.ft L
+ cl> ccdred
+ badpiximage ccdtest mkfringecor setinstrument
+ ccdgroups combine mkillumcor zerocombine
+ ccdhedit cosmicrays mkillumflat
+ ccdlist darkcombine mkskycor
+ ccdproc flatcombine mkskyflat
+.ft R
+.KE
+.fi
+
+A summary of the tasks and additional help topics is obtained by typing:
+
+.ft L
+ cl> help
+.ft R
+
+This list and how to get additional help on specific topics is described
+in the \fBReferences\fR section at the end of this guide.
+
+ The first command to use is \fBsetinstrument\fR, which sets the package
+appropriately for the CCD images to be reduced. The support personnel
+should tell you the instrument identification, but if not a list
+of known instruments may be listed by using '?' for the instrument name.
+
+.nf
+.ft L
+ cl> setinstrument
+ Instrument ID (type ? for a list) \fI<enter instrument id or ?>
+ <Set ccdred package parameters using eparam>
+ <Set ccdproc task parameters using eparam>
+.ft R
+.fi
+
+This task sets the default parameters and then allows you to modify the
+package parameters and the processing parameters using the parameter
+editor \fBeparam\fR. If you are not familiar with \fBeparam\fR see the
+help or CL introduction documentation. For most terminals you move up
+and down through the parameters with the terminal arrow keys, you
+change the parameters by simply typing the desired value, and you exit
+with control Z or control D. Note that you can change parameters for
+any task at any time with \fBeparam\fR and you do not have to run
+\fBsetinstrument\fR again, even if you logout, until you need to reduce
+data from a different instrument.
+
+ The \fBccdred\fR package parameters control general I/O functions of
+the tasks in the package. The parameters you might wish to change are
+the output pixel type and the verbose option. Except when the input
+images are short integers, the noise is significantly greater than one
+digital unit, and disk space is critical, it is probably better to
+allow the processing to convert the images to real pixel datatype. The
+verbose parameter simply prints the information written to the log file
+on the terminal. This can be useful when little else is being done and
+you are just beginning. However, when doing background processing and
+other IRAF reduction tasks it is enough to simply look at the end of
+the logfile with the task \fBtail\fR to see the current state of the
+processing.
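+
+For example, these two package parameters may be set directly from the
+CL (or with \fBeparam ccdred\fR):
+
+.nf
+.ft L
+	cl> ccdred.pixeltype = "real"
+	cl> ccdred.verbose = yes
+.ft R
+.fi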
+
+ The \fBccdproc\fR parameters control the CCD processing. There are
+many parameters but they all may be conveniently set at this point.
+Many of the parameters have default values set appropriately for the
+instrument you specified. The images to be processed can be specified
+later. What needs to be set are the processing operations that you
+want done and the parameters required for each operation. The
+processing operations are selected by entering yes or no for each one.
+The following items briefly describe each of the possible processing
+operations and the additional parameters required.
+
+.LP
+\fIfixpix\fR - Fix bad CCD lines and columns?
+.IP
+The bad pixels (cosmetic defects) in the detector are given in a file
+specified by the parameter \fIfixfile\fR. This information is used
+to replace the pixels by interpolating from the neighboring pixels.
+A standard file for your instrument may be set by \fBsetinstrument\fR
+or if the word "image" is given then the file is defined in the instrument
+data file. For more on the bad pixel file see \fBinstruments\fR.
+.LP
+\fIoverscan\fR - Apply overscan strip correction?
+.IP
+The overscan or prescan region is specified by the parameter
+\fIbiassec\fR. This is given as an IRAF image section. The overscan
+region is averaged along the readout axis, specified by the parameter
+\fIreadaxis\fR, to create a one dimensional bias vector. This bias is
+fit by a function to remove cosmic rays and noise. There are a number
+of parameters at the end of the parameter list which control the
+fitting. The default overscan bias section and fitting parameters for
+your instrument should be set by \fBsetinstrument\fR. If the word
+"image" is given the overscan bias section is defined in the image
+header or the instrument translation file. If an overscan section is
+not set you can use \fBimplot\fR to determine the columns or rows for
+the bias region and define an overscan image section. If you are
+unsure about image sections consult with someone or read the
+introductory IRAF documentation.
+.LP
+\fItrim\fR - Trim the image?
+.IP
+The image is trimmed to the image section given by the parameter
+\fItrimsec\fR. A default trim section for your instrument should be
+set by \fBsetinstrument\fR, however, you may override this default if
+desired. If the word "image" is given the data
+image section is given in the image header or the instrument
+translation file. As with the overscan image section it is
+straightforward to specify, but if you are unsure consult someone.
+.LP
+\fIzerocor\fR - Apply zero level correction?
+.IP
+The zero level image to be subtracted is specified by the parameter
+\fIzero\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIdarkcor\fR - Apply dark count correction?
+.IP
+The dark count image to be subtracted is specified by the parameter
+\fIdark\fR. If none is given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIflatcor\fR - Apply flat field correction?
+.IP
+The flat field images to be used are specified by the parameter
+\fIflat\fR. There must be one flat field image for each filter
+or subset (see \fBsubsets\fR) to be processed. If a flat field
+image is not given then the calibration image will be sought
+in the list of images to be processed.
+.LP
+\fIreadcor\fR - Convert zero level image to readout correction?
+.IP
+If a one dimensional zero level readout correction vector is to be subtracted
+instead of a two dimensional zero level image then, when this parameter is set,
+the zero level images will be averaged to one dimension. The readout axis
+must be specified by the parameter \fIreadaxis\fR. The default for your
+instrument is set by \fBsetinstrument\fR.
+.LP
+\fIscancor\fR - Convert flat field image to scan correction?
+.IP
+If the instrument is operated in a scan mode then a correction to the
+flat field may be required. There are two types of scan modes, "shortscan"
+and "longscan". In longscan mode flat field images will be averaged
+to one dimension and the readout axis must be specified. Shortscan mode
+is a little more complicated. The scan correction is used if the flat
+field images are not observed in scan mode. The number of scan lines
+must be specified by the parameter \fInscan\fR. If they are observed in
+scan mode, like the object observations, then the scan correction
+operations should \fInot\fR be specified. For details of scan mode operations
+see \fBccdproc\fR. The scan parameters
+should be set by \fBsetinstrument\fR. If in doubt consult someone
+familiar with the instrument and mode of operation.
+.LP
+
+ This description of the parameters is longer than the actual operation of
+setting the parameters. The only parameters likely to change during processing
+are the calibration image parameters.
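+
+As an illustration, the processing switches and calibration image
+parameters may also be set individually from the CL rather than with
+\fBeparam\fR; the calibration image names below are only illustrative:
+
+.nf
+.ft L
+	cl> ccdproc.overscan = yes
+	cl> ccdproc.trim = yes
+	cl> ccdproc.flatcor = yes
+	cl> ccdproc.zero = "Zero"
+	cl> ccdproc.flat = "FlatV,FlatB,FlatR"
+.ft R
+.fi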
+
+ When processing many images using the same calibration files a modest
+performance improvement can be achieved by keeping (caching) the
+calibration images in memory to avoid disk accesses. This option
+is available by specifying the amount of memory available for image
+caching with the parameter \fImax_cache\fR. If the value is zero then
+the images are accessed from disk as needed while if there is
+sufficient memory the calibration images may be kept in memory during
+the task execution.
+.NH
+Processing Your Data
+.LP
+ The processing path depends on the type of data, the type of
+instrument, types of calibration images, and the observing
+sequence. In this section we describe two types of operations common
+in reducing most data; combining calibration images and performing the
+standard calibration and correction operations. Some additional special
+operations are described in the following section.
+
+ However, the first thing you might want to try before any
+processing is to get a listing of the CCD images showing the CCD image
+types, subsets, and processing flags. The task for this is
+\fBccdlist\fR. It has three types of output: a short one line per
+image format, a longer format which shows the state of the processing,
+and a format which prints the image names only (used to create files
+containing lists of images of a particular CCD image type). To get a
+quick listing type:
+
+.nf
+.ft L
+ cl> ccdlist *.imh
+ ccd001.imh[544,512][short][unknown][V]:FOCUS L98-193
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+ ccd045.imh[544,512][short][flat][V]:dflat 5s
+ ccd066.imh[544,512][short][flat][B]:dflat 5s
+ ccd103.imh[544,512][short][flat][R]:dflat 5s
+ ccd104.imh[544,512][short][zero][]:bias
+ ccd105.imh[544,512][short][dark][]:dark 3600s
+.ft R
+.fi
+
+ The example shows only a sample of the images. The short format
+listing tells you the name of the image, its size and pixel type, the
+CCD image type as seen by the package, the subset identifier (in this
+case the filter), and the title. If the data had been processed then
+there would also be processing flags. If the CCD image types do not
+seem right then there may be a problem with the instrument
+specification.
+
+ Many of the tasks in the \fBccdred\fR package have the parameter
+\fIccdtype\fR which selects a particular type of image. To list
+only the object images from the previous example:
+
+.nf
+.ft L
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[544,512][short][object][V]:N2968 V 600s
+ ccd015.imh[544,512][short][object][B]:N3098 B 500s
+ ccd024.imh[544,512][short][object][R]:N4036 R 600s
+.ft R
+.fi
+
+If no CCD image type is specified (by using the null string "")
+then all image types are selected. This may be
+necessary if your instrument data does not contain image type identifications.
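+
+For example, to list every image regardless of type:
+
+.ft L
+	cl> ccdlist *.imh ccdtype=""
+.ft R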
+.NH 2
+Combining Calibration Images
+.LP
+ If you do not need to combine calibration images because you only
+have one image of each type, you can skip this section. Calibration
+images, particularly zero level and flat field images, are combined in
+order to minimize the effects of noise and reject bad pixels in the
+calibrations. The basic tool for combining images is the task
+\fBcombine\fR. There are simple variants of this task whose default
+parameters are set appropriately for each type of calibration image.
+These are the ones you will use for calibration images leaving
+\fBcombine\fR for combining object images. Zero level images are
+combined with \fBzerocombine\fR, dark count images with
+\fBdarkcombine\fR, and flat field images with \fBflatcombine\fR.
+
+ For example, to combine flat field images the command is:
+
+.nf
+.ft L
+ cl> flatcombine *.imh
+ Jun 1 14:26 combine: maxreject
+ Images N Exp Mode Scale Offset Weight
+ ccd045.imh 1 5.0 INDEF 1.000 0. 0.048
+ ccd046.imh 1 5.0 INDEF 1.000 0. 0.048
+ \fI<... list of files ...>\fL
+ ccd065.imh 1 5.0 INDEF 1.000 0. 0.048
+ ----------- ------ ------
+ FlatV.imh 21 5.0
+.ft R
+.fi
+
+This output is printed when verbose mode is set. The same information
+is recorded in the log file. In this case the flat fields are combined
+by rejecting the maximum value at each point in the image (the
+"maxreject" algorithm). The images are scaled by the exposure times,
+which are all the same in this example. The mode is not evaluated for
+exposure scaling and the relative weights are the same because the
+exposure times are the same. The example only shows part of the
+output; \fBflatcombine\fR automatically groups the flat field images by
+filter to produce the calibration images "FlatV", "FlatB", and
+"FlatR".
+.NH 2
+Calibrations and Corrections
+.LP
+ Processing the CCD data is easy and largely automated.
+First, set the task parameters with the following command:
+
+.ft L
+ cl> eparam ccdproc
+.ft R
+
+You may have already set the parameters when you ran
+\fBsetinstrument\fR, though the calibration image parameters
+\fIzero\fR, \fIdark\fR, and \fIflat\fR may still need to be set or
+changed. Once this is done simply give the command
+
+.nf
+.ft L
+ cl> ccdproc *.imh
+ ccd003: Jun 1 15:13 Overscan section is [520:540,*] with mean=485.0
+ ccd003: Jun 1 15:14 Trim data section is [3:510,3:510]
+ ccd003: Jun 1 15:14 Overscan section is [520:540,*] with mean=485.0
+ FlatV: Jun 1 15:14 Trim data section is [3:510,3:510]
+ FlatV: Jun 1 15:15 Overscan section is [520:540,*] with mean=486.4
+ ccd003: Jun 1 15:15 Flat field image is FlatV.imh with scale=138.2
+ ccd004: Jun 1 15:16 Trim data section is [3:510,3:510]
+ ccd004: Jun 1 15:16 Overscan section is [520:540,*] with mean=485.2
+ ccd004: Jun 1 15:16 Flat field image is FlatV.imh with scale=138.2
+ \fI<... more ...>\fL
+ ccd013: Jun 1 15:22 Trim data section is [3:510,3:510]
+ ccd013: Jun 1 15:23 Overscan section is [520:540,*] with mean=482.4
+ FlatB: Jun 1 15:23 Trim data section is [3:510,3:510]
+ FlatB: Jun 1 15:23 Overscan section is [520:540,*] with mean=486.4
+ ccd013: Jun 1 15:24 Flat field image is FlatB.imh with scale=132.3
+ \fI<... more ...>\fL
+.ft R
+.fi
+
+ The output shown is with verbose mode set. It is the same as
+recorded in the log file. It illustrates the principle of automatic
+calibration image processing. The first object image, "ccd003", was
+being processed when the flat field image was required. Since the
+image was taken with the V filter the appropriate flat field was
+determined to be "FlatV". Since it had not been processed, the
+processing of "ccd003" was interrupted to process "FlatV". The
+processed calibration image may have been cached if there was enough
+memory. Once "FlatV" was processed (note that the flat field was not
+flattened because the task knows this image is a flat field) the
+processing of "ccd003" was completed. The next image, "ccd004", is
+also a V filter image so the already processed, and possibly cached,
+flat field "FlatV" is used again. The first B band image is "ccd013"
+and, as before, the B filter flat field calibration image is processed
+automatically. The same automatic calibration processing and image
+caching occurs when using zero level and dark count calibration
+images.
+
+ Commonly the processing is done with the verbose mode turned off
+and the task run as a background job. This is done with the commands
+
+.nf
+.ft L
+ cl> ccdred.verbose=no
+ cl> ccdproc *.imh &
+.ft R
+.fi
+
+The already processed images in the input list are recognized as having been
+processed and are not affected. To check the status of the processing we
+can look at the end of the log file with:
+
+.ft L
+ cl> tail logfile
+.ft R
+
+After processing we can repeat the \fBccdlist\fR command to find:
+
+.nf
+.ft L
+ cl> ccdlist *.imh ccdtype=object
+ ccd007.imh[508,508][real][object][V][OTF]:N2968 V 600s
+ ccd015.imh[508,508][real][object][B][OTF]:N3098 B 500s
+	ccd024.imh[508,508][real][object][R][OTF]:N4036 R 600s
+.ft R
+.fi
+
+The processing flags indicate the images have been overscan corrected,
+trimmed, and flat fielded.
+
+    As you can see, processing images is very easy. One source of
+minor confusion for beginning users is dealing with calibration
+images. First, there is no reason that calibration images may not be
+processed explicitly with \fBccdproc\fR; just remember to set
+\fIccdtype\fR to the calibration image type or to "". When processing
+object images the calibration images to be used may be specified either
+with the task parameter for the particular calibration image or by
+including the calibration image in the list of input images. Calibration
+images specified by parameter value take precedence and the task
+does not check their CCD image types. Calibration images given in the
+input list must have a valid CCD image type. If too many
+calibration images are specified, say because the calibration images
+combined to make the master calibration images were not deleted and
+so are part of the image list "*.imh", only the first one will be used.
+Finally, note that flat field, illumination, and fringe images
+are subset (filter) dependent, so a calibration image for each filter
+must be specified.
+.NH
+Special Processing Operations
+.LP
+ The special processing operations are mostly concerned with the
+flat field response correction. There are also special processing
+operations available in \fBccdproc\fR for one dimensional readout
+corrections in the zero level and flat field calibrations. These
+were described briefly above and in more detail in \fBccdproc\fR
+and are not discussed further in this guide. The processing
+operations described in this section are for preparing flat fields
+for two dimensional spectroscopic data, for correcting flat fields
+for illumination effects, for making a separate illumination correction,
+and for applying corrections for fringe effects. For additional
+discussion about flat fields and illumination corrections see the
+help topic \fBflatfields\fR.
+.NH 2
+Spectroscopic Flat Fields
+.LP
+ For spectroscopic data the flat fields may have to be processed to
+remove the general shape of the lamp spectrum and to replace regions outside
+of the aperture where there is no flat field information with values that
+will not cause bad response effects when the flat field is applied to the
+data. If the shape of the lamp spectrum is not important and if the
+longslit spectra have the regions outside of the slit either off the
+detector or trimmed then you may use the flat field without special
+processing.
+
+ First you must process the flat field images explicitly with
+
+.ft L
+ cl> ccdproc *.imh ccdtype=flat
+.ft R
+
+where "*.imh" may be replaced with any list containing the flat fields.
+If zero level and dark count corrections are required these calibration
+images must be available at this time.
+
+ Load the \fBtwodspec\fR package and then either the \fBlongslit\fR
+package, for longslit data, or the \fBapextract\fR package, for
+multiaperture data such as echelles, multifiber, or aperture mask
+spectra. The task for removing the longslit quartz spectrum is
+\fBresponse\fR. There is also a task for removing illumination
+effects, including the slit profile, from longslit spectra called
+\fBillumination\fR. For more about processing longslit spectra see the
+help for these tasks and the paper \fIReduction of Longslit Spectra
+with IRAF\fR. The cookbook \fIReduction of Longslit Spectroscopic
+Data Using IRAF (KPNO ICCD and Cryogenic Camera Data)\fR also provides
+a very good discussion even if your data is from a different instrument.
+
+ For multiaperture data the task for removing the relative shapes of
+the spectra is called \fBapnormalize\fR. Again, consult the help documentation
+for this task for further details. Since you will probably also be
+using the package for extracting the spectra you may be interested
+in the document \fIThe IRAF APEXTRACT Package\fR.
+.NH 2
+Illumination Corrections
+.LP
+ The flat field calibration images may not have the same illumination
+pattern as the observations of the sky due to the way the lamp illuminates the
+optical system. In this case when the flat field correction is applied
+to the data there will be gradients in the sky background. To remove
+these gradients a blank sky calibration image is heavily smoothed
+to produce an illumination image. The illumination image
+is then divided into the images during processing to correct for the
+illumination difference between the flat field and the objects.
+Like the flat fields, the illumination correction images may be subset
+dependent so there should be an illumination image for each subset.
+
+The task which makes illumination correction images is \fBmkskycor\fR.
+Some examples are
+
+.nf
+.ft L
+ cl> mkskycor sky004 Illum004
+ cl> mkskycor sky*.imh ""
+.ft R
+.fi
+
+In the first example the sky image "sky004" is used to make the illumination
+correction image "Illum004". In the second example the sky images are
+converted to illumination correction images by specifying no output image
+names. As with \fBccdproc\fR, if the input images have not been processed they
+are first processed automatically.
+
+To apply the illumination correction
+
+.nf
+.ft L
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=Illum004
+ cl> ccdproc *.imh ccdtype=object illumcor+ illum=sky*.imh
+.ft R
+.fi
+
+The illumination images could also be set using \fBeparam\fR or given
+on the command line.
+.NH 2
+Sky Flat Fields
+.LP
+ You will notice that when you process images with an illumination
+correction you are dividing each image by a flat field calibration and
+an illumination correction. If the illumination corrections are not
+done as a later step but at the same time as the rest of the processing
+one will get the same calibration by multiplying the flat field by
+the illumination correction and using this product alone as the
+flat field. Such an image is called a \fIsky flat\fR since it is
+a flat field which has been corrected to yield a flat sky when applied
+to the observations. This approach has the advantage of one less
+calibration image and two less computations (scaling and dividing the
+illumination correction). As an added short cut, rather than compute
+the illumination image with \fBmkskycor\fR and then multiplying, the
+task \fBmkskyflat\fR does all this in one step. Thus, \fBmkskyflat\fR
+takes an input blank sky image, processes it if needed, determines the
+appropriate flat field (sky flats are also subset dependent) from the
+\fBccdproc\fR parameters or the input image list, and produces an
+output sky flat. Further if no output image is specified the task
+converts the input blank sky calibration image into a sky flat.
+
+ Two examples in which a new image is created and in which the
+input images are converted to sky flats are
+
+.nf
+.ft L
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky*.imh ""
+.ft R
+.fi
+.NH 2
+Illumination Corrected Flat Fields
+.LP
+ A third method to account for illumination problems in the flat fields
+is to remove the large scale pattern from the flat field itself. This is
+useful if there are no reasonable blank sky calibration images and the
+astronomical exposures are evenly illuminated but the flat fields are not.
+This is done by smoothing the flat field images instead of blank sky
+images. As with using the sky images there are two methods, creating
+an illumination correction to be applied as a separate step or fixing
+the original flat field. The smoothing algorithm is
+the same as that used in the other tasks. The tasks to make these types
+of corrections are \fBmkillumcor\fR and \fBmkillumflat\fR. The usage
+is pretty much the same as the other illumination correction tasks
+except that it is more reasonable to replace the original flat fields
+by the corrected flat fields when fixing the flat field. Examples
+of an illumination correction and removing the illumination pattern
+from the flat field are
+
+.nf
+.ft L
+ cl> mkillumcor flat025 Illum025
+ cl> mkillumflat flat*.imh ""
+.ft R
+.fi
+
+As with the other tasks, the input images are processed if necessary.
+.NH 2
+Fringe Corrections
+.LP
+ Some CCD detectors suffer from fringing effects due to the night
+sky emission lines which are not removed by the other calibration
+and correction operations. To correct for the fringing you need a
+really blank sky image. There is not yet a task to remove objects from
+sky images because this is often done with an interactive image display
+tool (which will soon be added). The blank sky image is heavily smoothed
+to determine the mean sky background and then this is subtracted from the
+original image. The image should then be essentially zero except for the
+fringe pattern. This fringe correction image is scaled to the same
+exposure time as the image to be corrected and then subtracted to remove
+the fringing. Note that since the night sky lines are variable there
+may need to be an additional scaling applied. Determining this scaling
+requires either an interactive display tool or a very clever task.
+Such tasks will also be added in the future.
+
+ The task to make a fringe correction image is \fBmkfringecor\fR.
+The sky background is determined in exactly the same way as the illumination
+pattern; in fact, the same sky image may be used for both the sky
+illumination and for the fringe correction. The task works consistently
+with the "mk" tasks in that the input images are processed first if needed
+and then the output correction image is produced with the specified name
+or replaces the input image if no output image is specified.
+As examples,
+
+.nf
+.ft L
+ cl> mkfringecor sky004 Fringe
+ cl> mkfringecor sky*.imh ""
+.ft R
+.fi
+.NH
+Demonstration
+.LP
+ A simple demonstration task is available. To run this demonstration
+load the \fBccdtest\fR package; this is a subpackage of the main
+\fBccdred\fR package. Then simply type
+
+.ft L
+ cl> demo
+.ft R
+
+The demonstration will then create some artificial CCD data and reduce
+them, giving descriptive comments as it goes along. This demonstration uses
+the "playback" facility of the command language and is actually substituting
+its own commands for terminal input. Initially you must type carriage return
+or space after each comment ending with "...". If you wish to have the
+demonstration run completely automatically at its own speed then type 'g'
+at the "..." prompt. Thereafter, it will simply pause long enough to give
+you a chance to read the comments. When the demo is finished you will
+need to remove the files created. However, feel free to examine the reduced
+images, the log file, etc. \fINote that the demonstration changes the
+setup parameters so be sure to run \fBsetinstrument\fI again and check
+the setup parameters.\fR
+.NH
+Summary
+.LP
+ The \fBccdred\fR package is very easy to use. First load the package;
+it is in the \fBimred\fR package which is in the \fBnoao\fR package.
+If this is your first time reducing data from a particular instrument
+or if you have changed instruments then run \fBsetinstrument\fR.
+Set the processing parameters for the operations you want performed.
+If you need to combine calibration images to form a master calibration
+image use one of the combine tasks. Spectroscopic flat fields may
+need to be processed first in order to remove the lamp spectrum.
+Finally, just type
+
+.ft L
+ cl> ccdproc *.imh&
+.ft R
+.SH
+References
+.LP
+ A general guide to using IRAF is \fIA User's Introduction to the IRAF
+Command Language\fR. This document may be found in the IRAF documentation
+sets and is available from the National Optical Astronomy Observatories,
+Central Computer Services (NOAO-CCS).
+
+    For a more detailed description of the \fBccdred\fR package, including
+a discussion of the design and some of the algorithms, see \fIThe IRAF
+CCD Reduction Package -- CCDRED\fR by F. Valdes. This paper is available
+from NOAO-CCS and appears in the proceedings of the Santa Cruz Summer
+Workshop in Astronomy and Astrophysics, \fIInstrumentation for Ground-Based
+Optical Astronomy: Present and Future\fR, edited by Lloyd B. Robinson and
+published by Springer-Verlag.
+
+ The task descriptions and supplementary documentation are available
+in printed form in the IRAF documentation sets, a special set
+containing documentation for just the \fBccdred\fR package, and on-line
+through the help task by typing
+
+.ft L
+ cl> help \fItopic\fR
+.ft R
+
+where \fItopic\fR is one of the following.
+
+.nf
+.ft L
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdlist - List CCD processing information
+ ccdproc - Process CCD images
+ ccdtest - CCD test and demonstration package
+ combine - Combine CCD images
+ cosmicrays - Detect and replace cosmic rays
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+setinstrument - Set instrument parameters
+ zerocombine - Combine and process zero level images
+
+ ADDITIONAL HELP TOPICS
+
+ ccdred - CCD image reduction package
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ subsets - Description of CCD subsets
+.ft R
+.fi
+
+Printed copies of the on-line help documentation may be made with the
+command
+
+.ft L
+ cl> help \fItopic\fL | lprint
+.ft R
+
+ In addition to the package documentation for \fBccdred\fR,
+\fBlongslit\fR, and \fBapextract\fR there may be specific guides for
+certain instruments. These specific guides, called "cookbooks", give
+specific examples and parameter values for the CCD data.
diff --git a/noao/imred/quadred/src/quad/doc/instruments.hlp b/noao/imred/quadred/src/quad/doc/instruments.hlp
new file mode 100644
index 00000000..2b2c92f7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/instruments.hlp
@@ -0,0 +1,248 @@
+.help instruments Jun87 noao.imred.ccdred
+
+.ih
+NAME
+instruments -- Instrument specific data files
+.ih
+DESCRIPTION
+The \fBccdred\fR package has been designed to accommodate many different
+instruments, detectors, and observatories. This is done by having
+instrument specific data files. Note that by instrument we mean a
+combination of detector, instrument, application, and observatory, so
+there might be several "instruments" associated with a particular CCD
+detector. Creating and maintaining the instrument files is generally
+the responsibility of the support staff, though the user may create or
+copy and modify his/her own instrument/application specific files. The
+task \fBsetinstrument\fR makes this information available to the user
+and package easily.
+
+There are three instrument data files, all of which are optional. The
+package may be used without the instrument files but much of the
+convenience of the package, particularly with respect to using the CCD
+image types, will be lost. The three files are an instrument image
+header translation file, an initialization task which mainly sets
+default task parameters, and a bad pixel file identifying the cosmic
+bad pixels in the detector. These files are generally stored in a
+system data directory which is a subdirectory of the logical
+directory "ccddb$". Each file has a root name which identifies the
+instrument.
+.sh
+1. Instrument Translation File
+The instrument translation file translates the parameter names used by
+the \fBccdred\fR package into instrument specific parameters and also
+supplies instrument specific default values. The package parameter
+\fIccdred.instrument\fR specifies this file to the package. The task
+\fBsetinstrument\fR sets this parameter, though it can be set
+explicitly like any other parameter. For the standard instrument
+translation file the root name is the instrument identification and the
+extension is "dat" ("*.dat" files are protected from being removed in a
+"stripped" system, i.e. when all nonessential files are removed).
+Private instrument files may be given any name desired.
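+
+For example, the translation file may be set explicitly from the CL,
+which is essentially what \fBsetinstrument\fR does:
+
+.nf
+	cl> ccdred.instrument = "ccddb$kpno/example.dat"
+.fi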
+
+The instrument translation proceeds as follows. When a package task needs
+a parameter for an image, for example "imagetyp", it looks in the instrument
+translation file. If the file is not found or none is specified then the
+image header keyword that is requested has the same name. If an
+instrument translation file is defined then the requested
+parameter is translated to an image header keyword, provided a translation
+entry is given. If no translation is given the package name is used. For
+example the package parameter "imagetyp" might be translated to "data-typ"
+(the old NOAO CCD keyword). If the parameter is not found then the default
+value specified in the translation file, if present, is returned. For recording
+parameter information in the header, such as processing flags, the
+translation is also used. The default value has no meaning in this case.
+For example, if the flag specifying that the image has been corrected
+by a flat field is to be set then the package parameter name "flatcor"
+might be translated to "ff-flag". If no translation is given then the
+new image header parameter is entered as "flatcor".
+
+The translation file consists of lines containing the package
+parameter name, followed by the image header keyword, followed by the
+default value. The first two fields are parameter names. The fields
+are separated by whitespace (blanks and tabs). String default values
+containing blanks must be quoted. An example is given below.
+
+.nf
+ exptime itime
+ darktime itime
+ imagetyp data-typ
+ subset f1pos
+ biassec biassec [411:431,2:573]
+ datasec datasec [14:385,2:573]
+
+ fixpix bp-flag 0
+ overscan bt-flag 0
+ zerocor bi-flag 0
+ darkcor dk-flag 0
+ flatcor ff-flag 0
+ fringcor fr-flag 0
+.fi
+
+The first two lines translate the exposure time and dark time keywords.
+The next two lines translate the CCD image type and the subset parameter,
+without default values (see \fBccdtypes\fR and \fBsubsets\fR for more
+information). The last two lines give the overscan bias strip
+section and the data section with default values for the instrument.
+Note that these parameters may be overridden in the task \fBccdproc\fR.
+The blank line is ignored.
+
+The next set of translations requires further discussion. For processing
+flags the package assumes that the absence of a keyword means that the
+processing has not been done. If processing is always to be done with
+the \fBCCDRED\fR package and no processing keywords are recorded in the raw data
+then these parameters should be absent (unless you don't like the names
+used by the package). However, for compatibility with the original NOAO
+CCD images, which may be processed outside of IRAF and which use 0 as the
+no processing value, the processing flags are translated and the false values
+are indicated by the default values.
+
+In addition to the parameter name translations the translation file
+contains translations between the value of the image type parameter
+and the image types used by the package. These lines
+consist of the image header type string as the first field (with quotes
+if there are blanks) and the image type as recognized by the package. The
+following example will make this clearer.
+
+.nf
+ 'OBJECT (0)' object
+ 'DARK (1)' dark
+ 'PROJECTOR FLAT (2)' flat
+ 'SKY FLAT (3)' other
+ 'COMPARISON LAMP (4)' other
+ 'BIAS (5)' zero
+ 'DOME FLAT (6)' flat
+.fi
+
+The values of the image type strings in the header contain blanks so they
+are quoted. Also the case of the strings is important. Note that there
+are two types of flat field images and three types of object images.
+
+The CCD image types recognized by the package are:
+
+.nf
+ zero - zero level image such as a bias or preflash
+ dark - dark count image
+ flat - flat field image
+	illum - illumination image such as a sky image
+ fringe - fringe correction image
+ object - object image
+.fi
+
+There may be more than one image type that maps to the same package
+type. In particular other standard CCD image types, such as comparison
+spectra, multiple exposure, standard star, etc., should be mapped to
+object or other. There may also be more than one type of flat field,
+i.e. dome flat, sky flat, and lamp flat. For more on the CCD image
+types see \fBccdtypes\fR.
+
+The complete set of package parameters are given below.
+The package parameter names are generally the same as the
+standard image header keywords being adopted by NOAO.
+
+.nf
+ General Image Header and Default Parameters
+ ccdmean darktime exptime fixfile
+ imagetyp ncombine biassec subset
+ title datasec
+
+ CCDRED Processing Flags
+ ccdproc darkcor fixpix flatcor
+ fringcor illumcor overscan trim
+ zerocor
+
+ CCDRED CCD Image Types
+ dark flat fringe illum
+ none object unknown zero
+.fi
+
+The translation mechanism described here may become more
+sophisticated in the future and a general IRAF system facility may be
+implemented eventually. For the present the translation mechanism is
+quite simple.
+.sh
+2. Instrument Setup Script
+The task \fBsetinstrument\fR translates an instrument ID into a
+CL script in the instrument directory. This script is then executed.
+Generally this script simply sets the task parameters for an
+instrument/application. However, it could do anything else the support
+staff desires. Below are the first few lines of a typical instrument setup
+script.
+
+.nf
+ ccdred.instrument = "ccddb$kpno/example.dat"
+ ccdred.pixeltype = "real"
+ ccdproc.fixpix = yes
+ ccdproc.overscan = yes
+ ccdproc.trim = yes
+ ccdproc.zerocor = no
+ ccdproc.darkcor = no
+ ccdproc.flatcor = yes
+ ccdproc.biassec = "[411:431,2:573]"
+ ccdproc.datasec = "[14:385,2:573]"
+.fi
+
+The instrument parameter should always be set unless there is no
+translation file for the instrument. The \fBccdproc\fR parameters
+illustrate setting the appropriate processing flags for the
+instrument. The overscan bias and trim data sections show an alternate
+method of setting these instrument specific parameters. They may be
+set in the setup script in which case they are given explicitly in the
+user parameter list for \fBccdproc\fR. If the value is "image" then
+the parameters may be determined either through the default value in
+the instrument translation file, as illustrated in the previous
+section, or from the image header itself.
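+
+For example, the last two lines of the setup script above could instead
+defer these sections to the image header or to the translation file
+defaults by setting:
+
+.nf
+        ccdproc.biassec = "image"
+        ccdproc.datasec = "image"
+.fi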
+
+The instrument setup script for setting default task parameters may be
+easily created by the support person as follows. Set the package
+parameters using \fBeparam\fR or with CL statements. Setting the
+parameters might involve testing. When satisfied with the way the
+package is set then the parameters may be dumped to a setup script
+using the task \fBdparam\fR. The final step is editing this script to
+delete unimportant and query parameters. For example,
+
+.nf
+ cl> dparam ccdred >> file.cl
+ cl> dparam ccdproc >> file.cl
+ cl> dparam combine >> file.cl
+ ...
+ cl> ed file.cl
+.fi
+.sh
+3. Instrument Bad Pixel File
+The bad pixel file describes the bad pixels, columns, and lines in the
+detector which are to be replaced by interpolation when processing the
+images. This file is clearly detector specific. The file consists of
+lines describing rectangular regions of the image.
+The regions are specified by four numbers giving the starting and ending
+columns followed by the starting and ending lines. The starting and
+ending points may be the same to specify a single column or line. The
+example below illustrates a bad pixel file.
+
+.nf
+ # RCA1 CCD untrimmed
+ 25 25 1 512
+ 108 108 1 512
+ 302 302 403 512
+ 1 512 70 70
+ 245 246 312 315
+.fi
+
+If there is a comment line in the file containing the word "untrimmed"
+then the coordinates of the bad pixel regions apply to the original image.
+If the image has been trimmed and the bad pixels are replaced at a later
+stage then this word indicates that the trim region be determined from the
+image header and the necessary coordinate conversion made. If the word
+"untrimmed" does not appear then the coordinates are assumed to apply to
+the image directly; i.e. the trimmed coordinates if the image has been
+trimmed or the original coordinates if the image has not been trimmed.
+The standard bad pixel files should always refer to the original, untrimmed
+coordinates.
+
+The first two bad pixel regions are complete bad columns (the image
+is 512 x 512), the next line is a partial bad column, the next line is
+a bad line, and the last line is a small bad region. These files are
+easy to create, provided you have a good image to work from and a way
+to measure the positions with an image or graphics display.
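+
+For example (a sketch; the image name is illustrative), a good flat
+field or zero exposure might be displayed and the bad columns measured
+interactively before typing the coordinates into the file:
+
+.nf
+        cl> display flat001 1
+        cl> implot flat001
+.fi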
+.ih
+SEE ALSO
+ccdtypes, subsets, setinstrument
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/mkfringecor.hlp b/noao/imred/quadred/src/quad/doc/mkfringecor.hlp
new file mode 100644
index 00000000..797f4d11
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/mkfringecor.hlp
@@ -0,0 +1,90 @@
+.help mkfringecor Feb88 noao.imred.ccdred
+.ih
+NAME
+mkfringecor -- Make fringe correction images from sky images
+.ih
+USAGE
+mkfringecor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making fringe correction images.
+.le
+.ls output
+List of output fringe correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed background.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+The input blank sky images are automatically processed up through the
+illumination correction before computing the fringe correction images.
+The fringe corrections are subset dependent.
+The slowly varying background is determined and subtracted leaving only
+the fringe pattern caused by the sky emission lines. These fringe images
+are then scaled and subtracted from the observations by \fBccdproc\fR.
+The background is determined by heavily smoothing the image using a
+moving "boxcar" average. The effects of the objects and fringes in the
+image are minimized by using a sigma clipping algorithm to detect and
+exclude them from the average. Note, however, that objects left in the
+fringe image will affect the fringe corrected observations. Any objects
+in the sky image should be removed using \fBskyreplace\fR (not yet
+available).
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
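+
+As a worked example (illustrative numbers), with the default box
+parameters on a 512 x 512 image the averaging box grows from 5 x 5
+pixels at the image edges to 0.25 * 512 = 128 pixels on a side near
+the center of the image.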
+
+To minimize the effects of the fringes and any objects in the blank sky
+calibration images a sigma clipping algorithm is used to detect and
+exclude features from the background. This is done by computing the
+rms of the image lines relative to the smoothed background and
+excluding points exceeding the specified threshold factors times the
+rms. This is done before each image line is added to the moving
+average, except for the first few lines where an iterative process is
+used.
+.ih
+EXAMPLES
+1. The two examples below make a fringe correction image from a blank
+sky image, "sky017". In the first example a separate fringe
+image is created and in the second the fringe image replaces the
+sky image.
+
+.nf
+        cl> mkfringecor sky017 Fringe
+        cl> mkfringecor sky017 sky017
+.fi
+.ih
+SEE ALSO
+ccdproc
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/mkillumcor.hlp b/noao/imred/quadred/src/quad/doc/mkillumcor.hlp
new file mode 100644
index 00000000..0effd7a2
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/mkillumcor.hlp
@@ -0,0 +1,92 @@
+.help mkillumcor Oct88 noao.imred.ccdred
+.ih
+NAME
+mkillumcor -- Make flat field illumination correction images
+.ih
+USAGE
+mkillumcor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making flat field illumination correction images.
+.le
+.ls output
+List of output flat field illumination correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = "flat"
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude deviant points from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls divbyzero = 1.
+The illumination correction is the inverse of the smoothed flat field.
+This may produce division by zero. A warning is given if division
+by zero takes place and the result (the illumination correction value)
+is replaced by the value of this parameter.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+First, the input flat field images are automatically processed if
+needed. Then, the large scale illumination pattern of the images is
+determined by heavily smoothing them using a moving "boxcar" average.
+The illumination correction, the inverse of the illumination pattern,
+is applied by \fBccdproc\fR to CCD images to remove the illumination
+pattern introduced by the flat field. The combination of the flat
+field calibration and the illumination correction based on the flat
+field is equivalent to removing the illumination from the flat field
+(see \fBmkillumflat\fR). This two step calibration is generally used
+when the observations have been previously flat field calibrated. This
+task is closely related to \fBmkskycor\fR which determines the
+illumination correction from a blank sky image; this is preferable to
+using the illumination from the flat field as it corrects for the
+residual illumination error. For a general discussion of the options
+for flat fields and illumination corrections see \fBflatfields\fR.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+To minimize the effects of bad pixels a sigma clipping algorithm is
+used to detect and reject these pixels from the illumination. This is
+done by computing the rms of the image lines relative to the smoothed
+illumination and excluding points exceeding the specified threshold
+factors times the rms. This is done before each image line is added to
+the moving average, except for the first few lines where an iterative
+process is used.
+.ih
+EXAMPLES
+1. The example below makes an illumination correction image from the
+flat field image, "flat017".
+
+.nf
+        cl> mkillumcor flat017 Illum
+.fi
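+
+2. The correction might then be applied with \fBccdproc\fR. The sketch
+below assumes the standard \fBccdproc\fR illumination parameters
+\fIillumcor\fR and \fIillum\fR; the object image list is illustrative:
+
+.nf
+        cl> ccdproc obj*.imh illumcor+ illum=Illum
+.fi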
+.ih
+SEE ALSO
+ccdproc, flatfields, mkillumflat, mkskycor, mkskyflat
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/mkillumflat.hlp b/noao/imred/quadred/src/quad/doc/mkillumflat.hlp
new file mode 100644
index 00000000..8288fb85
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/mkillumflat.hlp
@@ -0,0 +1,101 @@
+.help mkillumflat Oct88 noao.imred.ccdred
+.ih
+NAME
+mkillumflat -- Make illumination corrected flat fields
+.ih
+USAGE
+mkillumflat input output
+.ih
+PARAMETERS
+.ls input
+List of input flat field images to be illumination corrected.
+.le
+.ls output
+List of output illumination corrected flat field images.
+If none is specified or if the name is the same as the
+input image then the output image replaces the input image.
+.le
+.ls ccdtype = "flat"
+CCD image type to select from the input images.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls divbyzero = 1.
+The illumination flat field is the ratio of the flat field to a
+smoothed flat field. This may produce division by zero. A warning is
+given if division by zero takes place and the result (the illumination
+corrected flat field value) is replaced by the value of this
+parameter.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+First, the input flat field images are processed as needed. Then the
+large scale illumination pattern of the images is removed. The
+illumination pattern is determined by heavily smoothing the image using
+a moving "boxcar" average. The output image is the ratio of the input
+image to the illumination pattern. The illumination pattern is
+normalized by its mean to preserve the mean level of the input image.
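+
+Schematically, if "smooth" denotes the boxcar average described below,
+the operation performed is
+
+.nf
+        output = input / (smooth(input) / mean(smooth(input)))
+.fi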
+
+When this task is applied to flat field images only the small scale
+response effects are retained. This is appropriate if the flat field
+images have illumination effects which differ from the astronomical
+images and blank sky images are not available for creating sky
+corrected flat fields. When a high quality blank sky image is
+available the related task \fBmkskyflat\fR should be used. Note that
+the illumination correction, whether from the flat field or a sky
+image, may be applied as a separate step by using the task
+\fBmkillumcor\fR or \fBmkskycor\fR and applying the illumination
+correction as a separate operation in \fBccdproc\fR. However, creating
+an illumination corrected flat field image before processing is more
+efficient since one less operation per image processed is needed. For
+more discussion about flat fields and illumination corrections see
+\fBflatfields\fR.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+To minimize the effects of bad pixels a sigma clipping algorithm is
+used to detect and reject these pixels from the illumination. This is
+done by computing the rms of the image lines relative to the smoothed
+illumination and excluding points exceeding the specified threshold
+factors times the rms. This is done before each image line is added to
+the moving average, except for the first few lines where an iterative
+process is used.
+.ih
+EXAMPLES
+1. Two examples in which a new image is created and in which the
+input flat fields are corrected in place are:
+
+.nf
+        cl> mkillumflat flat004 FlatV
+ cl> mkillumflat flat* ""
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkfringecor, mkillumcor, mkskycor, mkskyflat
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/mkskycor.hlp b/noao/imred/quadred/src/quad/doc/mkskycor.hlp
new file mode 100644
index 00000000..15cfacf6
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/mkskycor.hlp
@@ -0,0 +1,103 @@
+.help mkskycor Feb88 noao.imred.ccdred
+.ih
+NAME
+mkskycor -- Make sky illumination correction images
+.ih
+USAGE
+mkskycor input output
+.ih
+PARAMETERS
+.ls input
+List of input images for making sky illumination correction images.
+.le
+.ls output
+List of output flat field illumination correction images. If none is
+specified or if the name is the same as the input image then the output
+image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images. If none is specified
+then all types are used.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls ccdproc (parameter set)
+CCD processing parameters.
+.le
+.ih
+DESCRIPTION
+The large scale illumination pattern of the input images, generally
+blank sky calibration images, is determined by heavily smoothing
+the image using a moving "boxcar" average. The effects of objects in
+the image may be minimized by using a sigma clipping algorithm to
+detect and exclude the objects from the average. This
+illumination image is applied by \fBccdproc\fR to CCD images to remove
+the illumination pattern.
+
+The input images are automatically processed up through flat field
+calibration before computing the illumination. The illumination
+correction is that needed to make the processed images flat
+over large scales. The input images are generally blank sky calibration
+images which have the same illumination and instrumental effects
+as the object observations. Object images may be used but removal
+of the objects may not be very good, particularly for large, bright objects.
+For further discussion of flat fields and illumination corrections
+see \fBflatfields\fR.
+
+You will notice that when you process images with an illumination
+correction you are dividing each image by a flat field calibration and
+an illumination correction. If the illumination corrections are not
+done as a later step but at the same time as the rest of the processing
+one will get the same calibration by multiplying the flat field by the
+illumination correction and using this product alone as the flat
+field. This approach has the advantage of one less calibration image
+and two fewer computations (scaling and dividing the illumination
+correction). Such an image, called a \fIsky flat\fR, may be created by
+\fBmkskyflat\fR as an alternative to this task.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+Blank sky images may not be completely blank so a sigma clipping
+algorithm may be used to detect and exclude objects from the
+illumination pattern. This is done by computing the rms of the image
+lines relative to the smoothed background and excluding points
+exceeding the specified threshold factors times the rms. This is done
+before each image line is added to the moving average, except for the
+first few lines where an iterative process is used.
+.ih
+EXAMPLES
+1. The two examples below make an illumination image from a blank sky image,
+"sky017". In the first example a separate illumination image is created
+and in the second the illumination image replaces the sky image.
+
+.nf
+ cl> mkskycor sky017 Illum
+ cl> mkskycor sky017 sky017
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkillumcor, mkillumflat, mkskyflat
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/mkskyflat.hlp b/noao/imred/quadred/src/quad/doc/mkskyflat.hlp
new file mode 100644
index 00000000..3d9ac1ca
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/mkskyflat.hlp
@@ -0,0 +1,111 @@
+.help mkskyflat Feb88 noao.imred.ccdred
+.ih
+NAME
+mkskyflat -- Make sky corrected flat field images
+.ih
+USAGE
+mkskyflat input output
+.ih
+PARAMETERS
+.ls input
+List of blank sky images to be used to create sky corrected flat field
+calibration images.
+.le
+.ls output
+List of output sky corrected flat field calibration images (called
+sky flats). If none is specified or if the name is the same as the
+input image then the output image replaces the input image.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input images.
+.le
+.ls xboxmin = 5, xboxmax = 0.25, yboxmin = 5, yboxmax = 0.25
+Minimum and maximum smoothing box size along the x and y axes. The
+minimum box size is used at the edges and grows to the maximum size in
+the middle of the image. This allows the smoothed image to better
+represent gradients at the edge of the image. If a size is less than 1
+then it is interpreted as a fraction of the image size. If a size is
+greater than or equal to 1 then it is the box size in pixels. A size
+greater than the size of the image selects a box equal to the size of the
+image.
+.le
+.ls clip = yes
+Clean the input images of objects? If yes then a clipping algorithm is
+used to detect and exclude objects from the smoothing.
+.le
+.ls lowsigma = 2.5, highsigma = 2.5
+Sigma clipping thresholds above and below the smoothed illumination.
+.le
+.ls ccdproc (pset)
+CCD processing parameter set.
+.le
+.ih
+DESCRIPTION
+A sky corrected flat field calibration image, called a sky flat, is a
+flat field which, when applied to observations of the sky, leaves no large
+scale gradients. Flat field images are generally obtained by exposures
+to lamps either illuminating the telescope field or a surface in the dome
+at which the telescope is pointed. Because the detector is not illuminated
+in the same way as an observation of the sky there may be large
+scale illumination patterns introduced into the observations with such
+a flat field. To correct this type of flat field a blank sky observation
+(which has been divided by the original flat field) is heavily smoothed
+to remove the noise, leaving only the residual large scale illumination
+pattern. This illumination pattern is divided into the original flat
+field to remove this residual.
+
+The advantage of creating a sky flat field is that when processing
+the observations no additional operations are required. However,
+if the observations have already been processed with the original
+flat field then the residual illumination pattern of blank sky
+calibration images may be created as an illumination correction
+to be applied by \fBccdproc\fR. Such a correction is created by the
+task \fBmkskycor\fR. If a good blank sky image is not
+available then it may be desirable to remove the illumination pattern
+of the flat field image using \fBmkillumflat\fR or \fBmkillumcor\fR
+provided the sky observations are truly uniformly illuminated.
+For more on flat fields and illumination corrections see \fBflatfields\fR.
+
+The input, blank sky images are first processed, based on the
+\fBccdproc\fR parameters, if needed. These parameters also determine
+the flat field image to be used in making the sky flat. The residual
+illumination pattern is determined by heavily smoothing the image using
+a moving "boxcar" average. The effects of objects in the input image
+may be minimized by using a sigma clipping algorithm to detect and
+exclude the objects from the average. The output image is the ratio of the
+flat field image, for the same subset as the input image, to the
+residual illumination pattern determined from the processed blank sky
+input image. The illumination pattern is normalized by its mean to
+preserve the mean level of the flat field image.
+
+The smoothing algorithm is a moving average over a two dimensional
+box. The algorithm is unconventional in that the box size is not fixed.
+The box size is increased from the specified minimum at the edges to
+the maximum in the middle of the image. This permits a better estimate
+of the background at the edges, while retaining the very large scale
+smoothing in the center of the image. Note that the sophisticated
+tools of the \fBimages\fR package may be used for smoothing but this
+requires more of the user and, for the more sophisticated smoothing
+algorithms such as surface fitting, more processing time.
+
+Blank sky images may not be completely blank so a sigma clipping
+algorithm may be used to detect and exclude objects from the
+illumination pattern. This is done by computing the rms of the image
+lines relative to the smoothed background and excluding points
+exceeding the specified threshold factors times the rms. This is done
+before each image line is added to the moving average, except for the
+first few lines where an iterative process is used.
+.ih
+EXAMPLES
+1. Two examples in which a new image is created and in which the
+input sky images are converted to sky flats are:
+
+.nf
+ cl> mkskyflat sky004 Skyflat
+ cl> mkskyflat sky* ""
+.fi
+.ih
+SEE ALSO
+ccdproc, flatfields, mkfringecor, mkillumcor, mkillumflat, mkskycor
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/quad.hlp b/noao/imred/quadred/src/quad/doc/quad.hlp
new file mode 100644
index 00000000..7f8754ee
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/quad.hlp
@@ -0,0 +1,121 @@
+.help package Sep93 arcon.quad
+.ih
+NAME
+quad -- reduction package for CCD images obtained with Arcon
+
+.ih
+USAGE
+quad
+
+This package \fBmust\fR be used in place of \fBccdred\fR for the first steps
+(overscan correction and trimming) of multi-readout (quad or dual) images.
+Either package can be used for the subsequent stages, and for the complete
+reduction of single readout images.
+
+.ih
+PARAMETERS
+.ls pixeltype = "real real"
+Output pixel datatype and calculation datatype. When images are processed
+or created the output pixel datatype is determined by this parameter.
+The allowed types are "short" for short integer, and "real" for real
+floating point. The calculation datatypes are also short and real with a
+default of real if none is specified. Note that Arcon generates images of type
+"ushort" (unsigned 16-bit integers). In general both the output and calculation
+types should, therefore, be set to "real" to avoid truncation and wrap
+around errors, although this means that the reduced images will occupy twice as
+much disk space.
+.le
+.ls verbose = no
+Print log information to the standard output?
+.le
+.ls logfile = "logfile"
+Text log file. If no filename is specified then no log file is kept.
+.le
+.ls plotfile = ""
+Log metacode plot file for the overscan bias vector fits. If
+no filename is specified then no metacode plot file is kept.
+.le
+.ls backup = ""
+Backup prefix for backup images. If no prefix is specified then no backup
+images are kept when processing. If specified then the backup image
+has the specified prefix.
+.le
+.ls instrument = ""
+CCD instrument translation file. This is usually set with \fBsetinstrument\fR.
+.le
+.ls ssfile = "subsets"
+Subset translation file used to define the subset identifier. See
+\fBsubsets\fR for more.
+.le
+.ls graphics = "stdgraph"
+Interactive graphics output device when fitting the overscan bias vector.
+.le
+.ls cursor = ""
+Graphics cursor input. The default is the standard graphics cursor.
+.le
+.ls version = "Version 2.0 - Sept 93"
+Package version.
+.le
+.ih
+DESCRIPTION
+The \fBquad\fR package contains all the basic tasks necessary for the
+reduction of CCD data obtained with Arcon. With Arcon images are often readout
+using four ("quad") or two ("dual") amplifiers in order to reduce readout time.
+The \fBquad\fR package includes the few special tasks needed to deal with such
+multi-readout data, as well as many standard tasks taken directly from the
+\fBccdred\fR package. The \fBquad\fR package must be used for the first
+reduction steps, overscan correction and trimming, of multi-readout images;
+subsequent steps can be performed using \fBquad\fR or \fBccdred\fR. Either
+package can be used for the complete reduction of conventional single readout
+CCD images.
+
+The \fBquad\fR package also contains the tasks \fBqstatistics\fR and
+\fBqhistogram\fR which can be used for examining raw multi-readout images.
+
+The \fBquad\fR package task itself has several parameters which are common to
+many of the tasks in the package. When images are processed or new image are
+created the output pixel datatype is that specified by the parameter
+\fBpixeltype\fR. Note that CCD processing replaces the original image by the
+processed image so the pixel type of the CCD images may change during
+processing. It is unlikely that real images will be processed to short images
+but the reverse is quite likely. Processing images from short to real
+pixel datatypes will generally increase the amount of disk space
+required (a factor of 2 on most computers).
+
+The tasks produce log output which may be printed on the standard
+output (the terminal unless redirected) and appended to a file. The
+parameter \fIverbose\fR determines whether processing information
+is printed. This may be desirable initially, but when using background
+jobs the verbose output should be turned off. The user may look at
+the end of the log file (for example with \fBtail\fR) to determine
+the status of the processing.
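+
+For example, using the default log file name,
+
+.nf
+        cl> tail logfile
+.fi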
+
+The package was designed to work with data from many different observatories
+and instruments. In order to accomplish this an instrument translation
+file is used to define a mapping between the package parameters and
+the particular image header format. The instrument translation file
+is specified to the package by the parameter \fIinstrument\fR. This
+parameter is generally set by the task \fBsetinstrument\fR. The other
+file used is a subset file. This is generally created and maintained
+by the package and the user need not do anything. For more sophisticated
+users see \fBinstruments\fR and \fBsubsets\fR.
+
+The package has very little graphics
+output. The exception is the overscan bias subtraction. The bias
+vector is logged in the metacode plot file if given. The plot file
+may be examined with the tasks in the \fBplot\fR package such as
+\fBgkimosaic\fR. When interactively fitting the overscan vector
+the graphics input and output devices must be specified. The defaults
+should apply in most cases.
+
+Because processing replaces the input image by the processed image it
+may be desired to save the original image. This may be done by
+specifying a backup prefix with the parameter \fIbackup\fR. For
+example, if the prefix is "orig" and the image is "ccd001", the backup
+image will be "origccd001". The prefix may be a directory but if so it must
+end with '/' or '$' (for logical directories) and the directory must already
+exist.
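+
+For example (a sketch; the prefix and directory names are arbitrary),
+
+.nf
+        cl> quad.backup = "orig"        # prefix: ccd001 -> origccd001
+        cl> quad.backup = "bkup$"       # or a predefined logical directory
+.fi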
+.ih
+SEE ALSO
+instruments, setinstrument, subsets
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/quadman.hlp b/noao/imred/quadred/src/quad/doc/quadman.hlp
new file mode 100644
index 00000000..55bfe10d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/quadman.hlp
@@ -0,0 +1,1330 @@
+.help quadman Sep93 Version-0.0
+.ce
+\fIV-ARCON. Reduction of CCD data obtained with Arcon\fR
+
+.nf
+ 1. Introduction
+ 2. Getting the Data
+ 3. Reducing the Data
+ 3.1 Introduction
+ 3.2 Reading the Data from Tape
+ 3.3 Setting Up the Translation File
+ 3.3.1 Setinstrument for CCD Used
+ 3.3.2 Setting the Subsets Parameter and Preparations
+ 3.4 Preparing Individual Calibration Frames
+ 3.5 Processing the calibration frames
+ 3.6 Preparing a Final Flat Field
+ 3.7 Processing the Images
+ 3.8 Creating the Bad Pixel File
+ 3.9 Writing the Data to Tape
+
+
+
+
+
+
+ A thinly disguised version of CCDMAN
+
+
+ Lisa Wells
+
+ Mario Hamuy
+
+ September 30, 1993
+.fi
+.bp
+.ls \fI1. Introduction\fR
+
+CCDRED is a package in IRAF used for CCD data reduction. It is primarily used
+for CCD imaging although various aspects are also used for spectral reduction.
+This document is intended as a guideline to reducing direct CCD images using
+IRAF. If you are reducing spectra, see Section III or IV of
+this manual. If you do not have experience using IRAF we suggest that
+you start by
+reading "An Introduction to IRAF" which will give you a general idea of IRAF
+structure as well as a few fundamental tools. If you plan to use this package
+extensively, there is a demo on the computer as well which will run through
+the reduction process interactively. Once you login and enter IRAF
+type the following:
+
+.nf
+ cl> noao
+ no> imred
+ im> ccdred
+ cc> ccdtest
+ cc>
+.fi
+
+The cc> prompt indicates the package has been loaded. 'ccdtest' contains several
+tasks, one of which is 'demo'. Now type 'demo' and follow the instructions. It
+will prompt you, from time to time, to continue to the next section.
+
+The examples shown here are just that, examples. The user must decide
+upon the reduction procedure, naming, convention, etc..., appropriate for
+his/her own data and use the cookbook and examples as guidelines only. The
+examples are shown with prompts, for the package containing the tasks (do not
+type the prompts, of course). It is strongly recommended that you perform an
+'lpar' on every task immediately before using it, unless you are familiar with
+all of its parameters. This is not always shown in the examples, but is normal
+practice even among seasoned IRAF users.
+
+IRAF uses two conventions that you should always keep in mind.
+First, images consist of lines and columns (not rows and columns). Keep in mind
+that the mountain Forth systems for the CCDs are zero-indexed and use rows and
+lines. IRAF uses one-indexed coordinates for images (see figure 1). Second,
+the "order" of a function is the number of independent coefficients for a
+polynomial, or the number of intervals for a spline curve. For example, a cubic
+(third-degree) polynomial is described as "order=4" (four coefficients).
+
+If you require personal assistance in your reductions please contact
+either Mauricio Navarrete or Nelson Saavedra on Tololo (ex 422),
+or Mario Hamuy(ex 210) in La Serena.
+.le
+.bp
+.ls \fI2. Getting the Data\fR
+
+Many observers often get confused about the number and kind of calibration
+frames that must be taken to properly reduce their data. During a whole
+run you should get the following:
+
+1) Bias frames for each night are essential no matter what setup and chip
+you are using. 20-30 of these should be taken and combined.
+
+2) Dome flats should be taken for each filter you are using. Preferably
+this should be done every day, but you can get by with just one per run,
+as long as you take a sufficient number of them, say 20-30.
+
+3) Twilight sky flats are necessary if you want to do photometry to better
+than 2%-3%, if you are doing surface photometry of extended objects,
+or if sky subtraction is critical. We suggest that everyone take sky flats
+since they are a good check of your dome flat illumination. For some CCDs it
+is better to use a sky flat to flatten your objects, and this may depend
+upon the filters being used.
+At the 0.9-m and 1.5-m telescopes it was found that
+sky flats do a better job of flattening the
+images in the U and B filters. It is therefore
+suggested that you concentrate on getting many U and B sky flats (10 or more)
+since you will probably process your data using them.
+These will be combined in the first steps of the reduction.
+
+4) Darks are worth taking to check that things are working but dark correction
+is not necessary for any of the CCDs now used at CTIO. The dark current
+should be <10 e-/hour/pixel; if it is greater, something is wrong and you
+should have it fixed.
+
+5) Photometric standard stars should be taken when needed and as many as
+necessary (>20) to properly calibrate your objects.
+
+You should refer to the instrument manual for more details. We
+suggest that you start taking your calibration frames early in the afternoon so
+that you have enough time left for supper. It is important to note on your
+calibration frames, the positions of bad pixels and then avoid observing
+your objects on these regions of the CCD, especially if you plan to do
+photometry. At the end of the reductions, you may wish to use a bad pixel map
+to correct the bad pixels. This will be discussed later (section 3.8)
+in more detail.
+.le
+.bp
+.ce
+\fI3. Reducing the Data\fR
+.ls \fI3.1 Introduction\fR
+
+A full reduction of CCD data requires the following operations (see the
+flow diagram on the next page):
+
+.nf
+ 1) Combine the bias, flats and darks.
+ 2) Fit and subtract a readout bias given by the overscan strip.
+ 3) Trim the image of overscan strip and border rows and columns.
+ 4) Subtract the dark, if appropriate.
+ 5) Prepare a final flat.
+ 6) Divide by the flat field.
+ 7) Fix the bad pixels in all the images.
+ 8) Fringing corrections may be done at the end.
+.fi
+
+The most general processing, described in this manual, consists of
+overscan subtracting, trimming, bias subtracting, dark subtracting,
+and flat fielding your data. Although dark subtraction is rarely used,
+it is included in this example for generality.
+
+Individual bias, darks, dome and sky flats must be properly combined to give
+good signal to noise calibration frames and to remove cosmic rays. The
+algorithm used to combine the images must have 10 or more frames to do a good
+job of cleaning the cosmic rays. IRAF offers several algorithms for combining
+the individual frames. You should always carefully inspect all the individual
+frames, and the final image to check
+for potential problems.
+
+Having obtained the combined calibration images you should flatten your sky flat
+using the dome flat and examine the result for any significant illumination
+variation. If these variations are significant they may be fit in order to
+correct your dome flat.
+Fringing corrections may be
+applied as a separate step. This is only needed for the RCA
+chips at the 4 meter PF/CCD and the TI chips in the I band. We do not
+currently support this but guidance can be given as to what might
+work.
+
+At this point
+the images should be finished except for fixing any bad pixels that were
+not taken care of in the flat fielding. Once this is done you may
+do photometry. A photometry manual is available for the analysis;
+see section VI of this manual, which describes the use of the
+aperture photometry routines in IRAF and the transformation from
+instrumental to standard magnitudes. Another good guide, written by
+the IRAF group in Tucson, is "A User's
+Guide to Stellar CCD Photometry with IRAF".
+.bp
+.nf
+
+ ==========================
+ = Set Instrument =
+ = and Translation File =
+ ==========================
+ *****
+ ***
+ *
+ ==================================
+ = Combining Calibration Frames =
+ = and Removing Cosmic Rays =
+ ==================================
+ *****
+ ***
+ *
+ ===============================
+ = Processing =
+ = Calibration Frames =
+ ===============================
+ *****
+ ***
+ *
+ ================================
+ = Preparing a Flat Field =
+ ================================
+ *****
+ ***
+ *
+ ==========================
+ = Processing the data =
+ ==========================
+ *****
+ ***
+ *
+ ==========================
+ = Fixing bad pixels =
+ ==========================
+ *****
+ ***
+ *
+ ==========================
+ = Fringe Corrections =
+ = (optional) =
+ ==========================
+.fi
+.le
+.bp
+.ls \fI3.2 Reading the data from Tape\fR
+
+Load the 'dataio' package and allocate the appropriate tape drive:
+
+.nf
+ cl> dataio
+ da> alloc mta
+.fi
+
+If you wish to read fits files from an exabyte, use 'mtx' as the device
+designation. Now mount your tape on the tape drive and be sure you've
+removed the write ring. It is best to create a separate directory for
+each night. This is done using the command 'mkdir night1'. Now change
+to this directory by typing 'cd night1'. Read the data using the task
+'rfits'. You must specify the tape drive name, the list of files you wish
+to read and the "root" file name. If you transferred your data to the
+SUN using 'getpix' then you may wish to use the naming convention given
+to your files; in this case just set the parameter 'oldirafname' to yes.
+In choosing the root file name, it is usually a good idea to include a
+digit in the name to indicate the tape number (eg, "tape1" for the
+first tape; files would be called "tape10001, tape10002,.."); alternatively,
+an offset may be added (eg, offset=89 means the first files would be
+called "tape10090, tape10091,.." or 1+89, 2+89,..).
+
+.nf
+ da> epar rfits (check the parameter list)
+ da> rfits mta 1-999 tape1
+.fi
+
+The file list "1-999" should more than cover the number of files on tape;
+the task will end gracefully at the end of the tape. When finished,
+rewind the tape and deallocate the drive,
+
+.nf
+ da> rew mta
+ da> dealloc mta
+.fi
+
+and remove your tape from the drive. We assume that you kept the old IRAF
+name given to your files throughout the rest of this manual.
+
+.nf
+ \fIrfits\fR
+
+ fits_file = "mta" FITS data source
+ file_list = "1-999" File list
+ iraf_file = "tape1" IRAF filename
+ (make_image = yes) Create an IRAF image?
+ (long_header = no) Print FITS header cards?
+(short_header = yes) Print short header?
+ (datatype = "") IRAF data type
+ (blank = 0.) Blank value
+ (scale = yes) Scale the data?
+ (oldirafname = no) Use old IRAF name in place of iraf_file?
+ (offset = 0) Tape file offset
+ (mode = "ql")
+.fi
+.le
+.bp
+.ce
+\fI3.3 Setting Up the Translation File\fR
+.ls \fI3.3.1 Setinstrument for CCD Used\fR
+
+Start by loading the 'imred' and 'ccdred' packages.
+
+.nf
+ cl> noao
+ no> imred
+ im> ccdred
+ cc>
+.fi
+
+Because the 'ccdred' package was
+designed to work with data from many different observatories and instruments,
+an \fIinstrument translation file\fR is required to define a mapping
+between the package parameters and the particular image header format.
+An example of a translation file is shown on the next page.
+You must define a translation file using the task 'setinstrument'.
+Edit the parameters for the task 'setinstrument' according to the
+list given below and run it.
+The choices for instrument can be found in Appendix A. If the CCD you
+used is not listed, use the generic "ccd" or "cfccd" in the instrument
+parameter in the task.
+
+.nf
+ cc> epar setinstrument (check the parameter list)
+ cc> setinstrument
+.fi
+
+The task will run and prompt you for the instrument ID (a "?" will list
+the possible choices). Answer 'cfccd' and the task will send you into
+the parameter editing mode for
+the task 'ccdred'. It will automatically set the \fIinstrument\fR
+(translation) parameter
+so do not change it. It is a good idea while processing your calibration
+frames to save a copy of the unprocessed images. This will be done
+by the package 'ccdred' if you specify a prefix or subdirectory in the
+parameter 'backup'. In our example, the files will be saved with the prefix
+'B'. When you type 'ctrl-z' to exit you will then be sent to 'ccdproc'
+to edit its parameters. We will edit
+'ccdproc' later so don't worry about it for now.
+
+.nf
+ \fIsetinstrument parameters\fR
+
+ \fIinstrument\fR = "cfccd" Instrument ID (type ? for a list)
+ (\fIsite\fR = "ctio") Site ID
+ (\fIdirectory\fR = "ccddb$") Instrument directory
+ (review = yes) Review instrument parameters?
+ query = "" Instrument ID (type q to quit)
+ (mode = "ql")
+
+ \fIccdred parameters\fR
+
+ (pixeltype = "real real") Output and calculation pixel datatypes
+ (verbose = no) Print log information to the standard output?
+ (logfile = "ccdlog") Text log file
+ (plotfile = "ccdplot") Log metacode plot file
+ (\fIbackup\fR = "B") Backup directory or prefix
+ (\fIinstrument\fR = "ccddb$ctio/cfccd.dat") CCD instrument file
+ (\fIssfile\fR = "subsets") Subset translation file
+ (graphics = "stdgraph") Interactive graphics output device
+ (cursor = "") Graphics cursor input
+ (version = "2: October 1987")
+ (mode = "ql")
+ ($nargs = 0)
+
+.fi
+
+An example of the \fIinstrument translation file\fR follows.
+
+.nf
+
+ exptime exptime
+ darktime darktime
+ imagetyp imagetyp
+ subset filters
+ biassec biassec
+ datasec datasec
+ trimsec trimsec
+ fixfile fixfile
+
+ fixpix bp-flag 0
+ overscan bt-flag 0
+ zerocor bi-flag 0
+ darkcor dk-flag 0
+ flatcor ff-flag 0
+ fringcor fr-flag 0
+
+ OBJECT object
+ DARK dark
+ "PROJECTOR FLAT" flat
+ "SKY FLAT" other
+ COMPARISON other
+ BIAS zero
+ "DOME FLAT" flat
+ MASK other
+.fi
+
+
+The instrument file consists of, first, the IRAF parameters and their associated
+image header keywords from the objects. These header keywords may vary depending
+upon the instrument being used.
+The second section is the
+header flag section which is added to the object as it is being processed to
+mark that the operation has been done. Once an image is zero corrected, the
+keyword bi-flag parameter is added to the image header and set to '0' so that
+the process is not repeated later. The last section is the image-type
+specification. This is used when the imagetyp parameter is read. The header
+value 'OBJECT' is translated to 'object', and a 'SKY FLAT' becomes 'other'.
+If you need to modify the translation file you may copy this file to your
+directory and change the appropriate parameters. If the name of the imagetype
+contains 2 words, then you must put quotes around the designation, for example,
+look at the 'SKY FLAT' line in the above file. Comparison arcs are not used
+in direct imaging and are not needed.
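+
+For example (a sketch; the local file name is arbitrary),
+
+.nf
+        cc> copy ccddb$ctio/cfccd.dat cfccd.dat
+        cc> edit cfccd.dat
+        cc> ccdred.instrument = "cfccd.dat"
+.fi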
+
+Without the proper translation file, the task 'ccdlist' will give the following:
+
+.nf
+ cc> ccdlist *.imh
+
+ bias001.imh[400,420][short][unknown][]:median of 4 ave of 6 bias
+ flat002.imh[400,420][short][unknown][]:median of 4 ave of 6 flat
+ flat003.imh[400,420][short][unknown][]:median of 4 ave of 6 flat
+ obj004.imh[400,420][short][unknown][]:IC 481 V-filter ........
+ obj005.imh[400,420][short][unknown][]:IC 481 B-filter.........
+ : : : : : : : :
+ obj036.imh[400,420][short][unknown][]:Sky flat B-filter .......
+ obj037.imh[400,420][short][unknown][]:Sky flat V-filter .......
+ : : : : : : : :
+.fi
+
+The [unknown] in the listing means that the frame type (dark, flat, bias,
+etc.) is unknown. The blank brackets should contain the
+filter type; this will be taken care of in the next section.
+.le
+.bp
+.ls \fI3.3.2 Setting the Subsets Parameter and Preparing for Processing\fR
+
+The 'ccdred' package groups observations into subsets. The image header
+parameter used to identify the subsets is defined in the \fIinstrument
+translation file\fR. For example, to select subsets by the header parameter
+'filters' the instrument translation file would contain the line
+
+.nf
+ subset filters
+.fi
+
+Now do a 'ccdlist' and you should see the
+correct image type specified for each frame:
+
+.nf
+ cc> ccdlist *.imh
+
+ bias001.imh[400,420][short][zero][]:median of 4 ave of 6 bias
+ flat002.imh[400,420][short][flat][]:median of 4 ave of 6 flat
+ flat003.imh[400,420][short][flat][]:median of 4 ave of 6 flat
+ obj004.imh[400,420][short][object][]:IC 481 V-filter ......
+ obj005.imh[400,420][short][object][]:IC 481 B-filter.......
+ : : : : : : :
+ : : : : : : :
+ obj036.imh[400,420][short][object][]:Sky flat B-filter .....
+ obj037.imh[400,420][short][object][]:Sky flat V-filter .....
+ : : : : : : :
+.fi
+
+Doing this will create a file in your directory called 'subsets'. This is a
+file with a listing of the filter type read from the image header consisting
+of the combination of the upper and lower filter bolt positions. The flats
+were taken with a color balance filter so they have different values from the
+objects in the list. Therefore, you need to edit this file to set the filter
+types properly. The bias and dark were probably taken with a setting that
+corresponds to one of the filters, but it will be ignored. If however, the
+filter type set in the header corresponds to something other than a standard
+filter, set this to some arbitrary value, for example below it is assumed that
+the filter positions for the bias were '0 1', so this is set to '0'.
+
+.nf
+ cc> edit subsets
+.fi
+
+An example of the necessary changes is:
+
+.nf
+ \fISubsets Before | Subsets After\fR
+ '1 0' 1 | '1 0' U
+ '2 0' 2 | '2 0' B
+ '3 0' 3 | '3 0' V
+ '4 0' 4 | '4 0' R
+ '5 0' 5 | '5 0' I
+ '0 1' 01 | '0 1' 0
+ '1 1' 11 | '1 1' U
+ '2 1' 21 | '2 1' B
+ '3 1' 31 | '3 1' V
+ '4 1' 41 | '4 1' R
+ '5 1' 51 | '5 1' I
+.fi
+
+If any of the parameters are not specified properly after doing 'ccdlist',
+then the translation file may be set up improperly. Consult
+Mauricio Navarrete, Nelson Saavedra, or Mario Hamuy.
+
+You must also specify the overscan and trim region.
+The overscan is the region to the far right of the image when plotting a line
+of the image (see figure 2a). The overscan region should be
+specified by the beginning and ending column followed by the beginning and
+ending line. This region should begin at least 5-10 pixels from the edge of
+the active region of the CCD. The correct position can be found by plotting
+several lines using ':l #1 #2' (#1 and #2 specify a range of lines to be
+averaged and plotted) in 'implot', and expanding the right side (see figure 2b).
+Notice that the signal decays toward the right just beyond the step with
+an e-folding distance of a few pixels. The overscan strip should begin 3 to
+4 e-folding distances from the break. You want to fit this region all along
+the image so the range of lines should include everything. Be sure to write
+down these values so you don't forget them.
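+
+For example (the image name and line range are illustrative),
+
+.nf
+        cc> implot obj004
+        :l 100 200
+.fi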
+
+The trim region is the section of the image to be kept after processing.
+Choose this area excluding the overscan region
+and any bad lines or columns at the edges of the image. Figures 2c and 2d show
+a plot of a line and column, respectively. They show the edges which should be
+trimmed from the image. Later (section 3.5)
+you will edit the parameters for 'ccdproc' and enter the overscan and trim
+section.
+
+The bad pixel file will be used after the images have been flattened. In many
+cases the flatfielding will correct the bad pixel regions also so it is best
+left until the end. If you specify the bad pixel file during the processing
+be aware that the corrections are done on the file first.
+.le
+.bp
+.ls \fI3.4 Preparing Individual Calibration Frames\fR
+
+If you have taken many calibration frames (bias, dome flats, darks
+and sky flats) that you wish to combine in order to remove cosmic
+rays, we suggest that you combine the images separately for
+each night. In some cases, when the instrument response remains
+stable during your run, it is worth combining data from many nights,
+especially the darks and sky flats, to improve the signal-to-noise
+ratio. If you have already combined the data on the mountain skip
+over this section and go on to the next one.
+
+Most likely you used "getpix" to transfer your data from the LSI
+to the SUN so you have preserved the naming convention. This makes
+it very easy to combine your images.
+
+There are tasks which combine each type of object together. For the
+bias do an 'epar' on the 'zerocombine' task, and fill the parameters
+according to the list given below. We suggest selecting the 'avsigclip'
+rejection operation which applies a
+sigma clipping algorithm to each pixel to remove cosmic rays. This
+algorithm requires at least three input images (best with
+more than 10) to work effectively. According to your particular needs you may
+select other options to combine the data like minmax, ccdclip, etc.
+No scaling is performed on the biases.
+If your bias frames vary greatly, then there
+is something wrong with your setup. In this case, have someone check
+out the electronics.
+
+.nf
+ cc> epar zerocombine (check the list given below)
+.fi
+
+Then combine the bias frames. Optionally you may add a '&' at the end
+of the command line to submit the task as a background job.
+
+.nf
+ cc> zerocombine bias*.imh output=zero1 &
+.fi
+
+Now, proceed with the combination of the darks.
+
+.nf
+ cc> epar darkcombine (check the list given below)
+ cc> darkcombine dark*.imh output=dark1 &
+.fi
+
+Before combining the flat frames, you must
+find a region free of bad pixels to be used to scale the individual
+flat frames. The box should be chosen where there is signal.
+Once you have determined the position of the box, you
+should first run 'imstat' for all your images using this region.
+
+.nf
+ cc> imstat flat*[200:250,300:330]
+
+ # IMAGE NPIX MEAN STDDEV MIN MAX
+ flat001.imh[200:250,300:330] 37901 5489. 1510. 678. 9020.
+ flat002.imh[200:250,300:330] 37901 5485. 1508. 694. 8484.
+ flat003.imh[200:250,300:330] 37901 5478. 1507. 691. 8472.
+ flat004.imh[200:250,300:330] 37901 5475. 1506. 671. 8397.
+ flat005.imh[200:250,300:330] 37901 5474. 1506. 659. 8540.
+ flat006.imh[200:250,300:330] 37901 5472. 1505. 663. 8422.
+ flat007.imh[200:250,300:330] 37901 5467. 1504. 655. 15513.
+ flat008.imh[200:250,300:330] 37901 5464. 1502. 673. 8471.
+ flat009.imh[200:250,300:330] 37901 5458. 1501. 684. 8503.
+.fi
+
+If the mean varies by a large amount, say greater than a few percent, then
+you should use the mode section for the scaling.
+In the example below the flats are combined with scaling.
+Enter this region in the 'statsec' parameter in the following format,
+
+.nf
+ [x1:x2,y1:y2]
+.fi
+
+where the values x1 and x2 are the limits of the box in the x-axis, y1
+and y2 are the limits along the y-axis. This will only be
+used if 'median' is specified in the 'scale' parameter of the task
+'flatcombine'. In this example we have chosen to combine the individual
+flats using the 'ccdclip' option, which rejects pixels using the CCD noise
+parameters, namely the read-out-noise and the gain. You must enter these
+values in the parameters 'rdnoise' and 'gain' in 'flatcombine'. In this
+example we have entered a read-out-noise of 5 electrons, and a gain of
+2.5 electrons/ADU.
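+
+For example, the statistics region from the 'imstat' listing above and
+these noise values could also be entered directly from the command line
+(substitute the region and values appropriate for your chip):
+
+.nf
+        cc> flatcombine.statsec = "[200:250,300:330]"
+        cc> flatcombine.rdnoise = "5"
+        cc> flatcombine.gain = "2.5"
+.fi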
+
+.nf
+ cc> epar flatcombine (check the list given below)
+.fi
+
+Proceed with the combination of the flats.
+
+.nf
+        cc> flatcombine flat*.imh output=flat1 &
+.fi
+
+If you have 3 or more sky flats, they may be combined in the same manner
+as the dome flats (change the 'ccdtype' parameter to 'other').
+
+.nf
+ cc> epar flatcombine (check the list given below)
+        cc> flatcombine sky*.imh output=sky1 &
+.fi
+
+The output at this point is a set of images clean of cosmic rays called zero1,
+dark1, flat1, and sky1, where the 1 stands for the first night and the flats
+and skies end in their respective filter type, i.e., flat1U, flat1B, ... Using
+the parameter 'subsets', the combination is done separately for all the flats
+and skies of the same filter type (subset). We suggest that you examine these
+images, either by displaying them or by plotting them with 'implot'. For
+example:
+
+.nf
+ cc> display zero1
+ cc> implot zero1
+.fi
+
+We also suggest looking at the logfile created by the 'ccdred' package to
+check that the images were properly combined by filter type.
+
+.nf
+ cc> page ccdlog
+.fi
+.bp
+
+.nf
+ \fIzerocombine\fR
+
+ input = "bias*.imh" List of zero level images to combine
+ (output = "zero1") Output zero level name
+ (\fIcombine\fR = "average") Type of combine operation
+ (\fIreject\fR = "avsigclip") Type of rejection
+ (\fIccdtype\fR = "zero") CCD image type to combine
+ (process = no) Process images before combining?
+ (delete = no) Delete input images after combining?
+ (clobber = no) Clobber existing output image?
+ (\fIscale\fR = "none") Image scaling
+ (statsec = "") Image section for computing statistics
+ (nlow = 0) minmax: Number of low pixels to reject
+ (nhigh = 1) minmax: Number of high pixels to reject
+ (nkeep = 1) Minimum to keep (pos) or maximum to reject
+ (mclip = yes) Use median in sigma clipping algorithms?
+ (lsigma = 3.) Lower sigma clipping factor
+ (hsigma = 3.) Upper sigma clipping factor
+ (rdnoise = "0") ccdclip: CCD readout noise (electrons)
+ (gain = "1") ccdclip: CCD gain (electrons/DN)
+ (snoise = "0.") ccdclip: Sensitivity noise (fraction)
+ (pclip = -0.5) pclip: Percentile clipping parameter
+ (blank = 0.) Value if there are no pixels
+ (mode = "ql")
+
+ \fIdarkcombine\fR
+
+	input = "dark*.imh"	List of dark images to combine
+	(output = "dark1")	Output dark image root name
+ (\fIcombine\fR = "average") Type of combine operation
+ (\fIreject\fR = "avsigclip") Type of rejection
+ (\fIccdtype\fR = "dark") CCD image type to combine
+ (process = no) Process images before combining?
+ (delete = no) Delete input images after combining?
+ (clobber = no) Clobber existing output image?
+ (\fIscale\fR = "exposure") Image scaling
+ (statsec = "") Image section for computing statistics
+ (nlow = 1) minmax: Number of low pixels to reject
+ (nhigh = 1) minmax: Number of high pixels to reject
+ (nkeep = 1) Minimum to keep (pos) or maximum to reject
+ (mclip = yes) Use median in sigma clipping algorithms?
+ (lsigma = 3.) Lower sigma clipping factor
+ (hsigma = 3.) Upper sigma clipping factor
+ (rdnoise = "0.") ccdclip: CCD readout noise (electrons)
+ (gain = "1.") ccdclip: CCD gain (electrons/DN)
+ (snoise = "0.") ccdclip: Sensitivity noise (fraction)
+ (pclip = -0.5) pclip: Percentile clipping parameter
+ (blank = 0.) Value if there are no pixels
+ (mode = "ql")
+
+ \fIflatcombine for dome flats\fR
+
+ input = "flat.*imh" List of flat field images to combine
+ (output = "flat1") Output flat field root name
+ (combine = "median") Type of combine operation
+ (reject = "ccdclip") Type of rejection
+ (ccdtype = "flat") CCD image type to combine
+ (process = no) Process images before combining?
+ (subsets = yes) Combine images by subset parameter?
+ (delete = no) Delete input images after combining?
+ (clobber = no) Clobber existing output image?
+ (scale = "median") Image scaling
+ (statsec = "[m:n,p:q]") Image section for computing statistics
+ (nlow = 1) minmax: Number of low pixels to reject
+ (nhigh = 1) minmax: Number of high pixels to reject
+ (nkeep = 1) Minimum to keep (pos) or maximum to reject
+ (mclip = yes) Use median in sigma clipping algorithms?
+ (lsigma = 3.) Lower sigma clipping factor
+ (hsigma = 3.) Upper sigma clipping factor
+ (rdnoise = "5") ccdclip: CCD readout noise (electrons)
+ (gain = "2.5") ccdclip: CCD gain (electrons/DN)
+ (snoise = "0.") ccdclip: Sensitivity noise (fraction)
+ (pclip = -0.5) pclip: Percentile clipping parameter
+ (blank = 1.) Value if there are no pixels
+ (mode = "ql")
+
+ \fIflatcombine for sky flats\fR
+
+ input = "sky.*imh" List of flat field images to combine
+ (output = "sky1") Output flat field root name
+ (combine = "median") Type of combine operation
+ (reject = "ccdclip") Type of rejection
+ (ccdtype = "other") CCD image type to combine
+ (process = no) Process images before combining?
+ (subsets = yes) Combine images by subset parameter?
+ (delete = no) Delete input images after combining?
+ (clobber = no) Clobber existing output image?
+ (scale = "median") Image scaling
+ (statsec = "[m:n,p:q]") Image section for computing statistics
+ (nlow = 1) minmax: Number of low pixels to reject
+ (nhigh = 1) minmax: Number of high pixels to reject
+ (nkeep = 1) Minimum to keep (pos) or maximum to reject
+ (mclip = yes) Use median in sigma clipping algorithms?
+ (lsigma = 3.) Lower sigma clipping factor
+ (hsigma = 3.) Upper sigma clipping factor
+ (rdnoise = "5") ccdclip: CCD readout noise (electrons)
+ (gain = "2.5") ccdclip: CCD gain (electrons/DN)
+ (snoise = "0.") ccdclip: Sensitivity noise (fraction)
+ (pclip = -0.5) pclip: Percentile clipping parameter
+ (blank = 1.) Value if there are no pixels
+ (mode = "ql")
+.fi
+.le
+.bp
+.ls \fI3.5 Processing the calibration frames\fR
+
+Although all the following steps may be done in a single step, our
+approach is to do them separately so that you can start at any point in
+this section in case you have already started reducing the images on
+the mountain.
+
+You must start by overscan subtracting and trimming 'zero1'. Edit the
+parameters for the 'ccdproc' task according to the list given below.
+There is a series of parameters that are set to 'yes' or 'no'; you must set
+only 'overscan' and 'trim' to 'yes' and the rest to 'no'. Also, 'ccdtype'
+must be set to 'zero'. Be sure to properly specify the parameters 'biassec'
+and 'trimsec', which you determined while reading section 3.3.2. Each
+section gives the range of columns first and then the range of lines, with
+each range written as two limits separated by a ':', e.g., [29:425,21:402].
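+
+The sections may also be set from the command line rather than with 'epar'.
+A minimal sketch, assuming (hypothetically) that the overscan occupies
+columns 435 to 460; substitute the sections you determined for your chip:
+
+.nf
+	cc> ccdproc.biassec = "[435:460,*]"
+	cc> ccdproc.trimsec = "[29:425,21:402]"
+.fi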
+
+\fIDo not change these parameters until having
+processed all your images\fR.
+
+.nf
+ cc> epar ccdproc (check the list given below)
+ cc> ccdproc zero1
+.fi
+
+If the parameter 'interactive' is set to 'yes' you will be asked to
+interactively fit the overscan region with a function. Once the task
+presents you with the plot of the overscan region (see figure 3)
+you may change the fitting function, for example with ':function chebyshev',
+and its order with ':order 4'. To try a new fit type 'f'. We suggest a
+spline3 of order 2. You may also select the sample to be fit by using the
+'s' command twice. If you are happy with the fit, type 'q' to quit. The
+task will then process 'zero1' accordingly.
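+
+For example, to apply the suggested fit during the interactive session
+(a sketch using the standard 'icfit' cursor commands),
+
+.nf
+	:function spline3	(select the fitting function)
+	:order 2		(set the order)
+	f			(compute and display the new fit)
+	q			(accept the fit and continue)
+.fi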
+
+Continue by overscan subtracting, trimming and bias subtracting 'dark1'.
+In this case you have to change in 'ccdproc' the 'ccdtype' parameter
+to 'dark' and the
+'zerocor' parameter to 'yes'.
+
+.nf
+ cc> epar ccdproc (check the list given below)
+ cc> ccdproc dark1
+.fi
+
+The dark image must be examined before proceeding. \fIFor instance, if the
+dark level is low enough (<10 counts/hour/pixel) compared to the flats, you
+will probably want to skip dark subtracting your images, to avoid
+introducing noise into your data.\fR
+However, if you notice some structure in the dark image, it would be
+worth trying to dark correct the data. In the following examples, for
+generality's sake, we consider dark subtraction as part of the overall
+processing. If this is not your case, do not forget to set the 'darkcor'
+parameter in 'ccdproc' to 'no' and leave it there.
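+
+A quick way to estimate the dark level (a sketch; divide the resulting mean
+by the dark exposure time in hours to compare it with the criterion above) is
+
+.nf
+	cc> imstat dark1 fields="image,mean,stddev"
+.fi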
+
+Then process the flats. Check that the 'ccdtype' parameter is set
+to 'flat', that 'overscan', 'trim', 'zerocor' and 'darkcor' are all set
+to 'yes' and execute the task.
+
+.nf
+ cc> epar ccdproc (check the list given below)
+ cc> ccdproc flat1*imh
+.fi
+
+IRAF records all the reduction operations in the image headers. You
+may check in the headers of 'zero1', 'dark1', 'sky1' and 'flat1' that
+the BT-FLAG, BI-FLAG, and DK-FLAG keywords are properly set. For instance,
+
+.nf
+ cc> imheader flat1R l+
+.fi
+
+It is also possible to check this with the 'ccdlist' task, which reads the
+header automatically and lists the operations which have been performed on
+the image.
+
+.nf
+ cc> ccdlist flat1R
+
+.fi
+You should get the following,
+
+.nf
+ flat1R[496,501][real][flat][R][OTZ]:dflat R
+
+.fi
+'ccdlist' should tell you the image name (flat1R), the image size [496,501]
+left after trimming, the pixel type [real] of the image (the processed
+images should normally have pixels in real format, as opposed to
+the raw images which generally have pixels in 'short' format),
+the image type [flat], the filter used in the observation
+[R], the level of processing of the image [OTZ], and the image title (dflat R).
+
+At this point you should have the flats processed through [OTZ], which
+means that the image has been [O]verscan subtracted, [T]rimmed, and
+[Z]ero subtracted. Additional codes for other operations in 'ccdproc'
+are [F]lat correction, [B]ad pixel correction, and [I]illumination correction.
+
+.nf
+ \fIccdproc for zero1\fR
+
+ images = "zero1" List of CCD images to correct
+ (\fIccdtype\fR = "zero") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (fixpix = no) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = no) Apply zero level correction?
+ (\fIdarkcor\fR = no) Apply dark count correction?
+ (\fIflatcor\fR = no) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout cor?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (fixfile = "") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+
+ \fIccdproc for dark1\fR
+
+ images = "dark1" List of CCD images to correct
+ (\fIccdtype\fR = "dark") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (fixpix = no) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = yes) Apply zero level correction?
+ (\fIdarkcor\fR = no) Apply dark count correction?
+ (\fIflatcor\fR = no) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout corr?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (fixfile = "") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+
+ \fIccdproc for flat1\fR
+
+ images = "flat1*imh" List of CCD images to correct
+ (\fIccdtype\fR = "flat") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (fixpix = no) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = yes) Apply zero level correction?
+ (\fIdarkcor\fR = yes) Apply dark count correction?
+ (\fIflatcor\fR = no) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout corr?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (fixfile = "") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+.fi
+.le
+.bp
+.ls \fI3.6 Preparing a Final Flat Field\fR
+
+Next we want to check the iillumination of the flat by applying it to the sky
+flat. Load the 'imred' and 'ccdred' packages if they are not already loaded.
+There is no tried and true method for testing the iillumination of the sky
+flats other than applying them to a long exposure taken in the same filter
+and then examining it with 'implot' or with image statistics.
+
+We must use 'ccdproc' to overscan subtract, trim, bias subtract, dark subtract
+(if needed), and flat field the sky images. Again, we assume here that the
+dark correction is necessary for the reductions. Be sure that the images you
+are using for the flat field division have the proper filter identification.
+First edit the parameters for 'ccdproc', and then run the task to process
+your skies and see whether the iillumination of the dome flat is uniform:
+
+.nf
+	cl> noao
+	no> imred
+	im> ccdred
+ cc> epar ccdproc (check the parameter list)
+ cc> ccdproc sky1R
+.fi
+
+Now we must check the sky to see if it really is flat. This is done
+using 'implot' and plotting some lines or a group of lines,
+
+.nf
+ cc> implot sky1R
+ :l 150 200
+ :c 200 250
+ q
+.fi
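+
+Alternatively (a sketch with arbitrary corner sections), image statistics in
+a few widely separated regions give a quick numerical measure of any
+residual gradient,
+
+.nf
+	cc> imstat sky1R[50:100,50:100]
+	cc> imstat sky1R[400:450,400:450]
+.fi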
+
+Ideally, if the iillumination were uniform, sky1R would not show any variation.
+When plotting several lines together, you may see a small slope (see figure 4).
+If you see any
+variations, including a slope even near the edges of the image, then you must
+run the task 'mkskyflat'. Select a smoothing box size which preserves the large
+scale features while adequately suppressing the noise. The scale of the largest
+features expected should not be smaller than about 1/10 to 1/12 of the
+image size.
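+
+For instance, assuming (as the parameter listing below suggests) that box
+values less than 1 are taken as a fraction of the image size, a box of
+about 1/10 of the image could be set with
+
+.nf
+	cc> mkskyflat.xboxmax = 0.1	(about 50 pixels for a 496-pixel axis)
+	cc> mkskyflat.yboxmax = 0.1
+.fi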
+
+.nf
+ cc> epar mkskyflat (check the parameter list)
+ cc> mkskyflat sky1R finalflat1R
+.fi
+
+The output image is the corrected flat field image, so call it "finalflat1R".
+We need to recopy the sky flat in order to process it again. After deleting
+the processed sky1R, make an 'imcopy' of it from the backup image.
+
+.nf
+ cc> imdel sky1R
+ cc> imcopy Bsky1R sky1R
+ cc> epar ccdproc (change the parameter 'flat' from
+ flat1R to finalflat1R)
+ cc> ccdproc sky1R
+.fi
+
+Again, check to see if the sky is really flat. If it is flat
+(see figure 5), then you are
+done and must process the flats for your other filters. If it is not flat,
+go back to the beginning and ask for help. It may be that you need to play
+with the smoothing parameters. Repeat these steps until you are satisfied.
+Now you can process your images.
+
+.nf
+
+ \fIccdproc for sky flats\fR
+
+ images = "sky1*imh" List of CCD images to correct
+ (\fIccdtype\fR = "other") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (fixpix = no) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = yes) Apply zero level correction?
+ (\fIdarkcor\fR = yes) Apply dark count correction?
+ (\fIflatcor\fR = yes) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout corr?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (fixfile = "") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "flat1*imh") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+.fi
+.bp
+.nf
+ \fImkskyflat\fR
+
+ input = "sky1R" Input CCD images
+ output = "finalflat1R" Output images (same as input if none)
+ (ccdtype = "") CCD image type to select
+ (\fIxboxmin\fR = 0.1) Minimum smoothing box size in x at edges
+ (\fIxboxmax\fR = 0.1) Maximum smoothing box size in x
+ (\fIyboxmin\fR = 0.1) Minimum smoothing box size in y at edges
+ (\fIyboxmax\fR = 0.1) Maximum smoothing box size in y
+ (clip = yes) Clip input pixels?
+ (lowsigma = 2.5) Low clipping sigma
+ (highsigma = 2.5) High clipping sigma
+ (ccdproc = "") CCD processing parameters
+ (mode = "ql")
+.fi
+.le
+.bp
+.ls \fI3.7 Processing the Images\fR
+
+This can be set up to be done all at once. The final flat has been made, so
+now we divide your object frames by it.
+If the calibration frames themselves
+have not been processed, this will be done as well. The program takes into
+account the differing filters. Be sure that you have deleted all the individual
+calibration frames that were combined. Edit the parameters for 'ccdproc' and run
+it. For tasks that take a long time, it is a good idea to run them as a
+background job and send the output to a file which you can watch to keep track
+of the progress. You may wish to make a list of the objects to be processed
+to avoid reprocessing your sky flats.
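+
+One way to build such a list and run the processing in the background
+(a sketch; 'objlist' is an arbitrary file name) is
+
+.nf
+	cc> files obj*.imh > objlist
+	cc> ccdproc @objlist &
+.fi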
+
+.nf
+ cc> epar ccdproc (check the parameter list)
+ cc> ccdproc obj*.imh
+.fi
+
+To check the progress of the task, just type
+
+.nf
+ cc> tail ccdlog
+.fi
+
+This will print out the last lines of the file named 'ccdlog', which show
+the latest operations done by the task 'ccdproc'.
+Once the processing is done, check your images to see
+that they have been properly flat fielded. If you are satisfied, then you may
+now check your images for bad pixel regions, whose structure may not have
+been processed out of your images.
+
+We also suggest doing a 'ccdlist' on the objects in order to
+check whether the frames have been duly processed, i.e.,
+
+.nf
+ cc> ccdlist obj*.imh
+.fi
+
+You should get a listing of your images in the format that follows,
+
+.nf
+
+ obj1010.imh[496,501][real][object][R][OTZF]:rubin 152 R
+ obj1011.imh[496,501][real][object][I][OTZF]:rubin 152 I
+ obj1012.imh[496,501][real][object][B][OTZF]:rubin 149 B
+ obj1013.imh[496,501][real][object][V][OTZF]:rubin 149 V
+ obj1014.imh[496,501][real][object][R][OTZF]:rubin 149 R
+ obj1015.imh[496,501][real][object][I][OTZF]:rubin 149 I
+ obj1016.imh[496,501][real][object][B][OTZF]:rubin 149 B
+ obj1017.imh[496,501][real][object][V][OTZF]:rubin 149 V
+ obj1018.imh[496,501][real][object][R][OTZF]:rubin 149 R
+ obj1019.imh[496,501][real][object][I][OTZF]:rubin 149 I
+.fi
+
+where the [OTZF] code reveals the degree of processing of the images.
+
+.nf
+ \fIccdproc for objects\fR
+
+	images = "obj*imh"		List of CCD images to correct
+ (\fIccdtype\fR = "object") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (fixpix = no) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = yes) Apply zero level correction?
+ (\fIdarkcor\fR = yes) Apply dark count correction?
+ (\fIflatcor\fR = yes) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout corr?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (fixfile = "") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "finalflat1*imh") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+
+.fi
+
+.le
+.bp
+.ls \fI3.8 Creating the Bad Pixel File\fR
+
+Now it is time to check your images to see if structure remains in the bad
+pixel regions. If you were careful to observe your objects far away from any
+obvious bad regions then you may only need to 'fix' pixels for cosmetic reasons.
+We suggest that you examine these files using the display window,
+imtool (this can only be done at one of the SUN consoles), and then plot
+them using 'implot'. Start by loading the 'noao', 'images', 'tv', 'imred',
+and 'ccdred' packages. Display
+a processed image and plot it using 'implot',
+
+.nf
+ cl> noao
+ no> images
+ im> tv
+ tv> imred
+ im> ccdred
+ cc> display obj002 1
+ cc> implot obj002
+.fi
+
+Do not exit from implot, but move the cursor to the imtool window and press
+'F6'. The cursor coordinates are now visible at the bottom of the screen.
+Find the position of a bad pixel this way, return to the graph, and
+plot it using 'implot' with the ':l #' (line) and ':c #' (column) commands.
+You can define the bad regions more precisely in 'implot' by overplotting
+lines and columns around their centers; do this by typing 'o' followed
+by ':c #' and ':l #'. Once you have these regions written down, check some of
+your images to see if the bad pixels are a problem, i.e., >1% in signal. If
+they are a problem, then create a file called 'badpix' with the listing of
+the pixels you wish to fix:
+
+.nf
+ cc> edit badpix (according to the following format)
+.fi
+
+The following example is to illustrate the format of a bad pixel file.
+
+.nf
+ 84 87 1 450
+ 87 90 105 109
+ 87 89 110 111
+ 124 126 112 116
+ 206 206 80 80
+.fi
+
+Each line stands for a rectangular region of the image to be fixed. The regions
+are specified by four numbers giving the starting and ending columns followed
+by the starting and ending lines. The starting and ending points may be the
+same to specify a single column, line or pixel. Note that each region is
+"fixed" by interpolating across its shortest dimension and that regions are
+processed in the order that they appear in the bad pixel list. For bad regions
+of complex shape, some care may be required in specifying the regions in order
+to achieve an optimal result. \fIIt is strongly recommended that you test your
+bad pixel file by using it to correct a copy of one of your images before going
+into production\fR. This bad pixel file will be specified in the task 'ccdproc'
+in the parameter 'fixfile'. So now edit and run the task:
+
+.nf
+ cc> epar ccdproc
+ cc> ccdproc obj*.imh
+.fi
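+
+If you wish to test the bad pixel file on a copy of a single image first, as
+recommended above, a minimal sketch ('test1010' is just an arbitrary name
+for the copy) is
+
+.nf
+	cc> imcopy obj1010 test1010
+	cc> ccdproc test1010 fixpix+ fixfile="badpix"
+	cc> display test1010 1
+.fi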
+
+Remember that 'ccdproc' already knows that the images
+have been processed so those
+operations will not be performed again. Now you have final images that are ready
+to be used. If you plan to do photometry in IRAF see section VI of this manual.
+
+.nf
+ \fIccdproc\fR
+
+	images = "obj*imh"		List of CCD images to correct
+ (\fIccdtype\fR = "object") CCD image type to correct
+ (max_cache = 0) Maximum image caching memory (in Mbytes)
+ (noproc = no) List processing steps only?
+ (\fIfixpix\fR = yes) Fix bad CCD lines and columns?
+ (\fIoverscan\fR = yes) Apply overscan strip correction?
+ (\fItrim\fR = yes) Trim the image?
+ (\fIzerocor\fR = yes) Apply zero level correction?
+ (\fIdarkcor\fR = yes) Apply dark count correction?
+ (\fIflatcor\fR = yes) Apply flat field correction?
+ (illumcor = no) Apply iillumination correction?
+ (fringecor = no) Apply fringe correction?
+ (readcor = no) Convert zero level image to readout corr?
+ (scancor = no) Convert flat field image to scan correction?
+ (readaxis = "line") Read out axis (column|line)
+ (\fIfixfile\fR = "badpix") File describing the bad lines and columns
+ (\fIbiassec\fR = "[m:n,*]") Overscan strip image section
+ (\fItrimsec\fR = "[r:s,t:u]") Trim data section
+ (\fIzero\fR = "zero1") Zero level calibration image
+ (\fIdark\fR = "dark1") Dark count calibration image
+ (\fIflat\fR = "finalflat1*imh") Flat field images
+ (illum = "") Iillumination correction images
+ (fringe = "") Fringe correction images
+ (minreplace = 1.) Minimum flat field value
+ (scantype = "shortscan") Scan type (shortscan|longscan)
+ (nscan = 1) Number of short scan lines\n
+ (interactive = yes) Fit overscan interactively?
+ (function = "spline3") Fitting function
+ (order = 2) Number of polynomial terms or spline pieces
+ (sample = "*") Sample points to fit
+ (naverage = 1) Number of sample points to combine
+ (niterate = 1) Number of rejection iterations
+ (low_reject = 3.) Low sigma rejection factor
+ (high_reject = 3.) High sigma rejection factor
+ (grow = 0.) Rejection growing radius
+ (mode = "ql")
+
+.fi
+.le
+.bp
+.ls \fI3.9 Writing the Data to Tape\fR
+
+Allocate the device; for the SUNs it is "mta". Then mount your tape, and don't
+forget to put in a write ring. If you wish to use the exabyte drive, use "mtx"
+for the device name. Load the 'dataio' package and do an 'epar' on 'wfits'.
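+
+The allocation itself is done with the standard 'allocate' command (a
+sketch, using "mta" as above),
+
+.nf
+	cl> allocate mta
+.fi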
+
+.nf
+ cl> dataio
+ da> epar wfits
+ da> wfits *.imh mta.6250 new+
+.fi
+
+The parameter 'newtape' should be "yes"
+if you are starting on a new tape. Otherwise use "no", so that you do not
+overwrite data which has already been placed on the tape.
+
+After writing all the desired files to tape, deallocate the drive and retrieve
+your tape.
+
+.nf
+ da> dealloc mta
+.fi
+.le
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/quadproc.hlp b/noao/imred/quadred/src/quad/doc/quadproc.hlp
new file mode 100644
index 00000000..8d445097
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/quadproc.hlp
@@ -0,0 +1,672 @@
+.help quadproc Sept93 arcon.quad
+.ih
+NAME
+quadproc -- Process multi-readout CCD images
+.ih
+USAGE
+quadproc images
+.ih
+PARAMETERS
+.ls images
+List of input CCD images to process. The list may include processed
+images and calibration images.
+.le
+.ls ccdtype = ""
+CCD image type to select from the input image list. If no type is given
+then all input images will be selected. The recognized types are described
+in \fBccdtypes\fR.
+.le
+.ls max_cache = 0
+Maximum image caching memory (in Mbytes). If there is sufficient memory
+the calibration images, such as zero level, dark count, and flat fields,
+will be cached in memory when processing many input images. This
+reduces the disk I/O and makes the task run a little faster. If the
+value is zero image caching is not used.
+.le
+.ls noproc = no
+List processing steps only?
+.le
+
+.ce
+PROCESSING SWITCHES
+.ls fixpix = yes
+Fix bad CCD lines and columns by linear interpolation from neighboring
+lines and columns? If yes then a bad pixel file must be specified.
+.le
+.ls overscan = yes
+Apply overscan or prescan bias correction? If yes then the overscan
+image section and the readout axis must be specified.
+.le
+.ls trim = yes
+Trim the image of the overscan region and bad edge lines and columns?
+If yes then the trim section must be specified.
+.le
+.ls zerocor = yes
+Apply zero level correction? If yes a zero level image must be specified.
+.le
+.ls darkcor = yes
+Apply dark count correction? If yes a dark count image must be specified.
+.le
+.ls flatcor = yes
+Apply flat field correction? If yes flat field images must be specified.
+.le
+.ls illumcor = no
+Apply iillumination correction? If yes iillumination images must be specified.
+.le
+.ls fringecor = no
+Apply fringe correction? If yes fringe images must be specified.
+.le
+.ls readcor = no
+Convert zero level images to readout correction images? If yes then
+zero level images are averaged across the readout axis to form one
+dimensional zero level readout correction images.
+.le
+.ls scancor = no
+Convert flat field images to scan mode flat field images? If yes then the
+form of scan mode correction is specified by the parameter \fIscantype\fR.
+.le
+
+.ce
+PROCESSING PARAMETERS
+.ls readaxis = "line"
+Read out axis specified as "line" or "column".
+.le
+.ls fixfile
+File describing the bad lines and columns. If "image" is specified then
+the file is specified in the image header or instrument translation file.
+See Section 2. of Description for further information on bad pixel files.
+.le
+.ls biassec
+Overscan bias strip image section. If "image" is specified then the overscan
+bias section is specified in the image header or instrument translation file.
+See Section 3. of Description for further information on setting this parameter.
+.le
+.ls trimsec
+Image section for trimming. If "image" is specified then the trim
+image section is specified in the image header or instrument translation file.
+See Section 4. of Description for further information on setting this parameter.
+.le
+.ls zero = ""
+Zero level calibration image. The zero level image may be one or two
+dimensional. The CCD image type and subset are not checked for these
+images and they take precedence over any zero level calibration images
+given in the input list.
+.le
+.ls dark = ""
+Dark count calibration image. The CCD image type and subset are not checked
+for these images and they take precedence over any dark count calibration
+images given in the input list.
+.le
+.ls flat = ""
+Flat field calibration images. The flat field images may be one or
+two dimensional. The CCD image type is not checked for these
+images and they take precedence over any flat field calibration images given
+in the input list. The flat field image with the same subset as the
+input image being processed is selected.
+.le
+.ls illum = ""
+Iillumination correction images. The CCD image type is not checked for these
+images and they take precedence over any iillumination correction images given
+in the input list. The iillumination image with the same subset as the
+input image being processed is selected.
+.le
+.ls fringe = ""
+Fringe correction images. The CCD image type is not checked for these
+images and they take precedence over any fringe correction images given
+in the input list. The fringe image with the same subset as the
+input image being processed is selected.
+.le
+.ls minreplace = 1.
+When processing flat fields, pixel values below this value (after
+all other processing such as overscan, zero, and dark corrections) are
+replaced by this value. This allows flat fields processed by \fBquadproc\fR
+to be certain to avoid divide by zero problems when applied to object
+images.
+.le
+.ls scantype = "shortscan"
+Type of scan format used in creating the CCD images. The modes are:
+.ls "shortscan"
+The CCD is scanned over a number of lines and then read out as a regular
+two dimensional image. In this mode unscanned flat fields are numerically
+scanned to form scanned flat fields comparable to the observations. If
+the flat field calibration images are taken in scanned mode then
+\fIscancor\fR should be no and the processing performed in the same manner
+as in unscanned mode.
+.le
+.ls "longscan"
+In this mode the CCD is clocked and read out continuously to form a long
+strip. Flat fields are averaged across the readout axis to
+form a one dimensional flat field readout correction image. This assumes
+that all recorded image lines are clocked over the entire active area of the
+CCD.
+.le
+.le
+.ls nscan
+Number of scan readout lines used in short scan mode. This parameter is used
+when the scan type is "shortscan".
+.le
+
+.ce
+OVERSCAN FITTING PARAMETERS
+.ls interactive = no
+Fit the overscan vector interactively? If yes the overscan vector is fit
+interactively using the \fBicfit\fR package. If no then the fitting parameters
+given below are used.
+.le
+.ls function = "legendre"
+Overscan fitting function. The function types are "legendre" polynomial,
+"chebyshev" polynomial, "spline1" linear spline, and "spline3" cubic
+spline.
+.le
+.ls order = 1
+Number of polynomial terms or spline pieces in the overscan fit.
+.le
+.ls sample = "*"
+Sample points to use in the overscan fit. The string "*" specifies all
+points; otherwise an \fBicfit\fR range string is used.
+.le
+.ls naverage = 1
+Number of points to average or median to form fitting points. Positive
+numbers specify averages and negative numbers specify medians.
+.le
+.ls niterate = 1
+Number of rejection iterations to remove deviant points from the overscan fit.
+If 0 then no points are rejected.
+.le
+.ls low_reject = 3., high_reject = 3.
+Low and high sigma rejection factors for rejecting deviant points from the
+overscan fit.
+.le
+.ls grow = 0.
+One dimensional growing radius for rejection of neighbors to deviant points.
+.le
+.ih
+DESCRIPTION
+\fBQuadproc\fR processes CCD images to remove all "instrumental signatures" from
+the data. The operations performed are:
+.ls
+.nf
+o correct detector defects (bad lines and columns)
+o determine readout bias level using overscan and subtract it
+o trim off the overscan regions and unwanted border pixels
+o subtract zero level bias
+o subtract dark counts
+o correct for pixel-to-pixel sensitivity variations
+o correct for non-uniform iillumination
+o correct for fringing
+.fi
+.le
+.sp 1
+\fBQuadproc\fR is a cl script based on the task \fBccdproc\fR in the
+\fBccdred\fR package. It is specifically designed to deal with Arcon data
+obtained in multi-readout mode (see \fBquadformat\fR). A feature of such
+images is that each readout typically has a slightly different, DC bias
+level, gain, and readout noise. As a result both zero frames and uniformly
+illuminated exposures show a characteristic chequer board pattern, the
+sections of the image read through each amplifier having different levels.
+In addition, there will be a separate overscan strip, used to monitor the zero
+level, for each readout. The location of these overscan strips in the raw
+frame depends on which amplifiers are used. \fBQuadproc\fR splits each
+multi-readout image into subimages, one for each amplifier, and also calculates
+the biassec and trimsec appropriately for each. It then calls \fBccdproc\fR to
+perform the first three operations listed above. The sub-images are then glued
+back together. Finally, \fBccdproc\fR is called a second time to perform all the
+remaining reduction steps.
+
+\fBQuadproc\fR MUST be used for the reduction of multi-readout data up to and
+including the trimming step, and it is convenient to use it for the entire
+reduction process. However, once ALL images have been trimmed it is possible
+to finish the reductions using \fBccdproc\fR if the \fBquad\fR package is not
+available at your home institution. \fBQuadproc\fR recognizes mono-readout
+images and processes them directly using \fBccdproc\fR. If your images are a
+mixture of multi- and mono- readout use \fBquadproc\fR; if you only have
+mono-readout data use \fBccdproc\fR.
+
+\fBQuadproc\fR is identical to \fBccdproc\fR in the way it is used, and has
+exactly the same parameters; as far as possible it also behaves in the same way.
+To run it, all one has to do is set the parameters and then begin processing
+the images. The task takes care of most of the record keeping and
+automatically does the prerequisite processing of calibration images. For
+ease of reference, the following sections provide a simple outline of how to
+use the task, together with a description of the operations performed. They
+are taken almost verbatim from the help page for \fBccdproc\fR. If you are
+already familiar with that task you should read sections 2., 3. and 4. below,
+which include information on the preparation of the badpixel file, and on how
+to specify \fBbiassec\fR and \fBtrimsec\fR parameters. See section 12. for a
+description of the differences between the two tasks. For a user's guide and
+cookbook for the \fBquad\fR package see \fBguide\fR.
+.sh
+1. Parameters
+There are many parameters but they may be easily reviewed and modified using
+the task \fBeparam\fR.
+The input CCD images to be processed are given as an image list.
+Previously processed images are ignored and calibration images are
+recognized, provided the CCD image types are in the image header (see
+\fBinstruments\fR and \fBccdtypes\fR). \fBQuadproc\fR separates multi- and
+mono-readout images in the input list and handles them accordingly.
+Therefore it is permissible to use simple image templates such as "*.imh".
+The \fIccdtype\fR parameter may be used to select only certain types of CCD
+images to process (see \fBccdtypes\fR).
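+
+For example (a sketch), to process only the object frames in a directory of
+mixed images one might type
+
+.nf
+	cl> quadproc *.imh ccdtype=object
+.fi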
+
+The processing operations are selected by boolean (yes/no) parameters.
+Because calibration images are recognized and processed appropriately,
+the processing operations for object images should be set. Any combination of
+operations may be specified. Two of the operations, \fBreadcor\fR and
+\fBscancor\fR, are only applicable to zero level and flat field images
+respectively. These
+are used for certain types of CCDs and modes of operation.
+
+The processing steps selected have related parameters which must be
+set. These are things like image sections defining the overscan and
+trim regions and calibration images. There are a number of parameters
+used for fitting the overscan or prescan bias section. These are
+parameters used by the standard IRAF curve fitting package \fBicfit\fR.
+The parameters are described in more detail in the following sections.
+
+In addition to the task parameters there are package parameters
+which affect \fBquadproc\fR. These include the instrument and subset
+files, the text and plot log files, the output pixel datatype,
+the verbose parameter for logging to the terminal, and the backup
+prefix. These are described in \fBquad\fR.
+
+Calibration images are specified by task parameters and/or in the
+input image list. If more than one calibration image is specified
+then the first one encountered is used. Calibration images specified by
+task parameters take precedence over calibration images in the input list.
+These images also need not have a CCD image type parameter since the task
+parameter identifies the type of calibration image. This method is
+best if there is only one calibration image for all images
+to be processed, almost always true for zero level and dark
+count images. If no calibration image is specified by task parameter
+then calibration images in the input image list are identified and
+used. This requires that the images have CCD image types recognized
+by the package. This method is useful if one may simply say "*.imh"
+as the image list to process all images or if the images are broken
+up into groups, in "@" files for example, each with their own calibration
+frames.
+.sh
+2. Fixpix
+Regions of bad lines and columns may be replaced by linear
+interpolation from neighboring lines and columns when the parameter
+\fIfixpix\fR is set. The bad regions are specified in a bad pixel
+file. The file consists of lines with four fields, the starting and
+ending columns and the starting and ending lines. Any number of
+regions may be specified. Currently, the coordinates given for the bad regions
+must be those that would be applicable if the CCD was used in SINGLE READOUT
+MODE, even if multi-readout images are being reduced. A task is being written
+to aid in the preparation of an appropriate bad-pixel file given measurements
+made on a raw multi-readout image.
+
+Comment lines beginning with the character '#' may be included. If a comment
+line preceding the bad regions contains the word "untrimmed" then the
+coordinate system refers to the original format of the images; i.e. before
+trimming. If an image has been trimmed previously then the trim region
+specified in the image header is used to convert the coordinates in the bad
+pixel file to those of the trimmed image. If the file does not contain the
+word "untrimmed" then the coordinate system must match that of the image
+being corrected; i.e. untrimmed coordinates if the image has not been
+trimmed and trimmed coordinates if the image has been trimmed.
+Standard bad pixel files should always be specified in terms of the original
+format.
+
+The bad pixel file may be specified explicitly with the parameter \fIfixfile\fR
+or indirectly if the parameter has the value "image". In the latter case
+the instrument file must contain the name of the file.
+.sh
+3. Overscan
+The portion of the image used to determine the readout bias level is specified
+with the parameter \fBbiassec\fR. This may be an explicit image section, or it
+may be set to the special value "image". In the latter case the value given in
+the image header is used. The image header value uses the entire overscan
+strip without allowing any margin between the data section and the bias
+section. Because Arcon uses a DC-coupled preamplifier the transition
+between data and overscan is very sharp indeed. Nonetheless, we recommend that
+you do skip the first few pixels of the overscan strip. To decide this issue
+for yourself, use implot to plot the average of several lines from a high
+exposure level image such as a flat field. Expand the transition region
+between data and overscan and decide how many pixels of the overscan are
+contaminated.
+
+In the case of multi-readout images, the way in which an explicit value for
+\fBbiassec\fR must be set is unfortunately somewhat non-intuitive. Currently,
+the value recorded in the image header is that which would be appropriate had
+the detector been read out using a single amplifier; an explicit image section
+must be specified in the same way. \fBQuadproc\fR calculates the sections
+to use for the sub-images corresponding to each readout based on such "single
+readout" sections. To determine the section you must enter, use \fBimhead\fR
+or \fBhselect\fR to determine the value of \fBbiassec\fR stored in the image
+header. If this is, for instance, "[1025:1060,1:1028]" then setting
+\fBbiassec\fR = "[1029:1060,1:1028]" would leave a margin of 4 pixels
+(1029 - 1025). Note that if two readouts are used in the horizontal direction
+(quad or serial-split dual readout) the overscan strip for each amplifier is
+only half as wide as that in single readout mode. Thus in the example a 15
+pixel (36 / 2 - 3) wide strip is used for each readout.
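+
+For example (a sketch using the header value quoted above; "obj001" is a
+hypothetical image name),
+
+.nf
+	cl> hselect obj001 biassec yes
+	[1025:1060,1:1028]
+	cl> quadproc.biassec = "[1029:1060,1:1028]"
+.fi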
+
+If an overscan or prescan correction is specified (\fIoverscan\fR
+parameter) then the specified image section is averaged
+along the readout axis (\fIreadaxis\fR parameter) to form a
+correction vector. A function is fit to this vector and for each readout
+line (image line or column) the function value for that line is
+subtracted from the image line. The fitting function is generally
+either a constant (polynomial of 1 term) or a high order function
+which fits the large scale shape of the overscan vector. Bad pixel
+rejection is also used to eliminate cosmic ray events. The function
+fitting may be done interactively using the standard \fBicfit\fR
+interactive graphical curve fitting tool. Regardless of whether the fit
+is done interactively, the overscan vector and the fit may be recorded
+for later review in a metacode plot file named by the parameter
+\fIquad.plotfile\fR. The mean value of the bias function is also recorded in
+the image header and log file.
+
+The overscan subtraction performed by \fBquadproc\fR corrects the
+amplifier-to-amplifier differences in the bias level, so that no
+readout structure should be visible in processed zero images. However, you
+will still see the chequer board structure in flatfield and object exposures
+(unless the sky level is zero) because of gain differences between the
+amplifiers.
+.sh
+4. Trim
+When the parameter \fItrim\fR is set the input image will be trimmed to
+the image section given by the parameter \fItrimsec\fR. This may be an explicit
+image section, or it may be set to the special value "image". In the latter
+case the value given in the image header is used. The image header value keeps
+the entire imaging section of the CCD.
+
+In the case of multi-readout images, the way in which an explicit value for
+\fBtrimsec\fR must be set is unfortunately somewhat non-intuitive. Currently,
+the value recorded in the image header is that which would be appropriate had
+the detector been read out using a single amplifier; an explicit image section
+must be specified in the same way. \fBQuadproc\fR calculates the sections
+to use for the sub-images corresponding to each readout based on such "single
+readout" sections. In addition one is currently restricted to trimming exactly
+the same number of columns from each side of the CCD; there is no such
+restriction on the number of lines which can be trimmed from the top and bottom
+edges of the image. To determine the section you must enter, use \fBimhead\fR
+or \fBhselect\fR to determine the value of \fBtrimsec\fR stored in the image
+header. If this is, for instance, "[1:1024,1:1028]" then setting
+\fBtrimsec\fR = "[10:1015,20:998]" would trim 9 columns from the left and right
+edges and 19 and 30 lines from the bottom and top edges respectively. If you
+need to perform an asymmetric trim in the horizontal direction this can be
+done, after processing, by using \fBimcopy\fR to copy the required portion of
+the image.
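+
+Similarly for the trim section (a sketch using the values quoted above;
+"obj001" is again a hypothetical image name),
+
+.nf
+	cl> hselect obj001 trimsec yes
+	[1:1024,1:1028]
+	cl> quadproc.trimsec = "[10:1015,20:998]"
+.fi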
+
+The trim section used for science images should, of course, be the same as
+that used for the calibration images.
+.sh
+5. Zerocor
+After the readout bias is subtracted, as defined by the overscan or prescan
+region, there may still be a zero level bias. This level may be two
+dimensional or one dimensional (the same for every readout line). A
+zero level calibration is obtained by taking zero length exposures;
+generally many are taken and combined. To apply this zero
+level calibration the parameter \fIzerocor\fR is set. In addition if
+the zero level bias is only readout dependent then the parameter \fIreadcor\fR
+is set to reduce two dimensional zero level images to one dimensional
+images. The zero level images may be specified by the parameter \fIzero\fR
+or given in the input image list (provided the CCD image type is defined).
+
+When the zero level image is needed to correct an input image it is checked
+to see if it has been processed and, if not, it is processed automatically.
+Processing of zero level images consists of bad pixel replacement,
+overscan correction, trimming, and averaging to one dimension if the
+readout correction is specified.
+.sh
+6. Darkcor
+Dark counts are subtracted by scaling a dark count calibration image to
+the same exposure time as the input image and subtracting. The
+exposure time used is the dark time which may be different than the
+actual integration or exposure time. A dark count calibration image is
+obtained by taking a very long exposure with the shutter closed; i.e.
+an exposure with no light reaching the detector. The dark count
+correction is selected with the parameter \fIdarkcor\fR and the dark
+count calibration image is specified either with the parameter
+\fIdark\fR or as one of the input images. The dark count image is
+automatically processed as needed. Processing of dark count images
+consists of bad pixel replacement, overscan and zero level correction,
+and trimming.
+.sh
+7. Flatcor
+The relative detector pixel response is calibrated by dividing by a
+scaled flat field calibration image. A flat field image is obtained by
+exposure to a spatially uniform source of light such as a lamp or
+twilight sky. Flat field images may be corrected for the spectral
+signature in spectroscopic images (see \fBresponse\fR and
+\fBapnormalize\fR), or for iillumination effects (see \fBmkillumflat\fR
+or \fBmkskyflat\fR). For more on flat fields and iillumination corrections
+see \fBflatfields\fR. The flat field response is dependent on the
+wavelength of light so if different filters or spectroscopic wavelength
+coverage are used a flat field calibration for each one is required.
+The different flat fields are automatically selected by a subset
+parameter (see \fBsubsets\fR).
+
+Flat field calibration is selected with the parameter \fBflatcor\fR
+and the flat field images are specified with the parameter \fBflat\fR
+or as part of the input image list. The appropriate subset is automatically
+selected for each input image processed. The flat field image is
+automatically processed as needed. Processing consists of bad pixel
+replacement, overscan subtraction, zero level subtraction, dark count
+subtraction, and trimming. Also if a scan mode is used and the
+parameter \fIscancor\fR is specified then a scan mode correction is
+applied (see below). The processing also computes the mean of the
+flat field image which is used later to scale the flat field before
+division into the input image. For scan mode flat fields the ramp
+part is included in computing the mean which will affect the level
+of images processed with this flat field. Note that there is no check for
+division by zero in the interest of efficiency. If division by zero
+does occur a fatal error will occur. The flat field can be fixed by
+replacing small values using a task such as \fBimreplace\fR or
+during processing using the \fIminreplace\fR parameter. Note that the
+\fIminreplace\fR parameter only applies to flat fields processed by
+\fBquadproc\fR.
+.sh
+8. Illumcor
+CCD images processed through the flat field calibration may not be
+completely flat (in the absence of objects). In particular, a blank
+sky image may still show gradients. This residual nonflatness is called
+the iillumination pattern. It may be introduced even if the detector is
+uniformly illuminated by the sky because the flat field lamp
+iillumination may be nonuniform. The iillumination pattern is found from a
+blank sky, or even object image, by heavily smoothing and rejecting
+objects using sigma clipping. The iillumination calibration image is
+divided into the data being processed to remove the iillumination
+pattern. The iillumination pattern is a function of the subset so there
+must be an iillumination correction image for each subset to be
+processed. The tasks \fBmkillumcor\fR and \fBmkskycor\fR are used to
+create the iillumination correction images. For more on iillumination
+corrections see \fBflatfields\fR.
+
+An alternative to treating the iillumination correction as a separate
+operation is to combine the flat field and iillumination correction
+into a corrected flat field image before processing the object
+images. This will save some processing time but does require creating
+the flat field first rather than correcting the images at the same
+time or later. There are two methods, removing the large scale
+shape of the flat field and combining a blank sky image iillumination
+with the flat field. These methods are discussed further in the
+tasks which create them; \fBmkillumcor\fR and \fBmkskycor\fR.
+.sh
+9. Fringecor
+There may be a fringe pattern in the images due to the night sky lines.
+To remove this fringe pattern a blank sky image is heavily smoothed
+to produce an iillumination image which is then subtracted from the
+original sky image. The residual fringe pattern is scaled to the
+exposure time of the image to be fringe corrected and then subtracted.
+Because the intensity of the night sky lines varies with time an
+additional scaling factor may be given in the image header.
+The fringe pattern is a function of the subset so there must be
+a fringe correction image for each subset to be processed.
+The task \fBmkfringecor\fR is used to create the fringe correction images.
+.sh
+10. Readcor
+If a zero level correction is desired (\fIzerocor\fR parameter)
+and the parameter \fIreadcor\fR is yes then a single zero level
+correction vector is applied to each readout line or column. Use of a
+readout correction rather than a two dimensional zero level image
+depends on the nature of the detector or if the CCD is operated in
+longscan mode (see below). The readout correction is specified by a
+one dimensional image (\fIzero\fR parameter) and the readout axis
+(\fIreadaxis\fR parameter). If the zero level image is two dimensional
+then it is automatically processed to a one dimensional image by
+averaging across the readout axis. Note that this modifies the zero
+level calibration image.
+.sh
+11. Scancor
+CCD detectors may be operated in several modes in astronomical
+applications. The most common is as a direct imager where each pixel
+integrates one point in the sky or spectrum. However, the design of most CCD's
+allows the sky to be scanned across the CCD while shifting the
+accumulating signal at the same rate. \fBQuadproc\fR provides for two
+scanning modes called "shortscan" and "longscan". The type of scan
+mode is set with the parameter \fIscantype\fR.
+
+In "shortscan" mode the detector is scanned over a specified number of
+lines (not necessarily at sidereal rates). The lines that scroll off
+the detector during the integration are thrown away. At the end of the
+integration the detector is read out in the same way as an unscanned
+observation. The advantage of this mode is that the small scale flat
+field response is averaged in one dimension over the number of lines
+scanned. A flat field may be observed in the same way in which case
+there is no difference in the processing from unscanned imaging and the
+parameter \fIscancor\fR should be no. However, one obtains an increase
+in the statistical accuracy of the flat fields if they are not scanned
+during the observation but digitally scanned during the processing. In
+shortscan mode with \fIscancor\fR set to yes, flat field images are
+digitally scanned, if needed, by the specified number of scan lines
+(\fInscan\fR parameter).
+
+In "longscan" mode the detector is continuously read out to produce
+an arbitrarily long strip. Provided that data which has not passed over
+the entire detector is thrown away, the flat field corrections will
+be one dimensional. If \fIscancor\fR is specified and the
+scan mode is "longscan" then a one dimensional flat field correction
+will be applied. If the specified flat field (\fIflat\fR parameter)
+is a two dimensional image then when the flat field image is processed
+it will be averaged across the readout axis to form a one dimensional
+correction image.
+.sh
+12. Outline of Processing Steps
+
+Because of the special handling required for multi-readout data
+\fBquadproc\fR internally reduces the data in two stages.
+
+.ls Stage one
+The operations which may be performed in the first stage are badpixel
+correction, determination and subtraction of the readout bias level, and
+trimming. This stage is only performed if one or more of the \fBfixpix\fR,
+\fBoverscan\fR or \fBtrim\fR flags is set to yes.
+
+First, all the calibration images which will be needed are identified. Any
+which were obtained in multi-readout mode AND which have not already been
+trimmed are selected for processing during this stage. This is necessary to
+ensure that the calibration images will be reduced properly. Similarly, the
+input list is searched and all multi-readout images which have not already
+been trimmed are selected for processing.
+
+The images selected in this way are then processed sequentially. Each is split
+into separate images, one for each amplifier. The values of the trimsec and
+biassec header keywords for each of these sub-images are set as required.
+\fBccdproc\fR is then run to correct bad pixels, determine and subtract the
+readout bias, and trim each sub-image. Finally, the pieces are glued back
+together again to form the complete image and the header information is
+tidied up. The resulting image is initially created as a temporary image.
+When stage one processing is complete the original image is deleted (or
+renamed using the specified backup prefix) and the corrected image replaces
+the original image. Using a temporary image protects the data in the
+event of an abort or computer failure. Keeping the original image name
+eliminates much of the record keeping and the need to generate new
+image names.
+.le
+.ls Stage two
+\fBCcdproc\fR is now run a second time to process ALL input images. For those
+images which were NOT selected for processing during stage one all the selected
+processing steps are carried out during this second pass. For those which were
+selected in stage one only the remaining processing steps will be performed.
+Again the output processed image is initially created as a temporary image.
+When stage two processing is complete the original image is deleted (or
+renamed using the specified backup prefix) and the corrected image replaces
+the original image.
+.le
+
+The following differences in the behaviour of \fBquadproc\fR and \fBccdproc\fR
+should be noted:
+.ls
+Because it is a script, and because it reads and writes each image several
+times during processing, \fBquadproc\fR is not very efficient. This will be
+rectified when the present prototype code is replaced by the final version.
+.le
+.ls
+If backups are enabled then \fBquadproc\fR will produce two intermediate
+images for every input image which is modified in both processing stages.
+These backup images may quickly fill up the available disk space.
+.le
+.ls
+Images may not be processed in the order they appear in the input list. Stage
+one processing is performed (if necessary) on all calibration images, then on
+all images in the input list. Any images which have already been trimmed, or
+which were taken in mono-readout mode will be skipped. Stage two processing is
+then done sequentially on all images in the input list.
+.le
+.sh
+13. Processing Arithmetic
+The \fBquadproc\fR task has two data paths, one for real image pixel datatypes
+and one for short integer pixel datatypes. In addition, internal arithmetic
+is based on the rules of FORTRAN. For efficiency there is
+no checking for division by zero in the flat field calibration.
+The following rules describe the processing arithmetic and data paths.
+
+.ls (1)
+If the input, output, or any calibration image is of type real the
+real data path is used. This means all image data is converted to
+real on input. If all the images are of type short all input data
+is kept as short integers. Thus, if all the images are of the same type
+there is no datatype conversion on input resulting in greater
+image I/O efficiency.
+.le
+.ls (2)
+In the real data path the processing arithmetic is always real and,
+if the output image is of short pixel datatype, the result
+is truncated.
+.le
+.ls (3)
+The overscan vector and the scale factors for dark count, flat field,
+illumination, and fringe calibrations are always of type real. Therefore,
+in the short data path any processing which includes these operations
+will be coerced to real arithmetic and the result truncated at the end
+of the computation.
+.le
+.sh
+14. In the Absence of Image Header Information
+The tasks in the \fBquad\fR package are most convenient to use when
+the CCD image type, subset, and exposure time are contained in the
+image header. This is true for all data obtained with Arcon. The ability to
+redefine which header parameters contain this information makes it possible
+to use the package at many different observatories (see \fBinstruments\fR).
+However, in the absence of any image header information the tasks may still
+be used effectively. There are two ways to proceed. One way is to use
+\fBccdhedit\fR to place the information in the image header.
+
+The second way is to specify the processing operations more explicitly
+than is needed when the header information is present. The parameter
+\fIccdtype\fR is set to "" or to "none". The calibration images are
+specified explicitly by task parameter since they cannot be recognized
+in the input list. Only one subset at a time may be processed.
+
+If dark count and fringe corrections are to be applied the exposure
+times must be added to all the images. Alternatively, the dark count
+and fringe images may be scaled explicitly for each input image. This
+works because the exposure times default to 1 if they are not given in
+the image header.
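+
+For example, to process the images of a single subset without relying on
+image header information (a minimal illustration; the image and calibration
+names are hypothetical):
+
+    cl> quadproc obj*.imh ccdtype="" zero="Zero" flat="FlatV"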
+.ih
+EXAMPLES
+The user's \fBguide\fR presents a tutorial in the use of this task.
+
+1. In general all that needs to be done is to set the task parameters
+and enter
+
+ cl> quadproc *.imh &
+
+This will run in the background and process all images which have not
+been processed previously.
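+
+2. To list the processing steps that would be performed without actually
+processing the images (assuming the task accepts the \fInoproc\fR flag in
+the same way as \fBccdproc\fR):
+
+    cl> quadproc *.imh noproc+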
+.ih
+SEE ALSO
+quadformat, ccdproc, instruments, ccdtypes, flatfields, icfit, quad, guide,
+mkillumcor, mkskycor, mkfringecor
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/quadreadout.hlp b/noao/imred/quadred/src/quad/doc/quadreadout.hlp
new file mode 100644
index 00000000..355fdb61
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/quadreadout.hlp
@@ -0,0 +1,19 @@
+With Arcon it is possible to read out a CCD using more than one amplifier in
+parallel, leading to a substantial reduction in readout time. Once properly
+reduced, such data is indistinguishable from that obtained when reading out
+through only a single amplifier. However, the raw images are rather different
+and consequently must be processed somewhat differently. Firstly, each readout
+will typically have a slightly different zero level, gain, and readout noise,
+and may differ slightly in its departures from perfect linearity. As a result
+both zero frames and uniformly illuminated exposures will show a characteristic
+chequer board pattern, the sections of the data read through each amplifier
+having different levels. Secondly, there will be a separate overscan strip,
+used to monitor the zero level, for each readout. The location of these overscan
+strips in the raw frame depends on which amplifiers are used. When all four
+amplifiers are used the four overscan regions form a vertical stripe down the
+centre of the raw image. A CCD read out through two amplifiers can have its
+two overscan strips running side by side down the centre of the picture; or
+they may be one above the other, on the same side of the picture; or the strips
+for the upper and lower halves of the CCD can be at opposite sides of the
+image (in which case there will also be a horizontal displacement of the data
+in the two sections).
diff --git a/noao/imred/quadred/src/quad/doc/setinstrument.hlp b/noao/imred/quadred/src/quad/doc/setinstrument.hlp
new file mode 100644
index 00000000..410dd20f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/setinstrument.hlp
@@ -0,0 +1,97 @@
+.help setinstrument Oct87 noao.imred.ccdred
+.ih
+NAME
+setinstrument -- Set instrument parameters
+.ih
+USAGE
+setinstrument instrument
+.ih
+PARAMETERS
+.ls instrument
+Instrument identification for instrument parameters to be set. If '?'
+then a list of the instrument identifiers is printed.
+.le
+.ls site = "kpno"
+Site ID.
+.le
+.ls directory = "ccddb$"
+Instrument directory containing instrument files. The instrument files
+are found in the subdirectory given by the site ID.
+.le
+.ls review = yes
+Review the instrument parameters? If yes then \fBeparam\fR is run for
+the parameters of \fBccdred\fR and \fBccdproc\fR.
+.le
+.ls query
+Parameter query if initial instrument is not found.
+.le
+.ih
+DESCRIPTION
+The purpose of the task is to allow the user to easily set default
+parameters for a new instrument. The default parameters are generally
+defined by support personnel in an instrument directory for a particular
+site. The instrument directory is the concatenation of the specified
+directory and the site. For example if the directory is "ccddb$" and
+the site is "kpno" then the instrument directory is "ccddb$kpno/".
+The user may have his own set of instrument files in a local directory.
+The current directory is used by setting the directory and site to the
+null string ("").
+
+The user specifies an instrument identifier. This instrument may
+be specific to a particular observatory, telescope, instrument, and
+detector. If the character '?' is specified or the instrument file is
+not found then a list of instruments
+in the instrument directory is produced by paging the file "instruments.men".
+The task then performs the following operations:
+.ls (1)
+If an instrument translation file with the name given by the instrument
+ID and the extension ".dat" is found then the instrument translation
+file parameter, \fIccdred.instrument\fR, is set to this file.
+If it does not exist then the user is queried again. Note that a
+null instrument, "", is allowed to set no translation file.
+.le
+.ls (2)
+If an instrument setup script with the name given by the instrument ID
+and the extension ".cl" is found then the commands in the file are
+executed (using the command \fIcl < script\fR). This script generally
+sets default parameters.
+.le
+.ls (3)
+If the review flag is set the task \fBeparam\fR is run to allow the user
+to examine and modify the parameters for the package \fBccdred\fR and task
+\fBccdproc\fR.
+.le
+.ih
+EXAMPLES
+1. To get a list of the instruments:
+
+.nf
+ cl> setinstrument ?
+ [List of instruments]
+
+2. To set the instrument and edit the processing parameters:
+
+ cl> setinstrument ccdlink
+ [Edit CCDRED parameters]
+ [Edit CCDPROC parameters]
+
+3. To use your own instrument translation file and/or setup script in
+your working directory:
+
+ cl> setinst.site=""
+ cl> setinst.dir=""
+ cl> setinst myinstrument
+
+To make these files see help under \fBinstruments\fR. Copying and modifying
+system files is also straightforward.
+
+ cl> copy ccddb$kpno/fits.dat .
+ cl> edit fits.dat
+ cl> setinst.site=""
+ cl> setinst.dir=""
+ cl> setinst fits
+.fi
+.ih
+SEE ALSO
+instruments, ccdred, ccdproc
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/subsets.hlp b/noao/imred/quadred/src/quad/doc/subsets.hlp
new file mode 100644
index 00000000..2823bd6d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/subsets.hlp
@@ -0,0 +1,97 @@
+.help subsets Jun87 noao.imred.ccdred
+.ih
+NAME
+subsets -- Description of CCD subsets
+.ih
+DESCRIPTION
+The \fBccdred\fR package groups observations into subsets.
+The image header parameter used to identify the subsets is defined
+in the instrument translation file (see help for \fBinstruments\fR).
+For example to select subsets by the header parameter "filters" the
+instrument translation file would contain the line:
+
+ subset filters
+
+Observations are generally grouped into subsets based on a common
+instrument configuration such as a filter, aperture mask,
+grating setting, etc. This allows combining images from several
+different subsets automatically and applying the appropriate
+flat field image when processing the observations. For example
+if the subsets are by filter then \fBflatcombine\fR will search
+through all the images, find the flat field images (based on the
+CCD type parameter), and combine the flat field images from
+each filter separately. Then when processing the images the
+flat field with the same filter as the observation is used.
+
+Each subset is assigned a short identifier. This is listed when
+using \fBccdlist\fR and is appended to a root name when combining
+images. Because the subset parameter in the image header may be
+any string there must be a mapping applied to generate unique
+identifiers. This mapping is defined in the file given by
+the package parameter \fIccdred.ssfile\fR. The file consists of
+lines with two fields:
+
+ 'subset string' subset_id
+
+where the subset string is the image header string and the subset_id is
+the identifier. A field must be quoted if it contains blanks. The
+user may create this file but generally it is created by the tasks. The
+tasks use the first word of the subset string as the default identifier
+and a number is appended if the first word is not unique. The
+following steps define the subset identifier:
+
+.ls (1)
+Search the subset file, if present, for a matching subset string and
+use the defined subset identifier.
+.le
+.ls (2)
+If there is no matching subset string use the first word of the
+image header subset string and, if it is not unique,
+add successive integers until it is unique.
+.le
+.ls (3)
+If the identifier is not in the subset file create the file and add an
+entry if necessary.
+.le
+.ih
+EXAMPLES
+1. The subset file is "subsets" (the default). The subset parameter is
+translated to "f1pos" in the image header (the old NOAO CCD parameter)
+which is an integer filter position. After running a task, say
+"ccdlist *.imh" to cause all filters to be checked, the subset file contains:
+
+.nf
+ '2' 2
+ '5' 5
+ '3' 3
+.fi
+
+The order reflects the order in which the filters were encountered.
+Suppose the user wants to have more descriptive names; then the subset
+file can be created or edited to the form:
+
+.nf
+ '2' U
+ '3' B
+ '4' V
+.fi
+
+(This is only an example and does not mean these are standard filters.)
+
+2. As another example suppose the image header parameter is "filter" and
+contains more descriptive strings. The subset file might become:
+
+.nf
+ 'GG 385 Filter' GG
+ 'GG 495 Filter' GG1
+ 'RG 610 Filter' RG
+ 'H-ALPHA' H_ALPHA
+.fi
+
+In this case use of the first word was not very good but it is unique.
+It is better if the filters are encoded with the thought that the first
+word will be used by \fBccdred\fR; it should be short and unique.
+.ih
+SEE ALSO
+instruments
+.endhelp
diff --git a/noao/imred/quadred/src/quad/doc/zerocombine.hlp b/noao/imred/quadred/src/quad/doc/zerocombine.hlp
new file mode 100644
index 00000000..ad1b021d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/doc/zerocombine.hlp
@@ -0,0 +1,127 @@
+.help zerocombine Sep93 arcon.quad
+.ih
+NAME
+zerocombine -- Combine and process zero level images
+.ih
+USAGE
+zerocombine input
+.ih
+PARAMETERS
+.ls input
+List of zero level images to combine. The \fIccdtype\fR parameter
+may be used to select the zero level images from a list containing all
+types of data.
+.le
+.ls output = "Zero"
+Output zero level root image name.
+.le
+.ls combine = "average" (average|median)
+Type of combining operation performed on the final set of pixels (after
+rejection). The choices are
+"average" or "median". The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "minmax" (none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip)
+Type of rejection operation. See \fBcombine\fR for details.
+.le
+.ls ccdtype = "zero"
+CCD image type to combine. If no image type is given then all input images
+are combined.
+.le
+.ls process = no
+Process the input images before combining?
+.le
+.ls delete = no
+Delete input images after combining? Only those images combined are deleted.
+.le
+.ls clobber = no
+Clobber existing output images?
+.le
+.ls scale = "none" (none|mode|median|mean|exposure)
+Multiplicative image scaling to be applied. The choices are none, scale
+by the mode, median, or mean of the specified statistics section, or scale
+by the exposure time given in the image header.
+.le
+.ls statsec = ""
+Section of images to use in computing image statistics for scaling.
+If no section is given then the entire region of the image is
+sampled (for efficiency the images are sampled if they are big enough).
+.le
+
+.ce
+Algorithm Parameters
+.ls nlow = 0, nhigh = 1 (minmax)
+The number of low and high pixels to be rejected by the "minmax" algorithm.
+.le
+.ls nkeep = 1
+The minimum number of pixels to retain or the maximum number to reject
+when using the clipping algorithms (ccdclip, crreject, sigclip,
+avsigclip, or pclip). When given as a positive value this is the minimum
+number to keep. When given as a negative value the absolute value is
+the maximum number to reject. This is actually converted to a number
+to keep by adding it to the number of images.
+.le
+.ls mclip = yes (ccdclip, crreject, sigclip, avsigclip)
+Use the median as the estimate for the true intensity rather than the
+average with high and low values excluded in the "ccdclip", "crreject",
+"sigclip", and "avsigclip" algorithms? The median is a better estimator
+than the average in the presence of data which one wants to reject.
+However, computing the median is slower than computing the average.
+.le
+.ls lsigma = 3., hsigma = 3. (ccdclip, crreject, sigclip, avsigclip, pclip)
+Low and high sigma clipping factors for the "ccdclip", "crreject", "sigclip",
+"avsigclip", and "pclip" algorithms. They multiply a "sigma" factor
+produced by the algorithm to select a point below and above the average or
+median value for rejecting pixels. The lower sigma is ignored for the
+"crreject" algorithm.
+.le
+.ls rdnoise = "0.", gain = "1.", snoise = "0." (ccdclip, crreject)
+CCD readout noise in electrons, gain in electrons/DN, and sensitivity noise
+as a fraction. These parameters are used with the "ccdclip" and "crreject"
+algorithms. The values may be either numeric or an image header keyword
+which contains the value.
+.le
+.ls pclip = -0.5 (pclip)
+Percentile clipping algorithm parameter. If greater than
+one in absolute value then it specifies a number of pixels above or
+below the median to use for computing the clipping sigma. If less
+than one in absolute value then it specifies the fraction of the pixels
+above or below the median to use. A positive value selects a point
+above the median and a negative value selects a point below the median.
+The default of -0.5 selects approximately the quartile point.
+See \fBcombine\fR for further details.
+.le
+.ls blank = 0.
+Output value to be used when there are no pixels.
+.le
+.ih
+DESCRIPTION
+The zero level images in the input image list are combined.
+The input images may be processed first if desired.
+The original images may be deleted automatically if desired.
+
+This task is a script which applies \fBquadproc\fR and \fBcombine\fR. The
+parameters and combining algorithms are described in detail in the help for
+\fBcombine\fR. This script has default parameters specifically set for
+zero level images and simplifies the combining parameters. There are other
+combining options not included in this task. For these additional
+features, such as thresholding, offsetting, masking, and projecting, use
+\fBcombine\fR.
+
+The version of \fBzerocombine\fR in the \fBquad\fR package differs from that
+in \fBccdred\fR in that \fBquadproc\fR rather than \fBccdproc\fR is used to
+process the images if this is requested. The \fBquad\fR version MUST be
+used if process=yes and the input list contains any multi-readout images which
+have not been overscan corrected and trimmed.
+
+.ih
+EXAMPLES
+1. The image data contains four zero level images.
+To automatically select them and combine them as a background job
+using the default combining algorithm:
+
+ cl> zerocombine ccd*.imh&
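+
+2. To process the zero level images first and combine them with the median
+(an illustrative variation; the parameters are described above):
+
+	cl> zerocombine ccd*.imh process=yes combine=median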
+.ih
+SEE ALSO
+quadproc, combine
+.endhelp
diff --git a/noao/imred/quadred/src/quad/gainmeasure.par b/noao/imred/quadred/src/quad/gainmeasure.par
new file mode 100644
index 00000000..00e183af
--- /dev/null
+++ b/noao/imred/quadred/src/quad/gainmeasure.par
@@ -0,0 +1,6 @@
+flat1,s,a,"",,,First high level exposure
+flat2,s,a,"",,,Second high level exposure
+zero1,s,a,"",,,First zero level exposure
+zero2,s,a,"",,,Second zero level exposure
+section,s,a,"",,,Image section for calculations
+print_headers,b,h,yes,,,Print column headers
diff --git a/noao/imred/quadred/src/quad/gainmeasure.x b/noao/imred/quadred/src/quad/gainmeasure.x
new file mode 100644
index 00000000..592cf7cf
--- /dev/null
+++ b/noao/imred/quadred/src/quad/gainmeasure.x
@@ -0,0 +1,170 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+define OPT_REFLECT 4
+
+# GAINMEASURE -- Calculate the gain (e/ADU) and RON of a CCD using the CTIO
+# (Bruce Attwood) algorithm.
+# Inputs are a pair of high signal level exposures (Flat1, Flat2) and a pair of
+# zero exposures (Zero1, Zero2). We then calculate:
+#
+# epadu = ((<Flat1> + <Flat2>) - (<Zero1> + <Zero2>)) / (Var{Diff_F} - Var{Diff_Z})
+# RON = RMS {Diff_Z} * epadu / sqrt(2)
+#
+# Where:
+#
+# diff_Z = Zero1 - Zero2
+# diff_F = Flat1 - Flat2
+#
+# The statistics must be calculated for regions free of bad pixels and other
+# defects, and with reasonably uniform illumination.
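+#
+# As an illustration with made-up numbers (not from any real detector): if
+# (<Flat1> + <Flat2>) - (<Zero1> + <Zero2>) = 20000 ADU and
+# Var{Diff_F} - Var{Diff_Z} = 10000 ADU**2, then epadu = 2.0 e-/ADU; if in
+# addition RMS{Diff_Z} = 7.07 ADU, then RON = 2.0 * 7.07 / sqrt(2), or about
+# 10 e-.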
+
+procedure t_gainmeasure ()
+
+pointer flat1, flat2 #TI High level images
+pointer zero1, zero2 #TI Zero level images
+char section[SZ_LINE] #TI Section for calculation
+
+char buffer[SZ_LINE]
+int npix, x1, x2, y1, y2, amp
+pointer sp, f1, f2, z1, z2, fd, zd, qg
+real f1bar, f2bar, z1bar, z2bar, fdbar, zdbar
+real f1sigma, f2sigma, z1sigma, z2sigma, fdsigma, zdsigma
+real div, epadu, ron
+bool headers
+
+pointer immap(), imgs2r()
+bool clgetb(), quadsect()
+int hdmaccf()
+
+begin
+ # Open instrument file
+ call clgstr ("instrument", buffer, SZ_FNAME)
+ call hdmopen (buffer)
+
+ # Map input images
+ call clgstr ("flat1", buffer, SZ_LINE)
+ flat1 = immap (buffer, READ_ONLY, 0)
+
+ call clgstr ("flat2", buffer, SZ_LINE)
+ flat2 = immap (buffer, READ_ONLY, 0)
+
+ call clgstr ("zero1", buffer, SZ_LINE)
+ zero1 = immap (buffer, READ_ONLY, 0)
+
+ call clgstr ("zero2", buffer, SZ_LINE)
+ zero2 = immap (buffer, READ_ONLY, 0)
+
+ # Get section over which measurement is to be made.
+ call clgstr ("section", section, SZ_LINE)
+
+ # See if headers are to be printed
+ headers = clgetb ("print_headers")
+
+ # Set-up quadgeom structure. We blithely assume all images are the same.
+ call quadalloc (qg)
+
+ if (hdmaccf (flat1, "HDR_REV") == NO) {
+ call quadgeom (flat1, qg, "", "")
+ } else {
+ call qghdr2 (flat1, qg)
+ }
+# call quaddump (qg)
+
+ if (headers) {
+ call printf ("#")
+ do amp = 1, QG_NAMPS (qg) {
+ call printf ("%9wAmp%2s%5w")
+ call pargstr (Memc[QG_AMPID (qg, amp)])
+ }
+ call printf ("\n")
+
+ call printf ("#")
+ do amp = 1, QG_NAMPS (qg) {
+ call printf ("%5wGain%4wRON%3w")
+ }
+ call printf ("\n")
+ call printf ("#")
+ do amp = 1, QG_NAMPS (qg) {
+ call printf ("%3w(e-/ADU)%2w(e-)%2w")
+ }
+ call printf ("\n")
+ }
+
+ call printf ("%1w")
+ do amp = 1, QG_NAMPS (qg) {
+
+ if (quadsect (qg, section, OPT_REFLECT, amp, x1, x2, y1, y2)) {
+
+ npix = (abs(y2 - y1) + 1) * (abs(x2 - x1) + 1)
+
+ # Allocate working arrays
+ call smark (sp)
+ call salloc (fd, npix, TY_REAL)
+ call salloc (zd, npix, TY_REAL)
+
+ # Read data
+ f1 = imgs2r (flat1, x1, x2, y1, y2)
+ f2 = imgs2r (flat2, x1, x2, y1, y2)
+ z1 = imgs2r (zero1, x1, x2, y1, y2)
+ z2 = imgs2r (zero2, x1, x2, y1, y2)
+
+ # Calculate differences
+ call asubr (Memr[f1], Memr[f2], Memr[fd], npix)
+ call asubr (Memr[z1], Memr[z2], Memr[zd], npix)
+
+ # Calculate means and standard deviations
+ call aavgr (Memr[f1], npix, f1bar, f1sigma)
+ call aavgr (Memr[f2], npix, f2bar, f2sigma)
+ call aavgr (Memr[z1], npix, z1bar, z1sigma)
+ call aavgr (Memr[z2], npix, z2bar, z2sigma)
+ call aavgr (Memr[fd], npix, fdbar, fdsigma)
+ call aavgr (Memr[zd], npix, zdbar, zdsigma)
+
+# call eprintf ("f1bar=%g f1sigma=%g\n")
+# call pargr (f1bar)
+# call pargr (f1sigma)
+# call eprintf ("f2bar=%g f2sigma=%g\n")
+# call pargr (f2bar)
+# call pargr (f2sigma)
+# call eprintf ("z1bar=%g z1sigma=%g\n")
+# call pargr (z1bar)
+# call pargr (z1sigma)
+# call eprintf ("z2bar=%g z2sigma=%g\n")
+# call pargr (z2bar)
+# call pargr (z2sigma)
+# call eprintf ("fdbar=%g fdsigma=%g\n")
+# call pargr (fdbar)
+# call pargr (fdsigma)
+# call eprintf ("zdbar=%g zdsigma=%g\n")
+# call pargr (zdbar)
+# call pargr (zdsigma)
+
+ div = fdsigma**2 - zdsigma**2
+ if (div > 0.0) {
+ epadu = ((f1bar + f2bar) - (z1bar + z2bar)) / div
+ ron = epadu * zdsigma / 1.41421356
+ } else {
+ epadu = INDEF
+ ron = INDEF
+ }
+
+ # Print results
+ call printf ("%3w%6.2f%2w%6.2f%2w")
+ call pargr (epadu)
+ call pargr (ron)
+
+ # Free working arrays
+ call sfree (sp)
+
+ }
+ }
+
+ call printf ("\n")
+
+ # Tidy up
+ call imunmap (flat1)
+ call imunmap (flat2)
+ call imunmap (zero1)
+ call imunmap (zero2)
+end
diff --git a/noao/imred/quadred/src/quad/hdrmap.com b/noao/imred/quadred/src/quad/hdrmap.com
new file mode 100644
index 00000000..5aa74185
--- /dev/null
+++ b/noao/imred/quadred/src/quad/hdrmap.com
@@ -0,0 +1,4 @@
+# Common for HDRMAP package.
+
+pointer stp # Symbol table pointer
+common /hdmcom/ stp
diff --git a/noao/imred/quadred/src/quad/hdrmap.x b/noao/imred/quadred/src/quad/hdrmap.x
new file mode 100644
index 00000000..ebcb253e
--- /dev/null
+++ b/noao/imred/quadred/src/quad/hdrmap.x
@@ -0,0 +1,544 @@
+include <error.h>
+include <syserr.h>
+
+.help hdrmap
+.nf-----------------------------------------------------------------------------
+HDRMAP -- Map translation between task parameters and image header parameters.
+
+In order for tasks to be partially independent of the image header
+parameter names used by different instruments and observatories a
+translation is made between task parameters and image header
+parameters. This translation is given in a file consisting of the task
+parameter name, the image header parameter name, and an optional
+default value. This file is turned into a symbol table. If the
+translation file is not found a null pointer is returned. The package will
+then use the task parameter names directly. Also if there is no
+translation given in the file for a particular parameter it is passed
+on directly. If a parameter is not in the image header then the symbol
+table default value, if given, is returned. This package is layered on
+the IMIO header package.
+
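+For example, a translation file might contain lines of the following form
+(the parameter and keyword names here are only illustrative):
+
+	exptime		EXPTIME		1.
+	imagetyp	OBSTYPE
+	subset		FILTERS
+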
+ hdmopen (fname)
+ hdmclose ()
+ hdmwrite (fname, mode)
+ hdmname (parameter, str, max_char)
+ hdmgdef (parameter, str, max_char)
+ hdmpdef (parameter, str, max_char)
+ y/n = hdmaccf (im, parameter)
+ hdmgstr (im, parameter, str, max_char)
+ ival = hdmgeti (im, parameter)
+ rval = hdmgetr (im, parameter)
+ hdmpstr (im, parameter, str)
+ hdmputi (im, parameter, value)
+ hdmputr (im, parameter, value)
+ hdmgstp (stp)
+ hdmpstp (stp)
+ hdmdelf (im, parameter)
+ hdmparm (name, parameter, max_char)
+
+hdmopen -- Open the translation file and map it into a symbol table pointer.
+hdmclose -- Close the symbol table pointer.
+hdmwrite -- Write out translation file.
+hdmname -- Return the image header parameter name.
+hdmpname -- Put the image header parameter name.
+hdmgdef -- Get the default value as a string (null if none).
+hdmpdef -- Put the default value as a string.
+hdmaccf -- Return whether the image header parameter exists (regardless of
+ whether there is a default value).
+hdmgstr -- Get a string valued parameter. Return default value if not in the
+ image header. Return null string if no default or image value.
+hdmgeti -- Get an integer valued parameter. Return default value if not in
+ the image header and error condition if no default or image value.
+hdmgetr -- Get a real valued parameter. Return default value if not in
+ the image header or error condition if no default or image value.
+hdmpstr -- Put a string valued parameter in the image header.
+hdmputi -- Put an integer valued parameter in the image header.
+hdmputr -- Put a real valued parameter in the image header.
+hdmgstp -- Get the symbol table pointer to save it while another map is used.
+hdmpstp -- Put the symbol table pointer to restore a map.
+hdmdelf -- Delete a field.
+hdmparm -- Return the parameter name corresponding to an image header name.
+.endhelp -----------------------------------------------------------------------
+
+# Symbol table definitions.
+define LEN_INDEX 32 # Length of symtab index
+define LEN_STAB 1024 # Length of symtab string buffer
+define SZ_SBUF 128 # Size of symtab string buffer
+
+define SZ_NAME 79 # Size of translation symbol name
+define SZ_DEFAULT 79 # Size of default string
+define SYMLEN 80 # Length of symbol structure
+
+# Symbol table structure
+define NAME Memc[P2C($1)] # Translation name for symbol
+define DEFAULT Memc[P2C($1+40)] # Default value of parameter
+
+
+# HDMOPEN -- Open the translation file and map it into a symbol table pointer.
+
+procedure hdmopen (fname)
+
+char fname[ARB] # Image header map file
+
+int fd, open(), fscan(), nscan(), errcode()
+pointer sp, parameter, sym, stopen(), stenter()
+include "hdrmap.com"
+
+begin
+ # Create an empty symbol table.
+ stp = stopen (fname, LEN_INDEX, LEN_STAB, SZ_SBUF)
+
+ # Return if file not found.
+ iferr (fd = open (fname, READ_ONLY, TEXT_FILE)) {
+ if (errcode () != SYS_FNOFNAME)
+ call erract (EA_WARN)
+ return
+ }
+
+ call smark (sp)
+ call salloc (parameter, SZ_NAME, TY_CHAR)
+
+	# Read the file and enter the translations in the symbol table.
+ while (fscan(fd) != EOF) {
+ call gargwrd (Memc[parameter], SZ_NAME)
+ if ((nscan() == 0) || (Memc[parameter] == '#'))
+ next
+ sym = stenter (stp, Memc[parameter], SYMLEN)
+ call gargwrd (NAME(sym), SZ_NAME)
+ call gargwrd (DEFAULT(sym), SZ_DEFAULT)
+ }
+
+ call close (fd)
+ call sfree (sp)
+end
+
+
+# HDMCLOSE -- Close the symbol table pointer.
+
+procedure hdmclose ()
+
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ call stclose (stp)
+end
+
+
+# HDMWRITE -- Write out translation file.
+
+procedure hdmwrite (fname, mode)
+
+char fname[ARB] # Image header map file
+int mode # Access mode (APPEND, NEW_FILE)
+
+int fd, open(), stridxs()
+pointer sym, sthead(), stnext(), stname()
+errchk open
+include "hdrmap.com"
+
+begin
+ # If there is no symbol table do nothing.
+ if (stp == NULL)
+ return
+
+ fd = open (fname, mode, TEXT_FILE)
+
+ sym = sthead (stp)
+ for (sym = sthead (stp); sym != NULL; sym = stnext (stp, sym)) {
+ if (stridxs (" ", Memc[stname (stp, sym)]) > 0)
+ call fprintf (fd, "'%s'%30t")
+ else
+ call fprintf (fd, "%s%30t")
+ call pargstr (Memc[stname (stp, sym)])
+ if (stridxs (" ", NAME(sym)) > 0)
+ call fprintf (fd, " '%s'%10t")
+ else
+ call fprintf (fd, " %s%10t")
+ call pargstr (NAME(sym))
+ if (DEFAULT(sym) != EOS) {
+ if (stridxs (" ", DEFAULT(sym)) > 0)
+ call fprintf (fd, " '%s'")
+ else
+ call fprintf (fd, " %s")
+ call pargstr (DEFAULT(sym))
+ }
+ call fprintf (fd, "\n")
+ }
+
+ call close (fd)
+end
+
+
+# HDMNAME -- Return the image header parameter name
+
+procedure hdmname (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing mapped parameter name
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (NAME(sym), str, max_char)
+ else
+ call strcpy (parameter, str, max_char)
+end
+
+
+# HDMPNAME -- Put the image header parameter name
+
+procedure hdmpname (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing mapped parameter name
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ DEFAULT(sym) = EOS
+ }
+
+ call strcpy (str, NAME(sym), SZ_NAME)
+end
+
+
+# HDMGDEF -- Get the default value as a string (null string if none).
+
+procedure hdmgdef (parameter, str, max_char)
+
+char parameter[ARB] # Parameter name
+char str[max_char] # String containing default value
+int max_char # Maximum characters in string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call strcpy (DEFAULT(sym), str, max_char)
+ else
+ str[1] = EOS
+end
+
+
+# HDMPDEF -- Put the default value as a string.
+
+procedure hdmpdef (parameter, str)
+
+char parameter[ARB] # Parameter name
+char str[ARB] # String containing default value
+
+pointer sym, stfind(), stenter()
+include "hdrmap.com"
+
+begin
+ if (stp == NULL)
+ return
+
+ sym = stfind (stp, parameter)
+ if (sym == NULL) {
+ sym = stenter (stp, parameter, SYMLEN)
+ call strcpy (parameter, NAME(sym), SZ_NAME)
+ }
+
+ call strcpy (str, DEFAULT(sym), SZ_DEFAULT)
+end
+
+
+# HDMACCF -- Return whether the image header parameter exists (regardless of
+# whether there is a default value).
+
+int procedure hdmaccf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int imaccf()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ return (imaccf (im, NAME(sym)))
+ else
+ return (imaccf (im, parameter))
+end
+
+
+# HDMGSTR -- Get a string valued parameter. Return default value if not in
+# the image header. Return null string if no default or image value.
+
+procedure hdmgstr (im, parameter, str, max_char)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[max_char] # String value to return
+int max_char # Maximum characters in returned string
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (call imgstr (im, NAME(sym), str, max_char))
+ call strcpy (DEFAULT(sym), str, max_char)
+ } else {
+ iferr (call imgstr (im, parameter, str, max_char))
+ str[1] = EOS
+ }
+end
+
+
+# HDMGETR -- Get a real valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+real procedure hdmgetr (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctor()
+real value, imgetr()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgetr (im, NAME(sym))) {
+ ip = 1
+ if (ctor (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETR: No value found")
+ }
+ } else
+ value = imgetr (im, parameter)
+
+ return (value)
+end
+
+
+# HDMGETI -- Get an integer valued parameter. Return default value if not in
+# the image header. Return error condition if no default or image value.
+
+int procedure hdmgeti (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+int ip, ctoi()
+int value, imgeti()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ iferr (value = imgeti (im, NAME(sym))) {
+ ip = 1
+ if (ctoi (DEFAULT(sym), ip, value) == 0)
+ call error (0, "HDMGETI: No value found")
+ }
+ } else
+ value = imgeti (im, parameter)
+
+ return (value)
+end
+
+
+# HDMPSTR -- Put a string valued parameter in the image header.
+
+procedure hdmpstr (im, parameter, str)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+char str[ARB] # String value
+
+int imaccf(), imgftype()
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL) {
+ if (imaccf (im, NAME(sym)) == YES)
+ if (imgftype (im, NAME(sym)) != TY_CHAR)
+ call imdelf (im, NAME(sym))
+ call imastr (im, NAME(sym), str)
+ } else {
+ if (imaccf (im, parameter) == YES)
+ if (imgftype (im, parameter) != TY_CHAR)
+ call imdelf (im, parameter)
+ call imastr (im, parameter, str)
+ }
+end
+
+
+# HDMPUTI -- Put an integer valued parameter in the image header.
+
+procedure hdmputi (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+int value # Integer value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddi (im, NAME(sym), value)
+ else
+ call imaddi (im, parameter, value)
+end
+
+
+# HDMPUTR -- Put a real valued parameter in the image header.
+
+procedure hdmputr (im, parameter, value)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+real value # Real value to put
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imaddr (im, NAME(sym), value)
+ else
+ call imaddr (im, parameter, value)
+end
+
+
+# HDMGSTP -- Get the symbol table pointer to save a translation map.
+# The symbol table is restored with HDMPSTP.
+
+procedure hdmgstp (ptr)
+
+pointer ptr # Symbol table pointer to return
+
+include "hdrmap.com"
+
+begin
+ ptr = stp
+end
+
+
+# HDMPSTP -- Put a symbol table pointer to restore a header map.
+# The symbol table is obtained with HDMGSTP.
+
+procedure hdmpstp (ptr)
+
+pointer ptr # Symbol table pointer to restore
+
+include "hdrmap.com"
+
+begin
+ stp = ptr
+end
+
+
+# HDMDELF -- Delete a field. It is an error if the field does not exist.
+
+procedure hdmdelf (im, parameter)
+
+pointer im # IMIO pointer
+char parameter[ARB] # Parameter name
+
+pointer sym, stfind()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = stfind (stp, parameter)
+ else
+ sym = NULL
+
+ if (sym != NULL)
+ call imdelf (im, NAME(sym))
+ else
+ call imdelf (im, parameter)
+end
+
+
+# HDMPARAM -- Get parameter given the image header name.
+
+procedure hdmparam (name, parameter, max_char)
+
+char name[ARB] # Image header name
+char parameter[max_char] # Parameter
+int max_char # Maximum size of parameter string
+
+bool streq()
+pointer sym, sthead(), stname(), stnext()
+include "hdrmap.com"
+
+begin
+ if (stp != NULL)
+ sym = sthead (stp)
+ else
+ sym = NULL
+
+ while (sym != NULL) {
+ if (streq (NAME(sym), name)) {
+ call strcpy (Memc[stname(stp, sym)], parameter, max_char)
+ return
+ }
+ sym = stnext (stp, sym)
+ }
+ call strcpy (name, parameter, max_char)
+end
diff --git a/noao/imred/quadred/src/quad/irlincor.par b/noao/imred/quadred/src/quad/irlincor.par
new file mode 100644
index 00000000..77739715
--- /dev/null
+++ b/noao/imred/quadred/src/quad/irlincor.par
@@ -0,0 +1,12 @@
+# irlincor parameter file
+input,s,a,"",,,Input images
+output,s,a,"",,,Output images
+section,s,h,"",,,Image section to correct
+coeff1,r,h,1.0,,,First coefficient of correction equation
+coeff2,r,h,0.0,,,Second coefficient of correction equation
+coeff3,r,h,0.0,,,Third coefficient of correction equation
+coeff4,r,h,0.0,,,Fourth coefficient of correction equation
+coeff5,r,h,0.0,,,Fifth coefficient of correction equation
+coeff6,r,h,0.0,,,Sixth coefficient of correction equation
+coeff7,r,h,0.0,,,Seventh coefficient of correction equation
+maxadu,r,h,INDEF,0,,Maximum number of ADU
diff --git a/noao/imred/quadred/src/quad/mkpkg b/noao/imred/quadred/src/quad/mkpkg
new file mode 100644
index 00000000..9243e633
--- /dev/null
+++ b/noao/imred/quadred/src/quad/mkpkg
@@ -0,0 +1,56 @@
+# QUAD mkpkg file (Mon Mar 28 14:07:29 CST 1994)
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $update libpkg.a
+ $omake x_quad.x
+ $link x_quad.o libpkg.a -lxtools -o xx_quad.e
+ ;
+
+install:
+ $move xx_quad.e noaobin$x_quad.e
+ ;
+
+libpkg.a:
+
+ $ifolder (qsplits.x, qsplit.gx) $generic -k -t silrd qsplit.gx $endif
+
+ ccddelete.x
+ ccdgetparam.x
+ ccdlog.x
+ ccdprcselect.x "ccdtypes.h"
+ ccdsection.x <ctype.h>
+ ccdssselect.x "ccdtypes.h"
+ ccdsubsets.x
+ ccdtypes.x "ccdtypes.h"
+ gainmeasure.x "quadgeom.h" <imhdr.h>
+ hdrmap.x "hdrmap.com" <error.h> <syserr.h>
+ qghdr2.x "quadgeom.h" <imhdr.h>
+ qguser.x "quadgeom.h"
+ qpcalimage.x "ccdtypes.h" <error.h> <imset.h>
+ qpselect.x "ccdtypes.h"
+ qsplitd.x "quadgeom.h" <imhdr.h>
+ qspliti.x "quadgeom.h" <imhdr.h>
+ qsplitl.x "quadgeom.h" <imhdr.h>
+ qsplitr.x "quadgeom.h" <imhdr.h>
+ qsplits.x "quadgeom.h" <imhdr.h>
+ quadalloc.x "quadgeom.h" <imhdr.h>
+ quaddelete.x "quadgeom.h"
+ quadgeom.x "quadgeom.h" <imhdr.h>
+ quadgeomred.x "quadgeom.h" <imhdr.h>
+ quadjoin.x "quadgeom.h" <imhdr.h>
+ quadmap.x "quadgeom.h" <error.h> <imhdr.h>
+ quadmerge.x "quadgeom.h" <imhdr.h>
+ quadscale.x "quadgeom.h" <imhdr.h>
+ quadsections.x "quadgeom.h" <imhdr.h>
+ quadsplit.x "quadgeom.h" <imhdr.h>
+ test.x "quadgeom.h"
+ timelog.x <time.h>
+ ;
diff --git a/noao/imred/quadred/src/quad/new.par b/noao/imred/quadred/src/quad/new.par
new file mode 100644
index 00000000..8700d447
--- /dev/null
+++ b/noao/imred/quadred/src/quad/new.par
@@ -0,0 +1,8 @@
+input,s,a,"",,,Input image name
+instrument,s,a,"",,,Instrument file
+xtrim1,i,h,0,0,,
+xtrim2,i,h,0,0,,
+ytrim1,i,h,0,0,,
+ytrim2,i,h,0,0,,
+xskip1,i,h,0,0,,
+xskip2,i,h,0,0,,
diff --git a/noao/imred/quadred/src/quad/old.par b/noao/imred/quadred/src/quad/old.par
new file mode 100644
index 00000000..e77315b3
--- /dev/null
+++ b/noao/imred/quadred/src/quad/old.par
@@ -0,0 +1,2 @@
+input,s,a,"",,,Input image name
+instrument,s,a,"",,,Instrument file
diff --git a/noao/imred/quadred/src/quad/qccdproc.par b/noao/imred/quadred/src/quad/qccdproc.par
new file mode 100644
index 00000000..f20207a7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qccdproc.par
@@ -0,0 +1,43 @@
+images,s,a,"",,,List of CCD images to correct
+ccdtype,s,h,"",,,CCD image type to correct
+max_cache,i,h,0,0,,Maximum image caching memory (in Mbytes)
+noproc,b,h,no,,,"List processing steps only?
+"
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+zerocor,b,h,yes,,,Apply zero level correction?
+darkcor,b,h,no,,,Apply dark count correction?
+flatcor,b,h,yes,,,Apply flat field correction?
+illumcor,b,h,no,,,Apply illumination correction?
+fringecor,b,h,no,,,Apply fringe correction?
+readcor,b,h,no,,,Convert zero level image to readout correction?
+scancor,b,h,no,,,"Convert flat field image to scan correction?
+"
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+biassec,s,h,"",,,Overscan strip image section
+trimsec,s,h,"",,,Trim data section
+zero,s,h,"",,,Zero level calibration image
+dark,s,h,"",,,Dark count calibration image
+flat,s,h,"",,,Flat field images
+illum,s,h,"",,,Illumination correction images
+fringe,s,h,"",,,Fringe correction images
+minreplace,r,h,1.,,,Minimum flat field value
+scantype,s,h,"shortscan","shortscan|longscan",,Scan type (shortscan|longscan)
+nscan,i,h,1,1,,"Number of short scan lines
+"
+interactive,b,h,no,,,Fit overscan interactively?
+function,s,h,"legendre",,,Fitting function
+order,i,h,1,1,,Number of polynomial terms or spline pieces
+sample,s,h,"*",,,Sample points to fit
+naverage,i,h,1,,,Number of sample points to combine
+niterate,i,h,1,0,,Number of rejection iterations
+low_reject,r,h,3.,0.,,Low sigma rejection factor
+high_reject,r,h,3.,0.,,High sigma rejection factor
+grow,r,h,0.,0.,,"Rejection growing radius
+"
+verbose,b,h,)_.verbose,,,Print log information to the standard output?
+logfile,f,h,)_.logfile,,,Text log file
+backup,s,h,)_.backup,,,Backup directory or prefix
+output,s,h,"",,,Not used
diff --git a/noao/imred/quadred/src/quad/qdarkcombine.cl b/noao/imred/quadred/src/quad/qdarkcombine.cl
new file mode 100644
index 00000000..7f0ef6e7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qdarkcombine.cl
@@ -0,0 +1,48 @@
+# DARKCOMBINE -- Process and combine dark count CCD images.
+
+procedure darkcombine (input)
+
+string input {prompt="List of dark images to combine"}
+file output="Dark" {prompt="Output dark image root name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="avsigclip" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="dark" {prompt="CCD image type to combine"}
+bool process=yes {prompt="Process images before combining?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="exposure" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=1 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=0. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+ quadproc (ims, ccdtype=ccdtype)
+
+ # Combine the dark images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=no, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/quadred/src/quad/qflatcombine.cl b/noao/imred/quadred/src/quad/qflatcombine.cl
new file mode 100644
index 00000000..7a0eb7ce
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qflatcombine.cl
@@ -0,0 +1,49 @@
+# FLATCOMBINE -- Process and combine flat field CCD images.
+
+procedure flatcombine (input)
+
+string input {prompt="List of flat field images to combine"}
+file output="Flat" {prompt="Output flat field root name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="avsigclip" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="flat" {prompt="CCD image type to combine"}
+bool process=yes {prompt="Process images before combining?"}
+bool subsets=yes {prompt="Combine images by subset parameter?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="mode" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=1 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=1. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+ quadproc (ims, ccdtype=ccdtype)
+
+ # Combine the flat field images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=subsets, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/quadred/src/quad/qghdr2.x b/noao/imred/quadred/src/quad/qghdr2.x
new file mode 100644
index 00000000..6a483a5b
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qghdr2.x
@@ -0,0 +1,216 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+define SZ_KEYWRD 8 # Chars in FITS keyword
+
+# QGHDR2 -- Set up section information in quadgeom structure based on
+# information in the image header.
+
+procedure qghdr2 (im, qg)
+
+pointer im #I Pointer to input image.
+pointer qg #IO Pointer to open quadgeom structure.
+
+pointer sp, keyword, hdrvalue, section
+int amp
+int ax1, ax2, axs, ay1, ay2, ays
+int bx1, bx2, bxs, by1, by2, bys
+int cx1, cx2, cxs, cy1, cy2, cys
+int dx1, dx2, dxs, dy1, dy2, dys
+int tx1, tx2, txs, ty1, ty2, tys
+
+int hdmaccf()
+
+begin
+
+ # Get stack space
+ call smark (sp)
+ call salloc (keyword, SZ_KEYWRD, TY_CHAR)
+ call salloc (hdrvalue, SZ_LINE, TY_CHAR)
+ call salloc (section, SZ_LINE, TY_CHAR)
+
+ # Get input image dimensions.
+ QG_NX (qg, 0) = IM_LEN(im, 1)
+ QG_NY (qg, 0) = IM_LEN(im, 2)
+
+ # Get number of active amplifiers in Y and X.
+ call hdmgstr (im, "nampsyx", Memc[hdrvalue], SZ_LINE)
+ call sscan (Memc[hdrvalue])
+ call gargi (QG_NAMPSY(qg))
+ call gargi (QG_NAMPSX(qg))
+
+ QG_NAMPS(qg) = QG_NAMPSY(qg) * QG_NAMPSX(qg)
+ if (QG_NAMPS(qg) > QG_MAXAMPS)
+ call error (0, "CCD has too many read-outs for this program")
+
+ # Get decode and order list of active amplifiers.
+ call hdmgstr (im, "amplist", Memc[hdrvalue], SZ_LINE)
+ call ampnames (qg, Memc[hdrvalue])
+
+ # Read geometry keywords for each amplifier from header.
+ do amp = 1, QG_NAMPS (qg) {
+
+ # Ampsec (ASECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_KEYWRD, "ASEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmgstr (im, Memc[keyword], Memc[section], SZ_LINE)
+
+ ax1 = 1
+ ax2 = QG_NX(qg, 0) / QG_NAMPSX(qg)
+ axs = 1
+ ay1 = 1
+ ay2 = QG_NY(qg, 0) / QG_NAMPSY(qg)
+ ays = 1
+
+ call ccd_section (Memc[section], ax1, ax2, axs, ay1, ay2, ays)
+ QG_AX1(qg, amp) = ax1
+ QG_AX2(qg, amp) = ax2
+ QG_AY1(qg, amp) = ay1
+ QG_AY2(qg, amp) = ay2
+
+ # Set X and Y dimensions of subimage read out by each amplifier
+ QG_NX(qg, amp) = ax2 - ax1 + 1
+ QG_NY(qg, amp) = ay2 - ay1 + 1
+
+ # Datasec (DSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_KEYWRD, "DSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmgstr (im, Memc[keyword], Memc[section], SZ_LINE)
+
+ dx1 = ax1
+ dx2 = ax2
+ dxs = 1
+ dy1 = ay1
+ dy2 = ay2
+ dys = 1
+ call ccd_section (Memc[section], dx1, dx2, dxs, dy1, dy2, dys)
+ QG_DX1(qg, amp) = dx1 - ax1 + 1
+ QG_DX2(qg, amp) = dx2 - ax1 + 1
+ QG_DY1(qg, amp) = dy1 - ay1 + 1
+ QG_DY2(qg, amp) = dy2 - ay1 + 1
+
+ # CCDsec (CSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_KEYWRD, "CSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmgstr (im, Memc[keyword], Memc[section], SZ_LINE)
+
+ cx1 = dx1
+ cx2 = dx2
+ cxs = 1
+ cy1 = dy1
+ cy2 = dy2
+ cys = 1
+ call ccd_section (Memc[section], cx1, cx2, cxs, cy1, cy2, cys)
+ QG_CX1(qg, amp) = cx1
+ QG_CX2(qg, amp) = cx2
+ QG_CY1(qg, amp) = cy1
+ QG_CY2(qg, amp) = cy2
+
+ # Trimsec (TSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_KEYWRD, "TSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if (hdmaccf (im, Memc[keyword]) == YES) {
+ call hdmgstr (im, Memc[keyword], Memc[section], SZ_LINE)
+
+ tx1 = dx1
+ tx2 = dx2
+ txs = 1
+ ty1 = dy1
+ ty2 = dy2
+ tys = 1
+ call ccd_section (Memc[section], tx1, tx2, txs, ty1, ty2, tys)
+ QG_TX1(qg, amp) = tx1 - ax1 + 1
+ QG_TX2(qg, amp) = tx2 - ax1 + 1
+ QG_TY1(qg, amp) = ty1 - ay1 + 1
+ QG_TY2(qg, amp) = ty2 - ay1 + 1
+
+ QG_PHANTOM(qg, amp) = NO
+
+ } else {
+ QG_TX1(qg, amp) = 0
+ QG_TX2(qg, amp) = 0
+ QG_TY1(qg, amp) = 0
+ QG_TY2(qg, amp) = 0
+
+ # If the image has not been reduced this must be a phantom
+ if (hdmaccf (im, "trim") == NO) {
+ QG_PHANTOM(qg, amp) = YES
+ } else {
+ QG_PHANTOM(qg, amp) = NO
+ }
+ }
+
+ # Biassec (BSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_KEYWRD, "BSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if (hdmaccf (im, Memc[keyword]) == YES) {
+ call hdmgstr (im, Memc[keyword], Memc[section], SZ_LINE)
+
+ bx1 = 0
+ bx2 = 0
+ bxs = 1
+ by1 = 0
+ by2 = 0
+ bys = 1
+ call ccd_section (Memc[section], bx1, bx2, bxs, by1, by2, bys)
+ QG_BX1(qg, amp) = bx1 - ax1 + 1
+ QG_BX2(qg, amp) = bx2 - ax1 + 1
+ QG_BY1(qg, amp) = by1 - ay1 + 1
+ QG_BY2(qg, amp) = by2 - ay1 + 1
+ } else {
+ QG_BX1(qg, amp) = 0
+ QG_BX2(qg, amp) = 0
+ QG_BY1(qg, amp) = 0
+ QG_BY2(qg, amp) = 0
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+procedure ampnames (qg, amplist)
+
+pointer qg #I/O Pointer to open quadgeom structure
+char amplist[ARB] #I List of active amplifiers
+
+int amp, nch
+pointer sp, ampnum
+
+int strdic(), itoc()
+
+begin
+ call smark (sp)
+ call salloc (ampnum, QG_NAMPS (qg), TY_INT)
+
+ # parse amplist into array of ordinal numbers
+ call sscan (amplist)
+ do amp = 1, QG_NAMPS (qg) {
+ call gargi (Memi[ampnum+amp-1])
+ }
+
+ # Sort ordinal numbers into increasing order
+ call asrti (Memi[ampnum], Memi[ampnum], QG_NAMPS(qg))
+
+ # Convert ordinal numbers back into id strings
+ do amp = 1, QG_NAMPS (qg) {
+ call malloc (QG_AMPID(qg, amp), SZ_AMPID, TY_CHAR)
+ nch = itoc (Memi[ampnum+amp-1], Memc[QG_AMPID(qg, amp)], SZ_AMPID)
+ }
+
+ # Set AMPTYPE codes
+ do amp = 1, QG_NAMPS (qg) {
+ QG_AMPTYPE (qg, amp) = strdic (Memc[QG_AMPID (qg, amp)],
+ Memc[QG_AMPID (qg, amp)], SZ_AMPID, AMPDICT)
+ }
+
+ call sfree (sp)
+
+end
diff --git a/noao/imred/quadred/src/quad/qguser.x b/noao/imred/quadred/src/quad/qguser.x
new file mode 100644
index 00000000..5d4bf349
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qguser.x
@@ -0,0 +1,126 @@
+include "quadgeom.h"
+
+# QGUSER -- modify open quadgeom structure for user specified trim and
+# overscan.
+
+procedure qguser (qg, xtrim1, xtrim2, ytrim1, ytrim2, xskip1, xskip2)
+
+pointer qg # Pointer to open quadgeom structure.
+int xtrim1 # Number of pixels to trim at right.
+int xtrim2 # Number of pixels to trim at left.
+int ytrim1 # Number of pixels to trim at bottom.
+int ytrim2 # Number of pixels to trim at top.
+int xskip1 # Number of pixels to skip at start of overscan in X.
+int xskip2 # Number of pixels to skip at end of overscan in X.
+
+int amp, x, y
+int bx1, bx2, by1, by2
+
+begin
+
+ # Modify overscan margins
+	do amp = 1, QG_NAMPS (qg) {
+
+ switch (QG_AMPTYPE(qg, amp)) {
+ case AMP11, AMP21: # Left hand side
+ if (IS_INDEFI (xskip1)) {
+ bx1 = QG_BX1(qg, amp)
+ } else {
+ bx1 = QG_DX2(qg, amp) + xskip1 + 1
+ }
+
+ if (IS_INDEFI (xskip2)) {
+ bx2 = QG_BX2(qg, amp)
+ } else {
+ bx2 = QG_AX2(qg, amp) - QG_AX1(qg, amp) - xskip2 + 1
+ }
+
+ case AMP12, AMP22: # Right hand side
+ if (IS_INDEFI (xskip2)) {
+ bx1 = QG_BX1(qg, amp)
+ } else {
+ bx1 = 1 + xskip2
+ }
+ if (IS_INDEFI (xskip1)) {
+ bx2 = QG_BX2(qg, amp)
+ } else {
+ bx2 = QG_DX1(qg, amp) - xskip1 - 1
+ }
+
+ }
+ by1 = QG_BY1(qg, amp)
+ by2 = QG_BY2(qg, amp)
+
+ if (bx1 > bx2) {
+ bx1 = 0
+ bx2 = 0
+ by1 = 0
+ by2 = 0
+ }
+
+ QG_BX1(qg, amp) = bx1
+ QG_BX2(qg, amp) = bx2
+ QG_BY1(qg, amp) = by1
+ QG_BY2(qg, amp) = by2
+
+ }
+
+ # Modify trim margins
+
+ # Set left hand edge
+ if (! IS_INDEFI(xtrim1)) {
+ do y = 1, QG_NAMPSY(qg) {
+ do x = 1, QG_NAMPSX(qg) {
+
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM(qg, amp) == NO) {
+ QG_TX1(qg, amp) = QG_DX1(qg, amp) + xtrim1
+ break
+ }
+ }
+ }
+ }
+
+ # Set right hand edge
+ if (! IS_INDEFI(xtrim2)) {
+ do y = 1, QG_NAMPSY(qg) {
+ do x = QG_NAMPSX(qg), 1, -1 {
+
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM(qg, amp) == NO) {
+ QG_TX2(qg, amp) = QG_DX2(qg, amp) - xtrim2
+ break
+ }
+ }
+ }
+ }
+
+
+ # Set lower edge
+ if (! IS_INDEFI(ytrim1)) {
+ do x = 1, QG_NAMPSX(qg) {
+ do y = 1, QG_NAMPSY(qg) {
+
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM(qg, amp) == NO) {
+ QG_TY1(qg, amp) = QG_DY1(qg, amp) + ytrim1
+ break
+ }
+ }
+ }
+ }
+
+ # Set upper edge
+ if (! IS_INDEFI(ytrim2)) {
+ do x = 1, QG_NAMPSX(qg) {
+ do y = QG_NAMPSY(qg), 1, -1 {
+
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM(qg, amp) == NO) {
+ QG_TY2(qg, amp) = QG_DY2(qg, amp) - ytrim2
+ break
+ }
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qhistogram.cl b/noao/imred/quadred/src/quad/qhistogram.cl
new file mode 100644
index 00000000..4b5c1958
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qhistogram.cl
@@ -0,0 +1,58 @@
+procedure qhistogram (image)
+
+begin
+ string tmp, meta, im, subimage, amp, section
+ int nx, ny
+# real zz1, zz2, mean, mode, min, max, sigma
+
+ im = image
+
+ tmp = mktemp ("uparm$tmp")
+ fdtmp = tmp
+ meta = mktemp ("uparm$meta")
+
+ # Project image section on to quadrant boundaries.
+ #quadsections (im, window=window, section="", template="$I$S $A $S\n",
+ #xskip1=INDEF, xskip2=INDEF, xtrim1=INDEF, xtrim2=INDEF,
+ #ytrim1=INDEF, ytrim2=INDEF, >> tmp)
+ quadsections (im, window=window, section="", template="$I$S $A $S\n",
+ >> tmp)
+
+# # Set up histogram limits
+# switch (substr (scaling, 1, 1) {
+# case "s": set
+# zz1 = z1
+# zz2 = z2
+
+# case minmax"
+
+
+ if (listout) {
+ printf ("%s\n", im)
+ while (fscan (fdtmp, subimage, amp, section) != EOF) {
+
+ printf ("\tAmp%s: section=%s\n\n", amp, section)
+
+ imhist (subimage, z1=z1, z2=z2, binwidth=binwidth, nbins=nbins,
+ autoscale=autoscale, top_closed=top_closed, hist_type=hist_type,
+ listout=listout, plot_type=plot_type, logy=logy, device=device)
+ }
+
+ } else {
+ while (fscan (fdtmp, subimage) != EOF) {
+
+ imhist (subimage, z1=z1, z2=z2, binwidth=binwidth, nbins=nbins,
+ autoscale=autoscale, top_closed=top_closed, hist_type=hist_type,
+ listout=listout, plot_type=plot_type, logy=logy, device=device,
+ >>G meta)
+
+ }
+ ccdgetparam (im, "nampsyx") | scan (ny, nx)
+ gkim (meta, device=device, output=plotfile, nx=nx, ny=ny, rotate=no,
+ fill=yes, interactive=no, cursor="")
+
+ delete (meta, ver-)
+ }
+
+ delete (tmp, ver-)
+end
diff --git a/noao/imred/quadred/src/quad/qhistogram.par b/noao/imred/quadred/src/quad/qhistogram.par
new file mode 100644
index 00000000..1ba40ec1
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qhistogram.par
@@ -0,0 +1,17 @@
+image,s,a,,,,Image name
+window,s,h,"datasec","|datasec|trimsec|biassec|reflect|duplicate|",,Window to apply to image
+z1,r,h,INDEF,,,Minimum histogram intensity
+z2,r,h,INDEF,,,Maximum histogram intensity
+binwidth,r,h,INDEF,,,Resolution of histogram in intensity units
+nbins,i,h,512,1,,Number of bins in histogram
+autoscale,b,h,yes,,,Adjust nbins and z2 for integer data?
+top_closed,b,h,no,,,Include z2 in the top bin?
+hist_type,s,h,"normal","normal|cumulative|difference|second_difference",,"Type of histogram"
+listout,b,h,no,,,List instead of plot histogram?
+plot_type,s,h,"line","line|box",,Type of vectors to plot
+logy,b,h,yes,,,Log scale y-axis?
+device,s,h,"stdgraph",,,Output graphics device
+plotfile,s,h,"",,,"Output graphics file
+"
+fdtmp,*s,h,,,,Internal use only
+mode,s,h,ql,,,
diff --git a/noao/imred/quadred/src/quad/qnoproc.cl b/noao/imred/quadred/src/quad/qnoproc.cl
new file mode 100644
index 00000000..9f7d048f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qnoproc.cl
@@ -0,0 +1,77 @@
+procedure qnoproc (image_list)
+
+begin
+ string image, buffer, imtype
+ int i, len, nampsx, nampsy, nlines
+ bool dofix, dotrim, doover
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ i = strlen (imtype)
+
+ dofix = fixpix
+ doover = overscan
+ dotrim = trim
+
+ fd = image_list
+ while (fscan (fd, image) != EOF) {
+
+ len = strlen (image)
+ if (substr(image, len-i+1, len) == imtype) {
+ image = substr (image, 1, len-i)
+ }
+
+ # Report what processing steps will be performed by qproc
+ printf ("%s:", image)
+
+ if (fixpix) {
+ ccdgetparam (image, "fixpix") | scan (buffer)
+ dofix = (buffer == "UNDEFINED!")
+ }
+
+ if (overscan) {
+ ccdgetparam (image, "overscan") | scan (buffer)
+ doover = (buffer == "UNDEFINED!")
+ }
+
+ if (trim) {
+ ccdgetparam (image, "trim") | scan (buffer)
+ dotrim = (buffer == "UNDEFINED!")
+ }
+
+ if (dofix || dotrim || doover) {
+ ccdgetparam (image, "nampsyx") | scan (nampsy, nampsx)
+ if (nampsx == 2 && nampsy == 2) {
+ printf (" (Quad-readout image)\n")
+ } else if (nampsx == 2 || nampsy == 2) {
+ printf (" (Dual-readout image: nampsx=%d nampsy=%d)\n",
+ nampsx, nampsy)
+ } else {
+ printf ("\n")
+ }
+
+	    if (dotrim) {
+ printf (" [TO BE DONE] Trim section is:\n")
+ #quadsections (image, window="trimsec", section="",
+ #template="%18tAMP$A $S\n", xskip1=xskip1, xskip2=xskip2,
+ #xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1, ytrim2=ytrim2)
+ quadsections (image, window="trimsec", section="",
+ template="%18tAMP$A $S\n")
+ }
+
+ if (dofix)
+ printf (" [TO BE DONE] Bad pixel file is %s\n", fixfile)
+
+ if (doover) {
+ printf (" [TO BE DONE] Overscan section is:\n")
+ #quadsections (image, window="biassec", section="",
+ #template="%18tAMP$A $S\n", xskip1=xskip1, xskip2=xskip2,
+ #xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1, ytrim2=ytrim2)
+ quadsections (image, window="biassec", section="",
+ template="%18tAMP$A $S\n")
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qnoproc.par b/noao/imred/quadred/src/quad/qnoproc.par
new file mode 100644
index 00000000..7e01c141
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qnoproc.par
@@ -0,0 +1,15 @@
+image_list,s,a,"",,,List of CCD images to correct
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+fixfile,s,h,"",,,"File describing the bad lines and columns
+
+# TRIM AND OVERSCAN MARGINS (override header values)"
+xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+ytrim2,i,h,INDEF,0,,"Y pixels to trim at end of data
+"
+fd,*s,h,,,,Internal use only
diff --git a/noao/imred/quadred/src/quad/qpcalimage.par b/noao/imred/quadred/src/quad/qpcalimage.par
new file mode 100644
index 00000000..ddeb506d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qpcalimage.par
@@ -0,0 +1,3 @@
+quadproc,pset,h,,,,CCD processing parameters
+only_param,b,h,no,,,Only return calibration images from parameters
+check,b,h,yes,,,Complain if any calibration images are unspecified
diff --git a/noao/imred/quadred/src/quad/qpcalimage.x b/noao/imred/quadred/src/quad/qpcalimage.x
new file mode 100644
index 00000000..2e0ee40b
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qpcalimage.x
@@ -0,0 +1,525 @@
+include <error.h>
+include <imset.h>
+include "ccdtypes.h"
+
+define SZ_SUBSET 16 # Maximum size of subset string
+define IMAGE Memc[$1+($2-1)*SZ_FNAME] # Image string
+define SUBSET Memc[$1+($2-1)*SZ_SUBSET] # Subset string
+
+procedure t_qpcalimage ()
+
+pointer im, subsets, list
+int i, j
+bool flatcor, illumcor, fringecor, found, check
+char instrument[SZ_LINE], image[SZ_FNAME], buffer[SZ_SUBSET-1]
+
+pointer immap(), imtopenp()
+int imtgetim()
+bool clgetb(), streq()
+
+begin
+ # Open list of images and instrument file
+ list = imtopenp ("images")
+ call clgstr ("instrument", instrument, SZ_LINE)
+ call hdmopen (instrument)
+
+ if (clgetb ("only_param")) {
+ call cal_open (NULL)
+ } else {
+ call cal_open (list)
+ }
+
+ check = clgetb ("check")
+
+ if (clgetb ("zerocor")) {
+ iferr (call cal_find (ZERO, "", image, SZ_FNAME)) {
+ if (check) {
+ call erract (EA_WARN)
+ }
+
+ } else {
+ call printf ("%s\n")
+ call pargstr (image)
+ }
+ }
+
+ if (clgetb ("darkcor")) {
+ iferr (call cal_find (DARK, "", image, SZ_FNAME)) {
+ if (check)
+ call erract (EA_WARN)
+
+ } else {
+ call printf ("%s\n")
+ call pargstr (image)
+ }
+ }
+
+ flatcor = clgetb ("flatcor")
+ illumcor = clgetb ("illumcor")
+ fringecor = clgetb ("fringecor")
+
+ if (flatcor || illumcor || fringecor) {
+
+ i = 1
+ found = false
+ while (imtgetim (list, image, SZ_FNAME) != EOF) {
+	    # Open the image.  Silently skip any non-existent images
+ iferr (im = immap (image, READ_ONLY, 0))
+ next
+
+ call ccdsubset (im, buffer, SZ_SUBSET-1)
+ call imunmap (im)
+
+ # Check to see if we have already dealt with this subset
+ do j = 1, i - 1 {
+ found = (streq (buffer, SUBSET (subsets, j)))
+ if (found)
+ break
+ }
+
+ if (!found) {
+
+ # Add subset to list of processed subsets
+ if (i == 1)
+ call malloc (subsets, i * SZ_SUBSET, TY_CHAR)
+ else
+ call realloc (subsets, i * SZ_SUBSET, TY_CHAR)
+
+ call strcpy (buffer, SUBSET(subsets, i), SZ_SUBSET-1)
+ i = i + 1
+
+ # Find and print names of associated calibration images
+ if (flatcor) {
+ iferr (call cal_find (FLAT, buffer, image, SZ_FNAME)) {
+ if (check)
+ call erract (EA_WARN)
+
+ } else {
+ call printf ("%s\n")
+ call pargstr (image)
+ }
+ }
+
+ if (illumcor) {
+ iferr (call cal_find (ILLUM, buffer, image, SZ_FNAME)) {
+ if (check)
+ call erract (EA_WARN)
+
+ } else {
+ call printf ("%s\n")
+ call pargstr (image)
+ }
+ }
+
+ if (fringecor) {
+ iferr (call cal_find (FRINGE, buffer, image, SZ_FNAME)){
+ if (check)
+ call erract (EA_WARN)
+
+ } else {
+ call printf ("%s\n")
+ call pargstr (image)
+ }
+ }
+ }
+ }
+ }
+
+ call hdmclose ()
+ call imtclose (list)
+ call mfree (subsets, TY_CHAR)
+ call cal_close ()
+
+end
+
+# CAL_FIND -- Return a calibration image of the specified type and subset
+# CAL_IMAGE -- Return a calibration image for a specified input image.
+# CAL_OPEN -- Open the calibration image list.
+# CAL_CLOSE -- Close the calibration image list.
+# CAL_LIST -- Add images to the calibration image list.
+#
+# The open procedure is called first to get the calibration image
+# lists and add them to an internal list. Calibration images from the
+# input list are also added so that calibration images may be specified
+# either from the calibration image list parameters or in the input image list.
+# Existence errors and duplicate calibration images are ignored.
+# Validity checks are made when the calibration images are requested.
+#
+# During processing the calibration image names are requested for each input
+# image. The calibration image list is searched for a calibration image of
+# the right type and subset. If more than one is found the first one is
+# returned and a warning given for the others. The warning is only issued
+# once. If no calibration image is found then an error is returned.
+#
+# The calibration image list must be closed at the end of processing the
+# input images.
+
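+# A minimal calling sketch (illustrative only; the "inlist", "im", "image"
+# and "flat" names below are assumptions, not part of this file):
+#
+#	call cal_open (inlist)			# build the internal list once
+#	while (imtgetim (inlist, image, SZ_FNAME) != EOF) {
+#	    im = immap (image, READ_ONLY, 0)
+#	    call cal_image (im, FLAT, flat, SZ_FNAME)	# flat for this subset
+#	    call imunmap (im)
+#	}
+#	call cal_close ()			# free the internal list
+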
+# CAL_FIND -- Return a calibration image of a particular type and subset.
+# Search the calibration list for the first calibration image of the desired
+# type and subset. Print a warning if there is more than one possible
+# calibration image and return an error if there is no calibration image.
+
+procedure cal_find (ccdtype, subset, image, maxchars)
+
+int	ccdtype			#I Calibration CCD image type
+char subset[ARB] #I Calibration image subset
+char image[maxchars] #O Calibration image (returned)
+int maxchars #I Maximum number chars in image name
+
+int i
+char errmsg[SZ_LINE]
+bool strne()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, images, nimages
+
+begin
+
+ switch (ccdtype) {
+ case ZERO, DARK:
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+
+ call strcpy (IMAGE(images,i), image, maxchars)
+ return
+ }
+
+ case FLAT, ILLUM, FRINGE:
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ if (strne (SUBSET(subsets,i), subset))
+ next
+
+ call strcpy (IMAGE(images,i), image, maxchars)
+ return
+ }
+ }
+
+ # If no calibration image is found then it is an error.
+ switch (ccdtype) {
+ case ZERO:
+ call error (0, "No zero level calibration image found")
+ case DARK:
+ call error (0, "No dark count calibration image found")
+ case FLAT:
+ call sprintf (errmsg, SZ_LINE,
+ "No flat field calibration image of subset %s found")
+ call pargstr (subset)
+ call error (0, errmsg)
+ case ILLUM:
+ call sprintf (errmsg, SZ_LINE,
+ "No illumination calibration image of subset %s found")
+ call pargstr (subset)
+ call error (0, errmsg)
+ case FRINGE:
+ call sprintf (errmsg, SZ_LINE,
+ "No fringe calibration image of subset %s found")
+ call pargstr (subset)
+ call error (0, errmsg)
+ }
+end
+
+# CAL_IMAGE -- Return a calibration image of a particular type.
+# Search the calibration list for the first calibration image of the desired
+# type and subset. Print a warning if there is more than one possible
+# calibration image and return an error if there is no calibration image.
+
+procedure cal_image (im, ccdtype, image, maxchars)
+
+pointer im # Image to be processed
+int	ccdtype			# Calibration CCD image type
+char image[maxchars] # Calibration image (returned)
+int maxchars # Maximum number chars in image name
+
+int i, n
+pointer sp, subset, str
+bool strne(), ccd_cmp()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (subset, SZ_SUBSET, TY_CHAR)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ switch (ccdtype) {
+ case ZERO, DARK:
+ n = 0
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ n = n + 1
+ if (n == 1)
+ call strcpy (IMAGE(images,i), image, maxchars)
+ else {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ }
+ }
+ case FLAT, ILLUM, FRINGE:
+ call ccdsubset (im, Memc[subset], SZ_SUBSET)
+
+ n = 0
+ do i = 1, nimages {
+ if (Memi[ccdtypes+i-1] != ccdtype)
+ next
+ if (strne (SUBSET(subsets,i), Memc[subset]))
+ next
+ n = n + 1
+ if (n == 1)
+ call strcpy (IMAGE(images,i), image, maxchars)
+ else {
+# call eprintf (
+# "Warning: Extra calibration image %s ignored\n")
+# call pargstr (IMAGE(images,i))
+
+ # Reset the image type to eliminate further warnings.
+ Memi[ccdtypes+i-1] = UNKNOWN
+ }
+ }
+ }
+
+ # If no calibration image is found then it is an error.
+ if (n == 0)
+ switch (ccdtype) {
+ case ZERO:
+ call error (0, "No zero level calibration image found")
+ case DARK:
+ call error (0, "No dark count calibration image found")
+ case FLAT:
+ call sprintf (Memc[str], SZ_LINE,
+ "No flat field calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case ILLUM:
+ call sprintf (Memc[str], SZ_LINE,
+ "No illumination calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ case FRINGE:
+ call sprintf (Memc[str], SZ_LINE,
+ "No fringe calibration image of subset %s found")
+ call pargstr (Memc[subset])
+ call error (0, Memc[str])
+ }
+
+ # Check that the input image is not the same as the calibration image.
+ call imstats (im, IM_IMAGENAME, Memc[str], SZ_LINE)
+ if (ccd_cmp (Memc[str], image)) {
+ call sprintf (Memc[str], SZ_LINE,
+ "Calibration image %s is the same as the input image")
+ call pargstr (image)
+ call error (0, Memc[str])
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_OPEN -- Create a list of calibration images from the input image list
+# and the calibration image lists.
+
+procedure cal_open (list)
+
+int list # List of input images
+int list1 # List of calibration images
+
+pointer sp, str
+int ccdtype, strdic(), imtopenp()
+bool clgetb()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset numbers
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, images, nimages
+
+errchk cal_list
+
+begin
+ call smark (sp)
+ call salloc (str, SZ_LINE, TY_CHAR)
+
+ call clgstr ("ccdtype", Memc[str], SZ_LINE)
+ call xt_stripwhite (Memc[str])
+ if (Memc[str] == EOS)
+ ccdtype = NONE
+ else
+ ccdtype = strdic (Memc[str], Memc[str], SZ_LINE, CCDTYPES)
+
+ # Add calibration images to list.
+ nimages = 0
+ if (ccdtype != ZERO && clgetb ("zerocor")) {
+ list1 = imtopenp ("zero")
+ call cal_list (list1, ZERO)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && clgetb ("darkcor")) {
+ list1 = imtopenp ("dark")
+ call cal_list (list1, DARK)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ clgetb ("flatcor")) {
+ list1 = imtopenp ("flat")
+ call cal_list (list1, FLAT)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != ILLUM && clgetb ("illumcor")) {
+ list1 = imtopenp ("illum")
+ call cal_list (list1, ILLUM)
+ call imtclose (list1)
+ }
+ if (ccdtype != ZERO && ccdtype != DARK && ccdtype != FLAT &&
+ ccdtype != FRINGE && clgetb ("fringecor")) {
+ list1 = imtopenp ("fringe")
+ call cal_list (list1, FRINGE)
+ call imtclose (list1)
+ }
+ if (list != NULL) {
+ call cal_list (list, UNKNOWN)
+ call imtrew (list)
+ }
+
+ call sfree (sp)
+end
+
+
+# CAL_CLOSE -- Free memory from the internal calibration image list.
+
+procedure cal_close ()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subset
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, images, nimages
+
+begin
+ if (nimages > 0) {
+ call mfree (ccdtypes, TY_INT)
+ call mfree (subsets, TY_CHAR)
+ call mfree (images, TY_CHAR)
+ }
+end
+
+
+# CAL_LIST -- Add calibration images to an internal list.
+# Map each image and get the CCD image type and subset.
+# If the ccdtype is given as a procedure argument this overrides the
+# image header type. For the calibration images add the type, subset,
+# and image name to dynamic arrays. Ignore duplicate names.
+
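+# (Implementation note: the internal list is held in parallel dynamic
+# arrays.  ccdtypes is a TY_INT array of type codes, while subsets and
+# images are fixed-width TY_CHAR arrays addressed through the SUBSET and
+# IMAGE macros defined at the top of this file.)
+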
+procedure cal_list (list, listtype)
+
+pointer list # Image list
+int listtype # CCD type of image in list.
+ # Overrides header type if not UNKNOWN.
+
+int i, ccdtype, ccdtypei(), imtgetim()
+pointer sp, image, im, immap()
+bool streq()
+
+pointer ccdtypes # Pointer to array of calibration ccdtypes
+pointer subsets # Pointer to array of calibration subsets
+pointer images # Pointer to array of calibration image names
+int nimages # Number of images
+common /calib/ ccdtypes, subsets, images, nimages
+
+begin
+ call smark (sp)
+ call salloc (image, SZ_FNAME, TY_CHAR)
+
+ while (imtgetim (list, Memc[image], SZ_FNAME) != EOF) {
+ # Open the image. If an explicit type is given it is an
+ # error if the image can't be opened.
+ iferr (im = immap (Memc[image], READ_ONLY, 0)) {
+ if (listtype == UNKNOWN)
+ next
+ else
+ call erract (EA_ERROR)
+ }
+
+ # Override image header CCD type if a list type is given.
+ if (listtype == UNKNOWN)
+ ccdtype = ccdtypei (im)
+ else
+ ccdtype = listtype
+
+ switch (ccdtype) {
+ case ZERO, DARK, FLAT, ILLUM, FRINGE:
+ # Check for duplication.
+ for (i=1; i<=nimages; i=i+1)
+ if (streq (Memc[image], IMAGE(images,i)))
+ break
+ if (i <= nimages)
+ break
+
+ # Allocate memory for a new image.
+ if (i == 1) {
+ call malloc (ccdtypes, i, TY_INT)
+ call malloc (subsets, i * SZ_SUBSET, TY_CHAR)
+ call malloc (images, i * SZ_FNAME, TY_CHAR)
+ } else {
+ call realloc (ccdtypes, i, TY_INT)
+		call realloc (subsets, i * SZ_SUBSET, TY_CHAR)
+ call realloc (images, i * SZ_FNAME, TY_CHAR)
+ }
+
+ # Enter the ccdtype, subset, and image name.
+ Memi[ccdtypes+i-1] = ccdtype
+ call ccdsubset (im, SUBSET(subsets,i), SZ_SUBSET-1)
+ call strcpy (Memc[image], IMAGE(images,i), SZ_FNAME-1)
+ nimages = i
+ }
+ call imunmap (im)
+ }
+# call eprintf ("nimages=%d\n")
+# call pargi (nimages)
+# do i = 1, nimages {
+# call eprintf ("ccdtype=%d subset=%s image=%s\n")
+# call pargi (Memi[ccdtypes+i-1])
+# call pargstr (SUBSET (subsets, i))
+# call pargstr (IMAGE (images, i))
+# }
+
+ call sfree (sp)
+end
+
+# CCD_CMP -- Compare two image names with extensions ignored.
+
+bool procedure ccd_cmp (image1, image2)
+
+char image1[ARB] # First image
+char image2[ARB] # Second image
+
+int i, j, strmatch(), strlen(), strncmp()
+bool streq()
+
+begin
+ if (streq (image1, image2))
+ return (true)
+
+ i = max (strmatch (image1, ".imh"), strmatch (image1, ".hhh"))
+ if (i == 0)
+ i = strlen (image1)
+ j = max (strmatch (image2, ".imh"), strmatch (image2, ".hhh"))
+ if (j == 0)
+ j = strlen (image2)
+
+ return (strncmp (image1, image2, max (i, j)) == 0)
+end
diff --git a/noao/imred/quadred/src/quad/qproc.cl b/noao/imred/quadred/src/quad/qproc.cl
new file mode 100644
index 00000000..d0040d18
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qproc.cl
@@ -0,0 +1,109 @@
+procedure qproc (image_list)
+
+begin
+ struct buffer
+ string image, answr, imtype
+ int i, len, nampsx, nampsy
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ i = strlen (imtype)
+
+ cache ("quadsplit", "quadjoin", "qccdproc", "quadproc")
+
+
+ # Validate fixfile
+ if (fixpix) {
+ match ("single_readout", fixfile) | scan (buffer)
+ if (stridx ("#", buffer) == 0) {
+ buffer = "fixfile " // fixfile //
+ " cannot be used with multi-readout images"
+ error (0, buffer)
+ }
+ }
+
+ # Initialise interactive query
+ if (qccdproc.interactive) {
+ answer.p_value = "yes"
+ answr = "yes"
+ } else {
+ answr = "NO"
+ }
+
+ fd = image_list
+ while (fscan (fd, image) != EOF) {
+
+ len = strlen (image)
+ if (substr(image, len-i+1, len) == imtype) {
+ image = substr (image, 1, len-i)
+ }
+
+ # Split out one image for each quadrant and set header sections
+ #quadsplit (image, output="",
+ #xskip1=xskip1, xskip2=xskip2, xtrim1=xtrim1, xtrim2=xtrim2,
+ #ytrim1=ytrim1, ytrim2=ytrim2, clobber=yes)
+ quadsplit (image, output="", clobber=yes)
+
+
+	# Find out if an interactive fit is required for this image
+ if (answr == "yes" || answr == "no") {
+ printf ("Fit overscan vector for %s interactively\n", image) |
+ scan (buffer)
+ answer.p_prompt=buffer
+ answr = answer
+ }
+
+ # Overscan correct and trim
+ if (answr == "yes" || answr == "YES") {
+ qccdproc.interactive = yes
+
+ print ("YES") | qccdproc (image//".??"//imtype, fixpix=fixpix,
+ overscan=overscan, trim=trim, readaxis=readaxis,
+ fixfile=fixfile, biassec="image", trimsec="image",
+ ccdtype="", max_cache=0, noproc=no, zerocor=no, darkcor=no,
+ flatcor=no, illumcor=no, fringecor=no, readcor=no,
+ scancor=no, zero="", dark="", flat="", illum="", fringe="",
+ minreplace=1., scantype="shortscan", nscan=1, backup="",
+ logfile="", verbose=no, >> "dev$null")
+
+ # Set parameters of quadproc used for overscan fitting to match
+ # the ccdproc values which may have been adjusted interactively.
+	    # We do this on every pass in case there is a later interrupt
+ # of task execution.
+ quadproc.function.p_value = qccdproc.function
+ quadproc.order.p_value = qccdproc.order
+ quadproc.sample.p_value = qccdproc.sample
+ quadproc.naverage.p_value = qccdproc.naverage
+ quadproc.niterate.p_value = qccdproc.niterate
+ quadproc.low_reject.p_value = qccdproc.low_reject
+ quadproc.high_reject.p_value = qccdproc.high_reject
+ quadproc.grow.p_value = qccdproc.grow
+
+ # Force the parameter update
+ update ("quadproc")
+
+ } else {
+ qccdproc.interactive = no
+
+ qccdproc (image//".??"//imtype, fixpix=fixpix,
+ overscan=overscan, trim=trim, readaxis=readaxis,
+ fixfile=fixfile, biassec="image", trimsec="image",
+ ccdtype="", max_cache=0, noproc=no, zerocor=no, darkcor=no,
+ flatcor=no, illumcor=no, fringecor=no, readcor=no,
+ scancor=no, zero="", dark="", flat="", illum="", fringe="",
+ minreplace=1., scantype="shortscan", nscan=1, backup="",
+ logfile="", verbose=no)
+ }
+
+ # Combine processed quadrants into single image
+ quadjoin (image, output="", delete=yes)
+
+ }
+
+	# Reset interactive flag if we haven't received a definitive NO
+ if (answr == "no") {
+ qccdproc.interactive = yes
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qproc.par b/noao/imred/quadred/src/quad/qproc.par
new file mode 100644
index 00000000..6a713a5d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qproc.par
@@ -0,0 +1,15 @@
+image_list,s,a,"",,,List of CCD images to correct
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+ytrim2,i,h,INDEF,0,,Y pixels to trim at end of data
+answer,s,ql,"yes","|yes|no|YES|NO|",,"Fit overscan vector for xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx interactively
+"
+fd,*s,h,,,,Internal use only
diff --git a/noao/imred/quadred/src/quad/qpselect.par b/noao/imred/quadred/src/quad/qpselect.par
new file mode 100644
index 00000000..d1a7aa56
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qpselect.par
@@ -0,0 +1,4 @@
+input,s,a,"",,,Input image list
+output,s,h,"STDOUT",,,Output image list
+ccdtype,s,h,"",,,CCD image type to be listed
+stop,b,h,"no",,,"Stop, rather than pass, selected images"
diff --git a/noao/imred/quadred/src/quad/qpselect.x b/noao/imred/quadred/src/quad/qpselect.x
new file mode 100644
index 00000000..8bd2acb2
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qpselect.x
@@ -0,0 +1,108 @@
+# QPSELECT -- Filter a list of image names passing on only those that are of
+# the specified ccdtype -AND-
+#
+# If stop = no
+#	1) Are multi-readout -AND-
+#	2) Have not been trimmed
+# If stop = yes
+#	1) Are single-readout -OR-
+#	2) Have been trimmed
+
+include "ccdtypes.h"
+
+procedure t_qpselect ()
+
+pointer inlist #TI List of input image name.
+char output[SZ_FNAME] #TI List of output image names.
+char instrument[SZ_FNAME] #TI Instrument translation file.
+char ccdtype[SZ_LINE] #TI ccdtype to select.
+bool stop #TI stop rather than pass selected images
+
+int type, nampx, nampy
+char image[SZ_LINE], nampsyx[SZ_LINE]
+pointer fdout, im
+
+int strdic(), imtopenp(), imtgetim(), hdmaccf(), imaccess()
+int ccdtypei()
+bool clgetb()
+
+pointer open(), immap()
+
+begin
+ # Open input and output image lists
+ inlist = imtopenp ("input")
+ call clgstr ("output", output, SZ_LINE)
+ fdout = open (output, APPEND, TEXT_FILE)
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Get ccdtype to select.
+ call clgstr ("ccdtype", ccdtype, SZ_LINE)
+ type = strdic (ccdtype, ccdtype, SZ_LINE, CCDTYPES)
+
+ # Get stop
+ stop = clgetb ("stop")
+
+ while (imtgetim (inlist, image, SZ_LINE) != EOF) {
+
+	# Silently skip any non-existent images
+ if (imaccess (image, READ_ONLY) == NO)
+ next
+
+ im = immap (image, READ_ONLY, 0)
+
+ if ((ccdtype[1] != EOS) && (type != ccdtypei (im))) {
+ call imunmap (im)
+ next
+ }
+
+ if (stop) {
+
+ if (hdmaccf (im, "trim") == YES) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+
+ } else if (hdmaccf (im, "nampsyx") == NO) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+
+ } else {
+
+ call hdmgstr (im, "nampsyx", nampsyx, SZ_LINE)
+ call sscan (nampsyx)
+ call gargi (nampx)
+ call gargi (nampy)
+
+ if (nampx == 1 && nampy == 1) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+ }
+ }
+
+ } else {
+
+ if ((hdmaccf (im, "trim") == NO) &&
+ (hdmaccf (im, "nampsyx") == YES)) {
+
+ call hdmgstr (im, "nampsyx", nampsyx, SZ_LINE)
+ call sscan (nampsyx)
+ call gargi (nampx)
+ call gargi (nampy)
+
+ if (nampx != 1 || nampy != 1) {
+ call fprintf (fdout, "%s\n")
+ call pargstr (image)
+ }
+ }
+ }
+
+ call imunmap (im)
+ }
+
+ # Tidy up
+ call close (fdout)
+ call hdmclose ()
+ call imtclose (inlist)
+end
diff --git a/noao/imred/quadred/src/quad/qsplit.gx b/noao/imred/quadred/src/quad/qsplit.gx
new file mode 100644
index 00000000..3d5b1873
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qsplit.gx
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qsplit$t (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnl$t(), impnl$t()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnl$t (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnl$t (out[amp], obuf, ovec[1, amp])
+ call amov$t (Mem$t[ptr], Mem$t[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoin$t (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnl$t(), impnl$t()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnl$t (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnl$t (in[amp], ibuf, ivec[1, amp])
+ call amov$t (Mem$t[ibuf], Mem$t[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
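+
+# Note: this is generic (.gx) source; the $t marker is presumably expanded
+# by the IRAF generic preprocessor into each pixel datatype suffix, giving
+# the type-specific qsplitd.x, qspliti.x, qsplitl.x, qsplitr.x and
+# qsplits.x files that follow.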
diff --git a/noao/imred/quadred/src/quad/qsplitd.x b/noao/imred/quadred/src/quad/qsplitd.x
new file mode 100644
index 00000000..b7680f4d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qsplitd.x
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qsplitd (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnld(), impnld()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnld (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnld (out[amp], obuf, ovec[1, amp])
+ call amovd (Memd[ptr], Memd[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoind (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnld(), impnld()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnld (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnld (in[amp], ibuf, ivec[1, amp])
+ call amovd (Memd[ibuf], Memd[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qspliti.x b/noao/imred/quadred/src/quad/qspliti.x
new file mode 100644
index 00000000..84aa5fc7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qspliti.x
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qspliti (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnli(), impnli()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnli (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnli (out[amp], obuf, ovec[1, amp])
+ call amovi (Memi[ptr], Memi[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoini (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnli(), impnli()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnli (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnli (in[amp], ibuf, ivec[1, amp])
+ call amovi (Memi[ibuf], Memi[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qsplitl.x b/noao/imred/quadred/src/quad/qsplitl.x
new file mode 100644
index 00000000..5e1d0e7e
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qsplitl.x
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qsplitl (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnll(), impnll()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnll (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnll (out[amp], obuf, ovec[1, amp])
+ call amovl (Meml[ptr], Meml[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoinl (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnll(), impnll()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnll (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnll (in[amp], ibuf, ivec[1, amp])
+ call amovl (Meml[ibuf], Meml[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qsplitr.x b/noao/imred/quadred/src/quad/qsplitr.x
new file mode 100644
index 00000000..adba483d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qsplitr.x
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qsplitr (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnlr(), impnlr()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnlr (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnlr (out[amp], obuf, ovec[1, amp])
+ call amovr (Memr[ptr], Memr[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoinr (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnlr(), impnlr()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnlr (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnlr (in[amp], ibuf, ivec[1, amp])
+ call amovr (Memr[ibuf], Memr[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qsplits.x b/noao/imred/quadred/src/quad/qsplits.x
new file mode 100644
index 00000000..b4eaba80
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qsplits.x
@@ -0,0 +1,97 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QSPLITx -- Split multi-readout image into separate images one for each
+# readout.
+
+procedure qsplits (in, out, qg)
+
+pointer in #I Image pointer for input image
+pointer out[ARB] #I Image pointer for output images
+pointer qg #I pointer to quadgeom structure
+
+long ivec[IM_MAXDIM], ovec[IM_MAXDIM, QG_MAXAMPS]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+bool all_phantom
+
+int imgnls(), impnls()
+
+begin
+ # Setup start vectors for sequential reads ...
+ call amovkl (long(1), ivec, IM_MAXDIM)
+ # ... and writes
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ovec[1, amp], IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+
+ # Check to see if there are any non phantom regions in this tier
+ all_phantom = true
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ all_phantom = false
+ break
+ }
+ }
+
+ if (all_phantom) {
+
+ # Reset start vector for reads to skip phantom data
+ ivec[2] = ivec[2] + long (QG_NY (qg, amp2))
+
+ } else {
+
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnls (in, ibuf, ivec)
+ ptr = ibuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ if (QG_PHANTOM (qg, amp) == NO) {
+ junk = impnls (out[amp], obuf, ovec[1, amp])
+ call amovs (Mems[ptr], Mems[obuf], QG_NX(qg, amp))
+ }
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+ }
+end
+
+# QJOINx -- Join multi-readout sub-images into a single image.
+
+procedure qjoins (in, out, qg)
+
+pointer in[ARB] #I Image pointer for input images.
+pointer out #I Image pointer for output image.
+pointer qg #I pointer to quadgeom structure.
+
+long ivec[IM_MAXDIM, QG_MAXAMPS], ovec[IM_MAXDIM]
+int amp, amp2, x, y, line, junk
+pointer ibuf, obuf, ptr
+
+int imgnls(), impnls()
+
+begin
+ # Setup start vectors for sequential reads ...
+ do amp = 1, QG_NAMPS (qg)
+ call amovkl (long(1), ivec[1, amp], IM_MAXDIM)
+ # ... and writes
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = impnls (out, obuf, ovec)
+ ptr = obuf
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ junk = imgnls (in[amp], ibuf, ivec[1, amp])
+ call amovs (Mems[ibuf], Mems[ptr], QG_NX(qg, amp))
+ ptr = ptr + QG_NX(qg, amp)
+ }
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/qstatistics.cl b/noao/imred/quadred/src/quad/qstatistics.cl
new file mode 100644
index 00000000..a20f373f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qstatistics.cl
@@ -0,0 +1,19 @@
+procedure qstatistics (images)
+
+begin
+ string tmp
+
+ tmp = mktemp ("uparm$tmp")
+
+ #quadsections (image, window=window, section="", template="",
+ #xskip1=INDEF, xskip2=INDEF, xtrim1=INDEF, xtrim2=INDEF, ytrim1=INDEF,
+ #ytrim2=INDEF, >> tmp)
+	quadsections (images, window=window, section="", template="", >> tmp)
+
+ # Calculate image statistics
+ imstatistics ("@"//tmp, fields=fields, lower=lower, upper=upper,
+ binwidth=binwidth, format=format)
+
+
+ delete (tmp, ver-)
+end
diff --git a/noao/imred/quadred/src/quad/qstatistics.par b/noao/imred/quadred/src/quad/qstatistics.par
new file mode 100644
index 00000000..92a32f66
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qstatistics.par
@@ -0,0 +1,7 @@
+images,s,a,,,,Images
+window,s,h,"datasec","|datasec|trimsec|biassec|reflect|duplicate|",,Window to apply to image
+fields,s,h,"image,npix,mean,stddev,min,max",,,Fields to be printed
+lower,r,h,INDEF,,,Lower cutoff for pixel values
+upper,r,h,INDEF,,,Upper cutoff for pixel values
+binwidth,r,h,0.1,,,Bin width of histogram in sigma
+format,b,h,yes,,,Format output and print column labels?
diff --git a/noao/imred/quadred/src/quad/quad.cl b/noao/imred/quadred/src/quad/quad.cl
new file mode 100644
index 00000000..b08434c0
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quad.cl
@@ -0,0 +1,64 @@
+#{ QUAD -- Quad CCD reduction package
+
+noao
+imred
+
+set ccddb = "quad$ccddb/"
+set quadtest = "quad$quadtest/"
+
+package quad
+
+task quadtest.pkg = "quadtest$quadtest.cl"
+
+task quadsplit,
+ quadjoin,
+ quadscale,
+ quadsections,
+ ccddelete,
+ ccdprcselect,
+ ccdssselect,
+ ccdsection,
+ qpcalimage,
+ qpselect,
+ irlincor,
+ gainmeasure,
+# ccdgetparam = "quad$xx_quad.e"
+ ccdgetparam = "quad$x_quad.e"
+
+task quadproc = "quad$quadproc.cl"
+task qproc = "quad$qproc.cl"
+task qnoproc = "quad$qnoproc.cl"
+task qstatistics = "quad$qstatistics.cl"
+task qhistogram = "quad$qhistogram.cl"
+
+hidetask ccdgetparam, ccddelete, ccdprcselect, ccdssselect, ccdsection
+hidetask qpcalimage, qpselect, qproc, qnoproc, quadsplit, quadjoin, quadsections
+
+# CCDRED tasks.
+task badpiximage,
+ ccdgroups,
+ ccdhedit,
+ ccdinstrument,
+ ccdlist,
+ combine,
+ cosmicrays = ccdred$x_ccdred.e
+# cosmicrays,
+# mkfringecor,
+# mkillumcor,
+# mkillumflat,
+# mkskycor,
+# mkskyflat = ccdred$x_ccdred.e
+
+task setinstrument = quad$setinstrument.cl
+
+# Different default parameters
+task qccdproc = quad$x_ccdred.e
+
+# Special versions which run quadproc rather than ccdproc
+task darkcombine = quad$darkcombine.cl
+task flatcombine = quad$flatcombine.cl
+task zerocombine = quad$zerocombine.cl
+
+hidetask ccdproc
+
+clbye()
diff --git a/noao/imred/quadred/src/quad/quad.hd b/noao/imred/quadred/src/quad/quad.hd
new file mode 100644
index 00000000..b8526c66
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quad.hd
@@ -0,0 +1,33 @@
+# Help directory for the QUAD package.
+
+$defdir = "quad$"
+$doc = "quad$doc/"
+
+badpiximage hlp=doc$badpiximage.hlp
+ccdgroups hlp=doc$ccdgroups.hlp
+ccdhedit hlp=doc$ccdhedit.hlp
+ccdlist hlp=doc$ccdlist.hlp
+combine hlp=doc$combine.hlp
+cosmicrays hlp=doc$cosmicrays.hlp
+darkcombine hlp=doc$darkcombine.hlp
+flatcombine hlp=doc$flatcombine.hlp
+mkfringecor hlp=doc$mkfringecor.hlp
+mkillumcor hlp=doc$mkillumcor.hlp
+mkillumflat hlp=doc$mkillumflat.hlp
+mkskycor hlp=doc$mkskycor.hlp
+mkskyflat hlp=doc$mkskyflat.hlp
+quadproc hlp=doc$quadproc.hlp
+quadscale hlp=doc$quadscale.hlp
+qhistogram hlp=doc$qhistogram.hlp
+qstatistics hlp=doc$qstatistics.hlp
+setinstrument hlp=doc$setinstrument.hlp
+zerocombine hlp=doc$zerocombine.hlp
+
+ccdgeometry hlp=doc$ccdgeometry.hlp
+ccdinstrument hlp=doc$ccdinst.hlp
+ccdtypes hlp=doc$ccdtypes.hlp
+flatfields hlp=doc$flatfields.hlp
+guide hlp=doc$guide.hlp
+instruments hlp=doc$instruments.hlp
+package hlp=doc$quad.hlp
+subsets hlp=doc$subsets.hlp
diff --git a/noao/imred/quadred/src/quad/quad.men b/noao/imred/quadred/src/quad/quad.men
new file mode 100644
index 00000000..6b323daf
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quad.men
@@ -0,0 +1,36 @@
+ SPECIAL TASKS FOR MULTI-READOUT CCD IMAGES
+
+ quadproc - Process multi-readout CCD images
+ quadscale -
+ qstatistics - Calculate image statistics for multi-readout CCD images
+ qhistogram - Make histogram of multi-readout CCD image
+ darkcombine - Combine and process dark count images
+ flatcombine - Combine and process flat field images
+ zerocombine - Combine and process zero level images
+
+
+ STANDARD CCDRED TASKS
+
+ badpiximage - Create a bad pixel mask image from a bad pixel file
+ ccdgroups - Group CCD images into image lists
+ ccdhedit - CCD image header editor
+ ccdinstrument - Review and edit instrument translation files
+ ccdlist - List CCD processing information
+ combine - Combine CCD images
+ cosmicrays - Detect and replace cosmic rays
+ mkfringecor - Make fringe correction images from sky images
+ mkillumcor - Make flat field illumination correction images
+ mkillumflat - Make illumination corrected flat fields
+ mkskycor - Make sky illumination correction images
+ mkskyflat - Make sky corrected flat field images
+ setinstrument - Set instrument parameters
+
+ ADDITIONAL HELP TOPICS
+
+ ccdgeometry - Discussion of CCD coordinate/geometry keywords
+ ccdtypes - Description of the CCD image types
+ flatfields - Discussion of CCD flat field calibrations
+ guide - Introductory guide to using the CCDRED package
+ instruments - Instrument specific data files
+ package - CCD image reduction package
+ subsets - Description of CCD subsets
diff --git a/noao/imred/quadred/src/quad/quad.par b/noao/imred/quadred/src/quad/quad.par
new file mode 100644
index 00000000..74267f31
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quad.par
@@ -0,0 +1,12 @@
+# QUAD package parameter file
+
+pixeltype,s,h,"real real",,,Output and calculation pixel datatypes
+verbose,b,h,no,,,Print log information to the standard output?
+logfile,f,h,"logfile",,,Text log file
+plotfile,f,h,"",,,Log metacode plot file
+backup,s,h,"",,,Backup directory or prefix
+instrument,s,h,"",,,CCD instrument file
+ssfile,s,h,"subsets",,,Subset translation file
+graphics,s,h,"stdgraph",,,Interactive graphics output device
+cursor,*gcur,h,"",,,Graphics cursor input
+version,s,h,"Version 2.0 - Mar 94","Version 2.0 - Mar 94"
diff --git a/noao/imred/quadred/src/quad/quadalloc.x b/noao/imred/quadred/src/quad/quadalloc.x
new file mode 100644
index 00000000..9373340a
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadalloc.x
@@ -0,0 +1,165 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QUADALLOC -- Allocate space for quadgeom structure
+# Note: The various arrays are dimensioned as QG_MAXAMPS+1 and are ZERO indexed.
+
+procedure quadalloc (qg)
+
+pointer qg #O Pointer to opened quadgeom structure
+
+begin
+
+ call malloc (qg, QG_LENSTRUCT, TY_STRUCT)
+
+ # Zero readout counters
+ QG_NAMPS(qg) = 0
+ QG_NAMPSX(qg) = 0
+ QG_NAMPSY(qg) = 0
+
+ # Allocate and zero arrays.
+ call calloc (QG_AMPIDPTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_AMPTYPTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_NXPTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_NYPTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_DX1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_DX2PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_DY1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_DY2PTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_TX1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_TX2PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_TY1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_TY2PTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_BX1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_BX2PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_BY1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_BY2PTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_CX1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_CX2PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_CY1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_CY2PTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_AX1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_AX2PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_AY1PTR(qg), QG_MAXAMPS+1, TY_INT)
+ call calloc (QG_AY2PTR(qg), QG_MAXAMPS+1, TY_INT)
+
+ call calloc (QG_PHPTR(qg), QG_MAXAMPS+1, TY_INT)
+
+end
+
+# QUADFREE -- Free quadgeom structure
+
+procedure quadfree (qg)
+
+pointer qg #O Pointer to open quadgeom structure
+
+begin
+
+ if (qg != NULL) {
+
+ call mfree (QG_AMPIDPTR(qg), TY_INT)
+ call mfree (QG_AMPTYPTR(qg), TY_INT)
+
+ call mfree (QG_NXPTR(qg), TY_INT)
+ call mfree (QG_NYPTR(qg), TY_INT)
+
+ call mfree (QG_DX1PTR(qg), TY_INT)
+ call mfree (QG_DX2PTR(qg), TY_INT)
+ call mfree (QG_DY1PTR(qg), TY_INT)
+ call mfree (QG_DY2PTR(qg), TY_INT)
+
+ call mfree (QG_TX1PTR(qg), TY_INT)
+ call mfree (QG_TX2PTR(qg), TY_INT)
+ call mfree (QG_TY1PTR(qg), TY_INT)
+ call mfree (QG_TY2PTR(qg), TY_INT)
+
+ call mfree (QG_BX1PTR(qg), TY_INT)
+ call mfree (QG_BX2PTR(qg), TY_INT)
+ call mfree (QG_BY1PTR(qg), TY_INT)
+ call mfree (QG_BY2PTR(qg), TY_INT)
+
+ call mfree (QG_CX1PTR(qg), TY_INT)
+ call mfree (QG_CX2PTR(qg), TY_INT)
+ call mfree (QG_CY1PTR(qg), TY_INT)
+ call mfree (QG_CY2PTR(qg), TY_INT)
+
+ call mfree (QG_AX1PTR(qg), TY_INT)
+ call mfree (QG_AX2PTR(qg), TY_INT)
+ call mfree (QG_AY1PTR(qg), TY_INT)
+ call mfree (QG_AY2PTR(qg), TY_INT)
+
+ call mfree (QG_PHPTR(qg), TY_INT)
+
+ call mfree (qg, TY_STRUCT)
+ }
+end
+
+# QUADDUMP -- Print contents of quadgeom structure on STDERR
+procedure quaddump (qg)
+
+pointer qg #O Pointer to open quadgeom structure
+
+int amp
+
+begin
+
+ call eprintf ("Active amps: %d (%d in x, %d in y)\n")
+ call pargi (QG_NAMPS(qg))
+ call pargi (QG_NAMPSX(qg))
+ call pargi (QG_NAMPSY(qg))
+
+ do amp = 0, QG_NAMPS(qg) {
+ switch (amp) {
+ case 0:
+ call eprintf ("Entire image\n")
+ default:
+ call eprintf ("Amp %s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if (QG_PHANTOM (qg, amp) == YES)
+ call eprintf (" [Phantom]")
+
+ call eprintf ("\n")
+ }
+
+ call eprintf ("\tnx = %d \tny = %d \n")
+ call pargi (QG_NX(qg, amp))
+ call pargi (QG_NY(qg, amp))
+
+ call eprintf ("\tdx1 = %d \tdx2 = %d \tdy1 = %d \tdy2 = %d\n")
+ call pargi (QG_DX1(qg, amp))
+ call pargi (QG_DX2(qg, amp))
+ call pargi (QG_DY1(qg, amp))
+ call pargi (QG_DY2(qg, amp))
+
+ call eprintf ("\ttx1 = %d \ttx2 = %d \tty1 = %d \tty2 = %d\n")
+ call pargi (QG_TX1(qg, amp))
+ call pargi (QG_TX2(qg, amp))
+ call pargi (QG_TY1(qg, amp))
+ call pargi (QG_TY2(qg, amp))
+
+ call eprintf ("\tbx1 = %d \tbx2 = %d \tby1 = %d \tby2 = %d\n")
+ call pargi (QG_BX1(qg, amp))
+ call pargi (QG_BX2(qg, amp))
+ call pargi (QG_BY1(qg, amp))
+ call pargi (QG_BY2(qg, amp))
+
+ call eprintf ("\tcx1 = %d \tcx2 = %d \tcy1 = %d \tcy2 = %d\n")
+ call pargi (QG_CX1(qg, amp))
+ call pargi (QG_CX2(qg, amp))
+ call pargi (QG_CY1(qg, amp))
+ call pargi (QG_CY2(qg, amp))
+
+ call eprintf ("\tax1 = %d \tax2 = %d \tay1 = %d \tay2 = %d\n")
+ call pargi (QG_AX1(qg, amp))
+ call pargi (QG_AX2(qg, amp))
+ call pargi (QG_AY1(qg, amp))
+ call pargi (QG_AY2(qg, amp))
+ }
+end
diff --git a/noao/imred/quadred/src/quad/quaddelete.x b/noao/imred/quadred/src/quad/quaddelete.x
new file mode 100644
index 00000000..bdee65b2
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quaddelete.x
@@ -0,0 +1,39 @@
+include "quadgeom.h"
+
+# QUADDELETE -- Delete subimages, one for each readout.
+
+procedure quaddelete (qg, rootname)
+
+pointer qg #I Pointer to open quadgeom structure
+char rootname[ARB] #I Root name for subimages.
+
+int amp
+pointer fullname
+
+pointer sp
+int imaccess()
+
+begin
+ call smark (sp)
+ call salloc (fullname, SZ_LINE, TY_CHAR)
+
+ # Loop over active readouts
+ do amp = 1, QG_NAMPS(qg) {
+
+ # The sub-section image will only exist if this is not a phantom
+ if (QG_PHANTOM (qg, amp) == NO) {
+
+ # Make sub-image name
+ call sprintf (Memc[fullname], SZ_LINE, "%s.%s")
+ call pargstr (rootname)
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ # Delete the sub-image (if it exists)
+ if (imaccess (Memc[fullname], READ_ONLY) == YES) {
+ call imdelete (Memc[fullname])
+ }
+ }
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/quad/quadgeom.h b/noao/imred/quadred/src/quad/quadgeom.h
new file mode 100644
index 00000000..090b4bf2
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadgeom.h
@@ -0,0 +1,99 @@
+# QUADGEOM - Structure definitions and macros for quadgeom structure.
+
+define QG_LENSTRUCT 28 # Length of structure.
+define QG_MAXAMPS 4 # Maximum possible number of readouts.
+# The various arrays are dimensioned as QG_MAXAMPS+1.
+# QG_AAAA(0) contains quantities referring to the entire image
+# QG_AAAA(z) contains quantities referring to the zth sub-image
+
+define QG_NAMPS Memi[$1] # Total number of active readouts.
+define QG_NAMPSX Memi[$1+1] # Number of active readouts in X.
+define QG_NAMPSY Memi[$1+2] # Number of active readouts in Y.
+
+# Array of pointers to names of active readouts.
+define QG_AMPIDPTR Memi[$1+3] # --> ampid array.
+define QG_AMPID Memi[QG_AMPIDPTR($1)+$2] # ampid array.
+
+# Array of pointers to names of active readouts.
+define QG_AMPTYPTR Memi[$1+4] # --> amptype array.
+define QG_AMPTYPE Memi[QG_AMPTYPTR($1)+$2] # amptype array.
+
+# Dimensions of image from each readout.
+define QG_NXPTR Memi[$1+5] # --> X dimension array.
+define QG_NX Memi[QG_NXPTR($1)+$2] # X dimension.
+define QG_NYPTR Memi[$1+6] # --> Y dimension array.
+define QG_NY Memi[QG_NYPTR($1)+$2] # Y dimension.
+
+# datasec = "[dx1:dx2,dy1:dy2]"
+define QG_DOFF 7
+define QG_DX1PTR Memi[$1+QG_DOFF] # --> dx1 array.
+define QG_DX2PTR Memi[$1+QG_DOFF+1] # --> dx2 array.
+define QG_DY1PTR Memi[$1+QG_DOFF+2] # --> dy1 array.
+define QG_DY2PTR Memi[$1+QG_DOFF+3] # --> dy2 array.
+define QG_DX1 Memi[QG_DX1PTR($1)+$2] # dx1.
+define QG_DX2 Memi[QG_DX2PTR($1)+$2] # dx2.
+define QG_DY1 Memi[QG_DY1PTR($1)+$2] # dy1.
+define QG_DY2 Memi[QG_DY2PTR($1)+$2] # dy2.
+
+# trimsec = "[tx1:tx2,ty1:ty2]"
+define QG_TOFF 11 # QG_DOFF+4.
+define QG_TX1PTR Memi[$1+QG_TOFF] # --> tx1 array.
+define QG_TX2PTR Memi[$1+QG_TOFF+1] # --> tx2 array.
+define QG_TY1PTR Memi[$1+QG_TOFF+2] # --> ty1 array.
+define QG_TY2PTR Memi[$1+QG_TOFF+3] # --> ty2 array.
+define QG_TX1 Memi[QG_TX1PTR($1)+$2] # tx1.
+define QG_TX2 Memi[QG_TX2PTR($1)+$2] # tx2.
+define QG_TY1 Memi[QG_TY1PTR($1)+$2] # ty1.
+define QG_TY2 Memi[QG_TY2PTR($1)+$2] # ty2.
+
+# biassec = "[bx1:bx2,by1:by2]"
+define QG_BOFF 15 # QG_TOFF+4.
+define QG_BX1PTR Memi[$1+QG_BOFF] # --> bx1 array.
+define QG_BX2PTR Memi[$1+QG_BOFF+1] # --> bx2 array.
+define QG_BY1PTR Memi[$1+QG_BOFF+2] # --> by1 array.
+define QG_BY2PTR Memi[$1+QG_BOFF+3] # --> by2 array.
+define QG_BX1 Memi[QG_BX1PTR($1)+$2] # bx1.
+define QG_BX2 Memi[QG_BX2PTR($1)+$2] # bx2.
+define QG_BY1 Memi[QG_BY1PTR($1)+$2] # by1.
+define QG_BY2 Memi[QG_BY2PTR($1)+$2] # by2.
+
+# ccdsec = "[cx1:cx2,cy1:cy2]"
+define QG_COFF 19 # QG_BOFF+4.
+define QG_CX1PTR Memi[$1+QG_COFF] # --> cx1 array.
+define QG_CX2PTR Memi[$1+QG_COFF+1] # --> cx2 array.
+define QG_CY1PTR Memi[$1+QG_COFF+2] # --> cy1 array.
+define QG_CY2PTR Memi[$1+QG_COFF+3] # --> cy2 array.
+define QG_CX1 Memi[QG_CX1PTR($1)+$2] # cx1.
+define QG_CX2 Memi[QG_CX2PTR($1)+$2] # cx2.
+define QG_CY1 Memi[QG_CY1PTR($1)+$2] # cy1.
+define QG_CY2 Memi[QG_CY2PTR($1)+$2] # cy2.
+
+# ampsec = "[ax1:ax2,ay1:ay2]"
+define QG_AOFF 23 # QG_COFF+4.
+define QG_AX1PTR Memi[$1+QG_AOFF] # --> ax1 array.
+define QG_AX2PTR Memi[$1+QG_AOFF+1] # --> ax2 array.
+define QG_AY1PTR Memi[$1+QG_AOFF+2] # --> ay1 array.
+define QG_AY2PTR Memi[$1+QG_AOFF+3] # --> ay2 array.
+define QG_AX1 Memi[QG_AX1PTR($1)+$2] # ax1.
+define QG_AX2 Memi[QG_AX2PTR($1)+$2] # ax2.
+define QG_AY1 Memi[QG_AY1PTR($1)+$2] # ay1.
+define QG_AY2 Memi[QG_AY2PTR($1)+$2] # ay2.
+
+# Phantom markers
+define QG_PHOFF 27 # QG_AOFF+4
+define QG_PHPTR Memi[$1+QG_PHOFF] # --> Phantom array
+define QG_PHANTOM Memi[QG_PHPTR($1)+$2] # Phantom value
+
+# Macros to convert between array offset and grid position
+define QG_GRIDX ($2 - (($2-1)/QG_NAMPSX($1))*QG_NAMPSX($1))
+define QG_GRIDY (($2-1)/QG_NAMPSX($1)+1)
+define QG_AMP ($2 + ($3-1) * QG_NAMPSX($1))
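+# Worked example (illustrative): for a quad readout with QG_NAMPSX = 2,
+# amplifier 3 sits at grid position (x,y) = (1,2), i.e. QG_GRIDX = 1 and
+# QG_GRIDY = 2, and QG_AMP(qg, 1, 2) recovers the offset 3.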
+
+# Symbolic values for AMPTYPE codes
+define AMPDICT "|11|12|21|22|"
+define AMP11 1 # BLHC
+define AMP12 2 # BRHC
+define AMP21 3 # TLHC
+define AMP22 4 # TRHC
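+# (BLHC/BRHC/TLHC/TRHC = bottom-left, bottom-right, top-left and top-right
+# hand corners of the detector, following the "yx" amplifier naming.)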
+
+define SZ_AMPID 2
diff --git a/noao/imred/quadred/src/quad/quadgeom.x b/noao/imred/quadred/src/quad/quadgeom.x
new file mode 100644
index 00000000..3ce173ff
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadgeom.x
@@ -0,0 +1,304 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QUADGEOM -- Set up section information in quadgeom structure based on
+# information in the image header. The sections given in the image header are
+# "whole image" sections (i.e. those that would be appropriate for single
+# readout. From these we must calculate the sections to apply to the data
+# read through each readout. The values of datasec and ccdsec are taken from
+# the header. The values of trimsec and biassec can be supplied explicitly
+# via the corresponding arguments. If these are given as "image" or "" then the
+# image header values are used.
+
+procedure quadgeom (im, qg, trimsec, biassec)
+
+pointer im #I Pointer to input image.
+pointer qg #IO Pointer to open quadgeom structure.
+char trimsec[SZ_LINE] #I Trimsec may be used to override header value.
+char biassec[SZ_LINE] #I Biassec may be used to override header value.
+
+char section[SZ_LINE], nampsyx[SZ_LINE]
+int nx, ny, xdata, ydata, xover, yover, amp, xamp, yamp, pre
+int dx1, dx2, dxs, dy1, dy2, dys
+int ddx1, ddx2, ddy1, ddy2
+int cx1, cx2, cxs, cy1, cy2, cys
+int ccx1, ccx2, ccy1, ccy2
+int tx1, tx2, txs, txskip1, txskip2
+int ty1, ty2, tys, tyskip1, tyskip2
+int ttx1, ttx2, tty1, tty2
+int bx1, bx2, bxs, bxskip1, bxskip2
+int by1, by2, bys, byskip1, byskip2
+int bbx1, bbx2, bby1, bby2
+
+bool streq()
+
+begin
+
+ # Get input image dimensions.
+ nx = IM_LEN(im, 1)
+ ny = IM_LEN(im, 2)
+
+ # Get number of active amplifiers in Y and X.
+ call hdmgstr (im, "nampsyx", nampsyx, SZ_LINE)
+ call sscan (nampsyx)
+ call gargi (QG_NAMPSY(qg))
+ call gargi (QG_NAMPSX(qg))
+
+ QG_NAMPS(qg) = QG_NAMPSY(qg) * QG_NAMPSX(qg)
+ if (QG_NAMPS(qg) > QG_MAXAMPS)
+ call error (0, "CCD has two many read-outs for this program")
+
+ # Get list of active amplifiers.
+ # Presently the header doesn't contain this information so we fake it
+ # since we know all the possibilities.
+ do amp = 1, QG_NAMPS(qg)
+ call malloc (QG_AMPID(qg, amp), SZ_AMPID, TY_CHAR)
+
+ switch (QG_NAMPSX(qg)) {
+ case 1:
+ switch (QG_NAMPSY(qg)) {
+ case 1: # Mono
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+
+ case 2: # Split parallels
+ call error (0, "Unsuported read-out configuration")
+ }
+
+ case 2:
+
+ switch (QG_NAMPSY(qg)) {
+ case 1: # Split serials
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+ QG_AMPTYPE (qg, 2) = AMP12
+ call strcpy ("12", Memc[QG_AMPID(qg, 2)], SZ_AMPID)
+
+ case 2: # Quad
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+ QG_AMPTYPE (qg, 2) = AMP12
+ call strcpy ("12", Memc[QG_AMPID(qg, 2)], SZ_AMPID)
+ QG_AMPTYPE (qg, 3) = AMP21
+ call strcpy ("21", Memc[QG_AMPID(qg, 3)], SZ_AMPID)
+ QG_AMPTYPE (qg, 4) = AMP22
+ call strcpy ("22", Memc[QG_AMPID(qg, 4)], SZ_AMPID)
+ }
+ }
+
+ # Set X and Y dimensions of subimage read out by each amplifier
+ QG_NX (qg, 0) = nx
+ QG_NY (qg, 0) = ny
+ do amp = 1, QG_NAMPS (qg) {
+ QG_NX(qg, amp) = nx / QG_NAMPSX (qg)
+ QG_NY(qg, amp) = ny / QG_NAMPSY (qg)
+ }
+
+ # Get datasec, trimsec and biassec parameters from image header.
+ # trimsec and biassec may be overridden by supplying an explicit
+ # section in the biassec and trimsec arguments.
+ call hdmgstr (im, "datasec", section, SZ_LINE)
+ dx1 = 1
+ dx2 = nx
+ dxs = 1
+ dy1 = 1
+ dy2 = ny
+ dys = 1
+ call ccd_section (section, dx1, dx2, dxs, dy1, dy2, dys)
+ QG_DX1(qg, 0) = dx1
+ QG_DX2(qg, 0) = dx2
+ QG_DY1(qg, 0) = dy1
+ QG_DY2(qg, 0) = dy2
+
+ if (streq (trimsec, "image") || streq (trimsec, "")) {
+ call hdmgstr (im, "trimsec", section, SZ_LINE)
+ } else {
+ call strcpy (trimsec, section, SZ_LINE)
+ }
+ tx1 = dx1
+ tx2 = dx2
+ txs = 1
+ ty1 = dy1
+ ty2 = dy2
+ tys = 1
+ call ccd_section (section, tx1, tx2, txs, ty1, ty2, tys)
+ QG_TX1(qg, 0) = tx1
+ QG_TX2(qg, 0) = tx2
+ QG_TY1(qg, 0) = ty1
+ QG_TY2(qg, 0) = ty2
+
+ if (streq (biassec, "image") || streq (biassec, "")) {
+ call hdmgstr (im, "biassec", section, SZ_LINE)
+ } else {
+ call strcpy (biassec, section, SZ_LINE)
+ }
+ bx1 = dx2 + 1
+ bx2 = nx
+ bxs = 1
+ by1 = 1
+ by2 = ny
+ bys = 1
+ call ccd_section (section, bx1, bx2, bxs, by1, by2, bys)
+ QG_BX1(qg, 0) = bx1
+ QG_BX2(qg, 0) = bx2
+ QG_BY1(qg, 0) = by1
+ QG_BY2(qg, 0) = by2
+
+ call hdmgstr (im, "ccdsec", section, SZ_LINE)
+ cx1 = dx1
+ cx2 = dx2
+ cxs = 1
+ cy1 = dy1
+ cy2 = dy2
+ cys = 1
+ call ccd_section (section, cx1, cx2, cxs, cy1, cy2, cys)
+ QG_CX1(qg, 0) = cx1
+ QG_CX2(qg, 0) = cx2
+ QG_CY1(qg, 0) = cy1
+ QG_CY2(qg, 0) = cy2
+
+ # Calculate number of data pixels and margins to leave around
+ # trimsection.
+ xdata = dx2 - dx1 + 1
+ ydata = dy2 - dy1 + 1
+ txskip1 = tx1 - dx1
+ # ************* KLUDGE! *********************
+ # The datasec is the whole image. We have no way of knowing where the
+ # division between data and overscan is supposed to be so we assume
+ # that trimsec leaves an equal margin on both sides of the true datasec.
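+ # Illustrative numbers only: if nx = 2112 and trimsec ends at tx2 = 2080
+ # with a leading margin txskip1 = 2, dx2 is reset to 2082 and the
+ # remaining 30 columns are treated as overscan.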
+ if ((dx1 == 1 ) && (dx2 == nx)) {
+ dx2 = tx2 + txskip1
+ xdata = dx2 - dx1 + 1
+ cx2 = cx1 + xdata - 1
+ QG_DX2(qg, 0) = dx2
+ QG_CX2(qg, 0) = cx2
+ }
+ txskip2 = dx2 - tx2
+ tyskip1 = ty1 - dy1
+ tyskip2 = dy2 - ty2
+
+ # Calculate number of overscan pixels and margins to leave around
+ # biassec.
+ xover = nx - xdata
+ yover = ny
+ bxskip1 = bx1 - dx2 - 1
+ bxskip2 = nx - bx2
+ byskip1 = by1 - dy1
+ byskip2 = ny - by2
+
+ # Calculate number of data and overscan pixels in subimages
+ xdata = xdata / QG_NAMPSX(qg)
+ ydata = ydata / QG_NAMPSY(qg)
+ xover = xover / QG_NAMPSX(qg)
+ yover = yover / QG_NAMPSY(qg)
+
+ # Calculate datasec, trimsec, etc. for each amplifier
+ do amp = 1, QG_NAMPS(qg) {
+
+ # Assume there are no phantoms
+ QG_PHANTOM (qg, amp) = NO
+
+ # X coordinates
+ switch (QG_AMPTYPE(qg, amp)) {
+ case AMP11, AMP21: # Left hand side
+ ddx1 = dx1
+ ddx2 = ddx1 + xdata - 1
+ ttx1 = ddx1 + txskip1
+ ttx2 = ddx2
+ bbx1 = ddx2 + bxskip1 + 1
+ bbx2 = ddx2 + xover - bxskip2
+ ccx1 = cx1
+ ccx2 = cx1 + xdata - 1
+
+ case AMP12, AMP22: # Right hand side
+ bbx1 = bxskip2 + 1
+ bbx2 = xover - bxskip1
+ ddx1 = xover + 1
+ ddx2 = ddx1 + xdata - 1
+ ttx1 = ddx1
+ ttx2 = ddx2 - txskip2
+ ccx1 = cx1 + xdata
+ ccx2 = cx2
+ }
+
+ # Y Coordinates
+ switch (QG_AMPTYPE(qg, amp)) {
+ case AMP11, AMP12: # Lower row
+ ddy1 = dy1
+ ddy2 = ddy1 + ydata - 1
+ tty1 = ddy1 + tyskip1
+ bby1 = ddy1 + byskip1
+ if (QG_NAMPSY(qg) == 1) {
+ tty2 = ddy2 - tyskip2
+ bby2 = ddy2 - byskip2
+ } else {
+ tty2 = ddy2
+ bby2 = ddy2
+ }
+ ccy1 = cy1
+ ccy2 = cy1 + ydata - 1
+
+ case AMP21, AMP22: # Upper row
+ ddy1 = 1
+ ddy2 = ddy1 + ydata - 1
+ if (QG_NAMPSY(qg) == 1) {
+ tty1 = ddy1 + tyskip1
+ bby1 = ddy1 + byskip1
+ } else {
+ tty1 = 1
+ bby1 = 1
+ }
+ tty2 = ddy2 - tyskip2
+ bby2 = ddy2 - byskip2
+ ccy1 = cy1 + ydata
+ ccy2 = cy2
+ }
+
+
+ QG_DX1(qg, amp) = ddx1
+ QG_DX2(qg, amp) = ddx2
+ QG_DY1(qg, amp) = ddy1
+ QG_DY2(qg, amp) = ddy2
+
+ QG_TX1(qg, amp) = ttx1
+ QG_TX2(qg, amp) = ttx2
+ QG_TY1(qg, amp) = tty1
+ QG_TY2(qg, amp) = tty2
+
+ QG_BX1(qg, amp) = bbx1
+ QG_BX2(qg, amp) = bbx2
+ QG_BY1(qg, amp) = bby1
+ QG_BY2(qg, amp) = bby2
+
+ QG_CX1(qg, amp) = ccx1
+ QG_CX2(qg, amp) = ccx2
+ QG_CY1(qg, amp) = ccy1
+ QG_CY2(qg, amp) = ccy2
+ }
+
+ # Set up "ampsec" - the section of the composite image derived from
+ # each sub-image.
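+ # For equal-sized sub-images in a 2x2 quad this simply tiles them: amp 1
+ # occupies [1:nx/2, 1:ny/2], amp 2 [nx/2+1:nx, 1:ny/2], and so on.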
+ do yamp = 1, QG_NAMPSY(qg) {
+ amp = QG_AMP (qg, 1, yamp)
+ QG_AX1(qg, amp) = 1
+ QG_AX2(qg, amp) = QG_NX(qg, amp)
+ do xamp = 2, QG_NAMPSX(qg) {
+ amp = QG_AMP (qg, xamp, yamp)
+ pre = QG_AMP (qg, xamp-1, yamp)
+ QG_AX1(qg, amp) = QG_AX2(qg, pre) + 1
+ QG_AX2(qg, amp) = QG_AX1(qg, amp) + QG_NX(qg, amp) - 1
+ }
+ }
+ do xamp = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP (qg, xamp, 1)
+ QG_AY1(qg, amp) = 1
+ QG_AY2(qg, amp) = QG_NY(qg, amp)
+ do yamp = 2, QG_NAMPSY(qg) {
+ amp = QG_AMP (qg, xamp, yamp)
+ pre = QG_AMP (qg, xamp, yamp-1)
+ QG_AY1(qg, amp) = QG_AY2(qg, pre) + 1
+ QG_AY2(qg, amp) = QG_AY1(qg, amp) + QG_NY(qg, amp) - 1
+ }
+ }
+
+end
diff --git a/noao/imred/quadred/src/quad/quadgeomred.x b/noao/imred/quadred/src/quad/quadgeomred.x
new file mode 100644
index 00000000..ff5d043c
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadgeomred.x
@@ -0,0 +1,165 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QUADGEOMRED -- Set up section information in quadgeom structure based on
+# information in the image header for a reduced image. The sections given in the
+# image header are "whole image" sections (i.e. those that would be appropriate
+# for single readout. From these we must calculate the sections to apply to
+# the data read through each readout.
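+# Unlike quadgeom, the per-amplifier geometry of a reduced image is recovered
+# from the AMPSECyx header keywords rather than recomputed from
+# datasec/trimsec/biassec.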
+
+procedure quadgeomred (im, qg)
+
+pointer im #I Pointer to input image.
+pointer qg #IO Pointer to open quadgeom structure.
+
+char section[SZ_LINE], keyword[SZ_LINE], nampsyx[SZ_LINE]
+int nx, ny, x, y, amp, pre
+int dx1, dx2, dxs, dy1, dy2, dys
+int cx1, cx2, cxs, cy1, cy2, cys
+int ax1, ax2, axs, ay1, ay2, ays
+
+begin
+
+ # Get input image dimensions.
+ nx = IM_LEN(im, 1)
+ ny = IM_LEN(im, 2)
+ QG_NX (qg, 0) = nx
+ QG_NY (qg, 0) = ny
+
+ # Get number of active amplifiers in Y and X.
+ call hdmgstr (im, "nampsyx", nampsyx, SZ_LINE)
+ call sscan (nampsyx)
+ call gargi (QG_NAMPSY(qg))
+ call gargi (QG_NAMPSX(qg))
+
+ QG_NAMPS(qg) = QG_NAMPSY(qg) * QG_NAMPSX(qg)
+ if (QG_NAMPS(qg) > QG_MAXAMPS)
+ call error (0, "CCD has two many read-outs for this program")
+
+ # Get list of active amplifiers.
+ # Presently the header doesn't contain this information so we fake it
+ # since we know all the possibilities.
+ do amp = 1, QG_NAMPS(qg)
+ call malloc (QG_AMPID(qg, amp), SZ_AMPID, TY_CHAR)
+
+ switch (QG_NAMPSX(qg)) {
+ case 1:
+ switch (QG_NAMPSY(qg)) {
+ case 1: # Mono
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+
+ case 2: # Split parallels
+ call error (0, "Unsuported read-out configuration")
+ }
+
+ case 2:
+
+ switch (QG_NAMPSY(qg)) {
+ case 1: # Split serials
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+ QG_AMPTYPE (qg, 2) = AMP12
+ call strcpy ("12", Memc[QG_AMPID(qg, 2)], SZ_AMPID)
+
+ case 2: # Quad
+ QG_AMPTYPE (qg, 1) = AMP11
+ call strcpy ("11", Memc[QG_AMPID(qg, 1)], SZ_AMPID)
+ QG_AMPTYPE (qg, 2) = AMP12
+ call strcpy ("12", Memc[QG_AMPID(qg, 2)], SZ_AMPID)
+ QG_AMPTYPE (qg, 3) = AMP21
+ call strcpy ("21", Memc[QG_AMPID(qg, 3)], SZ_AMPID)
+ QG_AMPTYPE (qg, 4) = AMP22
+ call strcpy ("22", Memc[QG_AMPID(qg, 4)], SZ_AMPID)
+ }
+ }
+
+
+ # Get datasec.
+ call hdmgstr (im, "datasec", section, SZ_LINE)
+ dx1 = 1
+ dx2 = nx
+ dxs = 1
+ dy1 = 1
+ dy2 = ny
+ dys = 1
+ call ccd_section (section, dx1, dx2, dxs, dy1, dy2, dys)
+ QG_DX1(qg, 0) = dx1
+ QG_DX2(qg, 0) = dx2
+ QG_DY1(qg, 0) = dy1
+ QG_DY2(qg, 0) = dy2
+
+ # Get ccdsec.
+ call hdmgstr (im, "ccdsec", section, SZ_LINE)
+ cx1 = dx1
+ cx2 = dx2
+ cxs = 1
+ cy1 = dy1
+ cy2 = dy2
+ cys = 1
+ call ccd_section (section, cx1, cx2, cxs, cy1, cy2, cys)
+ QG_CX1(qg, 0) = cx1
+ QG_CX2(qg, 0) = cx2
+ QG_CY1(qg, 0) = cy1
+ QG_CY2(qg, 0) = cy2
+
+
+ do amp = 1, QG_NAMPS (qg) {
+
+ # Get AMPSECmn for each readout
+ call sprintf (keyword, SZ_LINE, "AMPSEC%s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmgstr (im, keyword, section, SZ_LINE)
+ ax1 = 1
+ ax2 = nx
+ axs = 1
+ ay1 = 1
+ ay2 = ny
+ ays = 1
+ call ccd_section (section, ax1, ax2, axs, ay1, ay2, ays)
+ QG_AX1(qg, amp) = ax1
+ QG_AX2(qg, amp) = ax2
+ QG_AY1(qg, amp) = ay1
+ QG_AY2(qg, amp) = ay2
+
+
+ # Set X and Y dimensions of subimage read out by each amplifier
+ QG_NX(qg, amp) = ax2 - ax1 + 1
+ QG_NY(qg, amp) = ay2 - ay1 + 1
+
+ # Set datasec and trimsec for each sub-image
+ QG_DX1(qg, amp) = 1
+ QG_DX2(qg, amp) = QG_NX(qg, amp)
+ QG_DY1(qg, amp) = 1
+ QG_DY2(qg, amp) = QG_NY(qg, amp)
+
+ QG_TX1(qg, amp) = 1
+ QG_TX2(qg, amp) = QG_NX(qg, amp)
+ QG_TY1(qg, amp) = 1
+ QG_TY2(qg, amp) = QG_NY(qg, amp)
+ }
+
+ # Determine ccdsec for each sub-image.
+ do y = 1, QG_NAMPSY(qg) {
+ amp = QG_AMP (qg, 1, y)
+ QG_CX1(qg, amp) = QG_CX1(qg, 0)
+ QG_CX2(qg, amp) = QG_CX1(qg, amp) + QG_NX(qg, amp) - 1
+ do x = 2, QG_NAMPSX(qg) {
+ amp = QG_AMP (qg, x, y)
+ pre = QG_AMP (qg, x-1, y)
+ QG_CX1(qg, amp) = QG_CX2(qg, pre) + 1
+ QG_CX2(qg, amp) = QG_CX1(qg, amp) + QG_NX(qg, amp) - 1
+ }
+ }
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP (qg, x, 1)
+ QG_CY1(qg, amp) = QG_CY1(qg, 0)
+ QG_CY2(qg, amp) = QG_CY1(qg, amp) + QG_NY(qg, amp) - 1
+ do y = 2, QG_NAMPSY(qg) {
+ amp = QG_AMP (qg, x, y)
+ pre = QG_AMP (qg, x, y-1)
+ QG_CY1(qg, amp) = QG_CY2(qg, pre) + 1
+ QG_CY2(qg, amp) = QG_CY1(qg, amp) + QG_NY(qg, amp) - 1
+ }
+ }
+end
diff --git a/noao/imred/quadred/src/quad/quadjoin.par b/noao/imred/quadred/src/quad/quadjoin.par
new file mode 100644
index 00000000..8ebf6582
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadjoin.par
@@ -0,0 +1,4 @@
+input,s,a,"",,,Input root name
+output,s,h,"",,,"Output image name
+"
+delete,b,h,yes,,,"Delete sub-images on completion"
diff --git a/noao/imred/quadred/src/quad/quadjoin.x b/noao/imred/quadred/src/quad/quadjoin.x
new file mode 100644
index 00000000..0ef94394
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadjoin.x
@@ -0,0 +1,638 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+procedure t_quadjoin ()
+
+char input[SZ_FNAME] #TI Input image root name.
+char output[SZ_FNAME] #TI Output image name.
+char instrument[SZ_FNAME] #TI Instrument translation file.
+bool delete #TI delete sub-images when done.
+
+int namps, amp
+pointer in[QG_MAXAMPS], out, qg
+char logstr[SZ_LINE]
+bool inplace
+
+pointer immap()
+int quadmap(), imaccess()
+bool streq(), clgetb()
+
+errchk ccddelete()
+
+begin
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Get input image name and output image names.
+ call clgstr ("input", input, SZ_FNAME)
+ call xt_imroot (input, input, SZ_FNAME)
+ call clgstr ("output", output, SZ_FNAME)
+
+ # If the output name is null the operation is "done in place, one
+ # removed". That is:
+ # the sub-images are combined to form a temporary image
+ # the ORIGINAL PARENT IMAGE is deleted or copied to a backup image
+ # the temporary image is renamed to the original parent image
+ #
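+ # (Illustrative example: with input = "obj012" and output = "", the
+ # sub-images obj012.11 ... obj012.22 are joined into a temporary image
+ # which then replaces obj012 itself.)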
+ if (streq (output, "")) {
+ call mktemp ("tmp", output, SZ_FNAME)
+ inplace = true
+ } else {
+ inplace = false
+ }
+
+ # Get delete sub-image flag
+ delete = clgetb ("delete")
+
+ # Allocate quadgeom structure
+ call quadalloc (qg)
+
+ # Open input sub-images
+ namps = quadmap (input, READ_ONLY, false, 0, qg, in)
+
+# call quaddump (qg)
+
+ # Open output image
+ out = immap (output, NEW_COPY, in[1])
+
+ # Merge header information to form header for composite image.
+# call quadjoinhdr (in, out, qg)
+ call quadjoinhdr2 (in, out, qg)
+
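+ # Dispatch on the output pixel type; qjoins/qjoini/qjoinl/qjoinr/qjoind
+ # are the type-specific join routines (presumably generated from a
+ # generic-type source elsewhere in this package).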
+ switch (IM_PIXTYPE(out)) {
+ case TY_USHORT, TY_SHORT:
+ call qjoins (in, out, qg)
+
+ case TY_INT:
+ call qjoini (in, out, qg)
+
+ case TY_LONG:
+ call qjoinl (in, out, qg)
+
+ case TY_REAL:
+ call qjoinr (in, out, qg)
+
+ case TY_DOUBLE:
+ call qjoind (in, out, qg)
+
+ default:
+ call error (1, "unsupported pixel datatype")
+ }
+
+ # Log operation
+ if (QG_NAMPSX(qg) == 2 && QG_NAMPSY(qg) == 2) {
+ call sprintf (logstr, SZ_LINE, "Quad-readout image")
+ } else if (QG_NAMPSX(qg) == 2 || QG_NAMPSY(qg) == 2) {
+ call sprintf (logstr, SZ_LINE, "Dual-readout image: nampsx=%d nampsy=%d")
+ call pargi (QG_NAMPSX(qg))
+ call pargi (QG_NAMPSY(qg))
+ } else {
+ call sprintf (logstr, SZ_LINE, "Single-readout image")
+ }
+ call timelog (logstr, SZ_LINE)
+ call ccdlog (input, logstr)
+
+ # Tidy up
+ call imunmap (out)
+ do amp = 1, namps
+ call imunmap (in[amp])
+
+ # Delete sub-images
+ if (delete)
+ call quaddelete (qg, input)
+
+ if (inplace) {
+ # Replace the input by the output image.
+ if (imaccess (input, READ_ONLY) == YES) {
+ iferr (call ccddelete (input)) {
+ call imdelete (output)
+ call error (0, "Can't delete or make backup of original image")
+ }
+ }
+ call imrename (output, input)
+ }
+
+ call quadfree (qg)
+ call hdmclose ()
+end
+
+# Merge header information and write to header of output image.
+
+procedure quadjoinhdr (in, out, qg)
+
+pointer in[ARB] #I Pointer to input sub-images.
+pointer out #I Pointer to output image.
+pointer qg #I Pointer to quadgeom structure.
+
+char keyword[SZ_LINE], section[SZ_LINE], buffer[SZ_LINE]
+real rval, ccdmean
+int amp, brk
+
+int hdmaccf(), strsearch()
+real hdmgetr()
+
+begin
+ # Set image dimensions
+ IM_LEN (out, 1) = QG_NX(qg, 0)
+ IM_LEN (out, 2) = QG_NY(qg, 0)
+
+ # Add defined sections to output image header.
+ if ((QG_DX1 (qg, 0) != 0) && (hdmaccf (out, "trim") == NO)) {
+ call sprintf (section, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_DX1(qg, 0))
+ call pargi (QG_DX2(qg, 0))
+ call pargi (QG_DY1(qg, 0))
+ call pargi (QG_DY2(qg, 0))
+ call hdmpstr (out, "datasec", section)
+
+ call sprintf (section, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_TX1(qg, 0))
+ call pargi (QG_TX2(qg, 0))
+ call pargi (QG_TY1(qg, 0))
+ call pargi (QG_TY2(qg, 0))
+ call hdmpstr (out, "trimsec", section)
+
+ call sprintf (section, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_BX1(qg, 0))
+ call pargi (QG_BX2(qg, 0))
+ call pargi (QG_BY1(qg, 0))
+ call pargi (QG_BY2(qg, 0))
+ call hdmpstr (out, "biassec", section)
+ }
+
+ if (QG_CX1 (qg, 0) != 0) {
+ call sprintf (section, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_CX1(qg, 0))
+ call pargi (QG_CX2(qg, 0))
+ call pargi (QG_CY1(qg, 0))
+ call pargi (QG_CY2(qg, 0))
+ call hdmpstr (out, "ccdsec", section)
+ }
+
+ # Set AMPSECnm
+ do amp = 1, QG_NAMPS(qg) {
+ call sprintf (keyword, SZ_LINE, "ampsec%s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ call sprintf (section, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_AX1(qg, amp))
+ call pargi (QG_AX2(qg, amp))
+ call pargi (QG_AY1(qg, amp))
+ call pargi (QG_AY2(qg, amp))
+
+ call hdmpstr (out, keyword, section)
+ }
+
+ # Tidy up processing history
+ if (hdmaccf (out, "trim") == YES) {
+ do amp = 1, QG_NAMPS(qg)
+ call mergehist (in[amp], out, "trim", Memc[QG_AMPID(qg, amp)])
+ call hdmdelf (out, "trim")
+ call strcpy ("Trimmed", buffer, SZ_LINE)
+ call timelog (buffer, SZ_LINE)
+ call hdmpstr (out, "trim", buffer)
+ }
+
+ if (hdmaccf (out, "overscan") == YES) {
+ do amp = 1, QG_NAMPS(qg)
+ call mergehist (in[amp], out, "overscan", Memc[QG_AMPID(qg, amp)])
+ call hdmdelf (out, "overscan")
+ call strcpy ("Overscan corrected", buffer, SZ_LINE)
+ call timelog (buffer, SZ_LINE)
+ call hdmpstr (out, "overscan", buffer)
+ }
+
+ if (hdmaccf (out, "ccdmean") == YES) {
+ ccdmean = 0.0
+ do amp = 1, QG_NAMPS(qg) {
+ rval = hdmgetr (in[amp], "ccdmean")
+ ccdmean = ccdmean + rval
+ call sprintf (keyword, SZ_LINE, "ccdmea%s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmputr (out, keyword, rval)
+ }
+ ccdmean = ccdmean / QG_NAMPS(qg)
+ call hdmdelf (out, "ccdmean")
+ call hdmputr (out, "ccdmean", ccdmean)
+ }
+
+ # Move CCDPROC keyword to end of header
+ if (hdmaccf (out, "ccdproc") == YES) {
+ call hdmgstr (in, "ccdproc", buffer, SZ_LINE)
+ call hdmdelf (out, "ccdproc")
+ brk = strsearch (buffer, "CCD")
+ if (brk !=0)
+ call strcpy (buffer[brk-3], buffer, SZ_LINE)
+ call timelog (buffer, SZ_LINE)
+ call hdmpstr (out, "ccdproc", buffer)
+ }
+end
+
+define SZ_KEYWRD 8 # Number of chars in FITS keyword
+
+define REVSTRING "1.000 09Mar94 (Included amplifier geometry keywords)"
+
+# Merge header information and write to header of output image.
+
+procedure quadjoinhdr2 (in, out, qg)
+
+pointer in[ARB] #I Pointer to input sub-images.
+pointer out #I Pointer to output image.
+pointer qg #I Pointer to quadgeom structure.
+
+pointer sp, keyword, section, buffer
+real rval, ccdmean
+int amp, brk, ch
+
+int ax1, ax2, ay1, ay2
+int bx1, bx2, by1, by2
+int dx1, dx2, dy1, dy2
+int tx1, tx2, ty1, ty2
+
+int hdmaccf(), strsearch()
+real hdmgetr()
+
+begin
+ call smark (sp)
+ call salloc (keyword, SZ_KEYWRD, TY_CHAR)
+ call salloc (section, SZ_LINE, TY_CHAR)
+ call salloc (buffer, SZ_LINE, TY_CHAR)
+
+ # Set image dimensions
+ IM_LEN (out, 1) = QG_NX(qg, 0)
+ IM_LEN (out, 2) = QG_NY(qg, 0)
+
+ # Set the header revision level if not already set.
+ if (hdmaccf (out, "HDR_REV") == NO) {
+ call hdmpstr (out, "HDR_REV", REVSTRING)
+ }
+
+ # Update nampsyx and amplist
+ call sprintf (Memc[buffer], SZ_LINE, "%d %d")
+ call pargi (QG_NAMPSY(qg))
+ call pargi (QG_NAMPSX(qg))
+ call hdmpstr (out, "nampsyx", Memc[buffer])
+
+ ch = 1
+ do amp = 1, QG_NAMPS(qg) {
+ call sprintf (Memc[buffer+ch-1], 3, "%2s ")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ ch = ch + 3
+ }
+ call hdmpstr (out, "amplist", Memc[buffer])
+
+ # Update geometry keywords for each amplifier in the header.
+ # If the corresponding section is undefined, any old keywords are deleted.
+ # The TSECyx, DSECyx and BSECyx keywords are only retained if the image
+ # has not been trimmed.
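+ # For example, for amplifier "21" the keywords involved are ASEC21,
+ # BSEC21, CSEC21, DSEC21 and TSEC21.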
+ do amp = 1, QG_NAMPS (qg) {
+
+ # Ampsec (ASECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_LINE, "ASEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ ax1 = QG_AX1 (qg, amp)
+ ax2 = QG_AX2 (qg, amp)
+ ay1 = QG_AY1 (qg, amp)
+ ay2 = QG_AY2 (qg, amp)
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (ax1)
+ call pargi (ax2)
+ call pargi (ay1)
+ call pargi (ay2)
+
+ call hdmpstr (out, Memc[keyword], Memc[section])
+
+ # Biassec (BSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_LINE, "BSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if ((hdmaccf (out, "trim") == NO) && (QG_BX1 (qg, amp) != 0)) {
+
+
+ bx1 = QG_BX1 (qg, amp) + ax1 - 1
+ bx2 = QG_BX2 (qg, amp) + ax1 - 1
+ by1 = QG_BY1 (qg, amp) + ay1 - 1
+ by2 = QG_BY2 (qg, amp) + ay1 - 1
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (bx1)
+ call pargi (bx2)
+ call pargi (by1)
+ call pargi (by2)
+
+ call hdmpstr (out, Memc[keyword], Memc[section])
+
+ } else if (hdmaccf (out, Memc[keyword]) == YES) {
+
+ call hdmdelf (out, Memc[keyword])
+
+ }
+
+ # CCDsec (CSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_LINE, "CSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if ((hdmaccf (out, "trim") == NO) && (QG_CX1 (qg, amp) != 0)) {
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_CX1(qg, amp))
+ call pargi (QG_CX2(qg, amp))
+ call pargi (QG_CY1(qg, amp))
+ call pargi (QG_CY2(qg, amp))
+
+ call hdmpstr (out, Memc[keyword], Memc[section])
+
+ } else if (hdmaccf (out, Memc[keyword]) == YES) {
+
+ call hdmdelf (out, Memc[keyword])
+
+ }
+
+ # Datasec (DSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_LINE, "DSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if ((hdmaccf (out, "trim") == NO) && (QG_DX1 (qg, amp) != 0)) {
+
+ dx1 = QG_DX1 (qg, amp) + ax1 - 1
+ dx2 = QG_DX2 (qg, amp) + ax1 - 1
+ dy1 = QG_DY1 (qg, amp) + ay1 - 1
+ dy2 = QG_DY2 (qg, amp) + ay1 - 1
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (dx1)
+ call pargi (dx2)
+ call pargi (dy1)
+ call pargi (dy2)
+ call hdmpstr (out, Memc[keyword], Memc[section])
+
+ } else if (hdmaccf (out, Memc[keyword]) == YES) {
+
+ call hdmdelf (out, Memc[keyword])
+
+ }
+
+ # Trimsec (TSECyx keyword)
+ #
+ call sprintf (Memc[keyword], SZ_LINE, "TSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+
+ if ((hdmaccf (out, "trim") == NO) && (QG_TX1 (qg, amp) != 0)) {
+
+
+ tx1 = QG_TX1 (qg, amp) + ax1 - 1
+ tx2 = QG_TX2 (qg, amp) + ax1 - 1
+ ty1 = QG_TY1 (qg, amp) + ay1 - 1
+ ty2 = QG_TY2 (qg, amp) + ay1 - 1
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (tx1)
+ call pargi (tx2)
+ call pargi (ty1)
+ call pargi (ty2)
+ call hdmpstr (out, Memc[keyword], Memc[section])
+
+ } else if (hdmaccf (out, Memc[keyword]) == YES) {
+
+ call hdmdelf (out, Memc[keyword])
+
+ }
+
+ }
+
+ # Delete biassec, ccdsec, datasec and trimsec if present.
+ if (hdmaccf (out, "biassec") == YES) {
+ call hdmdelf (out, "biassec")
+ }
+
+ if (hdmaccf (out, "datasec") == YES) {
+ call hdmdelf (out, "datasec")
+ }
+
+ if (hdmaccf (out, "trimsec") == YES) {
+ call hdmdelf (out, "trimsec")
+ }
+
+ if (hdmaccf (out, "ccdsec") == YES) {
+ call hdmdelf (out, "ccdsec")
+ }
+
+ # If image has been trimmed insert CCDSEC for entire image. This is
+ # derived from the CCDSEC's for the sub-images in the BLH and TRH
+ # corners.
+ if (hdmaccf (out, "trim") == YES) {
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_CX1(qg, 1))
+ call pargi (QG_CX2(qg, QG_NAMPS(qg)))
+ call pargi (QG_CY1(qg, 1))
+ call pargi (QG_CY2(qg, QG_NAMPS(qg)))
+
+ call hdmpstr (out, "CCDSEC", Memc[section])
+ }
+
+ # Tidy up processing history as appropriate
+
+ # Overscan Subtraction
+ if (hdmaccf (out, "overscan") == YES) {
+ do amp = 1, QG_NAMPS(qg)
+ call merge_overscan (in[amp], out, Memc[QG_AMPID(qg, amp)])
+
+ call hdmdelf (out, "overscan")
+ call strcpy ("Overscan corrected", Memc[buffer], SZ_LINE)
+ call timelog (Memc[buffer], SZ_LINE)
+ call hdmpstr (out, "overscan", Memc[buffer])
+ }
+
+ # Trimming.
+ if (hdmaccf (out, "trim") == YES) {
+
+ do amp = 1, QG_NAMPS(qg)
+ call merge_trim (in[amp], out, Memc[QG_AMPID(qg, amp)])
+
+ call hdmdelf (out, "trim")
+ call strcpy ("Trimmed", Memc[buffer], SZ_LINE)
+ call timelog (Memc[buffer], SZ_LINE)
+ call hdmpstr (out, "trim", Memc[buffer])
+
+ }
+
+ # CCDMEAN
+ if (hdmaccf (out, "ccdmean") == YES) {
+ ccdmean = 0.0
+ do amp = 1, QG_NAMPS(qg) {
+ rval = hdmgetr (in[amp], "ccdmean")
+ ccdmean = ccdmean + rval
+ call sprintf (Memc[keyword], SZ_LINE, "ccdmea%s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ call hdmputr (out, Memc[keyword], rval)
+ }
+ ccdmean = ccdmean / QG_NAMPS(qg)
+ call hdmdelf (out, "ccdmean")
+ call hdmputr (out, "ccdmean", ccdmean)
+ }
+
+ # Move CCDPROC keyword to end of header
+ if (hdmaccf (out, "ccdproc") == YES) {
+ call hdmgstr (in, "ccdproc", Memc[buffer], SZ_LINE)
+ call hdmdelf (out, "ccdproc")
+ brk = strsearch (Memc[buffer], "CCD")
+ if (brk !=0)
+ call strcpy (Memc[buffer+brk-4], Memc[buffer], SZ_LINE)
+ call timelog (Memc[buffer], SZ_LINE)
+ call hdmpstr (out, "ccdproc", Memc[buffer])
+ }
+
+ call sfree (sp)
+end
+
+define OVSC_FMT1 "Overscan section is %s with mean=%g"
+define OVSC_FMT2 "Overscan section is %s"
+define OVSC_FMT3 "Overscan section is %s with function=%s"
+
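+# MERGE_OVERSCAN -- Copy a sub-image's overscan history into the joined
+# header under <overscan-keyword><ampid> (e.g. OVERSC11 for amplifier 11,
+# assuming the keyword root truncates to "OVERSC" in the translation file).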
+procedure merge_overscan (in, out, ampid)
+
+pointer in # Input quadrant image
+pointer out # Output image
+char ampid[2] # Label for readout
+
+pointer sp, buffer, amplifier, biassec, func, rootname, fullname
+real mean
+int idx
+
+int hdmaccf(), stridx(), nscan()
+
+
+begin
+ call smark (sp)
+ call salloc (buffer, SZ_LINE, TY_CHAR)
+ call salloc (amplifier, SZ_LINE, TY_CHAR)
+ call salloc (biassec, SZ_LINE, TY_CHAR)
+ call salloc (func, SZ_LINE, TY_CHAR)
+ call salloc (rootname, SZ_KEYWRD, TY_CHAR)
+ call salloc (fullname, SZ_KEYWRD, TY_CHAR)
+
+ if (hdmaccf (out, "overscan") == YES) {
+
+ # Get BSECyx
+ call sprintf (Memc[fullname], SZ_LINE, "BSEC%2s")
+ call pargstr (ampid)
+ call hdmgstr (in, Memc[fullname], Memc[biassec], SZ_LINE)
+
+ # Get overscan flag and retrieve the mean value if present
+ call hdmgstr (in, "overscan", Memc[buffer], SZ_LINE)
+ idx = stridx ("=", Memc[buffer])
+ if (idx == 0) {
+ call sprintf (Memc[buffer], SZ_LINE, OVSC_FMT2)
+ call pargstr (Memc[biassec])
+
+ } else {
+ call sscan (Memc[buffer+idx])
+ call gargr (mean)
+ if (nscan() == 1) {
+ call sprintf (Memc[buffer], SZ_LINE, OVSC_FMT1)
+ call pargstr (Memc[biassec])
+ call pargr (mean)
+ } else {
+ call strcpy (Memc[buffer+idx], Memc[func], SZ_LINE)
+ call sprintf (Memc[buffer], SZ_LINE, OVSC_FMT3)
+ call pargstr (Memc[biassec])
+ call pargstr (Memc[func])
+ }
+ }
+
+ # Get overscan keyword name and append AMP_ID
+ call hdmname ("overscan", Memc[rootname], 6)
+ call strcpy (Memc[rootname], Memc[fullname], 6)
+ call strcat (ampid, Memc[fullname], SZ_KEYWRD)
+
+ # Write new overscan keyword.
+ call timelog (Memc[buffer], SZ_LINE)
+ call hdmpstr (out, Memc[fullname], Memc[buffer])
+
+ # And record operation in logfile
+ call sprintf (Memc[amplifier], SZ_LINE, " AMP%s")
+ call pargstr (ampid)
+ call ccdlog (Memc[amplifier], Memc[buffer])
+
+ }
+
+ call sfree (sp)
+end
+
+define TRIM_FMT "Trim data section is %s"
+
+procedure merge_trim (in, out, ampid)
+
+pointer in # Input quadrant image
+pointer out # Output image
+char ampid[2] # Label for readout
+
+pointer sp, buffer, amplifier, trimsec, rootname, fullname
+
+int hdmaccf()
+
+
+begin
+ call smark (sp)
+ call salloc (buffer, SZ_LINE, TY_CHAR)
+ call salloc (amplifier, SZ_LINE, TY_CHAR)
+ call salloc (trimsec, SZ_LINE, TY_CHAR)
+ call salloc (rootname, SZ_KEYWRD, TY_CHAR)
+ call salloc (fullname, SZ_KEYWRD, TY_CHAR)
+
+ if (hdmaccf (out, "trim") == YES) {
+
+ # Get TSECyx
+ call sprintf (Memc[fullname], SZ_LINE, "TSEC%2s")
+ call pargstr (ampid)
+ call hdmgstr (in, Memc[fullname], Memc[trimsec], SZ_LINE)
+
+ call sprintf (Memc[buffer], SZ_LINE, TRIM_FMT)
+ call pargstr (Memc[trimsec])
+
+ # Get trim keyword name and append AMP_ID
+ call hdmname ("trim", Memc[rootname], 6)
+ call strcpy (Memc[rootname], Memc[fullname], 6)
+ call strcat (ampid, Memc[fullname], SZ_KEYWRD)
+
+ # Write new trim keyword.
+ call timelog (Memc[buffer], SZ_LINE)
+ call hdmpstr (out, Memc[fullname], Memc[buffer])
+
+ # And record operation in logfile
+ call sprintf (Memc[amplifier], SZ_LINE, " AMP%s")
+ call pargstr (ampid)
+ call ccdlog (Memc[amplifier], Memc[buffer])
+ }
+
+ call sfree (sp)
+end
+
+procedure mergehist (in, out, keyword, ampid)
+
+pointer in # Input quadrant image
+pointer out # Output image
+char keyword[ARB] # Header keyword to modify
+char ampid[2] # Label for readout
+
+char rootname[6], fullname[8]
+char buffer[SZ_LINE]
+
+int hdmaccf()
+
+begin
+ if (hdmaccf (out, keyword) == YES) {
+ call hdmgstr (in, keyword, buffer, SZ_LINE)
+ call hdmname (keyword, rootname, 6)
+ call strcpy (rootname, fullname, 6)
+ call strcat (ampid, fullname, 8)
+ call hdmpstr (out, fullname, buffer)
+ }
+end
diff --git a/noao/imred/quadred/src/quad/quadmap.x b/noao/imred/quadred/src/quad/quadmap.x
new file mode 100644
index 00000000..db0a052b
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadmap.x
@@ -0,0 +1,297 @@
+include <imhdr.h>
+include <error.h>
+include "quadgeom.h"
+
+# QUADMAP -- Map subimages, one for each readout, for input or output
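+# Sub-images are named <rootname>.<ampid> (e.g. obj012.11 for the readout in
+# the bottom-left corner; the root name here is illustrative only).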
+
+int procedure quadmap (rootname, mode, clobber, in, qg, out)
+
+char rootname[SZ_FNAME] #I Root name for output images.
+int mode #I Access mode.
+bool clobber #I Clobber existing output images.
+pointer in #I Input image pointer (for NEW_COPY).
+pointer qg #I Pointer to quadgeom structure.
+pointer out[ARB] #O Array of imio pointers for sub-images.
+
+int nopen #O Number of subimages mapped.
+
+int i, j, x, y, nx[QG_MAXAMPS], nampsx, nampsy
+char fullname[SZ_LINE], id[SZ_AMPID]
+
+pointer immap()
+int ahivi(), imaccess()
+
+begin
+ switch (mode) {
+ case NEW_COPY, NEW_IMAGE:
+
+ # Loop over active readouts
+ nopen = 0
+ do i = 1, QG_NAMPS(qg) {
+
+ nopen = nopen + 1
+
+ # The sub-section image need only be written if this is not a
+ # phantom
+ if (QG_PHANTOM (qg, i) == NO) {
+
+ # Make sub-image name
+ call sprintf (fullname, SZ_LINE, "%s.%s")
+ call pargstr (rootname)
+ call pargstr (Memc[QG_AMPID(qg, nopen)])
+
+ # If clobber is set then we can delete any pre-existing
+ # sub-images. Otherwise it is an error if the sub-image already
+ # exists. However we leave it to the immap call to find out.
+ if (clobber) {
+ if (imaccess (fullname, READ_ONLY) == YES)
+ call imdelete (fullname)
+ }
+
+ iferr (out[nopen] = immap (fullname, mode, in)) {
+ nopen = nopen - 1
+ do j = 1, nopen
+ call imunmap (out[j])
+ call erract (EA_ERROR)
+ }
+
+ call quadwritehdr (qg, out[nopen], i)
+
+ } else {
+ out[nopen] = NULL
+ }
+ }
+
+
+ case READ_ONLY, READ_WRITE:
+
+ # Loop over full grid of possible readout positions.
+ nopen = 0
+ do y = 1, QG_MAXAMPS {
+ nx[y] = 0
+ do x = 1, QG_MAXAMPS {
+
+ # Make readout id string
+ call sprintf (id, SZ_AMPID, "%1d%1d")
+ call pargi (y)
+ call pargi (x)
+
+ # Make sub-image name
+ call sprintf (fullname, SZ_LINE, "%s.%s")
+ call pargstr (rootname)
+ call pargstr (id)
+
+ # Attempt to map it.
+ nopen = nopen + 1
+ if (nopen > QG_MAXAMPS) {
+ nopen = nopen - 1
+ next
+ }
+
+ # Skip to next grid position if sub-image does not exist.
+ iferr (out[nopen] = immap (fullname, mode, in)) {
+ nopen = nopen - 1
+ next
+ }
+ nx[y] = nx[y] + 1
+ call quadreadhdr (qg, out[nopen], nopen, id)
+ }
+ }
+
+ nampsx = ahivi (nx, QG_MAXAMPS)
+ nampsy = nopen / nampsx
+ QG_NAMPS(qg) = nopen
+ QG_NAMPSX(qg) = nampsx
+ QG_NAMPSY(qg) = nampsy
+
+ # Consolidate quadgeom structure and perform consistency checks
+# call quaddump (qg)
+ call quadmerge (qg)
+
+ }
+
+ return (nopen)
+end
+
+# QUADWRITEHDR -- Add dimensions and section information to image header.
+
+procedure quadwritehdr (qg, im, readout)
+
+pointer im #I Pointer to output sub-image image.
+pointer qg #I Pointer to open quadgeom structure.
+int readout #I readout number.
+
+int amp
+pointer sp, section, keyword
+
+int hdmaccf()
+
+begin
+ call smark (sp)
+ call salloc (section, SZ_LINE, TY_CHAR)
+ call salloc (keyword, SZ_LINE, TY_CHAR)
+
+ IM_LEN (im, 1) = QG_NX(qg, readout)
+ IM_LEN (im, 2) = QG_NY(qg, readout)
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_DX1(qg, readout))
+ call pargi (QG_DX2(qg, readout))
+ call pargi (QG_DY1(qg, readout))
+ call pargi (QG_DY2(qg, readout))
+ call hdmpstr (im, "datasec", Memc[section])
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_TX1(qg, readout))
+ call pargi (QG_TX2(qg, readout))
+ call pargi (QG_TY1(qg, readout))
+ call pargi (QG_TY2(qg, readout))
+ call hdmpstr (im, "trimsec", Memc[section])
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_BX1(qg, readout))
+ call pargi (QG_BX2(qg, readout))
+ call pargi (QG_BY1(qg, readout))
+ call pargi (QG_BY2(qg, readout))
+ call hdmpstr (im, "biassec", Memc[section])
+
+ call sprintf (Memc[section], SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (QG_CX1(qg, readout))
+ call pargi (QG_CX2(qg, readout))
+ call pargi (QG_CY1(qg, readout))
+ call pargi (QG_CY2(qg, readout))
+ call hdmpstr (im, "ccdsec", Memc[section])
+
+ # Delete zSECyx keywords for all other amps from header
+ do amp = 1, QG_NAMPS(qg) {
+ if (amp != readout) {
+ call sprintf (Memc[keyword], SZ_LINE, "ASEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ if (hdmaccf (im, Memc[keyword]) == YES)
+ call hdmdelf (im, Memc[keyword])
+
+ call sprintf (Memc[keyword], SZ_LINE, "BSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ if (hdmaccf (im, Memc[keyword]) == YES)
+ call hdmdelf (im, Memc[keyword])
+
+ call sprintf (Memc[keyword], SZ_LINE, "CSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ if (hdmaccf (im, Memc[keyword]) == YES)
+ call hdmdelf (im, Memc[keyword])
+
+ call sprintf (Memc[keyword], SZ_LINE, "DSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ if (hdmaccf (im, Memc[keyword]) == YES)
+ call hdmdelf (im, Memc[keyword])
+
+ call sprintf (Memc[keyword], SZ_LINE, "TSEC%2s")
+ call pargstr (Memc[QG_AMPID(qg, amp)])
+ if (hdmaccf (im, Memc[keyword]) == YES)
+ call hdmdelf (im, Memc[keyword])
+ }
+ }
+
+ call sfree (sp)
+
+end
+
+# QUADREADHDR -- Get dimensions and section information from image header.
+
+procedure quadreadhdr (qg, im, readout, id)
+
+pointer qg #I Pointer to open quadgeom structure.
+pointer im #I Pointer to input sub-image image.
+int readout #I Readout number.
+char id[SZ_AMPID] #I Readout identifier.
+
+int nx, ny
+int dx1, dx2, dxs, dy1, dy2, dys
+int tx1, tx2, txs, ty1, ty2, tys
+int bx1, bx2, bxs, by1, by2, bys
+int cx1, cx2, cxs, cy1, cy2, cys
+pointer sp, section
+
+int hdmaccf(), strdic()
+
+begin
+ call smark (sp)
+ call salloc (section, SZ_LINE, TY_CHAR)
+
+ # Store QG_AMPID and set QG_AMPTYPE
+ call malloc (QG_AMPID(qg, readout), SZ_AMPID, TY_CHAR)
+
+ call strcpy (id, Memc[QG_AMPID(qg, readout)], SZ_AMPID)
+
+ QG_AMPTYPE (qg, readout) = strdic (id, id, SZ_AMPID, AMPDICT)
+
+ # Get input image dimensions.
+ nx = IM_LEN (im, 1)
+ ny = IM_LEN (im, 2)
+ QG_NX(qg, readout) = nx
+ QG_NY(qg, readout) = ny
+
+ # Get datasec, trimsec etc. from image header, setting a null value
+ # for any missing sections.
+ if (hdmaccf (im, "datasec") == YES) {
+ call hdmgstr (im, "datasec", Memc[section], SZ_LINE)
+ dx1 = 1
+ dx2 = nx
+ dxs = 1
+ dy1 = 1
+ dy2 = ny
+ dys = 1
+ call ccd_section (Memc[section], dx1, dx2, dxs, dy1, dy2, dys)
+ }
+ QG_DX1(qg, readout) = dx1
+ QG_DX2(qg, readout) = dx2
+ QG_DY1(qg, readout) = dy1
+ QG_DY2(qg, readout) = dy2
+
+ if (hdmaccf (im, "trimsec") == YES) {
+ call hdmgstr (im, "trimsec", Memc[section], SZ_LINE)
+ tx1 = dx1
+ tx2 = dx2
+ txs = 1
+ ty1 = dy1
+ ty2 = dy2
+ tys = 1
+ call ccd_section (Memc[section], tx1, tx2, txs, ty1, ty2, tys)
+ }
+ QG_TX1(qg, readout) = tx1
+ QG_TX2(qg, readout) = tx2
+ QG_TY1(qg, readout) = ty1
+ QG_TY2(qg, readout) = ty2
+
+ if (hdmaccf (im, "biassec") == YES) {
+ call hdmgstr (im, "biassec", Memc[section], SZ_LINE)
+ bx1 = dx2 + 1
+ bx2 = nx
+ bxs = 1
+ by1 = 1
+ by2 = ny
+ bys = 1
+ call ccd_section (Memc[section], bx1, bx2, bxs, by1, by2, bys)
+ }
+ QG_BX1(qg, readout) = bx1
+ QG_BX2(qg, readout) = bx2
+ QG_BY1(qg, readout) = by1
+ QG_BY2(qg, readout) = by2
+
+ if (hdmaccf (im, "ccdsec") == YES) {
+ call hdmgstr (im, "ccdsec", Memc[section], SZ_LINE)
+ cx1 = dx1
+ cx2 = dx2
+ cxs = 1
+ cy1 = dy1
+ cy2 = dy2
+ cys = 1
+ call ccd_section (Memc[section], cx1, cx2, cxs, cy1, cy2, cys)
+ }
+ QG_CX1(qg, readout) = cx1
+ QG_CX2(qg, readout) = cx2
+ QG_CY1(qg, readout) = cy1
+ QG_CY2(qg, readout) = cy2
+
+ call sfree (sp)
+end
diff --git a/noao/imred/quadred/src/quad/quadmerge.x b/noao/imred/quadred/src/quad/quadmerge.x
new file mode 100644
index 00000000..ec75d286
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadmerge.x
@@ -0,0 +1,122 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+procedure quadmerge (qg)
+
+pointer qg #I Pointer to quadgeom structure.
+
+int nx, ny, xdata, ydata, txskip1, txskip2, tyskip1, tyskip2
+int bxskip1, bxskip2, byskip1, byskip2
+int x, y, amp, pre, namps, nampsx, nampsy
+
+begin
+ namps = QG_NAMPS(qg)
+ nampsx = QG_NAMPSX(qg)
+ nampsy = QG_NAMPSY(qg)
+
+ # Check consistency of number of active readouts.
+ if (namps == 0)
+ call error (0, "No input images")
+ if (namps != nampsx * nampsy)
+ call error (0, "Incomplete or inconsistant set of sub-images")
+
+ # Determine dimensions of the composite image.
+ # We just sum the dimensions of the first row and column of sub-images.
+ # We should really check that the sub-images do form a regular grid.
+ nx = 0
+ do x = 1, nampsx {
+ nx = nx + QG_NX(qg, QG_AMP(qg, x, 1))
+ }
+ ny = 0
+ do y = 1, nampsy {
+ ny = ny + QG_NY(qg, QG_AMP(qg, 1, y))
+ }
+ QG_NX(qg, 0) = nx
+ QG_NY(qg, 0) = ny
+
+ # Calculate datasec, trimsec, and biassec, ccdsec for composite image.
+ # The required sections are those for the equivalent mono-readout image.
+ # If datasec is uninitialised assume all these sections are absent as
+ # will be the case for processed [OT] images.
+ if (QG_DX1 (qg, 1) != 0) {
+ # Calculate number of data pixels.
+ xdata = 0
+ do x = 1, nampsx {
+ amp = QG_AMP(qg, x, 1)
+ xdata = xdata + QG_DX2(qg, amp) - QG_DX1(qg, amp) + 1
+ }
+ ydata = 0
+ do y = 1, nampsy {
+ amp = QG_AMP(qg, 1, y)
+ ydata = ydata + QG_DY2(qg, amp) - QG_DY1(qg, amp) + 1
+ }
+ txskip1 = QG_TX1(qg, 1) - QG_DX1(qg, 1)
+ txskip2 = QG_DX2(qg, namps) - QG_TX2(qg, namps)
+ tyskip1 = QG_TY1(qg, 1) - QG_DY1(qg, 1)
+ tyskip2 = QG_DY2(qg, namps) - QG_TY2(qg, namps)
+
+ # Calculate width of bias strip margins.
+ switch (QG_AMPTYPE(qg, 1)) {
+ case AMP11, AMP21: # "Left amp"
+ bxskip1 = QG_BX1(qg, 1) - QG_DX2(qg, 1) - 1
+ bxskip2 = QG_NX(qg, 1) - QG_BX2(qg, 1)
+
+ case AMP12, AMP22: # "Right amp"
+ bxskip1 = QG_DX1(qg, 1) - QG_BX2(qg, 1) - 1
+ bxskip2 = QG_BX1(qg, 1) - 1
+ }
+
+ byskip1 = QG_BY1(qg, 1) - 1
+ byskip2 = QG_NY(qg, namps) - QG_BY2(qg, namps)
+
+ QG_DX1(qg, 0) = QG_DX1(qg, 1)
+ QG_DX2(qg, 0) = QG_DX1(qg, 0) + xdata - 1
+ QG_DY1(qg, 0) = QG_DY1(qg, 1)
+ QG_DY2(qg, 0) = QG_DY1(qg, 0) + ydata - 1
+
+ QG_TX1(qg, 0) = QG_DX1(qg, 0) + txskip1
+ QG_TX2(qg, 0) = QG_DX2(qg, 0) - txskip2
+ QG_TY1(qg, 0) = QG_DY1(qg, 0) + tyskip1
+ QG_TY2(qg, 0) = QG_DY2(qg, 0) - tyskip2
+
+ QG_BX1(qg, 0) = QG_DX2(qg, 0) + bxskip1 + 1
+ QG_BX2(qg, 0) = nx - bxskip2
+ QG_BY1(qg, 0) = 1 + byskip1
+ QG_BY2(qg, 0) = ny - byskip2
+ }
+
+ # Calculate ccdsec for composite image using sub-images in BLH and TRH
+ # corners.
+ if (QG_CX1 (qg, 1) != 0) {
+ QG_CX1(qg, 0) = QG_CX1(qg, 1)
+ QG_CX2(qg, 0) = QG_CX2(qg, nampsx)
+ QG_CY1(qg, 0) = QG_CY1(qg, 1)
+ QG_CY2(qg, 0) = QG_CY2(qg, namps)
+ }
+
+ # Set up "ampsec" - the section of the composite image derived from
+ # each sub-image.
+ do y = 1, nampsy {
+ amp = QG_AMP (qg, 1, y)
+ QG_AX1(qg, amp) = 1
+ QG_AX2(qg, amp) = QG_NX(qg, amp)
+ do x = 2, nampsx {
+ amp = QG_AMP (qg, x, y)
+ pre = QG_AMP (qg, x-1, y)
+ QG_AX1(qg, amp) = QG_AX2(qg, pre) + 1
+ QG_AX2(qg, amp) = QG_AX1(qg, amp) + QG_NX(qg, amp) - 1
+ }
+ }
+ do x = 1, nampsx {
+ amp = QG_AMP (qg, x, 1)
+ QG_AY1(qg, amp) = 1
+ QG_AY2(qg, amp) = QG_NY(qg, amp)
+ do y = 2, nampsy {
+ amp = QG_AMP (qg, x, y)
+ pre = QG_AMP (qg, x, y-1)
+ QG_AY1(qg, amp) = QG_AY2(qg, pre) + 1
+ QG_AY2(qg, amp) = QG_AY1(qg, amp) + QG_NY(qg, amp) - 1
+ }
+ }
+
+end
diff --git a/noao/imred/quadred/src/quad/quadproc.cl b/noao/imred/quadred/src/quad/quadproc.cl
new file mode 100644
index 00000000..3a881cd7
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadproc.cl
@@ -0,0 +1,173 @@
+procedure quadproc (images)
+
+begin
+ string ims, in_list, cal_list, qp_list
+ int nims
+ struct buffer
+
+ # Freeze input image list
+ in_list = mktemp ("uparm$tmp")
+ ccdlist (images, ccdtype="", names+, > in_list)
+ ims = "@"//in_list
+
+ # Check that the input list contains some images of the specified type.
+ ccdssselect (ims, ccdtype=ccdtype, subset="") | count | scan (nims)
+ if (nims == 0) { # Nothing to do !
+ delete (in_list, ver-)
+ return
+ }
+
+ # Set initial values for the ccdproc parameters used for fitting the
+ # overscan. These parameters may be modified during the interactive
+ # fitting process, the new values being used for all subsequent fits.
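+ # (The adjusted values are copied back from qccdproc into this task's
+ # parameters after processing; see the end of this script.)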
+ qccdproc.function = function
+ qccdproc.order = order
+ qccdproc.sample = sample
+ qccdproc.naverage = naverage
+ qccdproc.niterate = niterate
+ qccdproc.low_reject = low_reject
+ qccdproc.high_reject = high_reject
+ qccdproc.grow = grow
+ qccdproc.interactive = interactive
+
+ if (overscan || trim || fixpix) {
+ # Only those images which must be treated specially are processed
+ # with qproc:
+ # 1) Multiple readout
+ # 2) Not already trimmed
+
+ # First process [OT] any calibration images which WILL BE USED so
+ # that we can be sure they will be reduced explicitly by qproc
+ # rather than automatically within ccdproc.
+ qp_list = mktemp ("uparm$tmp")
+
+ if (noproc) {
+
+ cal_list = mktemp ("uparm$tmp")
+ qpcalimage (images=ims, only_param=yes, check=no, > cal_list)
+ qpselect ("@"//cal_list, ccdtype="", stop=no, > qp_list)
+ delete (cal_list, ver-)
+ count (qp_list) | scan (nims)
+ if (nims > 0) {
+ printf ("Calibration images which will be processed:\n")
+ qnoproc (qp_list, fixpix=fixpix, overscan=overscan,
+ trim=trim, fixfile=fixfile, xskip1=xskip1, xskip2=xskip2,
+ xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1, ytrim2=ytrim2)
+
+ printf ("\nImages from the input list:\n")
+ }
+
+ } else {
+
+ cal_list = mktemp ("uparm$tmp")
+ qpcalimage (images=ims, only_param=no, check=no, > cal_list)
+ qpselect ("@"//cal_list, ccdtype="", stop=no, > qp_list)
+ delete (cal_list, ver-)
+ count (qp_list) | scan (nims)
+ if (nims > 0) {
+ qproc (qp_list, fixpix=fixpix, overscan=overscan, trim=trim,
+ readaxis=readaxis, fixfile=fixfile, xskip1=xskip1,
+ xskip2=xskip2, xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1,
+ ytrim2=ytrim2)
+
+ }
+ }
+
+ delete (qp_list, ver-)
+
+ # Now process all the remaining images.
+ qpselect (ims, ccdtype=ccdtype, stop=no, >> qp_list)
+
+ if (noproc) {
+ qnoproc (qp_list, fixpix=fixpix, overscan=overscan,
+ trim=trim, fixfile=fixfile, xskip1=xskip1, xskip2=xskip2,
+ xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1, ytrim2=ytrim2)
+ } else {
+ qproc (qp_list, fixpix=fixpix, overscan=overscan, trim=trim,
+ readaxis=readaxis, fixfile=fixfile, xskip1=xskip1,
+ xskip2=xskip2, xtrim1=xtrim1, xtrim2=xtrim2, ytrim1=ytrim1,
+ ytrim2=ytrim2)
+ }
+
+ delete (qp_list, ver-)
+
+ }
+
+ # Do all other operations with the standard ccdproc.
+
+ if (noproc) {
+
+ # Handle those images that will be processed with qproc ....
+ qpselect (ims, ccdtype=ccdtype, stop=no, >> qp_list)
+
+ # We must also include the calibration images or ccdproc will
+ # complain about missing calibrations.
+ qpcalimage (images=ims, only_param=no, check=no, > cal_list)
+ qpselect ("@"//cal_list, ccdtype="", stop=yes, >> qp_list)
+
+ if (zerocor || darkcor || flatcor || illumcor || fringecor ||
+ readcor || scancor) {
+ qccdproc ("@"//qp_list, noproc=yes,
+ fixpix=no, overscan=no, trim=no, zerocor=zerocor,
+ darkcor=darkcor, flatcor=flatcor, illumcor=illumcor,
+ fringecor=fringecor, readcor=readcor, scancor=scancor,
+ ccdtype=ccdtype, max_cache=max_cache, readaxis=readaxis,
+ fixfile=fixfile, biassec="image", trimsec="image", zero=zero,
+ dark=dark, flat=flat, illum=illum, fringe=fringe,
+ minreplace=minreplace, scantype=scantype, nscan=nscan)
+ }
+
+ printf ("\n")
+ delete (qp_list, ver-)
+
+ # ..... and those that won't
+ qpselect (ims, ccdtype=ccdtype, stop=yes, >> qp_list)
+
+ qccdproc ("@"//qp_list, noproc=yes,
+ fixpix=fixpix, overscan=overscan, trim=trim, zerocor=zerocor,
+ darkcor=darkcor, flatcor=flatcor, illumcor=illumcor,
+ fringecor=fringecor, readcor=readcor, scancor=scancor,
+ ccdtype=ccdtype, max_cache=max_cache, readaxis=readaxis,
+ fixfile=fixfile, biassec="image", trimsec="image", zero=zero,
+ dark=dark, flat=flat, illum=illum, fringe=fringe,
+ minreplace=minreplace, scantype=scantype, nscan=nscan)
+
+ delete (qp_list, ver-)
+
+ } else {
+
+ # Validate fixfile
+ if (fixpix) {
+ match ("single_readout", fixfile) | scan (buffer)
+ if (stridx ("#", buffer) == 0) {
+ buffer = "fixfile " // fixfile //
+ " cannot be used with multi-readout images"
+ error (0, buffer)
+ }
+ }
+
+ qccdproc (ims, ccdtype=ccdtype, max_cache=max_cache, noproc=no,
+ fixpix=fixpix, overscan=overscan, trim=trim, zerocor=zerocor,
+ darkcor=darkcor, flatcor=flatcor, illumcor=illumcor,
+ fringecor=fringecor, readcor=readcor, scancor=scancor,
+ readaxis=readaxis, fixfile=fixfile, biassec="image",
+ trimsec="image", zero=zero, dark=dark, flat=flat, illum=illum,
+ fringe=fringe, minreplace=minreplace, scantype=scantype,
+ nscan=nscan, backup=backup, interactive=interactive,
+ verbose=verbose, logfile=logfile)
+
+ # Set task parameters used for overscan fitting to the ccdproc
+ # values which may have been adjusted interactively
+ function.p_value = qccdproc.function
+ order.p_value = qccdproc.order
+ sample.p_value = qccdproc.sample
+ naverage.p_value = qccdproc.naverage
+ niterate.p_value = qccdproc.niterate
+ low_reject.p_value = qccdproc.low_reject
+ high_reject.p_value = qccdproc.high_reject
+ grow.p_value = qccdproc.grow
+
+ }
+
+ delete (in_list, ver-)
+end
diff --git a/noao/imred/quadred/src/quad/quadproc.par b/noao/imred/quadred/src/quad/quadproc.par
new file mode 100644
index 00000000..edc70514
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadproc.par
@@ -0,0 +1,42 @@
+images,s,a,"",,,List of CCD images to correct
+ccdtype,s,h,"",,,CCD image type to correct
+max_cache,i,h,0,0,,Maximum image caching memory (in Mbytes)
+noproc,b,h,no,,,"List processing steps only?
+"
+fixpix,b,h,yes,,,Fix bad CCD lines and columns?
+overscan,b,h,yes,,,Apply overscan strip correction?
+trim,b,h,yes,,,Trim the image?
+zerocor,b,h,yes,,,Apply zero level correction?
+darkcor,b,h,yes,,,Apply dark count correction?
+flatcor,b,h,yes,,,Apply flat field correction?
+illumcor,b,h,no,,,Apply illumination correction?
+fringecor,b,h,no,,,Apply fringe correction?
+readcor,b,h,no,,,Convert zero level image to readout correction?
+scancor,b,h,no,,,"Convert flat field image to scan correction?
+"
+readaxis,s,h,"line","column|line",, Read out axis (column|line)
+fixfile,s,h,"",,,File describing the bad lines and columns
+xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+ytrim2,i,h,INDEF,0,,Y pixels to trim at end of data
+zero,s,h,"",,,Zero level calibration image
+dark,s,h,"",,,Dark count calibration image
+flat,s,h,"",,,Flat field images
+illum,s,h,"",,,Illumination correction images
+fringe,s,h,"",,,Fringe correction images
+minreplace,r,h,1.,,,Minimum flat field value
+scantype,s,h,"shortscan","shortscan|longscan",,Scan type (shortscan|longscan)
+nscan,i,h,1,1,,"Number of short scan lines
+"
+interactive,b,h,no,,,Fit overscan interactively?
+function,s,h,"legendre",,,Fitting function
+order,i,h,1,1,,Number of polynomial terms or spline pieces
+sample,s,h,"*",,,Sample points to fit
+naverage,i,h,1,,,Number of sample points to combine
+niterate,i,h,1,0,,Number of rejection iterations
+low_reject,r,h,3.,0.,,Low sigma rejection factor
+high_reject,r,h,3.,0.,,High sigma rejection factor
+grow,r,h,0.,0.,,Rejection growing radius
diff --git a/noao/imred/quadred/src/quad/quadscale.par b/noao/imred/quadred/src/quad/quadscale.par
new file mode 100644
index 00000000..9241c2b1
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadscale.par
@@ -0,0 +1,7 @@
+input,s,a,,,,Input image
+output,s,a,,,,Output image
+gain11,r,h,1,,,"Gain for quadrant y=1, x=1"
+gain12,r,h,1,,,"Gain for quadrant y=1, x=2"
+gain21,r,h,1,,,"Gain for quadrant y=2, x=1"
+gain22,r,h,1,,,"Gain for quadrant y=2, x=2"
+opperation,s,h,"multiply","multiply|divide",,Multiply or divide by gains
diff --git a/noao/imred/quadred/src/quad/quadscale.x b/noao/imred/quadred/src/quad/quadscale.x
new file mode 100644
index 00000000..5e594eb4
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadscale.x
@@ -0,0 +1,159 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+define OPPERATIONS "|multiply|divide|"
+define OPMULTIPLY 1
+define OPDIVIDE 2
+define TOL1 1.0e-7
+define TOL2 -1.0e-7
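+# TOL1/TOL2 bracket zero so that a "divide" request with an effectively zero
+# gain is rejected before quaddiv is called.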
+
+procedure t_quadscale ()
+
+char input[SZ_FNAME] #TI Input image name.
+char output[SZ_FNAME] #TI Output image name.
+char instrument[SZ_FNAME] #TI Instrument translation file
+
+real gain[QG_MAXAMPS] #TI Gain factor for each quadrant
+int op #TI Multiply or divide by gain factors
+
+int i
+pointer in, out, qg
+char buffer[SZ_LINE]
+
+real clgetr()
+int clgwrd(), hdmaccf()
+pointer immap()
+
+begin
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Open input image
+ call clgstr ("input", input, SZ_FNAME)
+ in = immap (input, READ_ONLY, 0)
+
+ # Allocate quadgeom structure and initialise it from image header
+ call quadalloc (qg)
+
+
+ # Fill out quadgeom structure from header depending on revision level
+ if (hdmaccf (in, "HDR_REV") == NO) {
+
+ # Check to see if the image has been processed or not
+ if (hdmaccf (in, "ccdproc") == YES) {
+ call quadgeomred (in, qg)
+ } else {
+ call quadgeom (in, qg, "", "")
+ }
+
+ } else {
+ call qghdr2 (in, qg)
+ }
+
+ # Open output image
+ call clgstr ("output", output, SZ_FNAME)
+ out = immap (output, NEW_COPY, in)
+ IM_PIXTYPE(out) = TY_REAL
+
+ # Get gain factors
+ gain[1] = clgetr ("gain11")
+ gain[2] = clgetr ("gain12")
+ gain[3] = clgetr ("gain21")
+ gain[4] = clgetr ("gain22")
+
+ # Get direction of operation
+ op = clgwrd ("opperation", buffer, SZ_LINE, OPPERATIONS)
+
+ switch (op) {
+ case OPMULTIPLY:
+ call quadmult (in, out, gain, qg)
+
+ case OPDIVIDE:
+ # Check for zero gain --> divide by zero
+ do i = 1, 4 {
+ if ((gain[i] < TOL1) && (gain[i] > TOL2)) {
+ call error (0, "Attempt to divide by gain value of zero")
+ }
+ }
+ call quaddiv (in, out, gain, qg)
+
+ }
+
+ call imunmap (in)
+ call imunmap (out)
+ call quadfree (qg)
+ call hdmclose ()
+end
+
+procedure quadmult (in, out, gain, qg)
+
+pointer in #I imio pointer for input image.
+pointer out #I imio pointer for output image.
+real gain[ARB] #I Array of gain factors.
+pointer qg #I Pointer to quadgeom structure.
+
+pointer inbuf, obuf
+int junk, nx, x, y, line, amp, amp2, off
+long invec[IM_MAXDIM], ovec[IM_MAXDIM]
+
+int imgnlr(), impnlr()
+
+begin
+
+ # Setup start vector for sequential reads
+ call amovkl (long(1), invec, IM_MAXDIM)
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnlr (in, inbuf, invec)
+ junk = impnlr (out, obuf, ovec)
+ off = 0
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ nx = QG_NX(qg, amp)
+ call amulkr (Memr[inbuf+off], gain[amp], Memr[obuf+off], nx)
+ off = off + nx
+ }
+ }
+ }
+
+end
+
+procedure quaddiv (in, out, gain, qg)
+
+pointer in #I imio pointer for input image.
+pointer out #I imio pointer for output image.
+real gain[ARB] #I Array of gain factors.
+pointer qg #I Pointer to quadgeom structure.
+
+pointer inbuf, obuf
+int junk, nx, x, y, line, amp, amp2, off
+long invec[IM_MAXDIM], ovec[IM_MAXDIM]
+
+int imgnlr(), impnlr()
+
+begin
+
+ # Setup start vector for sequential reads
+ call amovkl (long(1), invec, IM_MAXDIM)
+ call amovkl (long(1), ovec, IM_MAXDIM)
+
+ do y = 1, QG_NAMPSY(qg) {
+ amp2 = QG_AMP(qg, 1, y)
+ do line = 1, QG_NY(qg, amp2) {
+ junk = imgnlr (in, inbuf, invec)
+ junk = impnlr (out, obuf, ovec)
+ off = 0
+ do x = 1, QG_NAMPSX(qg) {
+ amp = QG_AMP(qg, x, y)
+ nx = QG_NX(qg, amp)
+ call adivkr (Memr[inbuf+off], gain[amp], Memr[obuf+off], nx)
+ off = off + nx
+ }
+ }
+ }
+
+end
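
The two worker routines above apply a per-amplifier gain factor line by line, using the quadgeom structure to find how many pixels each readout contributes to a given image line. A rough NumPy sketch of the same operation follows; the amp_slices layout and gains mapping are illustrative assumptions standing in for the quadgeom structure and the gain11..gain22 parameters, not part of the package.

    import numpy as np

    def quad_scale(data, amp_slices, gains, divide=False):
        """Scale each readout region of a multi-amplifier image by its gain.

        data       : 2-D array holding the full mosaic of readouts.
        amp_slices : dict mapping amplifier id -> (row_slice, col_slice).
        gains      : dict mapping amplifier id -> gain factor.
        divide     : if True, divide by the gain instead of multiplying.
        """
        out = np.empty_like(data, dtype=float)
        for amp, (rows, cols) in amp_slices.items():
            g = gains[amp]
            if divide:
                # Mirrors the TOL1/TOL2 zero-gain check in t_quadscale.
                if abs(g) < 1.0e-7:
                    raise ValueError("attempt to divide by gain value of zero")
                out[rows, cols] = data[rows, cols] / g
            else:
                out[rows, cols] = data[rows, cols] * g
        return out
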
diff --git a/noao/imred/quadred/src/quad/quadsections.par b/noao/imred/quadred/src/quad/quadsections.par
new file mode 100644
index 00000000..aa6ae59d
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadsections.par
@@ -0,0 +1,14 @@
+images,s,a,"",,,Input image name
+window,s,h,"datasec","|datasec|trimsec|biassec|reflect|duplicate|",,Window to apply to section
+section,s,h,"",,,Default Image section
+template,s,h,"",,,Template for formating results
+#
+## TRIM AND OVERSCAN MARGINS (override header values)"
+#xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+#xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+#xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+#xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+#ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+#ytrim2,i,h,INDEF,0,,"Y pixels to trim at end of data
+#"
+#mode,s,h,"ql"
diff --git a/noao/imred/quadred/src/quad/quadsections.x b/noao/imred/quadred/src/quad/quadsections.x
new file mode 100644
index 00000000..1d21d94c
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadsections.x
@@ -0,0 +1,447 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+# QUADSECTIONS.X -- List image sections for multi-readout (quad) CCD images.
+
+define OPTION_DICT "|biassec|datasec|trimsec|reflect|duplicate|"
+
+define OPT_BIASSEC 1
+define OPT_DATASEC 2
+define OPT_TRIMSEC 3
+define OPT_REFLECT 4
+define OPT_DUPLICATE 5
+
+define DEFAULT_MACRO "$I$S\\n"
+
+
+# QUADSECTIONS -- Quadsections task.
+
+procedure t_quadsections ()
+
+pointer inlist #TI List of input image name.
+char instrument[SZ_FNAME] #TI Instrument translation file
+char section[SZ_LINE] #TI Section of CCD required
+int option #TI Type of section required
+char format[SZ_LINE] #TI strmac macro string for building output
+int xtrim1 #TI X pixels to trim at start of data
+int xtrim2 #TI X pixels to trim at end of data
+int ytrim1 #TI Y pixels to trim at start of data
+int ytrim2 #TI Y pixels to trim at end of data
+int xskip1 #TI X pixels to skip at start of overscan
+int xskip2 #TI X pixels to skip at end of overscan
+
+char buffer[SZ_LINE], input[SZ_LINE]
+
+int clgwrd(), imtopenp(), imtgetim()
+
+begin
+ # Open input image
+ inlist = imtopenp ("images")
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Get option.
+ option = clgwrd ("window", buffer, SZ_LINE, OPTION_DICT)
+
+ # Get default section
+ call clgstr ("section", section, SZ_LINE)
+
+ # Get user defined trim and overscan margins
+ #xtrim1 = clgeti ("xtrim1")
+ #xtrim2 = clgeti ("xtrim2")
+ #ytrim1 = clgeti ("ytrim1")
+ #ytrim2 = clgeti ("ytrim2")
+ #xskip1 = clgeti ("xskip1")
+ #xskip2 = clgeti ("xskip2")
+ xtrim1 = INDEFI
+ xtrim2 = INDEFI
+ ytrim1 = INDEFI
+ ytrim2 = INDEFI
+ xskip1 = INDEFI
+ xskip2 = INDEFI
+
+ # Get format string and convert to a strmac macro string.
+ call clgstr ("template", format, SZ_LINE)
+ if (format[1] == EOS)
+ call sprintf (format, SZ_LINE, "%s")
+ call pargstr (DEFAULT_MACRO)
+ call qsmkmacro (format, format, SZ_LINE)
+
+ while (imtgetim (inlist, input, SZ_LINE) != EOF) {
+ call quadsections (input, section, xtrim1, xtrim2, ytrim1, ytrim2,
+ xskip1, xskip2, option, format)
+ }
+
+ # Tidy up
+ call hdmclose ()
+ call imtclose (inlist)
+end
+
+
+# QUADSECTIONS -- Print the requested section of one image as read through
+# each amplifier.
+
+procedure quadsections (input, section, xtrim1, xtrim2, ytrim1, ytrim2,
+ xskip1, xskip2, option, format)
+
+char input[SZ_FNAME] #I Input image name.
+char section[SZ_LINE] #I Default section specification
+int xtrim1 #I X pixels to trim at start of data
+int xtrim2 #I X pixels to trim at end of data
+int ytrim1 #I Y pixels to trim at start of data
+int ytrim2 #I Y pixels to trim at end of data
+int xskip1 #I X pixels to skip at start of overscan
+int xskip2 #I X pixels to skip at end of overscan
+int option #I Type of section required
+char format[SZ_LINE] #I strmac macro string for building output
+
+char image[SZ_LINE], argstr[SZ_LINE], buffer[SZ_LINE]
+char insection[SZ_LINE], outsection[SZ_LINE]
+int amp, arg[9], len
+int i, x1, x2, y1, y2
+
+pointer in, qg
+bool reduced
+
+pointer immap()
+int hdmaccf(), gstrcpy(), strlen(), strmac()
+bool quadsect()
+
+begin
+ # Parse input into an image name and an image section.
+ call imgimage (input, image, SZ_LINE)
+ call imgsection (input, insection, SZ_LINE)
+ # if no section was supplied in the image name use the default section.
+ if (insection[1] == EOS) {
+ call strcpy (section, insection, SZ_LINE)
+ }
+
+ # Open input image
+ in = immap (image, READ_ONLY, 0)
+
+ # Determine if image has been trimmed or not
+ reduced = (hdmaccf (in, "trim") == YES)
+
+ if (reduced) {
+ # OPT_BIASSEC does not make sense for reduced images.
+ if (option == OPT_BIASSEC)
+ return
+ # Trimsec and datasec are identical for reduced images
+ if (option == OPT_TRIMSEC)
+ option = OPT_DATASEC
+ }
+
+ # Set-up quadgeom structure
+ call quadalloc (qg)
+ if (hdmaccf (in, "HDR_REV") == NO) {
+ if (reduced) {
+ call quadgeomred (in, qg)
+ } else {
+ call quadgeom (in, qg, "", "")
+ }
+ } else {
+ call qghdr2 (in, qg)
+ }
+
+# call quaddump (qg)
+
+ # Adjust quadgeom structure for user trim and overscan margins
+ if (! reduced) {
+ call qguser (qg, xtrim1, xtrim2, ytrim1, ytrim2, xskip1, xskip2)
+ }
+# call quaddump (qg)
+
+
+ # Store image name as first argument in macro argument string "argstr"
+ arg[1] = 1
+ arg[2] = 1 + arg[1] + gstrcpy (image, argstr, SZ_LINE)
+
+ # Blank output string
+ buffer[1] = EOS
+
+ # Determine the intersection of the specified section with the portion
+ # of the image read through each readout.
+ do amp = 1, QG_NAMPS (qg) {
+
+ # skip any phantoms in raw images
+ if (QG_PHANTOM (qg, amp) == NO) {
+
+ if (quadsect (qg, insection, option, amp, x1, x2, y1, y2)) {
+ # Build resulting section string ...
+ call sprintf (outsection, SZ_LINE, "[%d:%d,%d:%d]")
+ call pargi (x1)
+ call pargi (x2)
+ call pargi (y1)
+ call pargi (y2)
+
+ # ... and save it as second argument
+ arg[3] = 1 + arg[2] + gstrcpy (outsection, argstr[arg[2]],
+ SZ_LINE-arg[2]+1)
+
+ # Save Ampid as third argument
+ call strcpy (Memc[QG_AMPID(qg, amp)], argstr[arg[3]],
+ SZ_LINE-arg[3]+1)
+
+ # Process macro string
+ i = strmac (format, argstr, buffer, SZ_LINE)
+ call printf (buffer)
+ }
+ }
+ }
+
+ # Output <lf> if format does not explicitly include one.
+ len = strlen (buffer)
+ if ((len > 2) && !(buffer[len-1]=='\\' && buffer[len]=='n')) {
+ call printf ("\n")
+ }
+
+ call flush (STDOUT)
+
+ # Tidy up
+ call quadfree (qg)
+ call imunmap (in)
+end
+
+
+# QSMKMACRO -- Perform the following substitutions on the given macro string
+#
+# $I --> $1
+# $S --> $2
+# $A --> $3
+# $? --> $?
+
+procedure qsmkmacro (instr, outstr, maxchars)
+
+char instr[ARB] #I Input macro string.
+char outstr[maxchars] #O Output macro string.
+int maxchars #I Maximum length of outstr
+
+char ch
+int ip, op
+
+begin
+
+ op = 1
+ for (ip=1; instr[ip] != EOS; ip=ip+1) {
+ ch = instr[ip]
+ outstr[op] = ch
+ op = op + 1
+ if (op > maxchars)
+ call error (0, "qsmkmacro: Output buffer overflow")
+
+ if (ch == '$') {
+ ip = ip + 1
+ ch = instr[ip]
+
+ if (ch == 'I') {
+ outstr (op) = '1'
+ op = op + 1
+ } else if (ch == 'S') {
+ outstr (op) = '2'
+ op = op + 1
+ } else if (ch == 'A') {
+ outstr (op) = '3'
+ op = op + 1
+ } else {
+ outstr (op) = ch
+ op = op + 1
+ }
+ if (op > maxchars)
+ call error (0, "qsmkmacro: Output buffer overflow")
+ }
+ }
+end
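
QSMKMACRO rewrites the template tokens $I, $S, and $A into the positional arguments $1, $2, and $3 that strmac later expands, passing any other character after a $ through unchanged. A minimal, purely illustrative Python equivalent of that token translation:

    def qs_mkmacro(template):
        """Translate $I/$S/$A template tokens into $1/$2/$3 macro arguments."""
        mapping = {"I": "1", "S": "2", "A": "3"}
        out, i = [], 0
        while i < len(template):
            ch = template[i]
            out.append(ch)
            if ch == "$" and i + 1 < len(template):
                nxt = template[i + 1]
                out.append(mapping.get(nxt, nxt))   # unknown tokens pass through
                i += 1
            i += 1
        return "".join(out)

    # qs_mkmacro("$I$S\\n") -> "$1$2\\n"
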
+
+
+# QUADSECT -- Intersect the given section with the region read through one
+# amplifier.
+
+bool procedure quadsect (qg, section, option, amp, x1, x2, y1, y2)
+
+pointer qg #I Pointer to initialised quadgeom structure.
+char section[SZ_LINE] #I Default section specification.
+int option #I Type of section required.
+int amp #I Amplifier for which section is required.
+int x1, x2, y1, y2 #O Corners of specified section.
+bool overlap #O true if part of section read through amp.
+
+int xskip, xsize, yskip, ysize
+int sx1, sx2, sy1, sy2, sxs, sys
+int dx1, dx2, dy1, dy2
+int tx1, tx2, ty1, ty2
+int bx1, bx2, by1, by2
+
+begin
+
+ # Decode input section
+ x1 = 1
+ x2 = QG_NX(qg, 0)
+ sxs = 1
+ y1 = 1
+ y2 = QG_NY(qg, 0)
+ sys = 1
+ call ccd_section (section, x1, x2, sxs, y1, y2, sys)
+ sx1 = min (x1, x2)
+ sx2 = max (x1, x2)
+ sy1 = min (y1, y2)
+ sy2 = max (y1, y2)
+
+ # Set up null return (overlap) values in case no part of section was
+ # read with this amplifier.
+ overlap = false
+ x1 = 0
+ x2 = 0
+ y1 = 0
+ y2 = 0
+
+	# Calculate supplementary quantities as required.
+ switch (option) {
+
+ case OPT_REFLECT, OPT_DUPLICATE:
+ xskip = sx1 - QG_DX1(qg, 0)
+ xsize = sx2 - sx1 + 1
+ yskip = sy1 - QG_DY1(qg, 0)
+ ysize = sy2 - sy1 + 1
+ }
+
+ # Determine the intersection of the specified section with the portion
+ # of the image read through the specified readout.
+ switch (option) {
+
+ case OPT_BIASSEC:
+ bx1 = QG_AX1(qg, amp) + QG_BX1(qg, amp) - 1
+ bx2 = QG_AX1(qg, amp) + QG_BX2(qg, amp) - 1
+ by1 = QG_AY1(qg, amp) + QG_BY1(qg, amp) - 1
+ by2 = QG_AY1(qg, amp) + QG_BY2(qg, amp) - 1
+
+ if (sx1 > bx2)
+ return (overlap)
+ if (sx2 < bx1)
+ return (overlap)
+ if (sy1 > by2)
+ return (overlap)
+ if (sy2 < by1)
+ return (overlap)
+
+ x1 = max (sx1, bx1)
+ x2 = min (sx2, bx2)
+ y1 = max (sy1, by1)
+ y2 = min (sy2, by2)
+
+ case OPT_DATASEC:
+ dx1 = QG_AX1(qg, amp) + QG_DX1(qg, amp) - 1
+ dx2 = QG_AX1(qg, amp) + QG_DX2(qg, amp) - 1
+ dy1 = QG_AY1(qg, amp) + QG_DY1(qg, amp) - 1
+ dy2 = QG_AY1(qg, amp) + QG_DY2(qg, amp) - 1
+
+ if (sx1 > dx2)
+ return (overlap)
+ if (sx2 < dx1)
+ return (overlap)
+ if (sy1 > dy2)
+ return (overlap)
+ if (sy2 < dy1)
+ return (overlap)
+
+ x1 = max (sx1, dx1)
+ x2 = min (sx2, dx2)
+ y1 = max (sy1, dy1)
+ y2 = min (sy2, dy2)
+
+ case OPT_TRIMSEC:
+ tx1 = QG_AX1(qg, amp) + QG_TX1(qg, amp) - 1
+ tx2 = QG_AX1(qg, amp) + QG_TX2(qg, amp) - 1
+ ty1 = QG_AY1(qg, amp) + QG_TY1(qg, amp) - 1
+ ty2 = QG_AY1(qg, amp) + QG_TY2(qg, amp) - 1
+
+ if (sx1 > tx2)
+ return (overlap)
+ if (sx2 < tx1)
+ return (overlap)
+ if (sy1 > ty2)
+ return (overlap)
+ if (sy2 < ty1)
+ return (overlap)
+
+ x1 = max (sx1, tx1)
+ x2 = min (sx2, tx2)
+ y1 = max (sy1, ty1)
+ y2 = min (sy2, ty2)
+
+ case OPT_REFLECT:
+ dx1 = QG_AX1(qg, amp) + QG_DX1(qg, amp) - 1
+ dx2 = QG_AX1(qg, amp) + QG_DX2(qg, amp) - 1
+ dy1 = QG_AY1(qg, amp) + QG_DY1(qg, amp) - 1
+ dy2 = QG_AY1(qg, amp) + QG_DY2(qg, amp) - 1
+
+ switch (QG_AMPTYPE(qg, amp)) {
+ case AMP11:
+ x1 = dx1 + xskip
+ x2 = x1 + xsize - 1
+ y1 = dy1 + yskip
+ y2 = y1 + ysize - 1
+
+ case AMP12:
+ x2 = dx2 - xskip
+ x1 = x2 - xsize + 1
+ y1 = dy1 + yskip
+ y2 = y1 + ysize - 1
+
+ case AMP21:
+ x1 = dx1 + xskip
+ x2 = x1 + xsize - 1
+ y2 = dy2 - yskip
+ y1 = y2 - ysize + 1
+
+ case AMP22:
+ x2 = dx2 - xskip
+ x1 = x2 - xsize + 1
+ y2 = dy2 - yskip
+ y1 = y2 - ysize + 1
+ }
+
+ if (x1 > dx2)
+ return (overlap)
+ if (x2 < dx1)
+ return (overlap)
+ if (y1 > dy2)
+ return (overlap)
+ if (y2 < dy1)
+ return (overlap)
+
+ x1 = max (x1, dx1)
+ x2 = min (x2, dx2)
+ y1 = max (y1, dy1)
+ y2 = min (y2, dy2)
+
+ case OPT_DUPLICATE:
+ dx1 = QG_AX1(qg, amp) + QG_DX1(qg, amp) - 1
+ dx2 = QG_AX1(qg, amp) + QG_DX2(qg, amp) - 1
+ dy1 = QG_AY1(qg, amp) + QG_DY1(qg, amp) - 1
+ dy2 = QG_AY1(qg, amp) + QG_DY2(qg, amp) - 1
+
+ x1 = dx1 + xskip
+ x2 = x1 + xsize - 1
+ y1 = dy1 + yskip
+ y2 = y1 + ysize - 1
+
+ if (x1 > dx2)
+ return (overlap)
+ if (x2 < dx1)
+ return (overlap)
+ if (y1 > dy2)
+ return (overlap)
+ if (y2 < dy1)
+ return (overlap)
+
+ x1 = max (x1, dx1)
+ x2 = min (x2, dx2)
+ y1 = max (y1, dy1)
+ y2 = min (y2, dy2)
+
+ }
+
+ overlap = true
+ return (overlap)
+
+end
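
At its core QUADSECT clips the requested section against the biassec, datasec, or trimsec region belonging to one amplifier and reports whether any overlap remains. That test is an ordinary rectangle intersection; the helper below sketches it in Python with the same inclusive, 1-based corner convention and the same zeroed corners on failure (it is an illustration, not code from the package).

    def intersect(sec, region):
        """Intersect two inclusive (x1, x2, y1, y2) rectangles.

        Returns (overlap, x1, x2, y1, y2); the corners are zeroed when the
        rectangles do not overlap, matching the convention used by quadsect.
        """
        sx1, sx2, sy1, sy2 = sec
        rx1, rx2, ry1, ry2 = region
        if sx1 > rx2 or sx2 < rx1 or sy1 > ry2 or sy2 < ry1:
            return False, 0, 0, 0, 0
        return (True,
                max(sx1, rx1), min(sx2, rx2),
                max(sy1, ry1), min(sy2, ry2))

    # intersect((1, 1024, 1, 1024), (513, 1060, 1, 512))
    #   -> (True, 513, 1024, 1, 512)
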
diff --git a/noao/imred/quadred/src/quad/quadsplit.par b/noao/imred/quadred/src/quad/quadsplit.par
new file mode 100644
index 00000000..274018c0
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadsplit.par
@@ -0,0 +1,9 @@
+input,s,a,"",,,Input image name
+output,s,h,"",,,Output root name
+clobber,b,h,"yes",,,Clobber preexisting subimages
+#xskip1,i,h,INDEF,0,,X pixels to skip at start of overscan
+#xskip2,i,h,INDEF,0,,X pixels to skip at end of overscan
+#xtrim1,i,h,INDEF,0,,X pixels to trim at start of data
+#xtrim2,i,h,INDEF,0,,X pixels to trim at end of data
+#ytrim1,i,h,INDEF,0,,Y pixels to trim at start of data
+#ytrim2,i,h,INDEF,0,,Y pixels to trim at end of data
diff --git a/noao/imred/quadred/src/quad/quadsplit.x b/noao/imred/quadred/src/quad/quadsplit.x
new file mode 100644
index 00000000..a6ffc95f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadsplit.x
@@ -0,0 +1,115 @@
+include <imhdr.h>
+include "quadgeom.h"
+
+procedure t_quadsplit ()
+
+char input[SZ_FNAME] #TI Input image name.
+char output[SZ_FNAME] #TI Output image root name.
+char instrument[SZ_FNAME] #TI Instrument translation file.
+bool clobber #TI Clobber any existing sub-images.
+int xtrim1 #TI Number of pixels to trim at right.
+int xtrim2 #TI Number of pixels to trim at left.
+int ytrim1 #TI Number of pixels to trim at bottom.
+int ytrim2 #TI Number of pixels to trim at top.
+int xskip1 #TI Number of pixels to skip at start of overscan in X
+int xskip2 #TI Number of pixels to skip at end of overscan in X
+
+pointer in, qg, out[QG_MAXAMPS]
+int amp, namps
+char logstr[SZ_LINE]
+
+pointer immap()
+bool streq(), clgetb()
+int quadmap(), hdmaccf()
+
+begin
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Map input image
+ call clgstr ("input", input, SZ_FNAME)
+ in = immap (input, READ_ONLY, 0)
+
+ # Get root name for output image
+ call clgstr ("output", output, SZ_FNAME)
+ if (streq (output, ""))
+ call strcpy (input, output, SZ_FNAME)
+ call xt_imroot (output, output, SZ_FNAME)
+
+ # Set-up section translation
+ call quadalloc (qg)
+
+ if (hdmaccf (in, "HDR_REV") == NO) {
+ call quadgeom (in, qg, "", "")
+ } else {
+ call qghdr2 (in, qg)
+ }
+
+ # Adjust quadgeom structure for user trim and overscan margins
+ #xtrim1 = clgeti ("xtrim1")
+ #xtrim2 = clgeti ("xtrim2")
+ #ytrim1 = clgeti ("ytrim1")
+ #ytrim2 = clgeti ("ytrim2")
+ #xskip1 = clgeti ("xskip1")
+ #xskip2 = clgeti ("xskip2")
+ xtrim1 = INDEFI
+ xtrim2 = INDEFI
+ ytrim1 = INDEFI
+ ytrim2 = INDEFI
+ xskip1 = INDEFI
+ xskip2 = INDEFI
+ call qguser (qg, xtrim1, xtrim2, ytrim1, ytrim2, xskip1, xskip2)
+
+# call quaddump (qg)
+
+ # Map output images one for each readout
+ clobber = clgetb ("clobber")
+ namps = quadmap (output, NEW_COPY, clobber, in, qg, out)
+
+ # Split the image using the appropriately typed routine
+ switch (IM_PIXTYPE(in)) {
+ case TY_USHORT, TY_SHORT:
+ call qsplits (in, out, qg)
+
+ case TY_LONG:
+ call qsplitl (in, out, qg)
+
+ case TY_INT:
+ call qspliti (in, out, qg)
+
+ case TY_REAL:
+ call qsplitr (in, out, qg)
+
+ case TY_DOUBLE:
+ call qsplitd (in, out, qg)
+
+ default:
+ call error (1, "unsupported pixel datatype")
+ }
+
+	# Log operation
+ if (QG_NAMPSX(qg) == 2 && QG_NAMPSY(qg) == 2) {
+ call sprintf (logstr, SZ_LINE, "Quad-readout image")
+ } else if (QG_NAMPSX(qg) == 2 || QG_NAMPSY(qg) == 2) {
+ call sprintf (logstr, SZ_LINE,
+ "Dual-readout image: nampsx=%d nampsy=%d")
+ call pargi (QG_NAMPSX(qg))
+ call pargi (QG_NAMPSY(qg))
+ } else {
+ call sprintf (logstr, SZ_LINE, "Single-readout image")
+ }
+ call timelog (logstr, SZ_LINE)
+ call ccdlog (input, logstr)
+
+ # Tidy up
+ call imunmap (in)
+ do amp = 1, namps {
+ if (out[amp] != NULL) {
+ call imunmap (out[amp])
+ }
+ }
+ call quadfree (qg)
+ call hdmclose ()
+end
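
QUADSPLIT copies each readout region of the mosaic into its own subimage, dispatching on the pixel datatype to a typed copy routine (qsplits, qsplitr, and so on). Stripped of the IMIO bookkeeping, the operation reduces to the sketch below; the amp_slices layout is an assumption used only for illustration.

    def quad_split(mosaic, amp_slices):
        """Split a multi-readout mosaic (2-D array) into one array per amplifier.

        amp_slices maps an amplifier id (e.g. "11") to the (row_slice,
        col_slice) it occupies in the full frame.
        """
        return {amp: mosaic[rows, cols].copy()
                for amp, (rows, cols) in amp_slices.items()}

    # Example layout for a 1060x1024 frame read through two amplifiers:
    # quad_split(mosaic, {"11": (slice(0, 1024), slice(0, 530)),
    #                     "12": (slice(0, 1024), slice(530, 1060))})
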
diff --git a/noao/imred/quadred/src/quad/quadtest/artobs.cl b/noao/imred/quadred/src/quad/quadtest/artobs.cl
new file mode 100644
index 00000000..292ed8c6
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/artobs.cl
@@ -0,0 +1,68 @@
+# ARTOBS -- Simulate observe command using artificial data.
+
+procedure artobs ()
+
+begin
+ string image, oim, ccdt
+ int picnum, nexps, i
+ real exptime
+ string imtitle
+
+ # Get ccdtype
+ ccdt = obspars.ccdtype
+
+ # Get number of pictures to take
+ nexps = npics
+
+ # Get next picture number
+ if (obspars.autopicnum) {
+ picnum = obspars.picture.p_value
+ } else {
+ picnum = obspars.picture
+ }
+
+ # Get exposure time
+ if (ccdt != "zero") {
+ exptime = obspars.exposure
+ } else {
+ exptime = 0.0
+ }
+
+ # Set filter
+ if (obspars.setfilter != "none" && ccdt != "zero" && ccdt != "dark") {
+ if (instrpars.instrname != "")
+ mkquad.filter = instrpars.filter1
+ }
+
+ # Get imtitle. This MUST always be the last interactive prompt!
+ imtitle = title
+
+ for (i = picnum; i < picnum+nexps; i = i+1) {
+
+ # Make image name
+ if (ccdt == "object") {
+ printf ("obj%03d\n", i) | scan (image)
+ } else {
+ printf ("%s%03d\n", ccdt, i) | scan (image)
+ }
+ if (access (image//".imh")) {
+ oim = image
+ image = mktemp (image//".")
+ printf ("Output image %s already exists... %s used \n", oim,
+ image)
+ }
+
+ if (ccdt == "dflat" || ccdt == "pflat")
+ ccdt = "flat"
+
+ if (ccdt == "sflat" || ccdt == "comp" )
+ ccdt = "other"
+
+ # Call MKQUAD task
+ mkquad (image, exptime, ccdt)
+ hedit (image, "i_title", imtitle, add+, ver-, show-)
+ obspars.picture.p_value = i + 1
+ printf ("Image %s written to disk\n", image, > "STDERR")
+ }
+
+end
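
The script builds output names of the form obj001, zero002, and so on from the CCD type and picture number, and falls back to a unique name when the target image already exists. A hypothetical Python rendering of that naming rule is shown below; the numeric fallback is a stand-in for the CL mktemp call, not what the script actually does.

    import os

    def next_image_name(ccdtype, picnum, directory="."):
        """Build an output name such as obj012 or zero007 without clobbering."""
        prefix = "obj" if ccdtype == "object" else ccdtype
        name = "%s%03d" % (prefix, picnum)
        candidate, n = name, 1
        while os.path.exists(os.path.join(directory, candidate + ".imh")):
            candidate = "%s.%d" % (name, n)   # stand-in for mktemp (image//".")
            n += 1
        return candidate
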
diff --git a/noao/imred/quadred/src/quad/quadtest/artobs.par b/noao/imred/quadred/src/quad/quadtest/artobs.par
new file mode 100644
index 00000000..d9611ef5
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/artobs.par
@@ -0,0 +1,5 @@
+# observe parameter file
+obspars,pset,h,,,,Observing parameters
+detpars,pset,h,,,,Detector parameters
+instrpars,pset,h,,,,Instrument parameters
+telpars,pset,h,,,,Telescope parameters
diff --git a/noao/imred/quadred/src/quad/quadtest/ccdpars.par b/noao/imred/quadred/src/quad/quadtest/ccdpars.par
new file mode 100644
index 00000000..b30fbb99
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/ccdpars.par
@@ -0,0 +1,29 @@
+ncols,i,h,1060,,,"Number of columns"
+nlines,i,h,1024,,,"Number of lines
+"
+datasec,s,h,"[1:1024,1:1024]",,,"Data section"
+trimsec,s,h,"[1:1024,1:1024]",,,"Trim section"
+biassec,s,h,"[1025:1060,1:1024]",,,"Bias section
+"
+amplifiers,s,h,"Quad","|Quad|UpperPair|LowerPair|LowerLeft|",,"Amplifiers to use
+"
+gain1,r,h,1.0,,,gain (e-/ADU) for Amp12
+ron1,r,h,4.0,,,readout noise for Amp11
+zero1,i,h,1000,,,"zero level for Amp11"
+nlin1,s,h,"",,,"Non-linearity coefficients
+"
+gain2,r,h,1.0,,,gain (e-/ADU) for Amp12
+ron2,r,h,4.0,,,readout noise for Amp12
+zero2,i,h,1000,,,"zero level for Amp12"
+nlin2,s,h,"",,,"Non-linearity coefficients
+"
+gain3,r,h,1.0,,,gain (e-/ADU) for Amp21
+ron3,r,h,4.0,,,readout noise for Amp21
+zero3,i,h,1000,,,"zero level for Amp21"
+nlin3,s,h,"",,,"Non-linearity coefficients
+"
+gain4,r,h,1.0,,,gain (e-/ADU) for Amp22
+ron4,r,h,4.0,,,readout noise for Amp22
+zero4,i,h,1000,,,"zero level for Amp22"
+nlin4,s,h,"",,,"Non-linearity coefficients
+"
diff --git a/noao/imred/quadred/src/quad/quadtest/logfile b/noao/imred/quadred/src/quad/quadtest/logfile
new file mode 100644
index 00000000..ddf97f0a
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/logfile
@@ -0,0 +1 @@
+obj99999: Dec 9 12:21 Quadjoin: nampsx=2 nampsy=2
diff --git a/noao/imred/quadred/src/quad/quadtest/mkamp.cl b/noao/imred/quadred/src/quad/quadtest/mkamp.cl
new file mode 100644
index 00000000..98cd8468
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/mkamp.cl
@@ -0,0 +1,166 @@
+# MKAMP -- Make a CCD observation
+
+procedure mkamp (image, exptime, ccdtype)
+
+string image {prompt="Image name"}
+real exptime {prompt="Exposure time"}
+string ccdtype {prompt="CCD type"}
+
+int ncols=132 {prompt="Number of columns"}
+int nlines=100 {prompt="Number of lines"}
+string filter="" {prompt="Filter"}
+string datasec="" {prompt="Data section"}
+string trimsec="" {prompt="Trim section"}
+string biassec="" {prompt="Bias section"}
+
+file imdata="" {prompt="Image data"}
+real skyrate=0. {prompt="Sky count rate"}
+real zeroval=0. {prompt="Zero level value"}
+real zeroslope=0. {prompt="Slope of zero level"}
+real flashval=0. {prompt="Preflash value"}
+real flashslope=0. {prompt="Slope of preflash value"}
+real darkrate=0. {prompt="Dark count rate"}
+real darkslope=0. {prompt="Slope of dark count rate"}
+real flatslope=0. {prompt="Flat field slope"}
+file badpix="" {prompt="Bad pixel regions"}
+real badval=0. {prompt="Bad pixel value"}
+real gain=1. {prompt="Gain (e-/adu)", min=1.0e-9}
+real ron=0. {prompt="Read out noise e-"}
+string nonlin	{prompt="Non-linearity coefficients"}
+bool poisson=yes {prompt="Add poisson noise?"}
+bool overwrite=no {prompt="Overwrite existing image?"}
+struct *fdnl {prompt="Internal use"}
+
+begin
+ int c1, c2, l1, l2, rseed, i, dummy
+ real exp, value, valslope, invgain, date, rval, coef[7]
+ string im, type, s, lincoefs, ampsec
+
+ im = image
+ exp = exptime
+ type = ccdtype
+
+ # Check for zero (or very small) gain
+ if (abs (gain) < 1.0e-9)
+ call error (0, "zero (or very small) gain specified")
+
+ invgain = 1.0 / gain
+
+ if (access (im//".imh") == yes)
+ im = im // ".imh"
+ if (access (im//".hhh") == yes)
+ im = im // ".hhh"
+ if (access (im) == yes) {
+ if (overwrite == yes)
+ imdelete (im, verify=no)
+ else
+ return
+ }
+
+ # Create the image.
+ s = str (ncols) // " " // str (nlines)
+ mkimage (im, "make", 0., 2, s, pixtype="real", slope=0., sigma=0.)
+
+ # Add a data image.
+ if (access (imdata//".imh") == yes)
+ imdata = imdata // ".imh"
+ if (access (imdata//".hhh") == yes)
+ imdata = imdata // ".hhh"
+ if (access (imdata) == yes)
+ imcopy (imdata//datasec, im//datasec, verbose=no)
+
+ # Add sky.
+ value = exp * skyrate
+ if (value != 0.)
+ mkimage (im//datasec, "add", value, slope=0., sigma=0.)
+
+ # Add flat field response.
+ if (flatslope != 0.)
+ mkimage (im//datasec, "mul", 1., slope=flatslope, sigma=0.)
+
+ # Add preflash level and dark count.
+ value = flashval + exp * darkrate
+ valslope = flashslope + exp * darkslope
+ if ((value != 0.) && (valslope != 0.))
+ mkimage (im//datasec, "add", value, slope=valslope, sigma=0.)
+
+ # Convert to ADU
+ mkimage (im//datasec, "mul", invgain, slope=0., sigma=0.)
+
+ # Add poisson and readout noise
+ # if seed is 0 pick a fairly arbitrary value
+ if (seed == 0) {
+ date | translit ("STDIN", from_string="a-zA-Z: ", delete+) |
+ scan (date)
+ rseed = abs (date / 10000)
+ } else {
+ rseed = seed
+ }
+
+ # Add non-linearity
+ if (nonlin != "") {
+ lincoefs = mktemp ("uparm$tmp")
+ files (nonlin, >> lincoefs)
+ fdnl = lincoefs
+ coef[1] = 1.0
+ for (i=2; i <= 7; i = i+1) {
+ dummy = fscan (fdnl, rval)
+ if (dummy == EOF) {
+ coef[i] = 0.0
+ } else {
+ coef[i] = rval
+ }
+ }
+
+ irlincor (im, im, section= "", coeff1=coef[1], coeff2=coef[2],
+ coeff3=coef[3], coeff4=coef[4], coeff5=coef[5], coeff6=coef[6],
+ coeff7=coef[7], maxadu=65535.0)
+ delete (lincoefs, ver-)
+ }
+
+ mknoise (im, background=0., gain=gain, rdnoise=ron, poisson=poisson,
+ seed=rseed, cosrays="", ncosrays=0, comments=no)
+
+ # decrement seed for next use
+ if (seed < 0)
+ seed.p_value = seed - 1
+
+ # Add zero level
+ # We add an extra 0.5 so that we nint rather than truncate when
+ # converting to short integer.
+ zeroval = zeroval + 0.5
+ mkimage (im, "add", zeroval, slope=zeroslope, sigma=0.)
+
+ # Set bad pixels.
+ if (access (badpix)) {
+ list = badpix
+ while (fscan (list, c1, c2, l1, l2) != EOF) {
+ if (nscan() != 4)
+ next
+ c1 = max (1, c1)
+ c2 = min (ncols, c2)
+ l1 = max (1, l1)
+ l2 = min (nlines, l2)
+ s = "["//c1//":"//c2//","//l1//":"//l2//"]"
+ mkimage (im//s, "replace", badval, slope=0., sigma=0.)
+ }
+ }
+
+ # Convert to ushort data type
+ chpixtype (im, im, "ushort", oldpixtype="all", ver-)
+
+ # Set image header
+ ccdhedit (im, "exptime", exp, type="real")
+ if (type != "")
+ ccdhedit (im, "imagetyp", type, type="string")
+
+ if (datasec != "") {
+ ccdhedit (im, "datasec", datasec, type="string")
+ }
+ if (trimsec != "")
+ ccdhedit (im, "trimsec", trimsec, type="string")
+ if (biassec != "")
+ ccdhedit (im, "biassec", biassec, type="string")
+ if (filter != "")
+ ccdhedit (im, "subset", filter, type="string")
+end
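
MKAMP accumulates sky, flat-field response, preflash, and dark signal, converts to ADU with the inverse gain, adds Poisson and readout noise through mknoise, and finally adds the zero (bias) level. The toy NumPy model below reproduces that chain under simplifying assumptions: the slope terms, flat-field response, bad pixels, and non-linearity are omitted, and the noise is applied directly rather than through mknoise.

    import numpy as np

    def simulate_amp(shape, exptime, skyrate=0.0, darkrate=0.0, flashval=0.0,
                     gain=1.0, ron=0.0, zeroval=0.0, rng=None):
        """Toy version of the mkamp signal model (electrons -> ADU -> noise)."""
        rng = np.random.default_rng() if rng is None else rng
        electrons = exptime * (skyrate + darkrate) + flashval
        signal = np.full(shape, float(electrons))
        counts = rng.poisson(signal) / gain             # Poisson noise, then ADU
        counts += rng.normal(0.0, ron / gain, shape)    # readout noise in ADU
        counts += zeroval                               # zero (bias) level
        return counts
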
diff --git a/noao/imred/quadred/src/quad/quadtest/mkimage.par b/noao/imred/quadred/src/quad/quadtest/mkimage.par
new file mode 100644
index 00000000..148bf7ea
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/mkimage.par
@@ -0,0 +1,10 @@
+image,s,a,,,,Image to make or modify
+option,s,a,,"make|replace|add|multiply",,Editing option
+value,r,a,,,,Mean pixel value
+slope,r,h,0.,,,Slope of pixel values
+sigma,r,h,0.,0.,,Noise sigma
+seed,i,h,0,0,,Seed for noise generator
+
+ndim,i,a,,1,7,Number of dimensions
+dims,s,a,,,,Image dimensions
+pixtype,s,h,"real","short|real",,Pixel datatype
diff --git a/noao/imred/quadred/src/quad/quadtest/mkquad.cl b/noao/imred/quadred/src/quad/quadtest/mkquad.cl
new file mode 100644
index 00000000..391d1a61
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/mkquad.cl
@@ -0,0 +1,222 @@
+# MKQUAD -- Make an artificial multi-readout image
+
+procedure mkquad (image, exptime, ccdtype)
+
+begin
+ string im, ccdt
+ real exp, sky
+
+ string amps, as, bs, cs, ds, ts, nampsyx, amplist
+ int tx1, tx2, ty1, ty2
+ int bx1, bx2, by1, by2
+ int cx1, cx2, cy1, cy2
+ int dx1, dx2, dy1, dy2
+ int txs1, txs2, tys1, tys2
+ int bxs1, bxs2, bys1, bys2
+ int nx, ny, dnx, dny, onx, ony
+ int nampx, nampy
+ bool use_amp[4]
+
+ im = image
+ if (access (im//".imh"))
+ error (0, "Output image already exists")
+
+ exp = exptime
+ ccdt = ccdtype
+ sky = skyrate
+ amps = ccdpars.amplifiers
+
+ nx = ccdpars.ncols
+ ny = ccdpars.nlines
+
+ # Set number of amplifiers and use_amp. This is a bit kludgy
+ if (amps == "Quad") {
+ nampx = 2
+ nampy = 2
+ use_amp[1] = yes
+ use_amp[2] = yes
+ use_amp[3] = yes
+ use_amp[4] = yes
+ amplist = "11 12 21 22"
+ } else if (amps == "LowerPair") {
+ nampx = 2
+ nampy = 1
+ use_amp[1] = yes
+ use_amp[2] = yes
+ use_amp[3] = no
+ use_amp[4] = no
+ amplist = "11 12"
+ } else if (amps == "UpperPair") {
+ nampx = 2
+ nampy = 1
+ use_amp[1] = no
+ use_amp[2] = no
+ use_amp[3] = yes
+ use_amp[4] = yes
+ amplist = "21 22"
+ } else if (amps == "LowerLeft") {
+ nampx = 1
+ nampy = 1
+ use_amp[1] = yes
+ use_amp[2] = no
+ use_amp[3] = no
+ use_amp[4] = no
+ amplist = "11"
+ }
+
+ # Parse sections strings.
+ ccdsection (ccdpars.trimsec) | scan (tx1, tx2, ty1, ty2)
+ tx1 = max (1, tx1)
+ tx2 = min (nx, tx2)
+ ty1 = max (1, ty1)
+ ty2 = min (ny, ty2)
+
+ ccdsection (ccdpars.biassec) | scan (bx1, bx2, by1, by2)
+ bx1 = max (1, bx1)
+ bx2 = min (nx, bx2)
+ by1 = max (1, by1)
+ by2 = min (ny, by2)
+
+ ccdsection (ccdpars.datasec) | scan (dx1, dx2, dy1, dy2)
+ dx1 = max (1, dx1)
+ dx2 = min (nx, dx2)
+ dy1 = max (1, dy1)
+ dy2 = min (ny, dy2)
+
+ # Number of pixels to trim
+ txs1 = tx1 - 1
+ txs2 = dx2 - tx2
+ tys1 = ty1 - 1
+ tys2 = dy2 - ty2
+
+ # Number of pixels to skip before overscan strip
+ bxs1 = bx1 - dx2 - 1
+ bxs2 = nx - bx2
+ bys1 = by1 - 1
+ bys2 = ny - by2
+
+ # Number of pixels in subimages
+ nx = nx / nampx
+ ny = ny / nampy
+ dnx = (dx2 - dx1 + 1) / nampx
+ dny = (dy2 - dy1 + 1) / nampy
+ onx = nx - dnx
+ ony = ny
+
+ # Set ampsec for all amps
+ printf ("[1:%d,1:%d]\n", nx, ny) | scan (as)
+
+ # Set sections for Amp11 & Amp21
+ dx1 = 1
+ dx2 = dx1 + dnx - 1
+ dy1 = 1
+ dy2 = dy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", dx1, dx2, dy1, dy2) | scan (ds)
+
+ tx1 = dx1 + txs1
+ tx2 = dx2
+ ty1 = dy1 + tys1
+ ty2 = dy2
+ printf ("[%d:%d,%d:%d]\n", tx1, tx2, ty1, ty2) | scan (ts)
+
+ bx1 = dx2 + bxs1 + 1
+ bx2 = nx - bxs2
+ by1 = 1 + bys1
+ by2 = ny
+ printf ("[%d:%d,%d:%d]\n", bx1, bx2, by1, by2) | scan (bs)
+
+ if (use_amp[1]) {
+ mkamp (im//".11", exp, ccdt, ncols=nx, nlines=ny,
+ filter=filter, datasec=ds, trimsec=ts, biassec=bs, imdata=imdata,
+ skyrate=sky, zeroval=ccdpars.zero1, zeroslope=zeroslope,
+ badpix=badpix, badval=badval, flashval=flashval,
+ flashslope=flashslope, darkrate=darkrate, darkslope=darkslope,
+ flatslope=flatslope, gain=ccdpars.gain1, ron=ccdpars.ron1,
+ nonlin=ccdpars.nlin1, poisson=poisson, overwrite=yes)
+ hedit (im//".11", "asec11", as, show-, ver-, add+)
+ cx1 = 1
+ cx2 = cx1 + dnx - 1
+ cy1 = 1
+ cy2 = cy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", cx1, cx2, cy1, cy2) | scan (cs)
+ hedit (im//".11", "ccdsec", cs, show-, ver-, add+)
+ }
+
+ if (use_amp[3]) {
+ mkamp (im//".21", exp, ccdt, ncols=nx, nlines=ny,
+ filter=filter, datasec=ds, trimsec=ts, biassec=bs, imdata=imdata,
+ skyrate=sky, zeroval=ccdpars.zero3, zeroslope=zeroslope,
+ badpix=badpix, badval=badval, flashval=flashval,
+ flashslope=flashslope, darkrate=darkrate, darkslope=darkslope,
+ flatslope=flatslope, gain=ccdpars.gain3, ron=ccdpars.ron3,
+ nonlin=ccdpars.nlin3, poisson=poisson, overwrite=yes)
+ hedit (im//".21", "asec21", as, show-, ver-, add+)
+ cx1 = 1
+ cx2 = cx1 + dnx - 1
+ cy1 = dny + 1
+ cy2 = cy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", cx1, cx2, cy1, cy2) | scan (cs)
+ hedit (im//".21", "ccdsec", cs, show-, ver-, add+)
+ }
+
+ # Set sections for Amp12 & Amp22
+ dx1 = onx + 1
+ dx2 = nx
+ dy1 = 1
+ dy2 = dy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", dx1, dx2, dy1, dy2) | scan (ds)
+
+ tx1 = dx1 + txs1
+ tx2 = dx2
+ ty1 = dy1 + tys1
+ ty2 = dy2
+ printf ("[%d:%d,%d:%d]\n", tx1, tx2, ty1, ty2) | scan (ts)
+
+ bx1 = 1 + bxs1
+ bx2 = onx - bxs2
+ by1 = 1 + bys1
+ by2 = ny
+ printf ("[%d:%d,%d:%d]\n", bx1, bx2, by1, by2) | scan (bs)
+
+ if (use_amp[2]) {
+ mkamp (im//".12", exp, ccdt, ncols=nx, nlines=ny,
+ filter=filter, datasec=ds, trimsec=ts, biassec=bs, imdata=imdata,
+ skyrate=sky, zeroval=ccdpars.zero2, zeroslope=zeroslope,
+ badpix=badpix, badval=badval, flashval=flashval,
+ flashslope=flashslope, darkrate=darkrate, darkslope=darkslope,
+ flatslope=flatslope, gain=ccdpars.gain2, ron=ccdpars.ron2,
+ nonlin=ccdpars.nlin2, poisson=poisson, overwrite=yes)
+ hedit (im//".12", "asec12", as, show-, ver-, add+)
+ cx1 = dnx + 1
+ cx2 = cx1 + dnx - 1
+ cy1 = 1
+ cy2 = cy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", cx1, cx2, cy1, cy2) | scan (cs)
+ hedit (im//".12", "ccdsec", cs, show-, ver-, add+)
+ }
+
+ if (use_amp[4]) {
+ mkamp (im//".22", exp, ccdt, ncols=nx, nlines=ny,
+ filter=filter, datasec=ds, trimsec=ts, biassec=bs, imdata=imdata,
+ skyrate=sky, zeroval=ccdpars.zero4, zeroslope=zeroslope,
+ badpix=badpix, badval=badval, flashval=flashval,
+ flashslope=flashslope, darkrate=darkrate, darkslope=darkslope,
+ flatslope=flatslope, gain=ccdpars.gain4, ron=ccdpars.ron4,
+ nonlin=ccdpars.nlin4, poisson=poisson, overwrite=yes)
+ hedit (im//".22", "asec22", as, show-, ver-, add+)
+ cx1 = dnx + 1
+ cx2 = cx1 + dnx - 1
+ cy1 = dny + 1
+ cy2 = cy1 + dny - 1
+ printf ("[%d:%d,%d:%d]\n", cx1, cx2, cy1, cy2) | scan (cs)
+ hedit (im//".22", "ccdsec", cs, show-, ver-, add+)
+ }
+
+	# Set NAMPSYX and amplist in header
+ nampsyx = str (nampy) // " " // str (nampx)
+ hedit (im//".??.imh", "nampsyx", nampsyx, show-, ver-, add+)
+ hedit (im//".??.imh", "amplist", amplist, show-, ver-, add+)
+
+ quadjoin (im, output="", delete=yes)
+
+end
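
MKQUAD derives the per-amplifier image size and the data and overscan widths by dividing the full-frame dimensions and data section by the number of amplifiers in each direction. The small sketch below captures that arithmetic; the values in the trailing comment assume the defaults in ccdpars.par.

    def per_amp_dims(ncols, nlines, datasec, nampx=2, nampy=2):
        """Per-amplifier sizes used by mkquad: nx, ny, dnx, dny, onx, ony.

        datasec is (dx1, dx2, dy1, dy2) in 1-based inclusive pixels, as
        printed by ccdsection.
        """
        dx1, dx2, dy1, dy2 = datasec
        nx, ny = ncols // nampx, nlines // nampy
        dnx = (dx2 - dx1 + 1) // nampx
        dny = (dy2 - dy1 + 1) // nampy
        onx, ony = nx - dnx, ny
        return nx, ny, dnx, dny, onx, ony

    # per_amp_dims(1060, 1024, (1, 1024, 1, 1024))
    #   -> (530, 512, 512, 512, 18, 512)
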
diff --git a/noao/imred/quadred/src/quad/quadtest/mkquad.par b/noao/imred/quadred/src/quad/quadtest/mkquad.par
new file mode 100644
index 00000000..662f315f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/mkquad.par
@@ -0,0 +1,4 @@
+image,s,a,,,,"Image name"
+exptime,r,a,,,,"Exposure time"
+ccdtype,s,a,,,,"CCD type"
+filter,s,h,"",,,"Filter"
diff --git a/noao/imred/quadred/src/quad/quadtest/quadtest.cl b/noao/imred/quadred/src/quad/quadtest/quadtest.cl
new file mode 100644
index 00000000..3129f636
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/quadtest.cl
@@ -0,0 +1,14 @@
+# QUADTEST -- QUAD Test package
+
+# load artdata package (mknoise)
+artdata
+
+package quadtest
+
+task mkimage = quadtest$x_ccdred.e
+task mkamp = quadtest$mkamp.cl
+task mkquad = quadtest$mkquad.cl
+task artobs = quadtest$artobs.cl
+task ccdpars = quadtest$ccdpars.par
+
+clbye()
diff --git a/noao/imred/quadred/src/quad/quadtest/quadtest.par b/noao/imred/quadred/src/quad/quadtest/quadtest.par
new file mode 100644
index 00000000..efe72ef5
--- /dev/null
+++ b/noao/imred/quadred/src/quad/quadtest/quadtest.par
@@ -0,0 +1,20 @@
+# QUADTEST -- package parameter file
+imdata,f,h,"",,,"Image data"
+skyrate,r,h,0.,,,"Sky count rate
+"
+zeroslope,r,h,0.,,,"Slope of zero value"
+flatslope,r,h,0.,,,"Flat field slope
+"
+badpix,f,h,"",,,"Bad pixel regions"
+badval,r,h,0.,,,"Bad pixel value
+"
+flashval,r,h,0.,,,"Preflash level value"
+flashslope,r,h,0.,,,"Slope of preflash level
+"
+darkrate,r,h,0.,,,"Dark count rate"
+darkslope,r,h,0.,,,"Slope of dark count rate
+"
+poisson,b,h,yes,,,"Add Poisson noise"
+seed,i,h,0,,,"Random number seed
+"
+version,s,h,"Version 1.0 - Oct 93","Version 1.0 - Oct 93"
diff --git a/noao/imred/quadred/src/quad/qzerocombine.cl b/noao/imred/quadred/src/quad/qzerocombine.cl
new file mode 100644
index 00000000..b8d554ae
--- /dev/null
+++ b/noao/imred/quadred/src/quad/qzerocombine.cl
@@ -0,0 +1,48 @@
+# ZEROCOMBINE -- Process and combine zero level CCD images.
+
+procedure zerocombine (input)
+
+string input {prompt="List of zero level images to combine"}
+file output="Zero" {prompt="Output zero level name"}
+string combine="average" {prompt="Type of combine operation",
+ enum="average|median"}
+string reject="minmax" {prompt="Type of rejection",
+ enum="none|minmax|ccdclip|crreject|sigclip|avsigclip|pclip"}
+string ccdtype="zero" {prompt="CCD image type to combine"}
+bool process=no {prompt="Process images before combining?"}
+bool delete=no {prompt="Delete input images after combining?"}
+bool clobber=no {prompt="Clobber existing output image?"}
+string scale="none" {prompt="Image scaling",
+ enum="none|mode|median|mean|exposure"}
+string statsec="" {prompt="Image section for computing statistics"}
+int nlow=0 {prompt="minmax: Number of low pixels to reject"}
+int nhigh=1 {prompt="minmax: Number of high pixels to reject"}
+int nkeep=1 {prompt="Minimum to keep (pos) or maximum to reject (neg)"}
+bool mclip=yes {prompt="Use median in sigma clipping algorithms?"}
+real lsigma=3. {prompt="Lower sigma clipping factor"}
+real hsigma=3. {prompt="Upper sigma clipping factor"}
+string rdnoise="0." {prompt="ccdclip: CCD readout noise (electrons)"}
+string gain="1." {prompt="ccdclip: CCD gain (electrons/DN)"}
+string snoise="0." {prompt="ccdclip: Sensitivity noise (fraction)"}
+real pclip=-0.5 {prompt="pclip: Percentile clipping parameter"}
+real blank=0. {prompt="Value if there are no pixels"}
+
+begin
+ string ims
+
+ ims = input
+
+ # Process images first if desired.
+ if (process == YES)
+ quadproc (ims, ccdtype=ccdtype)
+
+	# Combine the zero level images.
+ combine (ims, output=output, plfile="", sigma="", combine=combine,
+ reject=reject, ccdtype=ccdtype, subsets=no, delete=delete,
+ clobber=clobber, project=no, outtype="real", offsets="none",
+ masktype="none", blank=blank, scale=scale, zero="none", weight=no,
+ statsec=statsec, lthreshold=INDEF, hthreshold=INDEF, nlow=nlow,
+ nhigh=nhigh, nkeep=nkeep, mclip=mclip, lsigma=lsigma, hsigma=hsigma,
+ rdnoise=rdnoise, gain=gain, snoise=snoise, sigscale=0.1,
+ pclip=pclip, grow=0)
+end
diff --git a/noao/imred/quadred/src/quad/setinstrument.cl b/noao/imred/quadred/src/quad/setinstrument.cl
new file mode 100644
index 00000000..72361f89
--- /dev/null
+++ b/noao/imred/quadred/src/quad/setinstrument.cl
@@ -0,0 +1,58 @@
+# SETINSTRUMENT -- Set up instrument parameters for the CCD reduction tasks.
+#
+# This task sets default parameters based on an instrument ID.
+
+procedure setinstrument (instrument)
+
+char instrument {prompt="Instrument ID (type ? for a list)"}
+char site="ctio" {prompt="Site ID"}
+char directory="ccddb$" {prompt="Instrument directory"}
+bool review=yes {prompt="Review instrument parameters?"}
+char query {prompt="Instrument ID (type q to quit)",
+ mode="q"}
+
+begin
+ string inst, instdir, instmen, instfile
+
+ # Define instrument directory, menu, and file
+ instdir = directory
+ if (site != "")
+ instdir = instdir // site // "/"
+ instmen = instdir // "instruments.men"
+ inst = instrument
+ instfile = instdir // inst // ".dat"
+
+ # Loop until a valid instrument file is given.
+ while (inst != "" && !access (instfile)) {
+ if (access (instmen))
+ page (instmen)
+ else if (inst == "?")
+ print ("Instrument list ", instmen, " not found")
+ else
+ print ("Instrument file ", instfile, " not found")
+ print ("")
+ inst = query
+ if (inst == "q")
+ return
+ instrument = inst
+ instfile = instdir // inst // ".dat"
+ }
+
+ # Set instrument parameter.
+ if (access (instfile))
+ quadred.instrument = instfile
+ else
+ quadred.instrument = ""
+
+ # Run instrument setup script.
+ instfile = instdir // inst // ".cl"
+ if (access (instfile))
+ cl (< instfile)
+
+ # Review parameters if desired.
+ if (review) {
+ eparam ("quadred")
+ eparam ("qccdproc")
+ eparam ("quadproc")
+ }
+end
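
The loop in the script keeps prompting until an existing instrument .dat file is named, paging the instruments.men menu when it is available and quitting on a "q" response. A schematic Python version of that lookup loop, assuming plain files on disk rather than IRAF logical directories:

    import os

    def find_instrument_file(inst, instdir, prompt=input):
        """Prompt until <instdir>/<inst>.dat exists; return None on 'q'."""
        path = os.path.join(instdir, inst + ".dat")
        while inst and not os.path.exists(path):
            menu = os.path.join(instdir, "instruments.men")
            if os.path.exists(menu):
                print(open(menu).read())
            else:
                print("Instrument file %s not found" % path)
            inst = prompt("Instrument ID (type q to quit): ")
            if inst == "q":
                return None
            path = os.path.join(instdir, inst + ".dat")
        return path if os.path.exists(path) else None
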
diff --git a/noao/imred/quadred/src/quad/test.x b/noao/imred/quadred/src/quad/test.x
new file mode 100644
index 00000000..3df5240a
--- /dev/null
+++ b/noao/imred/quadred/src/quad/test.x
@@ -0,0 +1,71 @@
+include "quadgeom.h"
+
+procedure new ()
+
+char input[SZ_FNAME] #TI Input image name.
+char instrument[SZ_FNAME] #TI Instrument translation file
+
+pointer in, qg
+int xtrim1, xtrim2, ytrim1, ytrim2, xskip1, xskip2
+
+int clgeti()
+pointer immap()
+
+begin
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Open input image
+ call clgstr ("input", input, SZ_FNAME)
+ in = immap (input, READ_ONLY, 0)
+
+ xtrim1 = clgeti ("xtrim1")
+ xtrim2 = clgeti ("xtrim2")
+ ytrim1 = clgeti ("ytrim1")
+ ytrim2 = clgeti ("ytrim2")
+ xskip1 = clgeti ("xskip1")
+ xskip2 = clgeti ("xskip2")
+
+ # Set-up section translation
+ call quadalloc (qg)
+ call qghdr2 (in, qg)
+ call qguser (qg, xtrim1, xtrim2, ytrim1, ytrim2, xskip1, xskip2)
+ call quaddump (qg)
+
+ # Tidy up
+ call imunmap (in)
+ call quadfree (qg)
+ call hdmclose ()
+end
+
+procedure old ()
+
+char input[SZ_FNAME] #TI Input image name.
+char instrument[SZ_FNAME] #TI Instrument translation file
+
+pointer in, qg
+
+pointer immap()
+
+begin
+
+ # Open instrument file
+ call clgstr ("instrument", instrument, SZ_FNAME)
+ call hdmopen (instrument)
+
+ # Open input image
+ call clgstr ("input", input, SZ_FNAME)
+ in = immap (input, READ_ONLY, 0)
+
+ # Set-up section translation
+ call quadalloc (qg)
+ call quadgeom (in, qg, "", "")
+ call quaddump (qg)
+
+ # Tidy up
+ call imunmap (in)
+ call quadfree (qg)
+ call hdmclose ()
+end
diff --git a/noao/imred/quadred/src/quad/timelog.x b/noao/imred/quadred/src/quad/timelog.x
new file mode 100644
index 00000000..7a8d969f
--- /dev/null
+++ b/noao/imred/quadred/src/quad/timelog.x
@@ -0,0 +1,29 @@
+include <time.h>
+
+
+# TIMELOG -- Prepend a time stamp to the given string.
+#
+# For the purpose of history logging, prepend a short time stamp to the
+# given string. Note that the input string is modified.
+
+procedure timelog (str, max_char)
+
+char str[max_char] # String to be time stamped
+int max_char # Maximum characters in string
+
+pointer sp, time, temp
+long clktime()
+
+begin
+ call smark (sp)
+ call salloc (time, SZ_DATE, TY_CHAR)
+ call salloc (temp, max_char, TY_CHAR)
+
+ call cnvdate (clktime(0), Memc[time], SZ_DATE)
+ call sprintf (Memc[temp], max_char, "%s %s")
+ call pargstr (Memc[time])
+ call pargstr (str)
+ call strcpy (Memc[temp], str, max_char)
+
+ call sfree (sp)
+end
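
TIMELOG formats the current clock time and prepends it to the caller's string, truncating to the caller's buffer length. An equivalent couple of lines in Python, shown only to illustrate the resulting log format:

    import time

    def timelog(message, max_char=161):
        """Prepend a short time stamp, truncating to max_char characters."""
        stamp = time.strftime("%b %d %H:%M")   # e.g. "Dec 09 12:21"
        return ("%s %s" % (stamp, message))[:max_char]
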
diff --git a/noao/imred/quadred/src/quad/x_quad.x b/noao/imred/quadred/src/quad/x_quad.x
new file mode 100644
index 00000000..8000dabf
--- /dev/null
+++ b/noao/imred/quadred/src/quad/x_quad.x
@@ -0,0 +1,14 @@
+task ccdsection = t_ccdsection,
+ quadsplit = t_quadsplit,
+ quadjoin = t_quadjoin,
+ ccddelete = t_ccddelete,
+ ccdgetparam = ccdgetparam,
+ ccdprcselect = t_ccdprcselect,
+ ccdssselect = t_ccdsubsetselect,
+ qpcalimage = t_qpcalimage,
+ qpselect = t_qpselect,
+ quadscale = t_quadscale,
+ quadsections = t_quadsections,
+ gainmeasure = t_gainmeasure,
+ new = new,
+ old = old
diff --git a/noao/imred/specred/Revisions b/noao/imred/specred/Revisions
new file mode 100644
index 00000000..890668dc
--- /dev/null
+++ b/noao/imred/specred/Revisions
@@ -0,0 +1,167 @@
+.help revisions Jun88 noao.imred.msred
+.nf
+
+=======
+V2.12.3
+=======
+
+specred.cl
+specred.men
+ 1. Added ODCOMBINE and LSCOMBINE to the package.
+ 2. Modified SCOMBINE to point to new executable.
+ (6/21/04, Valdes)
+
+=====
+V2.12
+=====
+
+imred$specred/msresp1d.cl
+ Modified to use "imtype" rather than hardwired to "imh". This uses
+ the same code as srcfibers$fibresponse.cl.
+ (2/29/00, Valdes)
+
+=========
+V2.11.3p1
+=========
+=======
+V2.11.3
+=======
+
+imred$specred/doc/msresp1d.hlp
+	The package name was incorrect and the requirement that the unextracted
+ spectrum must be present even if the extracted spectrum is present
+ was made clearer. (9/20/99, Valdes)
+
+=======
+V2.11.2
+=======
+
+imred$specred/doc/dofibers.hlp
+imred$specred/doc/doslit.hlp
+imred$specred/doc/dofibers.ms
+imred$specred/doc/doslit.ms
+ Updated for change where if both crval and cdelt are INDEF then the
+ automatic identification is not done. (5/2/96, Valdes)
+
+imred$specred/specred.cl
+ Increased the minimum min_lenuserarea from 40000 to 100000.
+ (7/31/96, Valdes)
+
+imred$specred/doc/doslit.hlp
+imred$specred/doc/doslit.ms
+ Updated for the addition of automatic arc line identification. (4/9/96)
+
+imred$specred/sparams.par
+ Changed match from 10 to -3. (4/5/96, Valdes)
+
+imred$specred/dofibers.cl
+imred$specred/dofibers.par
+imred$specred/params.par
+imred$specred/doc/dofibers.hlp
+imred$specred/doc/dofibers.ms
+imred$specred/doc/doslit.hlp
+imred$specred/doc/doslit.ms
+ Added crval/cdelt parameters used in new version with automatic arc
+ line identification. (4/5/96, Valdes)
+
+imred$specred/doc/msresp1d.hlp
+ Added a sentence to say that extracted throughput (sky) spectra are
+ flat fielded before computing the throughput values. (1/9/96, Valdes)
+
+imred$specred/doc/dofibers.hlp
+imred$specred/doc/dofibers.ms
+ Describes the new header option for the aperture identification table.
+ (7/25/95, Valdes)
+
+imred$specred/specred.cl
+imred$specred/dofibers.cl
+imred$specred/dofibers.par
+imred$specred/doc/dofibers.hlp
+imred$specred/doc/dofibers.ms
+ Added sky alignment option. (7/19/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+specred/specred.cl
+specred/specred.men
+ 1. Added background, illumination, and response.
+ 2. Renamed the fiber response task to "fibresponse".
+ (12/29/94, Valdes)
+
+imred$specred/dofibers.cl
+ There was an incorrect default value for the dispaxis parameter:
+ )_dispaxis --> )_.dispaxis. (8/24/92, Valdes)
+
+=============
+V2.10-V2.10.1
+=============
+
+imred$msred --> imred$specred
+ Renamed the package to a more generic name since it is for both
+ multiple aperture/fiber data as well as generic slit data.
+ (2/20/92, Valdes)
+
+imred$msred/msred.cl
+imred$msred/msred.hd
+ Added generic slit and fiber reduction tasks to make this a complete
+ generic package. (2/19/92, Valdes)
+
+imred$msred/msresp1d.cl
+ Improved task to include throughput file (2/18/92, Valdes)
+
+imred$msred/msresp1d.cl +
+imred$msred/doc/msresp1d.hlp +
+imred$msred/msred.cl
+imred$msred/msred.hd
+ 1. Added new task MSRESP1D modeled after imred$src/fibers/response but
+ using APALL instead of APSCRIPT and all the connections to PARAMS.
+ 2. Added new task SKYSUB from imred$src/fibers.
+
+imred$msred/msbplot.cl -
+imred$msred/doc/msdispcor.hlp -
+imred$msred/doc/msbplot.hlp -
+imred$msred/msred.hd
+ 1. Moved help to ONEDSPEC.
+
+imred$msred/*
+ Updated to new APEXTRACT and ONEDSPEC packages.
+ (7/13/90, Valdes)
+
+====
+V2.9
+====
+
+imred$msred/msbplot.cl +
+imred$msred/doc/msbplot.hlp +
+imred$msred/msred.cl
+imred$msred/msred.men
+imred$msred/msred.hd
+ Added a version of ECBPLOT (written by Rob Seaman) to the package with
+ appropriate changes to refer to lines and apertures instead of
+ orders. (10/27/89, Valdes)
+
+imred$msred/doc/msdispcor.hlp
+ Tried to make the description of the global parameter clearer. Also
+ fixed an incorrect example. (8/29/89, Valdes)
+
+====
+V2.8
+====
+
+imred$msred/apnormalize.par +
+imred$msred/msred.cl
+imred$msred/msred.men
+ Added APNORMALIZE to the package. (6/1/89, Valdes)
+
+imred$msred/standard.par
+ Removed ennumerated list. (4/10/89, Valdes)
+
+imred$msred/* +
+imred$msred/doc/* +
+ New package specialized for reducing spectra in "multispec" format.
+ It includes new tasks for doing dispersion solutions in multispec
+ format spectra. (3/29/89, Valdes)
+
+.endhelp
diff --git a/noao/imred/specred/doc/dofibers.hlp b/noao/imred/specred/doc/dofibers.hlp
new file mode 100644
index 00000000..d5a893a7
--- /dev/null
+++ b/noao/imred/specred/doc/dofibers.hlp
@@ -0,0 +1,1531 @@
+.help dofibers Jul95 noao.imred.specred
+.ih
+NAME
+dofibers -- Multifiber data reduction task
+.ih
+USAGE
+dofibers objects
+.ih
+SUMMARY
+The \fBdofibers\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of multifiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by multifiber instruments. Variants of this task are
+\fBdoargus\fR, \fBdofoe\fR, \fBdohydra\fR, and \fBdo3fiber\fR.
+.ih
+PARAMETERS
+.ls objects
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.le
+.ls apref = ""
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.le
+.ls flat = "" (optional)
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.le
+.ls throughput = "" (optional)
+Throughput file or image. If an image is specified, typically a blank
+sky observation, the total flux through
+each fiber is used to correct for fiber throughput. If a file consisting
+of lines with the aperture number and relative throughput is specified
+then the fiber throughput will be corrected by those values. If neither
+is specified but a flat field image is given it is used to compute the
+throughput.
+.le
+.ls arcs1 = "" (at least one if dispersion correcting)
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.le
+.ls arcs2 = "" (optional)
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIparams.sort\fR, such as the observation time is made.
+.le
+
+.ls readnoise = "0." (apsum)
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.le
+.ls gain = "1." (apsum)
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls fibers = 97 (apfind)
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image. The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.le
+.ls width = 12. (apedit)
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.le
+.ls minsep = 8. (apfind)
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.le
+.ls maxsep = 15. (apfind)
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.le
+.ls apidtable = "" (apfind)
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title. An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. Unassigned and broken fibers (beam of -1) should be included in the
+identification information since they will automatically be excluded.
+.le
+.ls crval = INDEF, cdelt = INDEF (autoidentify)
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.le
+.ls objaps = "", skyaps = "", arcaps = ""
+List of object, sky, and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Typically the different spectrum types are
+identified by their beam numbers and the default, null string,
+lists select all apertures.
+.le
+.ls objbeams = "0,1", skybeams = "0", arcbeams = 2
+List of object, sky, and arc beam numbers. The convention is that sky
+fibers are given a beam number of 0, object fibers a beam number of 1, and
+arc fibers a beam number of 2. The beam numbers are typically set in the
+\fIapidtable\fR. Unassigned or broken fibers may be given a beam number of
+-1 in the aperture identification table since apertures with negative beam
+numbers are not extracted. Note it is valid to identify sky fibers as both
+object and sky.
+.le
+
+.ls scattered = no (apscatter)
+Smooth and subtracted scattered light from the object and flat field
+images. This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The initial
+flat field, or if none is given the aperture reference image, is
+done interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.le
+.ls fitflat = yes (flat1d)
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.le
+.ls clean = yes (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameters
+can be useful.
+.le
+.ls dispcor = yes
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.le
+.ls skyalign = no
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.le
+.ls savearcs = yes
+Save any simultaneous arc apertures? If no then the arc apertures will
+be deleted after use.
+.le
+.ls skysubtract = yes
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.le
+.ls skyedit = yes
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.le
+.ls saveskys = yes
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.le
+.ls splot = no
+Plot the final spectra with the task \fBsplot\fR?
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.le
+.ls update = yes
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.le
+.ls batch = no
+Process spectra as a background or batch job provided there are no interactive
+options (\fIskyedit\fR and \fIsplot\fR) selected.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls params = "" (pset)
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
+.le
+
+.ce
+-- PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdofibers\fR.
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.le
+.ls observatory = "observatory"
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. See \fBobservatory\fR for more
+details.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls database = "database"
+Database (directory) used for storing aperture and dispersion information.
+.le
+.ls verbose = no
+Print verbose information available with various tasks.
+.le
+.ls logfile = "logfile", plotfile = ""
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.le
+.ls records = ""
+Dummy parameter to be ignored.
+.le
+.ls version = "SPECRED: ..."
+Version of the package.
+.le
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdofibers\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls order = "decreasing" (apfind)
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.le
+.ls extras = no (apsum)
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -5., upper = 5. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 3 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+.ls buffer = 1. (apscatter)
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.le
+.ls apscat1 = "" (apscatter)
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.le
+.ls apscat2 = "" (apscatter)
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum) (fit1d|fit2d)
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for multifiber data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+.ls nsubaps = 1 (apsum)
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.le
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+.ls f_interactive = yes (fit1d)
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.le
+.ls f_function = "spline3", f_order = 10 (fit1d)
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (autoidentify/identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.le
+.ls match = -3. (autoidentify/identify)
+The maximum difference for a match between the dispersion function prediction
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (autoidentify/identify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 10. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "spline3", i_order = 3 (autoidentify/identify)
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The functions choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.le
+.ls i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (reidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+.ls addfeatures = no (reidentify)
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there are not both a preceding and
+a following spectrum then the nearest spectrum is used. The interpolation
+is weighted by the relative distances of the sorting parameter.
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.le
+.le
+.ls sort = "jd", group = "ljd" (refspectra)
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be used to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdofibers\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+.ls combine = "average" (scombine) (average|median)
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (scombine) (none|minmax|avsigclip)
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.fi
+
+.le
+.ls scale = "none" (none|mode|median|mean)
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
+.ih
+DESCRIPTION
+The \fBdofibers\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of multifiber spectra. It is a command language script
+which collects and combines the functions and parameters of many general
+purpose tasks to provide a single complete data reduction path. The task
+provides a degree of guidance, automation, and record keeping necessary
+when dealing with the large amount of data generated by multifiber
+instruments. Variants of this task are \fBdoargus\fR, \fBdofoe\fR,
+\fBdohydra\fR, and \fBdo3fiber\fR.
+
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \fIredo\fR and \fIupdate\fR options, skip or
+repeat some or all the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdofibers\fR combines many separate, general purpose tasks the
+description given here refers to these tasks and leaves some of the details
+to their help documentation.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdofibers\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+.le
+.ls [2]
+Set the \fBdofibers\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Specify the aperture identification table for the configuration
+if one has been created. If the image headers contain the SLFIB keywords
+specify an image name; typically the same as the aperture reference
+image.
+You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may
+change with different detector setups. The processing parameters are set
+for complete reductions but for quicklook you might not use the clean
+option or dispersion calibration and sky subtraction.
+
+The parameters are set for a particular configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers may have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.le
+.ls [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.le
+.ls [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or to quit and accept the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.le
+.ls [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.le
+.ls [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.le
+.ls [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.le
+.ls [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.le
+.ls [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.le
+.ls [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.le
+.ls [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.le
+.ls [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.le
+.ls [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra will
+also have part of the aperture identification table name added to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+.le
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of multifiber object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This
+is generally done using the \fBccdred\fR package.
+The \fBdofibers\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+Flat fielding is
+generally not done at this stage but as part of \fBdofibers\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
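+
+For data reduced outside of \fBccdred\fR a command along the following
+lines might be used to add the dummy keyword (the image template and
+keyword value here are only illustrative):
+
+.nf
+	cl> hedit obj*.imh ccdproc "outside IRAF" add+ verify- show+ update+
+.fi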
+
+The task \fBdofibers\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, auxiliary
+mercury line (from the dome lights) or sky line spectra, and simultaneous
+arc spectra taken during the object observation. The flat field,
+throughput image or file, auxiliary emission line spectra, and simultaneous
+comparison fibers are optional. If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+
+There are three types of arc calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the most common method. Another method is to
+use only one or two all-fiber arcs to define the shape of the dispersion
+function and track zero point wavelength shifts with \fIsimultaneous arc\fR
+fibers taken during the object exposure. The simultaneous arcs may or may
+not be available at the instrument but \fBdofibers\fR can use this type of
+observation. The arc fibers are identified by their beam or aperture
+numbers. A related and mutually exclusive method is to use \fIauxiliary
+line spectra\fR such as lines in the dome lights or sky lines to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient.
+
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdofibers\fR.
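+
+As an illustration, such a table is a simple text file in which each line
+gives an object image name followed by one or two arc image names (all of
+the names below are only placeholders):
+
+.nf
+	cl> type arcassign
+	obj0010  arc0009
+	obj0011  arc0009  arc0012
+.fi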
+
+The first step in the processing is identifying the spectra in the images.
+The \fIaperture identification table\fR contains information about the fiber
+assignments. The identification table is not mandatory (sequential numbering
+will be used in its absence), but it is highly recommended for keeping
+track of the objects assigned to the fibers. The aperture identification
+table may be
+a file containing lines
+specifying an aperture number, a beam number, and an object
+identification. It may also be an image whose header contains the keywords
+SLFIB with strings consisting of an aperture number, beam number, optional
+right ascension and declination, and a title. The file lines or keywords
+must be in the same order as the fibers in the
+image. The aperture number may be any unique number but it is recommended
+that the fiber number be used. The beam number is used to flag object,
+sky, arc, or other types of spectra. The default beam numbers used by the
+task are 0 for sky, 1 for object, and 2 for arc. The object
+identifications are optional but it is good practice to include them so
+that the data will contain the object information independent of other
+records. Figure 1 shows an example identification file called M33Sch2.
+.nf
+
+ Figure 1: Example Aperture Identification File
+
+ cl> type m33sch2
+ 1 1 143
+ 2 1 254
+ 3 0 sky
+ 4 -1 Broken
+ 5 2 arc
+ .
+ .
+ .
+ 44 1 s92
+ 45 -1 Unassigned
+ 46 2 arc
+ 47 0 sky
+ 48 1 phil2
+
+.fi
+Note the identification of the sky fibers with beam number 0, the object
+fibers with 1, and the arc fibers with 2.
+The broken and unassigned fiber entries, given beam
+number -1, are optional but recommended to give the automatic spectrum
+finding operation the best chance to make the correct identifications. The
+identification table will vary for each plugboard setup. Additional
+information about the aperture identification table may be found in the
+description of the task \fBapfind\fR.
+
+An alternative to using an aperture identification table is to give no
+table name (the empty string "") and to explicitly give ranges of
+aperture numbers for the skys, and possibly for the sky subtraction
+object list, in the parameters \fIobjaps, skyaps, arcaps, objbeams,
+skybeams,\fR and \fIarcbeams\fR. This is reasonable if the fibers always
+have a fixed type. As an example the CTIO Argus instrument always
+alternates object and sky fibers so the object apertures can be given
+as 1x2 and the sky fibers as 2x2; i.e. objects are the odd numbered
+apertures and skys are the even numbered apertures.
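+
+For such an alternating arrangement the assignments might be made directly
+in the task parameters, following the range notation quoted above (this is
+only a sketch; no aperture identification table is used):
+
+.nf
+	cl> dofibers.apidtable = ""
+	cl> dofibers.objaps = "1x2"
+	cl> dofibers.skyaps = "2x2"
+.fi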
+
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \fIextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
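+
+For example, a single aperture might be copied out of a multispec image
+into its own one dimensional image with \fBscopy\fR (the names and aperture
+number are only illustrative):
+
+.nf
+	cl> scopy demoobj.ms ap12 apertures=12 format="onedspec"
+.fi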
+
+\fBPackage Parameters\fR
+
+The \fBspecred\fR package parameters set parameters affecting all the
+tasks in the package.
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is used if there is no
+OBSERVAT keyword in the image header (see \fBobservatory\fR for more
+details). The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdofibers\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
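+
+A typical setup, assuming the default file names, is to enable the logs
+and later page through the plot record with \fBgkimosaic\fR:
+
+.nf
+	cl> specred.logfile = "logfile"
+	cl> specred.plotfile = "plotfile"
+	cl> gkimosaic plotfile
+.fi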
+
+\fBProcessing Parameters\fR
+
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs are the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs are the auxiliary emission line exposures mentioned
+previously.
+
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. This is specified either explicitly or by reference
+to an image header keyword.
+The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra from the aperture identification table are to
+be correctly skipped. The number of fibers is either the actual number
+of fibers or the number in the aperture identification table. An attempt
+is made to account for unassigned or missing fibers. As a recommendation
+the actual number of fibers should be specified.
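+
+As an illustration, the detector and geometry parameters might be set to
+header keywords or explicit values (the keyword names and fiber count below
+are only examples and depend on the instrument):
+
+.nf
+	cl> dofibers.readnoise = "RDNOISE"
+	cl> dofibers.gain = "GAIN"
+	cl> dofibers.fibers = 48
+.fi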
+
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
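+
+For example, approximate values (or header keyword names) may be supplied;
+the numbers here are arbitrary placeholders:
+
+.nf
+	cl> dofibers.crval = 6000.
+	cl> dofibers.cdelt = -1.5
+.fi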
+
+The task needs to know which fibers are object, sky if sky subtraction is
+to be done, and simultaneous arcs if used. One could explicitly give the
+aperture numbers but the recommended way, provided an aperture
+identification table is used, is to select the apertures based on the beam
+numbers. The default values are recommended beam numbers. Sky
+subtracted sky spectra are useful for evaluating the sky subtraction.
+Since only the spectra identified as objects are sky subtracted one can
+exclude fibers from the sky subtraction. For example, if the
+\fIobjbeams\fR parameter is set to 1 then only those fibers with a beam of
+1 will be sky subtracted. All other fibers will remain in the extracted
+spectra but will not be sky subtracted.
+
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \fIclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR tasks. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference. If all input spectra are to be processed regardless of
+previous processing the \fIredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
+
+The \fIbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
+
+The \fIlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
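+
+A quick check of what would be done, assuming the objects are listed in an
+"@ file" and the other processing parameters have already been set, is
+simply:
+
+.nf
+	cl> dofibers @objects.lis listonly+
+.fi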
+
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. Because of the way \fBdofibers\fR
+works this parameter may not be given any other value; the parameter set
+\fBparams\fR is always used. The algorithm parameters are discussed further
+in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the \fBdofibers\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdofibers\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+multifiber data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.nf
+
+ cl> epar params
+
+.fi
+or simply typing \fIparams\fR. The parameter editor can also be
+entered when editing the \fBdofibers\fR parameters by typing \fI:e
+params\fR or simply \fI:e\fR if positioned at the \fIparams\fR
+parameter.
+
+\fBExtraction\fR
+
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \fInsubaps\fR control the extractions.
+
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \fInsum\fR parameter.
+
+The order parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no table is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\fIextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
+
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \fIminsep\fR
+distance, and then keeping the specified \fInfibers\fR highest peaks. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \fIwidth\fR parameter.
+
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \fIlower\fR and
+\fIupper\fR parameters. The trickiest part of assigning the apertures is
+relating the aperture identification from the aperture identification table
+to automatically selected fiber profiles. The first aperture id in the
+file is assigned to the first spectrum found using the \fIorder\fR parameter to
+select the assignment direction. The numbering proceeds in this way except
+that if a gap greater than a multiple of the \fImaxsep\fR parameter is
+encountered then assignments in the file are skipped under the assumption
+that a fiber is missing (broken). If unassigned fibers are still
+visible in a flat field, either by design or by scattered light, the
+unassigned fibers can be included in the number of fibers to find and
+then the unassigned (negative beam number) apertures are excluded from
+any extraction. For more on the finding and
+assignment algorithms see \fBapfind\fR.
+
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \fIylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended. As mentioned previously, the
+correct identification of the fibers is tricky and it is fundamentally
+important that this be done correctly; otherwise the spectrum
+identifications will not be for the objects they say. An important command in
+this regard is the 'o' key which allows reordering the identifications
+based on the aperture identification table. This is required if the first
+fiber is actually missing since the initial assignment begins assigning the
+first spectrum found with the first entry in the aperture file. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \fIt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
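+
+If the default spline does not follow the traces well the fitting parameters
+can be changed in the parameter set beforehand; the values below are merely
+an illustration:
+
+.nf
+	cl> params.t_function = "legendre"
+	cl> params.t_order = 5
+	cl> params.t_niterate = 2
+.fi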
+
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \fIreadnoise\fR and \fIgain\fR detector
+parameters. Note that if the \fIclean\fR option is selected the variance
+weighted extraction is used regardless of the \fIweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
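+
+For instance, variance weighted extraction with cleaning might be selected
+as follows (the rejection thresholds shown are only an example):
+
+.nf
+	cl> dofibers.clean = yes
+	cl> params.weights = "variance"
+	cl> params.lsigma = 4.
+	cl> params.usigma = 4.
+.fi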
+
+The last parameter, \fInsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
+
+\fBScattered Light Subtraction\fR
+
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
+
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+
+\fBFlat Field and Fiber Throughput Corrections\fR
+
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdofibers\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdofibers\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
+
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \fIfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \fIf_function\fR and
+\fIf_order\fR. If the parameter \fIf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the sky flat field parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
+
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+
+\fBDispersion Correction\fR
+
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdofibers\fR task tries to simplify for you. There are three basic
+steps involved; determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether
+or not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
+
+The set of arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
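+
+As an example, for a Th-Ar comparison lamp the line list might be changed
+from the default; the exact file name depends on the line lists installed
+in "linelists$":
+
+.nf
+	cl> params.coordlist = "linelists$thar.dat"
+.fi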
+
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+multifiber spectra. However, there are some other calibration options
+which may be of interest. These options apply additional calibration data
+consisting either of auxiliary line spectra, such as from dome lights or
+night sky lines, or simultaneous arc lamp spectra taken through a few
+fibers during the object exposure. These options add complexity to the
+dispersion calibration process.
+
+When only arc comparison lamp spectra are used, dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image. When two bracketing arc spectra are used the dispersion functions
+are linearly interpolated (usually based on the time of the observations).
+
+If taking comparison exposures is time-consuming, possibly requiring
+reconfiguration to illuminate the fibers, and the spectrograph is
+expected to be fairly stable apart from small shifts, there are two
+mutually exclusive methods for monitoring
+shifts in the dispersion zero point from the basic arc lamp spectra other
+than taking many arc lamp exposures. One is to use some fibers to take a
+simultaneous arc spectrum while observing the program objects. The fibers
+are identified by aperture or beam numbers. The second method is to use
+\fIauxiliary line spectra\fR, such as mercury lines from the dome lights.
+These spectra are specified with an auxiliary shift arc list, \fIarcs2\fR.
+
+When using auxiliary line spectra for monitoring zero point shifts one of
+these spectra is plotted interactively by \fBidentify\fR with the
+reference dispersion function from the reference arc spectrum. The user
+marks one or more lines which will be used to compute zero point wavelength
+shifts in the dispersion functions automatically. The actual wavelengths
+of the lines need not be known. In this case accept the wavelength based
+on the reference dispersion function. As other observations of the same
+features are made the changes in the positions of the features will be
+tracked as zero point wavelength changes such that wavelengths of the
+features remain constant.
+
+When using auxiliary line spectra the only arc lamp spectrum used is the
+initial arc reference spectrum (the first image in the \fIarcs1\fR list).
+The master dispersion functions are then shifted based on the spectra in
+the \fIarcs2\fR list (which must all be of the same type). The dispersion
+function assignments made by \fBrefspectra\fR using either the arc
+assignment file or based on header keywords are done in the same way as
+described for the arc lamp images except using the auxiliary spectra.
+
+If simultaneous arcs are used the arc lines are reidentified to determine a
+zero point shift relative to the comparison lamp spectra selected, by
+\fBrefspectra\fR, of the same fiber. A linear function of aperture
+position on the image across the dispersion versus the zero point shifts
+from the arc fibers is determined and applied to the dispersion functions
+from the assigned calibration arcs for the non-arc fibers. Note that if
+there are two comparison lamp spectra (before and after the object
+exposure) then there will be two shifts applied to two dispersion functions
+which are then combined using the weights based on the header parameters
+(usually the observation time).
+
+The arc assignments may be done either explicitly with an arc assignment
+table (parameter \fIarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
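+
+The computed keywords may be inspected with \fBhselect\fR; the image
+template below is only illustrative:
+
+.nf
+	cl> hselect obj*.ms.imh "$I,JD,LJD,UTMIDDLE" yes
+.fi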
+
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \fIlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
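+
+As a sketch, and assuming the package containing this task is loaded so
+that the \fBparams\fR parameter set is defined, the resampling behavior
+described above may also be set directly from the command line:
+
+.nf
+cl> params.linearize = yes       # resample to evenly spaced pixels
+cl> params.log = no              # linear rather than logarithmic bins
+cl> params.flux = yes            # conserve flux when resampling
+.fi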
+
+\fBSky Subtraction\fR
+
+Sky subtraction is selected with the \fIskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers and
+combined into a single master sky spectrum
+which is then subtracted from each object spectrum. If the \fIskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual skys are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+but in that case you probably did the wrong thing in the throughput
+correction. If the sky subtraction is done interactively, i.e. with the
+\fIskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
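+
+A sketch of how these choices might be adjusted before running the task,
+again assuming the \fBparams\fR parameter set is defined by loading the
+package, is:
+
+.nf
+cl> params.combine = "median"        # or "average"
+cl> params.reject = "avsigclip"      # requires at least three sky fibers
+cl> params.scale = "none"
+.fi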
+
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra, such as comparison
+arc spectra, are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \fIsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is also the sequence performed
+by the test procedure "demos dohydra" from the \fBhydra\fR package.
+
+.nf
+sp> hydra
+hy> demos mkhydra
+Creating image demoobj ...
+Creating image demoflat ...
+Creating image demoarc ...
+hy> bye
+sp> type demoapid
+===> demoapid <===
+36 1
+37 0
+38 1
+39 1
+41 0
+42 1
+43 1
+44 0
+45 1
+46 -1
+47 0
+48 1
+sp> specred.verbose = yes
+sp> dofibers demoobj apref=demoflat flat=demoflat arcs1=demoarc \
+>>> fib=12 apid=demoapid width=4. minsep=5. maxsep=7. clean- splot+
+Set reference apertures for demoflat
+Resize apertures for demoflat? (yes):
+Edit apertures for demoflat? (yes):
+<Exit with 'q'>
+Fit curve to aperture 36 of demoflat interactively (yes):
+<Exit with 'q'>
+Fit curve to aperture 37 of demoflat interactively (yes): N
+Create response function demoflatdemoad.ms
+Extract flat field demoflat
+Fit and ratio flat field demoflat
+<Exit with 'q'>
+Create the normalized response demoflatdemoad.ms
+demoflatdemoad.ms -> demoflatdemoad.ms using bzero: 0.
+ and bscale: 1.000001
+ mean: 1.000001 median: 1.052665 mode: 1.273547
+ upper: INDEF lower: INDEF
+Average fiber response:
+1. 1.151023
+2. 0.4519709
+3. 1.250614
+4. 1.287281
+5. 1.271358
+6. 0.6815334
+7. 1.164336
+8. 0.7499605
+9. 1.008654
+10. 1.053296
+11. 0.929967
+Extract arc reference image demoarc
+Determine dispersion solution for demoarc
+<A solution is found and presented.>
+<Type 'f' to look at fit. Type 'q' to exit fit.>
+<Exit with 'q'>
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Tue 16:01:07 11-Feb-92
+ Reference image = d....ms.imh, New image = d....ms, Refit = yes
+ Image Data Found Fit Pix Shift User Shift Z Shift RMS
+d....ms - Ap 41 16/20 16/16 0.00796 0.0682 8.09E-6 3.86
+Fit dispersion function interactively? (no|yes|NO|YES) (NO): y
+<Exit with 'q'>
+d....ms - Ap 41 16/20 16/16 0.00796 0.0682 8.09E-6 3.86
+d....ms - Ap 39 19/20 19/19 0.152 1.3 1.95E-4 3.89
+Fit dispersion function interactively? (no|yes|NO|YES) (yes): N
+d....ms - Ap 39 19/20 19/19 0.152 1.3 1.95E-4 3.89
+d....ms - Ap 38 18/20 18/18 0.082 0.697 9.66E-5 3.64
+d....ms - Ap 37 19/20 19/19 0.0632 0.553 1.09E-4 6.05
+d....ms - Ap 36 18/20 18/18 0.0112 0.0954 1.35E-5 4.12
+d....ms - Ap 43 17/20 17/17 0.0259 0.221 3.00E-5 3.69
+d....ms - Ap 44 19/20 19/19 0.168 1.44 2.22E-4 4.04
+d....ms - Ap 45 20/20 20/20 0.18 1.54 2.35E-4 3.95
+d....ms - Ap 47 18/20 18/18 -2.02E-4 0.00544 9.86E-6 4.4
+d....ms - Ap 48 16/20 16/16 0.00192 0.0183 1.44E-6 3.82
+
+Dispersion correct demoarc
+d....ms.imh: w1 = 5748.07..., w2 = 7924.62..., dw = 8.50..., nw = 257
+ Change wavelength coordinate assignments? (yes|no|NO): n
+Extract object spectrum demoobj
+Assign arc spectra for demoobj
+[demoobj] refspec1='demoarc'
+Dispersion correct demoobj
+demoobj.ms.imh: w1 = 5748.078, w2 = 7924.622, dw = 8.502127, nw = 257
+Sky subtract demoobj: skybeams=0
+Edit the sky spectra? (yes):
+<Exit with 'q'>
+Sky rejection option (none|minmax|avsigclip) (avsigclip):
+demoobj.ms.imh:
+Splot spectrum? (no|yes|NO|YES) (yes):
+Image line/aperture to plot (1:) (1):
+<Look at spectra and change apertures with # key>
+<Exit with 'q'>
+.fi
+.ih
+REVISIONS
+.ls DOFIBERS V2.11
+A sky alignment option was added.
+
+The aperture identification can now be taken from image header keywords.
+
+The initial arc line identification is done with the automatic line
+identification algorithm.
+.le
+.ls DOFIBERS V2.10.3
+The usual output WCS format is "equispec". The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if they have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. A scattered
+light subtraction processing option has been added.
+.le
+.ih
+SEE ALSO
+apedit, apfind, approfiles, aprecenter, apresize, apsum, aptrace,
+apvariance, ccdred, center1d, doargus, dohydra, dofoe, do3fiber, dispcor,
+fit1d, icfit, identify, msresp1d, observatory, onedspec.package,
+refspectra, reidentify, scombine, setairmass, setjd, specplot, splot
+.endhelp
diff --git a/noao/imred/specred/doc/dofibers.ms b/noao/imred/specred/doc/dofibers.ms
new file mode 100644
index 00000000..c0196563
--- /dev/null
+++ b/noao/imred/specred/doc/dofibers.ms
@@ -0,0 +1,1807 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND July 1995
+.TL
+Guide to the Multifiber Reduction Task DOFIBERS
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+The \fBdofibers\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of multifiber spectra. It is a
+command language script which collects and combines the functions and
+parameters of many general purpose tasks to provide a single complete data
+reduction path. The task provides a degree of guidance, automation, and
+record keeping necessary when dealing with the large amount of data
+generated by multifiber instruments. Variants of this task are
+\fBdoargus\fR, \fBdofoe\fR, \fBdohydra\fR, and \fBdo3fiber\fR.
+.AE
+.NH
+Introduction
+.LP
+The \fBdofibers\fR reduction task is specialized for scattered light
+subtraction, extraction, flat
+fielding, fiber throughput correction, wavelength calibration, and sky
+subtraction of multifiber spectra. It is a command language script
+which collects and combines the functions and parameters of many general
+purpose tasks to provide a single complete data reduction path. The task
+provides a degree of guidance, automation, and record keeping necessary
+when dealing with the large amount of data generated by multifiber
+instruments. Variants of this task are \fBdoargus\fR, \fBdofoe\fR,
+\fBdohydra\fR, and \fBdo3fiber\fR.
+.LP
+The general organization of the task is to do the interactive setup steps
+first using representative calibration data and then perform the majority
+of the reductions automatically, and possibly as a background process, with
+reference to the setup data. In addition, the task determines which setup
+and processing operations have been completed in previous executions of the
+task and, contingent on the \f(CWredo\fR and \f(CWupdate\fR options, skips or
+repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage. Since
+\fBdofibers\fR combines many separate, general purpose tasks the
+description given here refers to these tasks and leaves some of the details
+to their help documentation.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+bias, and dark corrections.
+The \fBdofibers\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+.IP [2]
+Set the \fBdofibers\fR parameters with \fBeparam\fR. Specify the object
+images to be processed, the flat field image as the aperture reference and
+the flat field, and one or more arc images. A throughput file or image,
+such as a blank sky observation, may also be specified. If there are many
+object or arc spectra per setup you might want to prepare "@ files".
+Specify the aperture identification table for the configuration
+if one has been created. If the image headers contain the SLFIB keywords
+specify an image name; typically the same as the aperture reference
+image.
+You might wish to verify the geometry parameters,
+separations, dispersion direction, etc., which may
+change with different detector setups. The processing parameters are set
+for complete reductions but for quicklook you might not use the clean
+option or dispersion calibration and sky subtraction.
+
+The parameters are set for a particular configuration and different
+configurations may use different flat fields, arcs, and aperture
+identification tables.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the execution and no further queries of that
+type will be made.
+.IP [4]
+The apertures are defined using the specified aperture reference image.
+The spectra are found automatically and apertures assigned based on
+task parameters and the aperture identification table. Unassigned
+fibers may have a negative beam number and will be ignored in subsequent
+processing. The resize option sets the aperture size to the widths of
+the profiles at a fixed fraction of the peak height. The interactive
+review of the apertures is recommended. If the identifications are off
+by a shift the 'o' key is used. To exit the aperture review type 'q'.
+.IP [5]
+The fiber positions at a series of points along the dispersion are measured
+and a function is fit to these positions. This may be done interactively to
+adjust the fitting parameters. Not all fibers need be examined and the "NO"
+response will quit the interactive fitting. To exit the interactive
+fitting type 'q'.
+.IP [6]
+If scattered light subtraction is to be done the flat field image is
+used to define the scattered light fitting parameters interactively.
+If one is not specified then the aperture reference image is used for
+this purpose.
+
+There are two queries for the interactive fitting. A graph of the
+data between the defined reference apertures separated by a specified
+buffer distance is first shown. The function order and type may be
+adjusted. After quitting with 'q' the user has the option of changing
+the buffer value and returning to the fitting, changing the image line
+or column to check if the fit parameters are satisfactory at other points,
+or quitting and accepting the fit parameters. After fitting all points
+across the dispersion another graph showing the scattered light from
+the individual fits is shown and the smoothing parameters along the
+dispersion may be adjusted. Upon quitting with 'q' you have the option
+of checking other cuts parallel to the dispersion or quitting and finishing
+the scattered light function smoothing and subtraction.
+
+If there is a throughput image then this is corrected for scattered light
+noninteractively using the previous fitting parameters.
+.IP [7]
+If flat fielding is to be done the flat field spectra are extracted. The
+average spectrum over all fibers is determined and a function is fit
+interactively (exit with 'q'). This function is generally of sufficiently
+high order that the overall shape is well fit. This function is then used
+to normalize the individual flat field spectra. If a throughput image, a
+sky flat, is specified then the total sky counts through each fiber are
+used to correct the total flat field counts. Alternatively, a separately
+derived throughput file can be used for specifying throughput corrections.
+If neither type of throughput is used the flat field also provides the
+throughput correction. The final response spectra are normalized to a unit
+mean over all fibers. The relative average throughput for each fiber is
+recorded in the log and possibly printed to the terminal.
+.IP [8]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The middle fiber is used to identify the arc lines and define
+the dispersion function using the task \fBautoidentify\fR. The
+\fIcrval\fR and \fIcdelt\fR parameters are used in the automatic
+identification. Whether or not the automatic identification is
+successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc
+lines with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.IP [9]
+The remaining fibers are automatically reidentified. You have the option
+to review the line identifications and dispersion function for each fiber
+and interactively add or delete arc lines and change fitting parameters.
+This can be done selectively, such as when the reported RMS increases
+significantly.
+.IP [10]
+If the spectra are to be resampled to a linear dispersion system
+(which will be the same for all spectra) default dispersion parameters
+are printed and you are allowed to adjust these as desired.
+.IP [11]
+If the sky line alignment option is selected and the sky lines have not
+been identified for a particular aperture identification table then you are
+asked to mark one or more sky lines. You may simply accept the wavelengths
+of these lines as defined by the dispersion solution for this spectrum and
+fiber or you may specify known wavelengths for the lines. These lines will
+be reidentified in all object spectra extracted and a mean zeropoint shift
+will be added to the dispersion solution. This has the effect of aligning
+these lines to optimize sky subtraction.
+.IP [12]
+The object spectra are now automatically scattered light subtracted,
+extracted, flat fielded, and dispersion corrected.
+.IP [13]
+When sky subtracting, the individual sky spectra may be reviewed and some
+spectra eliminated using the 'd' key. The last deleted spectrum may be
+recovered with the 'e' key. After exiting the review with 'q' you are
+asked for the combining option. The type of combining is dictated by the
+number of sky fibers.
+.IP [14]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'.
+.IP [15]
+If scattered light is subtracted from the input data a copy of the
+original image is made by appending "noscat" to the image name.
+If the data are reprocessed with the \fIredo\fR flag the original
+image will be used again to allow modification of the scattered
+light parameters.
+
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added. The flat field and arc spectra will
+also have part of the aperture identification table name added to
+allow different configurations to use the same 2D flat field and arcs
+but with different aperture definitions. If using the sky alignment
+option an image "align" with the aperture identification table name
+applied will also be created.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of multifiber object and
+calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must
+be processed to remove overscan, bias, and dark count effects. This
+is generally done using the \fBccdred\fR package.
+The \fBdofibers\fR task will abort if the image header keyword CCDPROC,
+which is added by \fBccdproc\fR, is missing. If the data were processed outside
+of the IRAF \fBccdred\fR package then a dummy CCDPROC keyword should be
+added to the image headers; say with \fBhedit\fR.
+Flat fielding is
+generally not done at this stage but as part of \fBdofibers\fR.
+If flat fielding is done as part of the basic CCD processing then
+a flattened flat field, blank sky observation, or throughput file
+should still be created for applying fiber throughput corrections.
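+.LP
+As a concrete illustration of the dummy keyword mentioned above, data
+processed outside of IRAF might be flagged with \fBhedit\fR roughly as
+follows (the image list is hypothetical):
+.V1
+
+    cl> hedit obj*.imh ccdproc "Processed outside IRAF" add+ verify- show+
+
+.V2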
+.LP
+The task \fBdofibers\fR uses several types of calibration spectra. These
+are flat fields, blank sky flat fields, comparison lamp spectra, auxiliary
+mercury line (from the dome lights) or sky line spectra, and simultaneous
+arc spectra taken during the object observation. The flat field,
+throughput image or file, auxiliary emission line spectra, and simultaneous
+comparison fibers are optional. If a flat field is used then the sky flat
+or throughput file is optional assuming the flat field has the same fiber
+illumination. It is legal to specify only a throughput image or file and
+leave the flat field blank in order to simply apply a throughput
+correction. Because only the total counts through each fiber are used from
+a throughput image, sky flat exposures need not be of high signal per
+pixel.
+.LP
+There are three types of arc calibration methods. One is to take arc
+calibration exposures through all fibers periodically and apply the
+dispersion function derived from one or interpolated between pairs to the
+object fibers. This is the most common method. Another method is to
+use only one or two all-fiber arcs to define the shape of the dispersion
+function and track zero point wavelength shifts with \fIsimultaneous arc\fR
+fibers taken during the object exposure. The simultaneous arcs may or may
+not be available at the instrument but \fBdofibers\fR can use this type of
+observation. The arc fibers are identified by their beam or aperture
+numbers. A related and mutually exclusive method is to use \fIauxiliary
+line spectra\fR such as lines in the dome lights or sky lines to monitor
+shifts relative to a few actual arc exposures. The main reason to do this
+is if taking arc exposures through all fibers is inconvenient.
+.LP
+The assignment of arc or auxiliary line calibration exposures to object
+exposures is generally done by selecting the nearest in time and
+interpolating. There are other options possible which are described under
+the task \fBrefspectra\fR. The most general option is to define a table
+giving the object image name and the one or two arc spectra to be assigned
+to that object. That file is called an \fIarc assignment table\fR and it
+is one of the optional setup files which can be used with \fBdofibers\fR.
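+.LP
+An arc assignment table is a simple text file in which each line gives an
+object image name followed by the one or two arc images to be assigned to
+it. A hypothetical example (all image names are illustrative) is:
+.V1
+
+    cl> type arcassign.dat
+    obj0012  arc0010  arc0015
+    obj0013  arc0010  arc0015
+    obj0021  arc0020
+
+.V2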
+.LP
+The first step in the processing is identifying the spectra in the images.
+The \fIaperture identification table\fR contains information about the fiber
+assignments. The identification table is not mandatory (sequential numbering
+will be used if it is absent), but it is highly recommended for keeping track
+of the objects assigned to the fibers. The aperture identification table may be
+a file containing lines
+specifying an aperture number, a beam number, and an object
+identification. It may also be an image whose header contains the keywords
+SLFIB with strings consisting of an aperture number, beam number, optional
+right ascension and declination, and a title. The file lines or keywords
+must be in the same order as the fibers in the
+image. The aperture number may be any unique number but it is recommended
+that the fiber number be used. The beam number is used to flag object,
+sky, arc, or other types of spectra. The default beam numbers used by the
+task are 0 for sky, 1 for object, and 2 for arc. The object
+identifications are optional but it is good practice to include them so
+that the data will contain the object information independent of other
+records. Figure 1 shows an example aperture identification file
+called M33Sch2.
+.V1
+
+.ce
+Figure 1: Example Aperture Identification File
+
+ cl> type m33sch2
+ 1 1 143
+ 2 1 254
+ 3 0 sky
+ 4 -1 Broken
+ 5 2 arc
+ .
+ .
+ .
+ 44 1 s92
+ 45 -1 Unassigned
+ 46 2 arc
+ 47 0 sky
+ 48 1 phil2
+
+.V2
+Note the identification of the sky fibers with beam number 0, the object
+fibers with 1, and the arc fibers with 2.
+The broken and unassigned fiber entries, given beam
+number -1, are optional but recommended to give the automatic spectrum
+finding operation the best chance to make the correct identifications. The
+identification table will vary for each plugboard setup. Additional
+information about the aperture identification table may be found in the
+description of the task \fBapfind\fR.
+.LP
+An alternative to using an aperture identification table is to give no
+name, the "" empty string, and to explicitly give a range of
+aperture numbers for the skys and possibly for the sky subtraction
+object list in the parameters \f(CWobjaps, skyaps, arcaps, objbeams,
+skybeams,\fR and \f(CWarcbeams\fR. This is reasonable if the fibers always
+have a fixed type. As an example the CTIO Argus instrument always
+alternates object and sky fibers so the object apertures can be given
+as 1x2 and the sky fibers as 2x2; i.e. objects are the odd numbered
+apertures and skys are the even numbered apertures.
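+.LP
+For such a fixed configuration one might, as a sketch, leave the table
+name blank and set the aperture selection parameters directly (the
+parameter names are those shown in Figure 3):
+.V1
+
+    cl> dofibers.apidtable = ""
+    cl> dofibers.objaps = "1x2"
+    cl> dofibers.skyaps = "2x2"
+
+.V2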
+.LP
+The final reduced spectra are recorded in two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. When the \f(CWextras\fR parameter is set the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tools such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR. The special task \fBscopy\fR may be used to extract
+specific apertures or to change format to individual one dimensional
+images.
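+.LP
+For instance, using the demo data from the DOFIBERS help page, a single
+fiber could be pulled out of the multispec image into a one dimensional
+spectrum with \fBscopy\fR (the output name is arbitrary):
+.V1
+
+    cl> scopy demoobj.ms aper36 apertures=36 format=onedspec
+
+.V2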
+.NH
+Package Parameters
+.LP
+The \fBspecred\fR package parameters, shown in Figure 2, set parameters
+affecting all the tasks in the package.
+.KS
+.V1
+
+.ce
+Figure 2: Package Parameter Set for SPECRED
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = specred
+
+(extinct= onedstds$kpnoextinct.dat) Extinction file
+(caldir = onedstds$spec16redcal/) Standard star calibration directory
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+(dispaxi= 2) Image axis for 2D images
+(nsum = 1) Number of lines/columns to sum for 2D images
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Log file
+(plotfil= ) Plot file
+
+(records= ) Record number extensions
+(version= SPECRED V3: April 1992)
+
+.KE
+.V2
+The dispersion axis parameter defines the image axis along which the
+dispersion runs. This is used if the image header doesn't define the
+dispersion axis with the DISPAXIS keyword.
+The observatory parameter is used if there is no
+OBSERVAT keyword in the image header (see \fBobservatory\fR for more
+details). The spectrum interpolation type might be changed to "sinc" but
+with the cautions given in \fBonedspec.package\fR.
+The other parameters define the standard I/O functions.
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdofibers\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of apertures, traces, and extracted spectra
+but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
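+.LP
+These package parameters may be set from the command line as well as with
+\fBeparam\fR; for example:
+.V1
+
+    cl> specred.verbose = yes
+    cl> specred.logfile = "logfile"
+    cl> specred.dispaxis = 1
+
+.V2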
+.NH
+Processing Parameters
+.LP
+The \fBdofibers\fR parameters are shown in Figure 3.
+.KS
+.V1
+
+.ce
+Figure 3: Parameter Set for DOFIBERS
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = specred
+ TASK = dofibers
+
+objects = List of object spectra
+(apref = ) Aperture reference spectrum
+(flat = ) Flat field spectrum
+(through= ) Throughput file or image (optional)
+(arcs1 = ) List of arc spectra
+(arcs2 = ) List of shift arc spectra
+(arctabl= ) Arc assignment table (optional)
+
+.KE
+.V1
+(readnoi= 0.) Read out noise sigma (photons)
+(gain = 1.) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(fibers = 97) Number of fibers
+(width = 12.) Width of profiles (pixels)
+(minsep = 8.) Minimum separation between fibers (pixels)
+(maxsep = 15.) Maximum separation between fibers (pixels)
+(apidtab= ) Aperture identifications
+(crval = INDEF) Approximate wavelength
+(cdelt = INDEF) Approximate dispersion
+(objaps = ) Object apertures
+(skyaps = ) Sky apertures
+(arcaps = ) Arc apertures
+(objbeam= 0,1) Object beam numbers
+(skybeam= 0) Sky beam numbers
+(arcbeam= ) Arc beam numbers
+
+(scatter= no) Subtract scattered light?
+(fitflat= yes) Fit and ratio flat field spectrum?
+(clean = yes) Detect and replace bad pixels?
+(dispcor= yes) Dispersion correct spectra?
+(savearc= yes) Save simultaneous arc apertures?
+(skysubt= yes) Subtract sky?
+(skyedit= yes) Edit the sky spectra?
+(savesky= yes) Save sky spectra?
+(splot = no) Plot the final spectrum?
+(redo = no) Redo operations if previously done?
+(update = yes) Update spectra if cal data changes?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(params = ) Algorithm parameters
+
+.V2
+The list of objects and arcs can be @ files if desired. The aperture
+reference spectrum is usually the same as the flat field spectrum though it
+could be any exposure with enough signal to accurately define the positions
+and trace the spectra. The first list of arcs are the standard Th-Ar or
+HeNeAr comparison arc spectra (they must all be of the same type). The
+second list of arcs are the auxiliary emission line exposures mentioned
+previously.
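+.LP
+A typical command line invocation, with hypothetical image and list names
+and following the style of the worked example in the DOFIBERS help page,
+might be:
+.V1
+
+    cl> dofibers @objects.lis apref=flat012 flat=flat012 \
+    >>> arcs1=@arcs.lis apidtable=plug1.dat fibers=48
+
+.V2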
+.LP
+The detector read out noise and gain are used for cleaning and variance
+(optimal) extraction. They are specified either explicitly or by reference
+to image header keywords.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays; ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR parameter is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
+The dispersion axis defines the wavelength direction of spectra in
+the image if not defined in the image header by the keyword DISPAXIS. The
+width and separation parameters define the dimensions (in pixels) of the
+spectra (fiber profile) across the dispersion. The width parameter
+primarily affects the centering. The maximum separation parameter is
+important if missing spectra from the aperture identification table are to
+be correctly skipped. The number of fibers is either the actual number
+of fibers or the number in the aperture identification table. An attempt
+is made to account for unassigned or missing fibers. As a recommendation
+the actual number of fibers should be specified.
+.LP
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
+.LP
+The task needs to know which fibers are object, sky if sky subtraction is
+to be done, and simultaneous arcs if used. One could explicitly give the
+aperture numbers but the recommended way, provided an aperture
+identification table is used, is to select the apertures based on the beam
+numbers. The default values are recommended beam numbers. Sky
+subtracted sky spectra are useful for evaluating the sky subtraction.
+Since only the spectra identified as objects are sky subtracted one can
+exclude fibers from the sky subtraction. For example, if the
+\f(CWobjbeams\fR parameter is set to 1 then only those fibers with a beam of
+1 will be sky subtracted. All other fibers will remain in the extracted
+spectra but will not be sky subtracted.
+.LP
+The next set of parameters select the processing steps and options. The
+scattered light option allows fitting and subtracting a scattered light
+surface from the input object and flat field. If there is significant
+scattered light which is not subtracted the fiber throughput correction
+will not be accurate. The
+flat fitting option allows fitting and removing the overall shape of the
+flat field spectra while preserving the pixel-to-pixel response
+corrections. This is useful for maintaining the approximate object count
+levels and not introducing the reciprocal of the flat field spectrum into
+the object spectra. The \f(CWclean\fR option invokes a profile fitting and
+deviant point rejection algorithm as well as a variance weighting of points
+in the aperture. These options require knowing the effective (i.e.
+accounting for any image combining) read out noise and gain. For a
+discussion of cleaning and variance weighted extraction see
+\fBapvariance\fR and \fBapprofiles\fR.
+.LP
+The dispersion correction option selects whether to extract arc spectra,
+determine a dispersion function, assign them to the object spectra, and,
+possibly, resample the spectra to a linear (or log-linear) wavelength
+scale. If simultaneous arc fibers are defined there is an option to delete
+them from the final spectra when they are no longer needed.
+.LP
+The sky alignment option allows applying a zeropoint dispersion shift
+to all fibers based on one or more sky lines. This requires all fibers
+to have the sky lines visible. When there are sky lines this will
+improve the sky subtraction if there is a systematic error in the
+fiber illumination between the sky and the arc calibration.
+.LP
+The sky subtraction option selects whether to combine the sky fiber spectra
+and subtract this sky from the object fiber spectra. \fIDispersion
+correction and sky subtraction are independent operations.\fR This means
+that if dispersion correction is not done then the sky subtraction will be
+done with respect to pixel coordinates. This might be desirable in some
+quick look cases though it is incorrect for final reductions.
+.LP
+The sky subtraction option has two additional options. The individual sky
+spectra may be examined and contaminated spectra deleted interactively
+before combining. This can be a useful feature in crowded regions. The
+final combined sky spectrum may be saved for later inspection in an image
+with the spectrum name prefixed by \fBsky\fR.
+.LP
+After a spectrum has been processed it is possible to examine the results
+interactively using the \fBsplot\fR tasks. This option has a query which
+may be turned off with "YES" or "NO" if there are multiple spectra to be
+processed.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a new
+aperture identification table, a new reference image, new flat fields, and a
+new arc reference. If all input spectra are to be processed regardless of
+previous processing the \f(CWredo\fR flag may be used. Note that
+reprocessing clobbers the previously processed output spectra.
+.LP
+The \f(CWbatch\fR processing option allows object spectra to be processed as
+a background or batch job. This will only occur if sky spectra editing and
+\fBsplot\fR review (interactive operations) are turned off, either when the
+task is run or by responding with "NO" to the queries during processing.
+.LP
+The \f(CWlistonly\fR option prints a summary of the processing steps which
+will be performed on the input spectra without actually doing anything.
+This is useful for verifying which spectra will be affected if the input
+list contains previously processed spectra. The listing does not include
+any arc spectra which may be extracted to dispersion calibrate an object
+spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to another
+parameter set for the algorithm parameters. The way \fBdofibers\fR works
+this parameter need not have any value and the parameter set \fBparams\fR is always
+used. The algorithm parameters are discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the \fBdofibers\fR
+task and the parameters which control and modify the algorithms. The
+algorithm parameters available to the user are collected in the parameter
+set \fBparams\fR. These parameters are taken from the various general
+purpose tasks used by the \fBdofibers\fR processing task. Additional
+information about these parameters and algorithms may be found in the help
+for the actual task executed. These tasks are identified in the parameter
+section listing in parentheses. The aim of this parameter set organization
+is to collect all the algorithm parameters in one place separate from the
+processing parameters and include only those which are relevant for
+multifiber data. The parameter values can be changed from the
+defaults by using the parameter editor,
+.V1
+
+ cl> epar params
+
+.V2
+or simply typing \f(CWparams\fR. The parameter editor can also be
+entered when editing the \fBdofibers\fR parameters by typing \f(CW:e
+params\fR or simply \f(CW:e\fR if positioned at the \f(CWparams\fR
+parameter. Figure 4 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 4: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = specred
+ TASK = params
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(order = decreasing) Order of apertures
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -5.) Lower aperture limit relative to center
+(upper = 5.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 3) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- SCATTERED LIGHT PARAMETERS --
+(buffer = 1.) Buffer distance from apertures
+(apscat1= ) Fitting parameters across the dispersion
+(apscat2= ) Fitting parameters along the dispersion
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+(nsubaps= 1) Number of subapertures
+
+.KE
+.KS
+.V1
+ -- FLAT FIELD FUNCTION FITTING PARAMETERS --
+(f_inter= yes) Fit flat field interactively?
+(f_funct= spline3) Fitting function
+(f_order= 10) Fitting function order
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli=linelists$idhenear.dat) Line list
+(match = -3.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 10.) Centering radius in pixels
+(i_funct= spline3) Coordinate function
+(i_order= 3) Order of dispersion function
+(i_niter= 2) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+(addfeat= no) Add features when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.KS
+.V1
+ -- SKY SUBTRACTION PARAMETERS --
+(combine= average) Type of combine operation
+(reject = avsigclip) Sky rejection option
+(scale = none) Sky scaling option
+
+.KE
+.V2
+.NH 2
+Extraction
+.LP
+The identification of the spectra in the two dimensional images and their
+scattered light subtraction and extraction to one dimensional spectra
+in multispec format is accomplished
+using the tasks from the \fBapextract\fR package. The first parameters
+through \f(CWnsubaps\fR control the extractions.
+.LP
+The dispersion line is that used for finding the spectra, for plotting in
+the aperture editor, and as the starting point for tracing. The default
+value of \fBINDEF\fR selects the middle of the image. The aperture
+finding, adjusting, editing, and tracing operations also allow summing a
+number of dispersion lines to improve the signal. The number of lines is
+set by the \f(CWnsum\fR parameter.
+.LP
+The order parameter defines whether the order of the aperture
+identifications in the aperture identification table (or the default
+sequential numbers if no file is used) is in the same sense as the image
+coordinates (increasing) or the opposite sense (decreasing). If the
+aperture identifications turn out to be opposite to what is desired when
+viewed in the aperture editing graph then simply change this parameter.
+.LP
+The basic data output by the spectral extraction routines are the one
+dimensional spectra. Additional information may be output when the
+\f(CWextras\fR option is selected and the cleaning or variance weighting
+options are also selected. In this case a three dimensional image is
+produced with the first element of the third dimension being the cleaned
+and/or weighted spectra, the second element being the uncleaned and
+unweighted spectra, and the third element being an estimate of the sigma
+of each pixel in the extracted spectrum. Currently the sigma data is not
+used by any other tasks and is only for reference.
+.LP
+The initial step of finding the fiber spectra in the aperture reference
+image consists of identifying the peaks in a cut across the dispersion,
+eliminating those which are closer to each other than the \f(CWminsep\fR
+distance, and then keeping the specified \f(CWnfibers\fR highest peaks. The
+centers of the profiles are determined using the \fBcenter1d\fR algorithm
+which uses the \f(CWwidth\fR parameter.
+.LP
+Apertures are then assigned to each spectrum. The initial edges of the
+aperture relative to the center are defined by the \f(CWlower\fR and
+\f(CWupper\fR parameters. The trickiest part of assigning the apertures is
+relating the aperture identification from the aperture identification table
+to automatically selected fiber profiles. The first aperture id in the
+file is assigned to the first spectrum found using the \f(CWorder\fR
+parameter to select the assignment direction. The numbering proceeds in
+this way except that if a gap greater than a multiple of the \f(CWmaxsep\fR
+parameter is encountered then assignments in the file are skipped under the
+assumption that a fiber is missing (broken). If unassigned fibers are
+still visible in a flat field, either by design or by scattered light, the
+unassigned fibers can be included in the number of fibers to find and then
+the unassigned (negative beam number) apertures are excluded from any
+extraction. For more on the finding and assignment algorithms see
+\fBapfind\fR.
+.LP
+The initial apertures are the same for all spectra but they can each be
+automatically resized. The automatic resizing sets the aperture limits
+at a fraction of the peak relative to the interfiber minimum.
+The default \f(CWylevel\fR is to resize the apertures to 5% of the peak.
+See the description for the task \fBapresize\fR for further details.
+.LP
+The user is given the opportunity to graphically review and adjust the
+aperture definitions. This is recommended. As mentioned previously, the
+correct identification of the fibers is tricky and it is fundamentally
+important that this be done correctly; otherwise the spectrum
+identifications will not correspond to the actual objects. An important command in
+this regard is the 'o' key which allows reordering the identifications
+based on the aperture identification table. This is required if the first
+fiber is actually missing since the initial assignment begins assigning the
+first spectrum found with the first entry in the aperture file. The
+aperture editor is a very powerful tool and is described in detail as
+\fBapedit\fR.
+.LP
+The next set of parameters control the tracing and function fitting of the
+aperture reference positions along the dispersion direction. The position
+of a spectrum across the dispersion is determined by the centering
+algorithm (see \fBcenter1d\fR) at a series of evenly spaced steps, given by
+the parameter \f(CWt_step\fR, along the dispersion. The step size should be
+fine enough to follow position changes but it is not necessary to measure
+every point. The fitted points may jump around a little bit due to noise
+and cosmic rays even when summing a number of lines. Thus, a smooth
+function is fit. The function type, order, and iterative rejection of
+deviant points is controlled by the other trace parameters. For more
+discussion consult the help pages for \fBaptrace\fR and \fBicfit\fR. The
+default is to fit a cubic spline of three pieces with a single iteration of
+3 sigma rejection.
+.LP
+The actual extraction of the spectra by summing across the aperture at each
+point along the dispersion is controlled by the next set of parameters.
+The default extraction simply sums the pixels using partial pixels at the
+ends. The options allow selection of a weighted sum based on a Poisson
+variance model using the \f(CWreadnoise\fR and \f(CWgain\fR detector
+parameters. Note that if the \f(CWclean\fR option is selected the variance
+weighted extraction is used regardless of the \f(CWweights\fR parameter. The
+sigma thresholds for cleaning are also set in the \fBparams\fR parameters.
+For more on the variance weighted extraction and cleaning see
+\fBapvariance\fR and \fBapprofiles\fR as well as \fBapsum\fR.
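+.LP
+As a sketch, variance weighted extraction with cleaning might be enabled
+as follows, where RDNOISE and GAIN stand for whatever header keywords (or
+numerical values) describe the detector:
+.V1
+
+    cl> dofibers.readnoise = "RDNOISE"
+    cl> dofibers.gain = "GAIN"
+    cl> dofibers.clean = yes
+
+.V2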
+.LP
+The last parameter, \f(CWnsubaps\fR, is used only in special cases when it is
+desired to subdivide the fiber profiles into subapertures prior to
+dispersion correction. After dispersion correction the subapertures are
+then added together. The purpose of this is to correct for wavelength
+shifts across a fiber.
+.NH 2
+Scattered Light Subtraction
+.LP
+Scattered light may be subtracted from the input two dimensional image as
+the first step. This is done using the algorithm described in
+\fBapscatter\fR. This can be important if there is significant scattered
+light since the flat field/throughput correction will otherwise be
+incorrect. The algorithm consists of fitting a function to the data
+outside the defined apertures by a specified \fIbuffer\fR at each line or
+column across the dispersion. The function fitting parameters are the same
+at each line. Because the fitted functions are independent at each line or
+column a second set of one dimensional functions are fit parallel to the
+dispersion using the evaluated fit values from the cross-dispersion step.
+This produces a smooth scattered light surface which is finally subtracted
+from the input image. Again the function fitting parameters are the
+same at each line or column though they may be different than the parameters
+used to fit across the dispersion.
+.LP
+The first time the task is run with a particular flat field (or aperture
+reference image if no flat field is used) the scattered light fitting
+parameters are set interactively using that image. The interactive step
+selects a particular line or column upon which the fitting is done
+interactively with the \fBicfit\fR commands. A query is first issued
+which allows skipping this interactive stage. Note that the interactive
+fitting is only for defining the fitting functions and orders. When
+the graphical \fBicfit\fR fitting is exited (with 'q') there is a second prompt
+allowing you to change the buffer distance (in the first cross-dispersion
+stage) from the apertures, change the line/column, or finally quit.
+.LP
+The initial fitting parameters and the final set parameters are recorded
+in the \fBapscat1\fR and \fBapscat2\fR hidden parameter sets. These
+parameters are then used automatically for every subsequent image
+which is scattered light corrected.
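+.LP
+The two hidden parameter sets may be reviewed or edited directly with the
+parameter editor; for example:
+.V1
+
+    cl> epar apscat1
+    cl> epar apscat2
+
+.V2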
+.LP
+The scattered light subtraction modifies the input 2D images. To preserve
+the original data a copy of the original image is made with the same
+root name and the word "noscat" appended. The scattered light subtracted
+images will have the header keyword "APSCATTE" which is how the task
+avoids repeating the scattered light subtraction during any reprocessing.
+However if the \fIredo\fR option is selected the scattered light subtraction
+will also be redone by first restoring the "noscat" images to the original
+input names.
+.NH 2
+Flat Field and Fiber Throughput Corrections
+.LP
+Flat field corrections may be made during the basic CCD processing; i.e.
+direct division by the two dimensional flat field observation. In that
+case do not specify a flat field spectrum; use the null string "". The
+\fBdofibers\fR task provides an alternative flat field response correction
+based on division of the extracted object spectra by the extracted flat field
+spectra. A discussion of the theory and merits of flat fielding directly
+versus using the extracted spectra will not be made here. The
+\fBdofibers\fR flat fielding algorithm is the \fIrecommended\fR method for
+flat fielding since it works well and is not subject to the many problems
+involved in two dimensional flat fielding.
+.LP
+In addition to correcting for pixel-to-pixel response the flat field step
+also corrects for differences in the fiber throughput. Thus, even if the
+pixel-to-pixel flat field corrections have been made in some other way it
+is desirable to use a sky or dome flat observation for determining a fiber
+throughput correction. Alternatively, a separately derived throughput
+file may be specified. This file consists of the aperture numbers
+(the same as used for the aperture reference) and relative throughput
+numbers.
+.LP
+The first step is extraction of the flat field spectrum, if specified,
+using the reference apertures. Only one flat field is allowed so if
+multiple flat fields are required the data must be reduced in groups.
+After extraction one or more corrections are applied. If the \f(CWfitflat\fR
+option is selected (the default) the extracted flat field spectra are
+averaged together and a smooth function is fit. The default fitting
+function and order are given by the parameters \f(CWf_function\fR and
+\f(CWf_order\fR. If the parameter \f(CWf_interactive\fR is "yes" then the
+fitting is done interactively using the \fBfit1d\fR task which uses the
+\fBicfit\fR interactive fitting commands.
+.LP
+The fitted function is divided into the individual flat field spectra to
+remove the basic shape of the spectrum while maintaining the relative
+individual pixel responses and any fiber to fiber differences. This step
+avoids introducing the flat field spectrum shape into the object spectra
+and closely preserves the object counts.
+.LP
+If a throughput image is available (an observation of blank sky
+usually at twilight) it is extracted. If no flat field is used the average
+signal through each fiber is computed and this becomes the response
+normalization function. Note that a dome flat may be used in place of a
+sky in the sky flat field parameter for producing throughput only
+corrections. If a flat field is specified then each sky spectrum is
+divided by the appropriate flat field spectrum. The total counts through
+each fiber are multiplied into the flat field spectrum thus making the sky
+throughput of each fiber the same. This correction is important if the
+illumination of the fibers differs between the flat field source and the
+sky. Since only the total counts are required the sky or dome flat field
+spectra need not be particularly strong though care must be taken to avoid
+objects.
+.LP
+Instead of a sky flat or other throughput image a separately derived
+throughput file may be used. It may be used with or without a
+flat field.
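+.LP
+A throughput file is simply a list of aperture numbers and relative
+throughput values, one pair per line. A hypothetical example is:
+.V1
+
+    cl> type throughput.dat
+    1  1.013
+    2  0.987
+    3  1.051
+
+.V2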
+.LP
+The final step is to normalize the flat field spectra by the mean counts of
+all the fibers. This normalization step is simply to preserve the average
+counts of the extracted object and arc spectra after division by the
+response spectra. The final relative throughput values are recorded in the
+log and possibly printed on the terminal.
+.LP
+These flat field response steps and algorithm are available as a separate
+task called \fBmsresp1d\fR.
+.NH
+Dispersion Correction
+.LP
+Dispersion corrections are applied to the extracted spectra if the
+\fBdispcor\fR parameter is set. This can be a complicated process which
+the \fBdofibers\fR task tries to simplify for you. There are three basic
+steps involved; determining the dispersion functions relating pixel
+position to wavelength, assigning the appropriate dispersion function to a
+particular observation, and resampling the spectra to evenly spaced pixels
+in wavelength.
+.LP
+The comparison arc spectra are used to define dispersion functions for the
+fibers using the tasks \fBautoidentify\fR and \fBreidentify\fR. The
+interactive \fBautoidentify\fR task is only used on the central fiber of the
+first arc spectrum to define the basic reference dispersion solution from
+which all other fibers and arc spectra are automatically derived using
+\fBreidentify\fR. \fBAutoidentify\fR attempts to automatically identify
+the arc lines using the \fIcrval\fR and \fIcdelt\fR parameters. Whether
+or not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
+.LP
+The set of arc dispersion function parameters are from \fBautoidentify\fR and
+\fBreidentify\fR. The parameters define a line list for use in
+automatically assigning wavelengths to arc lines, a parameter controlling
+the width of the centering window (which should match the base line
+widths), the dispersion function type and order, parameters to exclude bad
+lines from function fits, and parameters defining whether to refit the
+dispersion function, as opposed to simply determining a zero point shift,
+and the addition of new lines from the line list when reidentifying
+additional arc spectra. The defaults should generally be adequate and the
+dispersion function fitting parameters may be altered interactively. One
+should consult the help for the two tasks for additional details of these
+parameters and the operation of \fBautoidentify\fR.
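+.LP
+For example, if a thorium-argon comparison lamp is used the line list
+might be changed (as a sketch) with:
+.V1
+
+    cl> params.coordlist = "linelists$thar.dat"
+
+.V2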
+.LP
+Generally, taking a number of comparison arc lamp exposures interspersed
+with the program spectra is sufficient to accurately dispersion calibrate
+multifiber spectra. However, there are some other calibration options
+which may be of interest. These options apply additional calibration data
+consisting either of auxiliary line spectra, such as from dome lights or
+night sky lines, or simultaneous arc lamp spectra taken through a few
+fibers during the object exposure. These options add complexity to the
+dispersion calibration process.
+.LP
+When only arc comparison lamp spectra are used, dispersion functions are
+determined independently for each fiber of each arc image and then assigned
+to the matching fibers in the program object observations. The assignment
+consists of selecting one or two arc images to calibrate each object
+image. When two bracketing arc spectra are used the dispersion functions
+are linearly interpolated (usually based on the time of the observations).
+.LP
+If taking comparison exposures is time-consuming, possibly requiring
+reconfiguration to illuminate the fibers, and the spectrograph is
+expected to be fairly stable apart from small shifts, there are two
+mutually exclusive methods for monitoring
+shifts in the dispersion zero point from the basic arc lamp spectra other
+than taking many arc lamp exposures. One is to use some fibers to take a
+simultaneous arc spectrum while observing the program objects. The fibers
+are identified by aperture or beam numbers. The second method is to use
+\fIauxiliary line spectra\fR, such as mercury lines from the dome lights.
+These spectra are specified with an auxiliary shift arc list, \f(CWarcs2\fR.
+.LP
+When using auxiliary line spectra for monitoring zero point shifts one of
+these spectra is plotted interactively by \fBidentify\fR with the
+reference dispersion function from the reference arc spectrum. The user
+marks one or more lines which will be used to compute zero point wavelength
+shifts in the dispersion functions automatically. The actual wavelengths
+of the lines need not be known. In this case accept the wavelength based
+on the reference dispersion function. As other observations of the same
+features are made the changes in the positions of the features will be
+tracked as zero point wavelength changes such that wavelengths of the
+features remain constant.
+.LP
+When using auxiliary line spectra the only arc lamp spectrum used is the
+initial arc reference spectrum (the first image in the \f(CWarcs1\fR list).
+The master dispersion functions are then shifted based on the spectra in
+the \f(CWarcs2\fR list (which must all be of the same type). The dispersion
+function assignments made by \fBrefspectra\fR using either the arc
+assignment file or based on header keywords are done in the same way as
+described for the arc lamp images except using the auxiliary spectra.
+.LP
+If simultaneous arcs are used the arc lines are reidentified to determine a
+zero point shift relative to the comparison lamp spectra selected, by
+\fBrefspectra\fR, of the same fiber. A linear function of aperture
+position on the image across the dispersion versus the zero point shifts
+from the arc fibers is determined and applied to the dispersion functions
+from the assigned calibration arcs for the non-arc fibers. Note that if
+there are two comparison lamp spectra (before and after the object
+exposure) then there will be two shifts applied to two dispersion functions
+which are then combined using the weights based on the header parameters
+(usually the observation time).
+.LP
+The arc assignments may be done either explicitly with an arc assignment
+table (parameter \f(CWarctable\fR) or based on a header parameter. The task
+used is \fBrefspectra\fR and the user should consult this task if the
+default behavior is not what is desired. The default is to interpolate
+linearly between the nearest arcs based on the Julian date (corrected to
+the middle of the exposure). The Julian date and a local Julian day number
+(the day number at local noon) are computed automatically by the task
+\fBsetjd\fR and recorded in the image headers under the keywords JD and
+LJD. In addition the universal time at the middle of the exposure, keyword
+UTMIDDLE, is computed by the task \fBsetairmass\fR and this may also be used
+for ordering the arc and object observations.
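+.LP
+If desired, the presence of these keywords may be checked with the general
+header listing task \fBhselect\fR (a minimal sketch; the image names are
+illustrative):
+.V1
+
+cl> hselect obj*.imh $I,JD,LJD,UTMIDDLE yes
+
+.V2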
+.LP
+An optional step is to use sky lines in the spectra to compute a zeropoint
+dispersion shift that will align the sky lines. This may improve sky
+subtraction if the illumination is not the same between the arc calibration
+and the sky. When selected the object spectrum is dispersion corrected
+using a non-linear dispersion function to avoid resampling the spectrum.
+The sky lines are then reidentified in wavelength space from a template
+list of sky lines. The mean shift in the lines for each fiber relative to
+the template in that fiber is computed to give the zeropoint shift. The
+database file is created when the first object is extracted. You are asked
+to mark the sky lines in one fiber and then the lines are automatically
+reidentified in all other fibers. Note that this technique requires the
+sky lines be found in all fibers.
+.LP
+The last step of dispersion correction (resampling the spectrum to evenly
+spaced pixels in wavelength) is optional and relatively straightforward.
+If the \f(CWlinearize\fR parameter is no then the spectra are not resampled
+and the nonlinear dispersion information is recorded in the image header.
+Other IRAF tasks (the coordinate description is specific to IRAF) will use
+this information whenever wavelengths are needed. If linearizing is
+selected a linear dispersion relation, either linear in the wavelength or
+the log of the wavelength, is defined once and applied to every extracted
+spectrum. The resampling algorithm parameters allow selecting the
+interpolation function type, whether to conserve flux per pixel by
+integrating across the extent of the final pixel, and whether to linearize
+to equal linear or logarithmic intervals. The latter may be appropriate
+for radial velocity studies. The default is to use a fifth order
+polynomial for interpolation, to conserve flux, and to not use logarithmic
+wavelength bins. These parameters are described fully in the help for the
+task \fBdispcor\fR which performs the correction. The interpolation
+function options and the nonlinear dispersion coordinate system are
+described in the help topic \fBonedspec.package\fR.
+.NH
+Sky Subtraction
+.LP
+Sky subtraction is selected with the \f(CWskysubtract\fR processing option.
+The sky spectra are selected by their aperture and beam numbers and
+combined into a single master sky spectrum
+which is then subtracted from each object spectrum. If the \f(CWskyedit\fR
+option is selected the sky spectra are plotted using the task
+\fBspecplot\fR. By default they are superposed to allow identifying
+spectra with unusually high signal due to object contamination. To
+eliminate a sky spectrum from consideration point at it with the cursor and
+type 'd'. The last deleted spectrum may be undeleted with 'e'. This
+allows recovery of incorrect or accidental deletions.
+.LP
+The sky combining algorithm parameters define how the individual sky fiber
+spectra, after interactive editing, are combined before subtraction from
+the object fibers. The goals of combining are to reduce noise, eliminate
+cosmic-rays, and eliminate fibers with inadvertent objects. The common
+methods for doing this are to use a median and/or a special sigma clipping
+algorithm (see \fBscombine\fR for details). The scale
+parameter determines whether the individual sky spectra are first scaled to a
+common mode. The scaling should be used if the throughput is uncertain,
+but in that case the throughput correction was probably not done properly.
+If the sky subtraction is done interactively, i.e. with the
+\f(CWskyedit\fR option selected, then after selecting the spectra to be
+combined a query is made for the combining algorithm. This allows
+modifying the default algorithm based on the number of sky spectra
+selected since the "avsigclip" rejection algorithm requires at least
+three spectra.
+.LP
+The combined sky spectrum is subtracted from only those spectra specified
+by the object aperture and beam numbers. Other spectra, such as comparison
+arc spectra, are retained unchanged. One may include the sky spectra as
+object spectra to produce residual sky spectra for analysis. The combined
+master sky spectra may be saved if the \f(CWsaveskys\fR parameter is set.
+The saved sky is given the name of the object spectrum with the prefix
+"sky".
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+Task Help References
+.LP
+Each task in the \fBspecred\fR package, including the tasks used by
+\fBdofibers\fR, has a help page describing the parameters and the task in
+some detail. To get on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
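+.LP
+For example, a printed copy of the help for the extraction task \fBapall\fR
+may be made with:
+.V1
+
+cl> help apall | lprint
+
+.V2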
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+     apmask - Create an IRAF pixel list mask of the apertures
+apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Extinction and flux calibrate spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction correction
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ fitprofs - Fit gaussian profiles
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically reidentify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Compute instrumental sensitivity from standard stars
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sfit - Fit spectra and output fit, ratio, or difference
+ skysub - Sky subtract extracted multispec spectra
+ slist - List spectrum header parameters
+ specplot - Scale, stack, and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+ standard - Tabulate standard star counts and fluxes
+
+ dofibers - Process multifiber spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+.V2
+.SH
+Appendix A: DOFIBERS Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object spectra to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set and
+dependent calibration data has changed. Extracted spectra are ignored.
+.LE
+apref = ""
+.LS
+Aperture reference spectrum. This spectrum is used to define the basic
+extraction apertures and is typically a flat field spectrum.
+.LE
+flat = "" (optional)
+.LS
+Flat field spectrum. If specified the one dimensional flat field spectra
+are extracted and used to make flat field calibrations. If a separate
+throughput file or image is not specified the flat field is also used
+for computing a fiber throughput correction.
+.LE
+throughput = "" (optional)
+.LS
+Throughput file or image. If an image is specified, typically a blank
+sky observation, the total flux through
+each fiber is used to correct for fiber throughput. If a file consisting
+of lines with the aperture number and relative throughput is specified
+then the fiber throughput will be corrected by those values. If neither
+is specified but a flat field image is given it is used to compute the
+throughput.
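+
+As an illustration only, a throughput text file contains one line per fiber
+giving the aperture number and a relative throughput value (the numbers
+below are made up):
+.nf
+    1  1.000
+    2  0.982
+    3  1.013
+    4  0.947
+.fi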
+.LE
+arcs1 = "" (at least one if dispersion correcting)
+.LS
+List of primary arc spectra. These spectra are used to define the dispersion
+functions for each fiber apart from a possible zero point correction made
+with secondary shift spectra or arc calibration fibers in the object spectra.
+One fiber from the first spectrum is used to mark lines and set the dispersion
+function interactively and dispersion functions for all other fibers and
+arc spectra are derived from it.
+.LE
+arcs2 = "" (optional)
+.LS
+List of optional shift arc spectra. Features in these secondary observations
+are used to supply a wavelength zero point shift through the observing
+sequence. One type of observation is dome lamps containing characteristic
+emission lines.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining arc spectra to be assigned to object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIparams.sort\fR, such as the observation time is made.
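+
+As a sketch only (see \fBrefspectra\fR for the authoritative file format),
+an assignment table might contain lines pairing an object image with the
+arc image(s) to be assigned to it; all names here are illustrative:
+.nf
+    obj0010  arc0009  arc0012
+    obj0011  arc0012
+.fi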
+.LE
+
+readnoise = "0." (apsum)
+.LS
+Read out noise in photons. This parameter defines the minimum noise
+sigma. It is defined in terms of photons (or electrons) and scales
+to the data values through the gain parameter. An image header keyword
+(case insensitive) may be specified to get the value from the image.
+.LE
+gain = "1." (apsum)
+.LS
+Detector gain or conversion factor between photons/electrons and
+data values. It is specified as the number of photons per data value.
+An image header keyword (case insensitive) may be specified to get the value
+from the image.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the flat field or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+fibers = 97 (apfind)
+.LS
+Number of fibers. This number is used during the automatic definition of
+the apertures from the aperture reference spectrum. It is best if this
+reflects the actual number of fibers which may be found in the aperture
+reference image. The interactive
+review of the aperture assignments allows verification and adjustments
+to the automatic aperture definitions.
+.LE
+width = 12. (apedit)
+.LS
+Approximate base full width of the fiber profiles. This parameter is used
+for the profile centering algorithm.
+.LE
+minsep = 8. (apfind)
+.LS
+Minimum separation between fibers. Weaker spectra or noise within this
+distance of a stronger spectrum are rejected.
+.LE
+maxsep = 15. (apfind)
+.LS
+Maximum separation between adjacent fibers. This parameter
+is used to identify missing fibers. If two adjacent spectra exceed this
+separation then it is assumed that a fiber is missing and the aperture
+identification assignments will be adjusted accordingly.
+.LE
+apidtable = "" (apfind)
+.LS
+Aperture identification table. This may be either a text file or an
+image. A text file contains the fiber number, beam number defining object
+(1), sky (0), and arc (2) fibers, and an object title. An image contains
+the keywords SLFIBnnn with string value consisting of the fiber number,
+beam number, optional right ascension and declination, and an object
+title. Unassigned and broken fibers (beam of -1) should be included in the
+identification information since they will automatically be excluded.
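+
+A minimal sketch of an aperture identification text file, following the
+description above (fiber number, beam number, and title; the entries are
+illustrative):
+.nf
+    1   1  star A
+    2   0  sky
+    3   2  arc
+    4  -1  broken
+.fi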
+.LE
+crval = INDEF, cdelt = INDEF (autoidentify)
+.LS
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.LE
+objaps = "", skyaps = "", arcaps = ""
+.LS
+List of object, sky, and arc aperture numbers. These are used to
+identify arc apertures for wavelength calibration and object and sky
+apertures for sky subtraction. Note sky apertures may be identified as
+both object and sky if one wants to subtract the mean sky from the
+individual sky spectra. Typically the different spectrum types are
+identified by their beam numbers, and the default null string
+lists select all apertures.
+.LE
+objbeams = "0,1", skybeams = "0", arcbeams = 2
+.LS
+List of object, sky, and arc beam numbers. The convention is that sky
+fibers are given a beam number of 0, object fibers a beam number of 1, and
+arc fibers a beam number of 2. The beam numbers are typically set in the
+\fIapidtable\fR. Unassigned or broken fibers may be given a beam number of
+-1 in the aperture identification table since apertures with negative beam
+numbers are not extracted. Note it is valid to identify sky fibers as both
+object and sky.
+.LE
+
+scattered = no (apscatter)
+.LS
+Smooth and subtract scattered light from the object and flat field
+images? This operation consists of fitting independent smooth functions
+across the dispersion using data outside the fiber apertures and then
+smoothing the individual fits along the dispersion. The initial
+flat field, or if none is given the aperture reference image, is
+done interactively to allow setting the fitting parameters. All
+subsequent subtractions use the same fitting parameters.
+.LE
+fitflat = yes (flat1d)
+.LS
+Fit the composite flat field spectrum by a smooth function and divide each
+flat field spectrum by this function? This operation removes the average
+spectral signature of the flat field lamp from the sensitivity correction to
+avoid modifying the object fluxes.
+.LE
+clean = yes (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction and requires reasonably good values
+for the readout noise and gain. In addition the datamax parameter
+can be useful.
+.LE
+dispcor = yes
+.LS
+Dispersion correct spectra? Depending on the \fIparams.linearize\fR
+parameter this may either resample the spectra or insert a dispersion
+function in the image header.
+.LE
+skyalign = no
+.LS
+Align sky lines? If yes then for the first object spectrum you are asked
+to mark one or more sky lines to use for alignment. Then these lines will
+be found in all spectra and an average zeropoint shift computed and applied
+to the dispersion solution to align these lines. Note that this assumes
+the sky lines are seen in all fibers.
+.LE
+savearcs = yes
+.LS
+Save any simultaneous arc apertures? If no then the arc apertures will
+be deleted after use.
+.LE
+skysubtract = yes
+.LS
+Subtract sky from the object spectra? If yes the sky spectra are combined
+and subtracted from the object spectra as defined by the object and sky
+aperture/beam parameters.
+.LE
+skyedit = yes
+.LS
+Overplot all the sky spectra and allow contaminated sky spectra to be
+deleted?
+.LE
+saveskys = yes
+.LS
+Save the combined sky spectrum? If no then the sky spectrum will be
+deleted after sky subtraction is completed.
+.LE
+splot = no
+.LS
+Plot the final spectra with the task \fBsplot\fR?
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the objects list will not be processed (unless they need to be updated).
+.LE
+update = yes
+.LS
+Update processing of previously processed spectra if aperture, flat
+field, or dispersion reference definitions are changed?
+.LE
+batch = no
+.LS
+Process spectra as a background or batch job provided there are no interactive
+options (\fIskyedit\fR and \fIsplot\fR) selected.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+params = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. The
+default is parameter set \fBparams\fR. The parameter set may be examined
+and modified in the usual ways (typically with "epar params" or ":e params"
+from the parameter editor). Note that using a different parameter file
+is not allowed. The parameters are described below.
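+
+For example, the algorithm parameters may be listed or edited from the CL
+with:
+.nf
+    cl> lpar params
+    cl> epar params
+.fi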
+.LE
+
+.ce
+PACKAGE PARAMETERS
+
+Package parameters are those which generally apply to all tasks in the
+package. This is also true of \fBdofibers\fR.
+
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
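+
+For example, if the DISPAXIS keyword is not present in the headers it may
+be added with the header editor (a sketch; the image list is illustrative):
+.nf
+    cl> hedit obj*.imh DISPAXIS 2 add+ verify-
+.fi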
+.LE
+observatory = "observatory"
+.LS
+Observatory at which the spectra were obtained if not specified in the
+image header by the keyword OBSERVAT. See \fBobservatory\fR for more
+details.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
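+
+For example, sinc interpolation could be selected by setting the package
+parameter (a sketch assuming the \fBspecred\fR package is loaded):
+.nf
+    cl> specred.interp = "sinc"
+.fi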
+.LE
+database = "database"
+.LS
+Database (directory) used for storing aperture and dispersion information.
+.LE
+verbose = no
+.LS
+Print verbose information available with various tasks.
+.LE
+logfile = "logfile", plotfile = ""
+.LS
+Text and plot log files. If a filename is not specified then no log is
+kept. The plot file contains IRAF graphics metacode which may be examined
+in various ways such as with \fBgkimosaic\fR.
+.LE
+records = ""
+.LS
+Dummy parameter to be ignored.
+.LE
+version = "SPECRED: ..."
+.LS
+Version of the package.
+.LE
+
+.ce
+PARAMS PARAMETERS
+
+The following parameters are part of the \fBparams\fR parameter set and
+define various algorithm parameters for \fBdofibers\fR.
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, recentering, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+order = "decreasing" (apfind)
+.LS
+When assigning aperture identifications order the spectra "increasing"
+or "decreasing" with increasing pixel position (left-to-right or
+right-to-left in a cross-section plot of the image).
+.LE
+extras = no (apsum)
+.LS
+Include extra information in the output spectra? When cleaning or using
+variance weighting the cleaned and weighted spectra are recorded in the
+first 2D plane of a 3D image, the raw, simple sum spectra are recorded in
+the second plane, and the estimated sigmas are recorded in the third plane.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -5., upper = 5. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first found and may be
+resized automatically or interactively.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Data level at which to set aperture limits during automatic resizing.
+It is a fraction of the peak relative to a local background.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 3 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- SCATTERED LIGHT PARAMETERS --
+
+buffer = 1. (apscatter)
+.LS
+Buffer distance from the aperture edges to be excluded in selecting the
+scattered light pixels to be used.
+.LE
+apscat1 = "" (apscatter)
+.LS
+Fitting parameters across the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat1"
+parameter set.
+.LE
+apscat2 = "" (apscatter)
+.LS
+Fitting parameters along the dispersion. This references an additional
+set of parameters for the ICFIT package. The default is the "apscat2"
+parameter set.
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum)
+.LS
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum) (fit1d|fit2d)
+.LS
+Profile fitting algorithm for cleaning and variance weighted extractions.
+The default is generally appropriate for multifiber data but users
+may try the other algorithm. See \fBapprofiles\fR for further information.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+nsubaps = 1 (apsum)
+.LS
+During extraction it is possible to equally divide the apertures into
+this number of subapertures.
+.LE
+
+.ce
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --
+
+f_interactive = yes (fit1d)
+.LS
+Fit the composite one dimensional flat field spectrum interactively?
+This is used if \fIfitflat\fR is set and a two dimensional flat field
+spectrum is specified.
+.LE
+f_function = "spline3", f_order = 10 (fit1d)
+.LS
+Function and order used to fit the composite one dimensional flat field
+spectrum. The functions are "legendre", "chebyshev", "spline1", and
+"spline3". The spline functions are linear and cubic splines with the
+order specifying the number of pieces.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (autoidentify/identify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.LE
+match = -3. (autoidentify/identify)
+.LS
+The maximum difference for a match between the value predicted by the
+dispersion function and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (autoidentify/identify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 10. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "spline3", i_order = 3 (autoidentify/identify)
+.LS
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.LE
+i_niterate = 2, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (reidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+addfeatures = no (reidentify)
+.LS
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any sort parameter.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given.
+This option is used to assign two reference spectra, with equal weights,
+independent of any sorting parameter.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sorting parameter. If there is no following spectrum use the nearest preceding
+spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sorting parameter. If there is no preceding and following
+spectrum use the nearest spectrum. The interpolation is weighted by the
+relative distances of the sorting parameter.
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides the reference aperture check.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sorting
+parameter.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sorting parameter. If there is no preceding spectrum use the nearest following
+spectrum.
+.LE
+.LE
+sort = "jd", group = "ljd" (refspectra)
+.LS
+Image header keywords to be used as the sorting parameter for selection
+based on order and to group spectra.
+A null string, "", or the word "none" may be use to disable the sorting
+or grouping parameters.
+The sorting parameter
+must be numeric but otherwise may be anything. The grouping parameter
+may be a string or number and must simply be the same for all spectra within
+the same group (say a single night).
+Common sorting parameters are times or positions.
+In \fBdofibers\fR the Julian date (JD) and the local Julian day number (LJD)
+at the middle of the exposure are automatically computed from the universal
+time at the beginning of the exposure and the exposure time. Also the
+parameter UTMIDDLE is computed.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling.
+If no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data are not interpolated.
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+-- SKY SUBTRACTION PARAMETERS --
+
+combine = "average" (scombine) (average|median)
+.LS
+Option for combining sky pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average" or "median"
+of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.LE
+reject = "none" (scombine) (none|minmax|avsigclip)
+.LS
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+help for \fBscombine\fR. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the low and high pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.fi
+
+.LE
+scale = "none" (none|mode|median|mean)
+.LS
+Multiplicative scaling to be applied to each spectrum. The choices are none
+or scale by the mode, median, or mean. This should not be necessary if the
+flat field and throughput corrections have been properly made.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
diff --git a/noao/imred/specred/doc/doslit.hlp b/noao/imred/specred/doc/doslit.hlp
new file mode 100644
index 00000000..2bcf7294
--- /dev/null
+++ b/noao/imred/specred/doc/doslit.hlp
@@ -0,0 +1,1201 @@
+.help doslit Feb93 noao.imred.specred
+.ih
+NAME
+doslit -- Slit spectra data reduction task
+.ih
+USAGE
+doslit objects
+.ih
+SUMMARY
+\fBDoslit\fR extracts, sky subtracts, wavelength calibrates, and flux
+calibrates simple two dimensional slit spectra which have been processed to
+remove the detector characteristics; i.e. CCD images have been bias, dark
+count, and flat field corrected. It is primarily intended for
+spectrophotometry or radial velocities of stellar spectra with the spectra
+aligned with one of the image axes; i.e. the assumption is that extractions
+can be done by summing along image lines or columns. The alignment does
+not have to be precise but only close enough that the wavelength difference
+across the spectrum profiles is insignificant. The task is available
+in the \fBctioslit\fR, \fBkpnoslit\fR, \fBkpnocoude\fR, and \fBspecred\fR
+packages.
+.ih
+PARAMETERS
+.ls objects
+List of object images to be processed. Previously processed spectra are
+ignored unless the \fIredo\fR flag is set or the \fIupdate\fR flag is set
+and dependent calibration data has changed. If the images contain the
+keyword IMAGETYP then only those with a value of "object" or "OBJECT"
+are used and those with a value of "comp" or "COMPARISON" are added
+to the list of arcs. Extracted spectra are ignored.
+.le
+.ls arcs = "" (at least one if dispersion correcting)
+List of arc calibration spectra. These spectra are used to define
+the dispersion functions. The first spectrum is used to mark lines
+and set the dispersion function interactively and dispersion functions
+for all other arc spectra are derived from it. If the images contain
+the keyword IMAGETYP then only those with a value of "comp" or
+"COMPARISON" are used. All others are ignored as are extracted spectra.
+.le
+.ls arctable = "" (optional) (refspectra)
+Table defining which arc spectra are to be assigned to which object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \fIsparams.sort\fR, such as the Julian date
+is made.
+.le
+.ls standards = "" (at least one if flux calibrating)
+List of standard star spectra. The standard stars must have entries in
+the calibration database (package parameter \fIcaldir\fR).
+.le
+
+.ls readnoise = "rdnoise", gain = "gain" (apsum)
+Read out noise in photons and detector gain in photons per data value.
+This parameter defines the minimum noise sigma and the conversion between
+photon Poisson statistics and the data number statistics. Image header
+keywords (case insensitive) may be specified to obtain the values from the
+image header.
+.le
+.ls datamax = INDEF (apsum.saturation)
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the standard star or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.le
+.ls width = 5. (apedit)
+Approximate full width of the spectrum profiles. This parameter is used
+to define a width and error radius for the profile centering algorithm.
+.le
+.ls crval = INDEF, cdelt = INDEF (autoidentify)
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.le
+
+.ls dispcor = yes
+Dispersion correct spectra? This may involve either defining a nonlinear
+dispersion coordinate system in the image header or resampling the
+spectra to uniform linear wavelength coordinates as selected by
+the parameter \fIsparams.linearize\fR.
+.le
+.ls extcor = no
+Extinction correct the spectra?
+.le
+.ls fluxcal = no
+Flux calibrate the spectra using standard star observations?
+.le
+.ls resize = no (apresize)
+Resize the default aperture for each object based on the spectrum profile?
+.le
+.ls clean = no (apsum)
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction. In addition the datamax parameter
+can be useful.
+.le
+.ls splot = no
+Plot the final spectra with the task \fBsplot\fR? In quicklook mode
+this is automatic and in non-quicklook mode it is queried.
+.le
+.ls redo = no
+Redo operations previously done? If no then previously processed spectra
+in the object list will not be processed unless required by the
+update option.
+.le
+.ls update = no
+Update processing of previously processed spectra if the
+dispersion reference image or standard star calibration data are changed?
+.le
+.ls quicklook = no
+Extract and calibrate spectra with minimal interaction? In quicklook mode
+only the initial dispersion function solution and standard star setup are
+done interactively. Normally the \fIsplot\fR option is set in this mode to
+produce an automatic final spectrum plot for each object. It is
+recommended that this mode not be used for final reductions.
+.le
+.ls batch = yes
+Process spectra as a background or batch job provided there are no interactive
+steps remaining.
+.le
+.ls listonly = no
+List processing steps but don't process?
+.le
+
+.ls sparams = "" (pset)
+Name of parameter set containing additional processing parameters. This
+parameter is only for indicating the link to the parameter set
+\fBsparams\fR and should not be given a value. The parameter set may be
+examined and modified in the usual ways (typically with "epar sparams"
+or ":e sparams" from the parameter editor). The parameters are
+described below.
+.le
+
+.ce
+-- GENERAL PARAMETERS --
+.ls line = INDEF, nsum = 10
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.le
+.ls extras = no (apsum)
+Include raw unweighted and uncleaned spectra, the background spectra, and
+the estimated sigmas in a three dimensional output image format.
+See the discussion in the \fBapextract\fR package for further information.
+.le
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+.ls lower = -3., upper = 3. (apdefault)
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first defined.
+.le
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+.ls ylevel = 0.05 (apresize)
+Fraction of the peak to set aperture limits during automatic resizing.
+.le
+
+.ce
+-- TRACE PARAMETERS --
+.ls t_step = 10 (aptrace)
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \fInsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.le
+.ls t_function = "spline3", t_order = 1 (aptrace)
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of terms in the
+polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+Default number of rejection iterations and rejection sigma thresholds.
+.le
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+.ls weights = "none" (apsum) (none|variance)
+Type of extraction weighting. Note that if the \fIclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+.ls "none"
+The pixels are summed without weights except for partial pixels at the
+ends.
+.le
+.ls "variance"
+The extraction is weighted by the variance based on the data values
+and a poisson/ccd model using the \fIgain\fR and \fIreadnoise\fR
+parameters.
+.le
+.le
+.ls pfit = "fit1d" (apsum and approfile) (fit1d|fit2d)
+Type of profile fitting algorithm to use. The "fit1d" algorithm is
+preferred except in cases of extreme tilt.
+.le
+.ls lsigma = 3., usigma = 3. (apsum)
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.le
+
+.ce
+-- DEFAULT BACKGROUND PARAMETERS --
+.ls background = "fit" (apsum) (none|average|median|minimum|fit)
+Type of background subtraction. The choices are "none" for no background
+subtraction, "average" to average the background within the background
+regions, "median" to use the median in the background regions, "minimum" to
+use the minimum in the background regions, or "fit" to fit across the
+dispersion using the background within the background regions. Note that
+the "average" option does not do any medianing or bad pixel checking,
+something which is recommended. The fitting option is slower than the
+other options and requires additional fitting parameter.
+.le
+.ls b_function = "legendre", b_order = 1 (apsum)
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.le
+.ls b_sample = "-10:-6,6:10" (apsum)
+Default background sample. The sample is given by a set of colon separated
+ranges each separated by either whitespace or commas. The string "*" refers
+to all points. Note that the background coordinates are relative to the
+aperture center and not image pixel coordinates so the endpoints need not
+be integer. It is recommended that the background regions be examined
+and set interactively with the 'b' key in the interactive aperture
+definition mode. This requires \fIquicklook\fR to be no.
+.le
+.ls b_naverage = -100 (apsum)
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.le
+.ls b_niterate = 1 (apsum)
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.le
+.ls b_low_reject = 3., b_high_reject = 3. (apsum)
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.le
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+.ls threshold = 10. (autoidentify/identify/reidentify)
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.le
+.ls coordlist = "linelists$idhenear.dat" (autoidentify/identify)
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.le
+.ls match = -3. (autoidentify/identify)
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.le
+.ls fwidth = 4. (autoidentify/identify)
+Approximate full base width (in pixels) of arc lines.
+.le
+.ls cradius = 10. (reidentify)
+Radius from previous position to reidentify arc line.
+.le
+.ls i_function = "spline3", i_order = 1 (autoidentify/identify)
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The function choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.le
+.ls i_niterate = 0, i_low = 3.0, i_high = 3.0 (autoidentify/identify)
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.le
+.ls refit = yes (reidentify)
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the initial arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.le
+.ls addfeatures = no (reidentify)
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.le
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+.ls select = "interp" (refspectra)
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+.ls average
+Average two reference spectra without regard to any
+sort or group parameters.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given. There is no checking of the
+group values.
+.le
+.ls following
+Select the nearest following spectrum in the reference list based on the
+sort and group parameters. If there is no following spectrum use the
+nearest preceding spectrum.
+.le
+.ls interp
+Interpolate between the preceding and following spectra in the reference
+list based on the sort and group parameters. If there is no preceding and
+following spectrum use the nearest spectrum. The interpolation is weighted
+by the relative distances of the sorting parameter (see cautions in
+DESCRIPTION section).
+.le
+.ls match
+Match each input spectrum with the reference spectrum list in order.
+This overrides any group values.
+.le
+.ls nearest
+Select the nearest spectrum in the reference list based on the sort and
+group parameters.
+.le
+.ls preceding
+Select the nearest preceding spectrum in the reference list based on the
+sort and group parameters. If there is no preceding spectrum use the
+nearest following spectrum.
+.le
+.le
+.ls sort = "jd" (setjd and refspectra)
+Image header keyword to be used as the sorting parameter for selection
+based on order. The header parameter must be numeric but otherwise may
+be anything. Common sorting parameters are times or positions.
+.le
+.ls group = "ljd" (setjd and refspectra)
+Image header keyword to be used to group spectra. For those selection
+methods which use the group parameter the reference and object
+spectra must have identical values for this keyword. This can
+be anything but it must be constant within a group. Common grouping
+parameters are the date of observation "date-obs" (provided it does not
+change over a night) or the local Julian day number.
+.le
+.ls time = no, timewrap = 17. (refspectra)
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.le
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+.ls linearize = yes (dispcor)
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling using
+the linear dispersion parameters specified by other parameters. If
+no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data is not interpolated. Note the interpolation
+function type is set by the package parameter \fIinterp\fR.
+.le
+.ls log = no (dispcor)
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.le
+.ls flux = yes (dispcor)
+Conserve the total flux during interpolation? If \fIno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \fIyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.le
+
+.ce
+-- SENSITIVITY CALIBRATION PARAMETERS --
+.ls s_function = "spline3", s_order = 1 (sensfunc)
+Function and order used to fit the sensitivity data. The function types
+are "chebyshev" polynomial, "legendre" polynomial, "spline3" cubic spline,
+and "spline1" linear spline. Order of the sensitivity fitting function.
+The value corresponds to the number of polynomial terms or the number of
+spline pieces. The default values may be changed interactively.
+.le
+.ls fnu = no (calibrate)
+The default calibration is into units of F-lambda. If \fIfnu\fR = yes then
+the calibrated spectrum will be in units of F-nu.
+.le
+
+.ce
+PACKAGE PARAMETERS
+
+The following package parameters are used by this task. The default values
+may vary depending on the package.
+.ls dispaxis = 2
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.le
+.ls extinction (standard, sensfunc, calibrate)
+Extinction file for a site. There are two extinction files in the
+NOAO standards library, onedstds$, for KPNO and CTIO. These extinction
+files are used for extinction and flux calibration.
+.le
+.ls caldir (standard)
+Standard star calibration directory. A directory containing standard
+star data files. Note that the directory name must end with '/'.
+There are a number of standard star calibrations directories in the NOAO
+standards library, onedstds$.
+.le
+.ls observatory = "observatory" (observatory)
+The default observatory to use for latitude dependent computations.
+If the OBSERVAT keyword is defined in the image header it takes precedence
+over this parameter.
+.le
+.ls interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc) (dispcor)
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.nf
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.fi
+.le
+.ls database = "database"
+Database name used by various tasks. This is a directory which is created
+if necessary.
+.le
+.ls verbose = no
+Verbose output? If set then almost all the information written to the
+logfile is also written to the terminal except when the task is a
+background or batch process.
+.le
+.ls logfile = "logfile"
+If specified detailed text log information is written to this file.
+.le
+.ls plotfile = ""
+If specified metacode plots are recorded in this file for later review.
+Since plot information can become large this should be used only if
+really desired.
+.le
+.ih
+ENVIRONMENT PARAMETERS
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
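+
+For example, to process IRAF ".imh" format images the environment variable
+may be set from the CL (a sketch):
+.nf
+
+    cl> set imtype=imh
+
+.fi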
+.ih
+DESCRIPTION
+\fBDoslit\fR extracts, sky subtracts, wavelength calibrates, and flux
+calibrates simple two dimensional slit spectra which have been processed to
+remove the detector characteristics; i.e. CCD images have been bias, dark
+count, and flat field corrected. It is primarily intended for
+spectrophotometry or radial velocities of stellar spectra with the spectra
+aligned with one of the image axes; i.e. the assumption is that extractions
+can be done by summing along image lines or columns. The alignment does
+not have to be precise but only close enough that the wavelength difference
+across the spectrum profiles is insignificant. Extended objects requiring
+accurate geometric alignment over many pixels are reduced using the
+\fBlongslit\fR package.
+
+The task is a command language script which collects and combines the
+functions and parameters of many general purpose tasks to provide a single,
+complete data reduction path and a degree of guidance, automation, and
+record keeping. In the following description and in the parameter section
+the various general tasks used are identified. Further
+information about those tasks and their parameters may be found in their
+documentation. \fBDoslit\fR also simplifies and consolidates parameters
+from those tasks and keeps track of previous processing to avoid
+duplications.
+
+The general organization of the task is to do the interactive setup steps,
+such as the reference dispersion function
+determination, first using representative calibration data and then perform
+the majority of the reductions automatically, possibly as a background
+process, with reference to the setup data. In addition, the task
+determines which setup and processing operations have been completed in
+previous executions of the task and, contingent on the \fIredo\fR and
+\fIupdate\fR options, skips or repeats some or all of the steps.
+
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage
+since there are many variations possible.
+
+\fBUsage Outline\fR
+
+.ls 6 [1]
+The images are first processed with \fBccdproc\fR for overscan,
+zero level, dark count, and flat field corrections.
+.le
+.ls [2]
+Set the \fBdoslit\fR parameters with \fBeparam\fR. Specify the object
+images to be processed,
+one or more arc images, and one or more standard
+star images. If there are many object, arc, or standard star images
+you might prepare "@ files" (see the example following this outline). Set the detector and data
+specific parameters. Select the processing options desired.
+Finally you might wish to review the \fIsparams\fR algorithm parameters
+though the defaults are probably adequate.
+.le
+.ls [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the current execution and no further queries of that
+type will be made.
+.le
+.ls [4]
+Apertures are defined for all the standard and object images. This is only
+done if there are no previous aperture definitions for the image.
+The highest peak is found and centered and the default aperture limits
+are set. If the resize option is set the aperture is resized by finding
+the level which is 5% (the default) of the peak above local background.
+If not using the quicklook option you now have the option
+of entering the aperture editing loop to check the aperture position,
+size, and background fitting parameters, and possibly add additional
+apertures. This step is highly recommended.
+It is important to check the background regions with the 'b'
+key. To exit the background mode and then
+to exit the review mode use 'q'.
+
+The spectrum positions at a series of points along the dispersion are
+measured and a function is fit to these positions. If not using the
+quicklook option the traced positions may be examined interactively and the
+fitting parameters adjusted. To exit the interactive fitting type 'q'.
+.le
+.ls [5]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The dispersion function is defined using the task
+\fBautoidentify\fR. The \fIcrval\fR and \fIcdelt\fR parameters are used in
+the automatic identification. Whether or not the automatic identification
+is successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc lines
+with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.le
+.ls [6]
+If the flux calibration option is selected the standard star spectra are
+processed (if not done previously). The images are
+extracted and wavelength calibrated. The appropriate arc
+calibration spectra are extracted and the dispersion function refit
+using the arc reference spectrum as a starting point. The standard star
+fluxes through the calibration bandpasses are compiled. You are queried
+for the name of the standard star calibration data file.
+
+After all the standard stars are processed a sensitivity function is
+determined using the interactive task \fBsensfunc\fR. Finally, the
+standard star spectra are extinction corrected and flux calibrated
+using the derived sensitivity function.
+.le
+.ls [7]
+The object spectra are now automatically
+extracted, wavelength calibrated, and flux calibrated.
+.le
+.ls [8]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'. In quicklook mode the spectra are plotted
+noninteractively with \fBbplot\fR.
+.le
+.ls [9]
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added.
+.le
+
+\fBSpectra and Data Files\fR
+
+The basic input consists of two dimensional slit object, standard star, and
+arc calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must be
+processed to remove overscan, bias, dark count, and flat field effects.
+This is generally done using the \fBccdred\fR package. Lines of constant
+wavelength should be closely aligned with one of the image axes though a
+small amount of misalignment only causes a small loss of resolution. For
+large misalignments one may use the \fBrotate\fR task. More complex
+geometric problems and observations of extended objects should be handled
+by the \fBlongslit\fR package.
+
+The arc
+spectra are comparison arc lamp observations (they must all be of the same
+type). The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in task \fBrefspectra\fR.
+
+The final reduced spectra are recorded in one, two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. With a single aperture the image will be one dimensional
+and with multiple apertures the image will be two dimensional.
+When the \fIextras\fR parameter is set the images will be three
+dimensional (regardless of the number of apertures) and the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR.
+
+\fBPackage Parameters\fR
+
+The package parameters set parameters which change
+infrequently and define the standard I/O functions. The extinction file
+is used for making extinction corrections and the standard star
+calibration directory is used for determining flux calibrations from
+standard star observations. The calibration directories contain data files
+with standard star fluxes and band passes. The available extinction
+files and flux calibration directories may be listed using the command:
+.nf
+
+ cl> help onedstds
+
+.fi
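+
+For example, the package might be pointed at a particular extinction
+file and calibration directory as follows (this assumes the
+\fBspecred\fR package and the full parameter names \fIextinction\fR
+and \fIcaldir\fR; the values are only illustrative):
+.nf
+
+    cl> specred.extinction = "onedstds$kpnoextinct.dat"
+    cl> specred.caldir = "onedstds$spec16redcal/"
+
+.fi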
+
+The extinction correction requires computation of an air mass using the
+task \fBsetairmass\fR. The air mass computation needs information
+about the observation and, in particular, the latitude of the observatory.
+This is determined using the OBSERVAT image header keyword. If this
+keyword is not present the observatory parameter is used. See the
+task \fBobservatory\fR for more on defining the observatory parameters.
+
+The spectrum interpolation type is used whenever a spectrum needs to be
+resampled for linearization or performing operations between spectra
+with different sampling. The "sinc" interpolation may be of interest
+as an alternative but see the cautions given in \fBonedspec.package\fR.
+
+The general direction in which the spectra run is specified by the
+dispersion axis parameter. Recall that ideally it is the direction
+of constant wavelength which should be aligned with an image axis and
+the dispersion direction may not be exactly aligned because of atmospheric
+dispersion.
+
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdoslit\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of the apertures, traces, and extracted
+spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+
+\fBProcessing Parameters\fR
+
+The input images are specified by image lists. The lists may be
+explicit comma-separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+To allow wildcard image lists to be used safely and conveniently the
+image lists are checked to remove extracted images (the .ms images)
+and to automatically identify object and arc spectra. Object and arc
+images are identified by the keyword IMAGETYP with values of "object",
+"OBJECT", "comp", or "COMPARISON" (the current practice at NOAO).
+If arc images are found in the object list they are transferred to the
+arc list while if object images are found in the arc list they are ignored.
+All other image types, such as biases, darks, or flat fields, are
+ignored. This behavior allows simply specifying all images with a wildcard
+in the object list, with the arc spectra selected automatically, or using a
+wildcard in the arc list to find the arc spectra automatically.
+If the data lack the identifying information it is up to the user
+to explicitly set the proper lists.
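+
+For example, either of the following invocations might be used (the
+image templates and list file names here are hypothetical):
+.nf
+
+    cl> doslit obj*.imh arcs=arc*.imh
+    cl> doslit @objects.lis arcs=@arcs.lis stand=@stds.lis
+
+.fi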
+
+The arc assignment table is a file which may be used to assign
+specific arc spectra to specific object and standard star spectra.
+For more on this option see \fBrefspectra\fR.
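+
+As a rough sketch, such a table pairs each object with its arc
+exposure(s), one object per line (the image names below are
+hypothetical and the authoritative format is given in
+\fBrefspectra\fR):
+.nf
+
+    obj0012  arc0011
+    obj0015  arc0014  arc0016
+
+.fi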
+
+The next set of parameters describe the noise characteristics and
+spectrum characteristics. The read out noise and gain are used when
+"cleaning" cosmic rays and when using variance or optimal weighting. These
+parameters must be fairly accurate. Note that these are the effective
+parameters and must be adjusted if previous processing has modified the
+pixel values, such as with an unnormalized flat field.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays; ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
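+
+For example, the noise parameters might be pointed at image header
+keywords and a saturation threshold set (the keyword names match the
+task defaults and the threshold value is only illustrative):
+.nf
+
+    cl> doslit.readnoise = "rdnoise"
+    cl> doslit.gain = "gain"
+    cl> doslit.datamax = 60000.
+
+.fi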
+
+The profile width should be approximately the full width
+at the profile base. This parameter is used for centering and tracing
+of the spectrum profiles.
+
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
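+
+For example, for a spectrum centered near 5500 Angstroms with a
+dispersion of roughly 6 Angstroms per pixel (purely illustrative
+values) one might set:
+.nf
+
+    cl> doslit.crval = 5500.
+    cl> doslit.cdelt = 6.
+
+.fi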
+
+The next set of parameters select the processing steps and options. The
+various calibration steps may be done simultaneously, that is at the same
+time as the basic extractions, or in separate executions of the task.
+Typically, all the desired operations are done at the same time.
+Dispersion correction requires at least one arc spectrum and flux
+calibration requires dispersion correction and at least one standard star
+observation.
+
+The \fIresize\fR option resets the edges of the extraction aperture based
+on the profile for each object and standard star image. The default
+resizing is to the 5% point relative to the peak measured above the
+background. This allows following changes in the seeing. However, one
+should consider the consequences of this if attempting to flux calibrate
+the observations. Except in quicklook mode, the apertures for each object
+and standard star observation may be reviewed graphically and
+adjustments made to the aperture width and background regions.
+
+The \fIclean\fR option invokes a profile
+fitting and deviant point rejection algorithm as well as a variance weighting
+of points in the aperture. See the next section for more about
+requirements to use this option.
+
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\fIupdate\fR flag is set. The changes which will cause an update are a
+new arc reference image and new standard stars. If all input spectra are to be
+processed regardless of previous processing the \fIredo\fR flag may be
+used. Note that reprocessing clobbers the previously processed output
+spectra.
+
+The final step is to plot the spectra if the \fIsplot\fR option is
+selected. In non-quicklook mode there is a query which may be
+answered either in lower or upper case. The plotting uses the interactive
+task \fBsplot\fR. In quicklook mode the plot appears noninteractively
+using the task \fBbplot\fR.
+
+The \fIquicklook\fR option provides a simpler, less interactive, mode.
+In quicklook mode a single aperture is defined using default parameters
+without interactive aperture review or trace fitting and
+the \fIsplot\fR option selects a noninteractive plot to be
+shown at the end of processing of each object and standard star
+spectrum. While the algorithms used in quicklook mode are nearly the same
+as in non-quicklook mode and the final results may be the same it is
+recommended that the greater degree of monitoring and review in
+non-quicklook mode be used for careful final reductions.
+
+The batch processing option allows object spectra to be processed as a
+background or batch job. This will occur only if the interactive
+\fIsplot\fR option is not active; either not set, turned off during
+processing with "NO", or in quicklook mode. In batch processing the
+terminal output is suppressed.
+
+The \fIlistonly\fR option prints a summary of the processing steps
+which will be performed on the input spectra without actually doing
+anything. This is useful for verifying which spectra will be affected
+if the input list contains previously processed spectra. The listing
+does not include any arc spectra which may be extracted to dispersion
+calibrate an object spectrum.
+
+The last parameter (excluding the task mode parameter) points to
+another parameter set for the algorithm parameters. The default
+parameter set is called \fIsparams\fR. The algorithm parameters are
+discussed further in the next section.
+
+\fBAlgorithms and Algorithm Parameters\fR
+
+This section summarizes the various algorithms used by the
+\fBdoslit\fR task and the parameters which control and modify the
+algorithms. The algorithm parameters available to you are
+collected in the parameter set \fBsparams\fR. These parameters are
+taken from the various general purpose tasks used by the \fBdoslit\fR
+processing task. Additional information about these parameters and
+algorithms may be found in the help for the actual
+task executed. These tasks are identified below. The aim of this
+parameter set organization is to collect all the algorithm parameters
+in one place separate from the processing parameters and include only
+those which are relevant for slit data. The parameter values
+can be changed from the defaults by using the parameter editor,
+.nf
+
+cl> epar sparams
+
+.fi
+or simply typing \fIsparams\fR.
+The parameter editor can also be entered when editing the \fBdoslit\fR
+parameters by typing \fI:e\fR when positioned at the \fIsparams\fR
+parameter.
+
+\fBAperture Definitions\fR
+
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the input slit spectra and, if flux calibration is
+selected, the standard star spectra. This is done only for spectra which
+do not have previously defined apertures unless the \fIredo\fR option is
+set to force all definitions to be redone. Thus, apertures may be
+defined separately using the \fBapextract\fR tasks. This is particularly
+useful if one needs to use reference images to define apertures for very
+weak spectra which are not well centered or traced by themselves.
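+
+As a sketch of this approach, a weak spectrum might be extracted with
+the aperture and trace taken from a brighter reference image (the
+image names are hypothetical; see \fBapall\fR for details):
+.nf
+
+    cl> apall faintobj references=brightstar
+
+.fi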
+
+Initially a single spectrum is found and a default aperture defined
+automatically. If the \fIresize\fR parameter is set the aperture width is
+adjusted to a specified point on the spectrum profile (see
+\fBapresize\fR). If not in "quicklook" mode (set by the \fIquicklook\fR
+parameter) a query is printed to select whether to inspect and modify the
+aperture and background aperture definitions using the commands described
+for \fBapedit\fR. This option allows adding
+apertures for other objects on the slit and adjusting
+background regions to avoid contaminating objects. The query may be
+answered in lower case for a single spectrum or in upper case to
+permanently set the response for the duration of the task execution. This
+convention for query responses is used throughout the task. It is
+recommended that quicklook only be used for initial quick extractions and
+calibration and that for final reductions one at least review the aperture
+definitions and traces.
+
+The initial spectrum finding and aperture definitions are done at a specified
+line or column. The positions of the spectrum at a set of other lines or
+columns are measured next and a smooth function is fit to define the aperture
+centers at all points in the image. In non-quicklook mode the user has the
+option to review and adjust the function fitting parameters and delete bad
+position determinations. As with the initial aperture review there is a
+query which may be answered either in lower or upper case.
+
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBsparams\fR parameters. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the object position on the slit and the number
+of image lines or columns to sum are set by the \fIline\fR and \fInsum\fR
+parameters. A line of INDEF (the default) selects the middle of the image.
+The automatic finding algorithm is described for the task
+\fBapfind\fR and basically finds the strongest peak. The default
+aperture size, background parameters, and resizing are described in
+the tasks \fBapdefault\fR and \fBapresize\fR and the
+parameters used are also described there.
+The tracing is done as described in \fBaptrace\fR and consists of
+stepping along the image using the specified \fIt_step\fR parameter. The
+function fitting uses the \fBicfit\fR commands with the other parameters
+from the tracing section.
+
+\fBExtraction\fR
+
+The actual extraction of the spectra is done by summing across the
+fixed width apertures at each point along the dispersion.
+The default is to simply sum the pixels using
+partial pixels at the ends. There is an option to weight the
+sum based on a Poisson variance model using the \fIreadnoise\fR and
+\fIgain\fR detector parameters. Note that if the \fIclean\fR
+option is selected the variance weighted extraction is used regardless
+of the \fIweights\fR parameter. The sigma thresholds for cleaning
+are also set in the \fBsparams\fR parameters.
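+
+For example, variance weighting may be selected through the algorithm
+parameters, or cleaning (which implies variance weighting) may be
+turned on when the task is run (the list file names are hypothetical):
+.nf
+
+    cl> sparams.weights = "variance"
+    cl> doslit @objects.lis arcs=@arcs.lis clean+
+
+.fi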
+
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as applying a separate
+background subtraction operation) or scaling (such as caused by
+unnormalized flat fielding). These options also require using background
+subtraction if the profile does not go to zero. For optimal extraction and
+cleaning to work it is recommended that any flat fielding be done using
+normalized flat fields (as is done in \fBccdproc\fR) and using background
+subtraction if there is any appreciable sky. For further discussion of
+cleaning and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR as well as \fBapsum\fR.
+
+Background sky subtraction is done during the extraction based on
+background regions and parameters defined by the default parameters or
+changed during the interactive setting of the apertures. The background
+subtraction options are to do no background subtraction, subtract the
+average, median, or minimum of the pixels in the background regions, or to
+fit a function and subtract the function from under the extracted object
+pixels. The background regions are specified in pixels from
+the aperture center and follow changes in the center of the spectrum along the
+dispersion. The syntax is colon separated ranges with multiple ranges
+separated by a comma or space. The background fitting uses the \fBicfit\fR
+routines which include medians, iterative rejection of deviant points, and
+a choice of function types and orders. Note that it is important to use a
+method which rejects cosmic rays such as using either medians over all the
+background regions (\fIbackground\fR = "median") or median samples during
+fitting (\fIb_naverage\fR < -1). The background subtraction algorithm and
+options are described in greater detail in \fBapsum\fR and
+\fBapbackground\fR.
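+
+For example, one might switch to median background subtraction or
+widen the background sample regions (this assumes the full parameter
+names \fIbackground\fR and \fIb_sample\fR; the regions shown are only
+illustrative):
+.nf
+
+    cl> sparams.background = "median"
+    cl> sparams.b_sample = "-15:-8,8:15"
+
+.fi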
+
+\fBDispersion Correction\fR
+
+If dispersion correction is not selected, \fIdispcor\fR=no, then the object
+spectra are simply extracted. The extracted spectra may be plotted
+by setting the \fIsplot\fR option. This produces a query and uses
+the interactive \fBsplot\fR task in non-quicklook mode and uses the
+noninteractive \fBbplot\fR task in quicklook mode.
+
+Dispersion corrections are applied to the extracted spectra if the
+\fIdispcor\fR processing parameter is set. There are three basic steps
+involved; determining the dispersion functions relating pixel position to
+wavelength, assigning the appropriate dispersion function to a particular
+observation, and either storing the nonlinear dispersion function in the
+image headers or resampling the spectra to evenly spaced pixels in
+wavelength.
+
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted at the middle of the image with no
+tracing. Note that extractions of arc spectra are not background subtracted.
+The task \fBautoidentify\fR attempts to define the dispersion function
+automatically using the \fIcrval\fR and \fIcdelt\fR parameters. Whether or
+not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
+
+The arc dispersion function parameters are for \fBautoidentify\fR and its
+related partner \fBreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and defining whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBautoidentify\fR.
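+
+For example, the line list might be set explicitly and the dispersion
+function order increased (this assumes the full parameter names
+\fIcoordlist\fR and \fIi_order\fR; the values are only illustrative):
+.nf
+
+    cl> sparams.coordlist = "linelists$idhenear.dat"
+    cl> sparams.i_order = 3
+
+.fi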
+
+The extracted reference arc spectrum is then dispersion corrected.
+If the spectra are to be linearized, as set by the \fIlinearize\fR
+parameter, the default linear wavelength parameters are printed and
+you have the option to adjust them. The dispersion system defined at
+this point will be applied automatically to all other spectra as they
+are dispersion corrected.
+
+Once the reference dispersion function is defined other arc spectra are
+extracted as required by the object spectra. The assignment of arcs is
+done either explicitly with an arc assignment table (parameter
+\fIarctable\fR) or based on a header parameter such as a time.
+These assignments are made by the task
+\fBrefspectra\fR. When two arcs are assigned to an object spectrum an
+interpolation is done between the two dispersion functions. This makes an
+approximate correction for steady drifts in the dispersion.
+
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. This computes and adds the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
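+
+The computed quantities may be checked in the image headers; for
+example (the AIRMASS keyword name is assumed here):
+.nf
+
+    cl> hselect demoobj1 jd,ljd,utmiddle,airmass yes
+
+.fi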
+
+The assigned arc spectra are then extracted using the object aperture
+definitions (but without background subtraction or cleaning) so that the
+same pixels on the detector are used. The extracted arc spectra are then
+reidentified automatically against the reference arc spectrum. Some
+statistics of the reidentification are printed (if not in batch mode) and
+the user has the option of examining the lines and fits interactively if
+not in quicklook mode. The task which does the reidentification is called
+\fBreidentify\fR.
+
+The last step of dispersion correction is setting the dispersion
+of the object image from the arc images. There are two choices here.
+If the \fIlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+
+If the \fIlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength using the dispersion coordinate system defined previously
+for the arc reference spectrum.
+
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
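+
+For example, for radial velocity work one might select logarithmic
+wavelength bins:
+.nf
+
+    cl> sparams.log = yes
+
+.fi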
+
+\fBFlux Calibration\fR
+
+Flux calibration consists of an extinction correction and an instrumental
+sensitivity calibration. The extinction correction only depends on the
+extinction function defined by the package parameter \fIextinct\fR and
+determination of the airmass from the header parameters (the air mass is
+computed by \fBsetairmass\fR as mentioned earlier). The sensitivity
+calibration depends on a sensitivity calibration spectrum determined from
+standard star observations for which there are tabulated absolute fluxes.
+The task that applies both the extinction correction and sensitivity
+calibration to each extracted object spectrum is \fBcalibrate\fR. Consult
+the manual page for this task for more information.
+
+Generation of the sensitivity calibration spectrum is done before
+processing any object spectra since it has two interactive steps and
+requires all the standard star observations. The first step is tabulating
+the observed fluxes over the same bandpasses as the calibrated absolute
+fluxes. The standard star tabulations are done after each standard star is
+extracted and dispersion corrected. You are asked for the name of the
+standard star as tabulated in the absolute flux data files in the directory
+\fIcaldir\fR defined by the package parameters.
+The tabulation of the standard star
+observations over the standard bandpasses is done by the task
+\fBstandard\fR. The tabulated data is stored in the file \fIstd\fR. Note
+that if the \fIredo\fR flag is not set any new standard stars specified in
+subsequent executions of \fBdoslit\fR are added to the previous data in
+the data file, otherwise the file is first deleted. Modification of the
+tabulated standard star data, such as by adding new stars, will cause any
+spectra in the input list which have been previously calibrated to be
+reprocessed if the \fIupdate\fR flag is set.
+
+After the standard star calibration bandpass fluxes are tabulated the
+information from all the standard stars is combined to produce a
+sensitivity function for use by \fBcalibrate\fR. The sensitivity function
+determination is interactive and uses the task \fBsensfunc\fR. This task
+allows fitting a smooth sensitivity function to the ratio of the observed
+to calibrated fluxes versus wavelength. The types of manipulations one
+needs to do include deleting bad observations, possibly removing variable
+extinction (for poor data), and possibly deriving a revised extinction
+function. This is a complex operation and one should consult the manual
+page for \fBsensfunc\fR. The sensitivity function is saved as a one
+dimensional spectrum with the name \fIsens\fR. Deletion of this image
+will also cause reprocessing to occur if the \fIupdate\fR flag is set.
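+
+For example, one way to start the flux calibration over from scratch
+is to remove both files before rerunning the task:
+.nf
+
+    cl> delete std verify-
+    cl> imdelete sens verify-
+
+.fi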
+.ih
+EXAMPLES
+1. The following example uses artificial data and may be executed
+at the terminal (with IRAF V2.10). This is similar to the sequence
+performed by the test procedure "demos doslit". The output is with
+the verbose package parameter set. Normally users use \fBeparam\fR
+rather than the long command line. All parameters not shown
+for \fBsparams\fR and \fBdoslit\fR are the default.
+
+.nf
+cl> demos mkdoslit
+Creating example longslit in image demoarc1 ...
+Creating example longslit in image demoobj1 ...
+Creating example longslit in image demostd1 ...
+Creating example longslit in image demoarc2 ...
+cl> doslit demoobj1 arcs=demoarc1,demoarc2 stand=demostd1 \
+>>> extcor=yes, fluxcal=yes resize=yes
+Searching aperture database ...
+Finding apertures ...
+Jan 17 15:52: FIND - 1 apertures found for demoobj1
+Resizing apertures ...
+Jan 17 15:52: APRESIZE - 1 apertures resized for demoobj1 (-3.50, 3.49)
+Edit apertures for demostd1? (yes):
+<Check aperture and background definitions ('b'). Exit with 'q'>
+Fit traced positions for demostd1 interactively? (yes):
+Tracing apertures ...
+Fit curve to aperture 1 of demostd1 interactively (yes):
+<Exit with 'q'>
+Searching aperture database ...
+Finding apertures ...
+Jan 17 15:54: FIND - 1 apertures found for demostd1
+Resizing apertures ...
+Jan 17 15:54: APRESIZE - 1 apertures resized for demostd1 (-3.35, 3.79)
+Edit apertures for demostd1? (yes):
+<Exit with 'q'>
+Fit traced positions for demostd1 interactively? (yes): n
+Tracing apertures ...
+Jan 17 15:55: TRACE - 1 apertures traced in demostd1.
+Jan 17 15:55: DATABASE - 1 apertures for demostd1 written to database
+Extract arc reference image demoarc1
+Searching aperture database ...
+Finding apertures ...
+Jan 17 15:55: FIND - 1 apertures found for demoarc1
+Jan 17 15:55: DATABASE - 1 apertures for demoarc1 written to database
+Extracting apertures ...
+Jan 17 15:55: EXTRACT - Aperture 1 from demoarc1 --> demoarc1.ms
+Determine dispersion solution for demoarc1
+<A dispersion function is automatically determined.>
+<Type 'f' to see the fit residuals>
+<Type 'd' to delete the two deviant lines>
+<Type 'f' to refit with the bad points deleted>
+<Type 'q' to quit fit and then 'q' to exit>
+demoarc1.ms.imh: w1 = 4204.18..., w2 = 7355.37..., dw = 6.16..., nw = 512
+ Change wavelength coordinate assignments? (yes|no|NO) (no): n
+Extract standard star spectrum demostd1
+Searching aperture database ...
+Jan 17 15:59: DATABASE - 1 apertures read for demostd1 from database
+Extracting apertures ...
+Jan 17 15:59: EXTRACT - Aperture 1 from demostd1 --> demostd1.ms
+Assign arc spectra for demostd1
+[demostd1] refspec1='demoarc1 0.403'
+[demostd1] refspec2='demoarc2 0.597'
+Extract and reidentify arc spectrum demoarc1
+Searching aperture database ...
+Jan 17 15:59: DATABASE - 1 apertures read for demostd1 from database
+Jan 17 15:59: DATABASE - 1 apertures for demoarc1 written to database
+Extracting apertures ...
+Jan 17 15:59: EXTRACT - Aperture 1 from demoarc1 --> demostd1demoarc1.ms
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 15:59:21 17-Jan-92
+ Reference image = demoarc1.ms, New image = demostd1..., Refit = yes
+Image Data Found Fit Pix Shift User Shift Z Shift RMS
+demo... 48/48 48/48 2.22E-4 0.00184 5.09E-7 0.225
+Fit dispersion function interactively? (no|yes|NO|YES) (yes):
+demoarc1.ms: w1 = 4211.81, w2 = 7353.58, dw = 6.148, nw = 512, log = no
+ Change wavelength coordinate assignments? (yes|no|NO): N
+demo... 48/48 48/48 2.22E-4 0.00184 5.09E-7 0.225
+Extract and reidentify arc spectrum demoarc2
+Searching aperture database ...
+Jan 17 16:01: DATABASE - 1 apertures read for demostd1 from database
+Jan 17 16:01: DATABASE - 1 apertures for demoarc2 written to database
+Extracting apertures ...
+Jan 17 16:01: EXTRACT - Aperture 1 from demoarc2 --> demostd1demoarc2.ms
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 16:01:54 17-Jan-92
+ Reference image = demoarc1.ms, New image = demostd1..., Refit = yes
+Image Data Found Fit Pix Shift User Shift Z Shift RMS
+demo... 48/48 48/48 0.00302 0.0191 3.82E-6 0.244
+Dispersion correct demostd1
+demostd1.ms: ap = 1, w1 = 4204.181, w2 = 7355.375, dw = 6.16..., nw = 512
+Compile standard star fluxes for demostd1
+Star name in calibration list: hz2 <in kpnoslit package>
+demostd1.ms.imh[1]: Example artificial long slit image
+Compute sensitivity function
+Fit aperture 1 interactively? (no|yes|NO|YES) (yes):
+<Exit with 'q'>
+Sensitivity function for all apertures --> sens
+Flux and/or extinction calibrate standard stars
+[demostd1.ms.imh][1]: Example artificial long slit image
+ Extinction correction applied
+ Flux calibration applied
+Extract object spectrum demoobj1
+Searching aperture database ...
+Jan 17 16:05: DATABASE - 1 apertures read for demoobj1 from database
+Extracting apertures ...
+Jan 17 16:05: EXTRACT - Aperture 1 from demoobj1 --> demoobj1.ms
+Assign arc spectra for demoobj1
+[demoobj1] refspec1='demoarc1 0.403'
+[demoobj1] refspec2='demoarc2 0.597'
+Extract and reidentify arc spectrum demoarc1
+Searching aperture database ...
+Jan 17 16:05: DATABASE - 1 apertures read for demoobj1 from database
+Jan 17 16:05: DATABASE - 1 apertures for demoarc1 written to database
+Extracting apertures ...
+Jan 17 16:05: EXTRACT - Aperture 1 from demoarc1 --> demoobj1demoarc1.ms
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 16:05:39 17-Jan-92
+ Reference image = demoarc1.ms, New image = demoobj1..., Refit = yes
+Image Data Found Fit Pix Shift User Shift Z Shift RMS
+demo... 48/48 48/48 -2.49E-4 -0.00109 -1.1E-7 0.227
+Extract and reidentify arc spectrum demoarc2
+Searching aperture database ...
+Jan 17 16:05: DATABASE - 1 apertures read for demoobj1 from database
+Jan 17 16:05: DATABASE - 1 apertures for demoarc2 written to database
+Extracting apertures ...
+Jan 17 16:05: EXTRACT - Aperture 1 from demoarc2 --> demoobj1demoarc2.ms
+
+REIDENTIFY: NOAO/IRAF V2.10BETA valdes@puppis Fri 16:05:42 17-Jan-92
+ Reference image = demoarc1.ms, New image = demoobj1..., Refit = yes
+Image Data Found Fit Pix Shift User Shift Z Shift RMS
+demo... 48/48 48/48 0.00266 0.0169 3.46E-6 0.24
+Dispersion correct demoobj1
+demoobj1.ms: ap = 1, w1 = 4204.181, w2 = 7355.375, dw = 6.16..., nw = 512
+Extinction correct demoobj1
+Flux calibrate demoobj1
+[demoobj1.ms.imh][1]: Example artificial long slit image
+ Extinction correction applied
+ Flux calibration applied
+.fi
+
+2. To redo the above:
+
+.nf
+cl> doslit demoobj1 arcs=demoarc1,demoarc2 stand=demostd1 \
+>>> extcor=yes, fluxcal=yes resize=yes redo+
+.fi
+.ih
+REVISIONS
+.ls DOSLIT V2.11
+The initial arc line identification is done with the automatic line
+identification algorithm.
+.le
+.ls DOSLIT V2.10.3
+The usual output WCS format is "equispec". The image format type to be
+processed is selected with the \fIimtype\fR environment parameter. The
+dispersion axis parameter is now a package parameter. Images will only
+be processed if the have the CCDPROC keyword. A \fIdatamax\fR parameter
+has been added to help improve cosmic ray rejection. The arc reference
+is no longer taken from the center of the image but using the first object
+aperture. A bug which alphabetized the arc list was fixed.
+.le
+.ih
+SEE ALSO
+apbackground, apedit, apfind, approfiles, aprecenter, apresize, apsum,
+aptrace, apvariance, calibrate, ccdred, center1d, ctioslit, dispcor,
+echelle.doecslit, icfit, autoidentify, identify, kpnocoude, kpnoslit,
+specred, observatory, onedspec.package, refspectra, reidentify, sensfunc,
+setairmass, setjd, splot, standard
+.endhelp
diff --git a/noao/imred/specred/doc/doslit.ms b/noao/imred/specred/doc/doslit.ms
new file mode 100644
index 00000000..03ad0ab5
--- /dev/null
+++ b/noao/imred/specred/doc/doslit.ms
@@ -0,0 +1,1401 @@
+.nr PS 9
+.nr VS 11
+.de V1
+.ft CW
+.nf
+..
+.de V2
+.fi
+.ft R
+..
+.de LS
+.br
+.in +2
+..
+.de LE
+.br
+.sp .5v
+.in -2
+..
+.ND February 1993
+.TL
+Guide to the Slit Spectra Reduction Task DOSLIT
+.AU
+Francisco Valdes
+.AI
+IRAF Group - Central Computer Services
+.K2
+.DY
+
+.AB
+\fBDoslit\fR extracts, sky subtracts, wavelength calibrates, and flux
+calibrates simple two dimensional slit spectra which have been processed to
+remove the detector characteristics; i.e. CCD images have been bias, dark
+count, and flat field corrected. It is primarily intended for
+spectrophotometry or radial velocities of stellar spectra with the spectra
+aligned with one of the image axes; i.e. the assumption is that extractions
+can be done by summing along image lines or columns. The alignment does
+not have to be precise but only close enough that the wavelength difference
+across the spectrum profiles is insignificant. The task is available
+in the \fBctioslit\fR, \fBkpnoslit\fR, \fBkpnocoude\fR, and \fBspecred\fR
+packages.
+.AE
+.NH
+Introduction
+.LP
+\fBDoslit\fR extracts, sky subtracts, wavelength calibrates, and flux
+calibrates simple two dimensional slit spectra which have been processed to
+remove the detector characteristics; i.e. CCD images have been bias, dark
+count, and flat field corrected. It is primarily intended for
+spectrophotometry or radial velocities of stellar spectra with the spectra
+aligned with one of the image axes; i.e. the assumption is that extractions
+can be done by summing along image lines or columns. The alignment does
+not have to be precise but only close enough that the wavelength difference
+across the spectrum profiles is insignificant. Extended objects requiring
+accurate geometric alignment over many pixels are reduced using the
+\fBlongslit\fR package.
+.LP
+The task is a command language script which collects and combines the
+functions and parameters of many general purpose tasks to provide a single,
+complete data reduction path and a degree of guidance, automation, and
+record keeping. In the following description and in the parameter section
+the various general tasks used are identified. Further
+information about those tasks and their parameters may be found in their
+documentation. \fBDoslit\fR also simplifies and consolidates parameters
+from those tasks and keeps track of previous processing to avoid
+duplications.
+.LP
+The general organization of the task is to do the interactive setup steps,
+such as the reference dispersion function
+determination, first using representative calibration data and then perform
+the majority of the reductions automatically, possibly as a background
+process, with reference to the setup data. In addition, the task
+determines which setup and processing operations have been completed in
+previous executions of the task and, contingent on the \f(CWredo\fR and
+\f(CWupdate\fR options, skips or repeats some or all of the steps.
+.LP
+The description is divided into a quick usage outline followed by details
+of the parameters and algorithms. The usage outline is provided as a
+checklist and a refresher for those familiar with this task and the
+component tasks. It presents only the default or recommended usage
+since there are many variations possible.
+.NH
+Usage Outline
+.LP
+.IP [1] 6
+The images are first processed with \fBccdproc\fR for overscan,
+zero level, dark count, and flat field corrections.
+.IP [2]
+Set the \fBdoslit\fR parameters with \fBeparam\fR. Specify the object
+images to be processed,
+one or more arc images, and one or more standard
+star images. If there are many object, arc, or standard star images
+you might prepare "@ files". Set the detector and data
+specific parameters. Select the processing options desired.
+Finally you might wish to review the \f(CWsparams\fR algorithm parameters
+though the defaults are probably adequate.
+.IP [3]
+Run the task. This may be repeated multiple times with different
+observations and the task will generally only do the setup steps
+once and only process new images. Queries presented during the
+execution for various interactive operations may be answered with
+"yes", "no", "YES", or "NO". The lower case responses apply just
+to that query while the upper case responses apply to all further
+such queries during the current execution and no further queries of that
+type will be made.
+.IP [4]
+Apertures are defined for all the standard and object images. This is only
+done if there are no previous aperture definitions for the image.
+The highest peak is found and centered and the default aperture limits
+are set. If the resize option is set the aperture is resized by finding
+the level which is 5% (the default) of the peak above local background.
+If not using the quicklook option you now have the option
+of entering the aperture editing loop to check the aperture position,
+size, and background fitting parameters, and possibly add additional
+apertures. This step is highly recommended.
+It is important to check the background regions with the 'b'
+key. Use 'q' to exit the background mode and 'q' again to exit
+the review mode.
+.IP
+The spectrum positions at a series of points along the dispersion are
+measured and a function is fit to these positions. If not using the
+quicklook option the traced positions may be examined interactively and the
+fitting parameters adjusted. To exit the interactive fitting type 'q'.
+.IP [5]
+If dispersion correction is selected the first arc in the arc list is
+extracted. The dispersion function is defined using the task
+\fBautoidentify\fR. The \fIcrval\fR and \fIcdelt\fR parameters are used in
+the automatic identification. Whether or not the automatic identification
+is successful you will be shown the result of the arc line identification.
+If the automatic identification is not successful identify a few arc lines
+with 'm' and use the 'l' line list identification command to
+automatically add additional lines and fit the dispersion function. Check
+the quality of the dispersion function fit with 'f'. When satisfied exit
+with 'q'.
+.IP [6]
+If the flux calibration option is selected the standard star spectra are
+processed (if not done previously). The images are
+extracted and wavelength calibrated. The appropriate arc
+calibration spectra are extracted and the dispersion function refit
+using the arc reference spectrum as a starting point. The standard star
+fluxes through the calibration bandpasses are compiled. You are queried
+for the name of the standard star calibration data file.
+.IP
+After all the standard stars are processed a sensitivity function is
+determined using the interactive task \fBsensfunc\fR. Finally, the
+standard star spectra are extinction corrected and flux calibrated
+using the derived sensitivity function.
+.IP [7]
+The object spectra are now automatically
+extracted, wavelength calibrated, and flux calibrated.
+.IP [8]
+The option to examine the final spectra with \fBsplot\fR may be given.
+To exit type 'q'. In quicklook mode the spectra are plotted
+noninteractively with \fBbplot\fR.
+.IP [9]
+The final spectra will have the same name as the original 2D images
+with a ".ms" extension added.
+.NH
+Spectra and Data Files
+.LP
+The basic input consists of two dimensional slit object, standard star, and
+arc calibration spectra stored as IRAF images.
+The type of image format is defined by the
+environment parameter \fIimtype\fR. Only images with that extension will
+be processed and created.
+The raw CCD images must be
+processed to remove overscan, bias, dark count, and flat field effects.
+This is generally done using the \fBccdred\fR package. Lines of constant
+wavelength should be closely aligned with one of the image axes though a
+small amount of misalignment only causes a small loss of resolution. For
+large misalignments one may use the \fBrotate\fR task. More complex
+geometric problems and observations of extended objects should be handled
+by the \fBlongslit\fR package.
+.LP
+The arc
+spectra are comparison arc lamp observations (they must all be of the same
+type). The assignment of arc calibration exposures to object exposures is
+generally done by selecting the nearest in time and interpolating.
+However, the optional \fIarc assignment table\fR may be used to explicitly
+assign arc images to specific objects. The format of this file is
+described in task \fBrefspectra\fR.
+.LP
+The final reduced spectra are recorded in one, two or three dimensional IRAF
+images. The images have the same name as the original images with an added
+".ms" extension. Each line in the reduced image is a one dimensional
+spectrum with associated aperture, wavelength, and identification
+information. With a single aperture the image will be one dimensional
+and with multiple apertures the image will be two dimensional.
+When the \f(CWextras\fR parameter is set the images will be three
+dimensional (regardless of the number of apertures) and the lines in the
+third dimension contain additional information (see
+\fBapsum\fR for further details). These spectral formats are accepted by the
+one dimensional spectroscopy tasks such as the plotting tasks \fBsplot\fR
+and \fBspecplot\fR.
+.NH
+Package Parameters
+.LP
+The package parameters, shown in Figure 1 for the \fBspecred\fR package,
+set parameters which change infrequently and define the standard I/O functions.
+.KS
+.V1
+
+.ce
+Figure 1: Package Parameter Set for DOSLIT Packages
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = imred
+ TASK = specred
+
+(extinct= onedstds$kpnoextinct.dat) Extinction file
+(caldir = onedstds$spec16redcal/) Standard star calibration directory
+(observa= observatory) Observatory of data
+(interp = poly5) Interpolation type
+(dispaxi= 2) Image axis for 2D images
+(nsum = 1) Number of lines/columns to sum for 2D images
+
+(databas= database) Database
+(verbose= no) Verbose output?
+(logfile= logfile) Log file
+(plotfil= ) Plot file
+
+(records= ) Record number extensions
+(version= SPECRED V3: April 1992)
+
+.KE
+.V2
+The extinction file
+is used for making extinction corrections and the standard star
+calibration directory is used for determining flux calibrations from
+standard star observations. The calibration directories contain data files
+with standard star fluxes and band passes. The available extinction
+files and flux calibration directories may be listed using the command:
+.V1
+
+ cl> help onedstds
+
+.V2
+The extinction correction requires computation of an air mass using the
+task \fBsetairmass\fR. The air mass computation needs information
+about the observation and, in particular, the latitude of the observatory.
+This is determined using the OBSERVAT image header keyword. If this
+keyword is not present the observatory parameter is used. See the
+task \fBobservatory\fR for more on defining the observatory parameters.
+.LP
+The spectrum interpolation type is used whenever a spectrum needs to be
+resampled for linearization or performing operations between spectra
+with different sampling. The "sinc" interpolation may be of interest
+as an alternative but see the cautions given in \fBonedspec.package\fR.
+.LP
+The general direction in which the spectra run is specified by the
+dispersion axis parameter. Recall that ideally it is the direction
+of constant wavelength which should be aligned with an image axis and
+the dispersion direction may not be exactly aligned because of atmospheric
+dispersion.
+.LP
+The verbose parameter selects whether to print everything which goes
+into the log file on the terminal. It is useful for monitoring
+what the \fBdoslit\fR task does. The log and plot files are useful for
+keeping a record of the processing. A log file is highly recommended.
+A plot file provides a record of the apertures, traces, and extracted
+spectra but can become quite large.
+The plotfile is most conveniently viewed and printed with \fBgkimosaic\fR.
+.NH
+Processing Parameters
+.LP
+The \fBdoslit\fR parameters are shown in Figure 2.
+.KS
+.V1
+
+.ce
+Figure 2: Parameter Set for DOSLIT
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = specred
+ TASK = doslit
+
+objects = List of object spectra
+(arcs = ) List of arc spectra
+(arctabl= ) Arc assignment table (optional)
+(standar= ) List of standard star spectra
+
+.KE
+.V1
+(readnoi= rdnoise) Read out noise sigma (photons)
+(gain = gain) Photon gain (photons/data number)
+(datamax= INDEF) Max data value / cosmic ray threshold
+(width = 5.) Width of profiles (pixels)
+(crval = INDEF) Approximate wavelength
+(cdelt = INDEF) Approximate dispersion
+
+(dispcor= yes) Dispersion correct spectra?
+(extcor = no) Extinction correct spectra?
+(fluxcal= no) Flux calibrate spectra?
+(resize = no) Automatically resize apertures?
+(clean = no) Detect and replace bad pixels?
+(splot = no) Plot the final spectrum?
+(redo = no) Redo operations if previously done?
+(update = no) Update spectra if cal data changes?
+(quicklo= no) Minimally interactive quick-look?
+(batch = no) Extract objects in batch?
+(listonl= no) List steps but don't process?
+
+(sparams= ) Algorithm parameters
+
+.V2
+The input images are specified by image lists. The lists may be
+explicit comma-separated image names, @ files, or image
+templates using pattern matching against file names in the directory.
+To allow wildcard image lists to be used safely and conveniently the
+image lists are checked to remove extracted images (the .ms images)
+and to automatically identify object and arc spectra. Object and arc
+images are identified by the keyword IMAGETYP with values of "object",
+"OBJECT", "comp", or "COMPARISON" (the current practice at NOAO).
+If arc images are found in the object list they are transferred to the
+arc list while if object images are found in the arc list they are ignored.
+All other image types, such as biases, darks, or flat fields, are
+ignored. This behavior allows simply specifying all images with a wildcard
+in the object list, with the arc spectra selected automatically, or using a
+wildcard in the arc list to find the arc spectra automatically.
+If the data lack the identifying information it is up to the user
+to explicitly set the proper lists.
+.LP
+The arc assignment table is a file which may be used to assign
+specific arc spectra to specific object and standard star spectra.
+For more on this option see \fBrefspectra\fR.
+.LP
+The next set of parameters describe the noise characteristics and
+spectrum characteristics. The read out noise and gain are used when
+"cleaning" cosmic rays and when using variance or optimal weighting. These
+parameters must be fairly accurate. Note that these are the effective
+parameters and must be adjusted if previous processing has modified the
+pixel values, such as with an unnormalized flat field.
+The variance
+weighting and cosmic-ray cleaning are sensitive to extremely strong
+cosmic-rays; ones which are hundreds of times brighter than the
+spectrum. The \fIdatamax\fR is used to set an upper limit for any
+real data. Any pixels above this value will be flagged as cosmic-rays
+and will not affect the extractions.
+.LP
+The profile width should be approximately the full width
+at the profile base. This parameter is used for centering and tracing
+of the spectrum profiles.
+.LP
+The approximate central wavelength and dispersion are used for the
+automatic identification of the arc reference. They may be specified
+as image header keywords or values. The INDEF values search the
+entire range of the coordinate reference file but the automatic
+line identification algorithm works much better and faster if
+approximate values are given.
+.LP
+The next set of parameters select the processing steps and options. The
+various calibration steps may be done simultaneously, that is at the same
+time as the basic extractions, or in separate executions of the task.
+Typically, all the desired operations are done at the same time.
+Dispersion correction requires at least one arc spectrum and flux
+calibration requires dispersion correction and at least one standard star
+observation.
+.LP
+The \f(CWresize\fR option resets the edges of the extraction aperture based
+on the profile for each object and standard star image. The default
+resizing is to the 5% point relative to the peak measured above the
+background. This allows following changes in the seeing. However, one
+should consider the consequences of this if attempting to flux calibrate
+the observations. Except in quicklook mode, the apertures for each object
+and standard star observation may be reviewed graphically and
+adjustments made to the aperture width and background regions.
+.LP
+The \f(CWclean\fR option invokes a profile
+fitting and deviant point rejection algorithm as well as a variance weighting
+of points in the aperture. See the next section for more about
+requirements to use this option.
+.LP
+Generally once a spectrum has been processed it will not be reprocessed if
+specified as an input spectrum. However, changes to the underlying
+calibration data can cause such spectra to be reprocessed if the
+\f(CWupdate\fR flag is set. The changes which will cause an update are a
+new arc reference image and new standard stars. If all input spectra are to be
+processed regardless of previous processing the \f(CWredo\fR flag may be
+used. Note that reprocessing clobbers the previously processed output
+spectra.
+.LP
+The final step is to plot the spectra if the \f(CWsplot\fR option is
+selected. In non-quicklook mode there is a query which may be
+answered either in lower or upper case. The plotting uses the interactive
+task \fBsplot\fR. In quicklook mode the plot appears noninteractively
+using the task \fBbplot\fR.
+.LP
+The \f(CWquicklook\fR option provides a simpler, less interactive, mode.
+In quicklook mode a single aperture is defined using default parameters
+without interactive aperture review or trace fitting and
+the \f(CWsplot\fR option selects a noninteractive plot to be
+shown at the end of processing of each object and standard star
+spectrum. While the algorithms used in quicklook mode are nearly the same
+as in non-quicklook mode and the final results may be the same it is
+recommended that the greater degree of monitoring and review in
+non-quicklook mode be used for careful final reductions.
+.LP
+The batch processing option allows object spectra to be processed as a
+background or batch job. This will occur only if the interactive
+\f(CWsplot\fR option is not active; either not set, turned off during
+processing with "NO", or in quicklook mode. In batch processing the
+terminal output is suppressed.
+.LP
+The \f(CWlistonly\fR option prints a summary of the processing steps
+which will be performed on the input spectra without actually doing
+anything. This is useful for verifying which spectra will be affected
+if the input list contains previously processed spectra. The listing
+does not include any arc spectra which may be extracted to dispersion
+calibrate an object spectrum.
+.LP
+The last parameter (excluding the task mode parameter) points to
+another parameter set for the algorithm parameters. The default
+parameter set is called \f(CWsparams\fR. The algorithm parameters are
+discussed further in the next section.
+.NH
+Algorithms and Algorithm Parameters
+.LP
+This section summarizes the various algorithms used by the
+\fBdoslit\fR task and the parameters which control and modify the
+algorithms. The algorithm parameters available to you are
+collected in the parameter set \fBsparams\fR. These parameters are
+taken from the various general purpose tasks used by the \fBdoslit\fR
+processing task. Additional information about these parameters and
+algorithms may be found in the help for the actual
+task executed. These tasks are identified below. The aim of this
+parameter set organization is to collect all the algorithm parameters
+in one place separate from the processing parameters and include only
+those which are relevant for slit data. The parameter values
+can be changed from the defaults by using the parameter editor,
+.V1
+
+cl> epar sparams
+
+.V2
+or simply typing \f(CWsparams\fR.
+The parameter editor can also be entered when editing the \fBdoslit\fR
+parameters by typing \f(CW:e\fR when positioned at the \f(CWsparams\fR
+parameter. Figure 3 shows the parameter set.
+.KS
+.V1
+
+.ce
+Figure 3: Algorithm Parameter Set
+
+ I R A F
+ Image Reduction and Analysis Facility
+PACKAGE = specred
+ TASK = sparams
+
+(line = INDEF) Default dispersion line
+(nsum = 10) Number of dispersion lines to sum
+(extras = no) Extract sky, sigma, etc.?
+
+ -- DEFAULT APERTURE LIMITS --
+(lower = -3.) Lower aperture limit relative to center
+(upper = 3.) Upper aperture limit relative to center
+
+ -- AUTOMATIC APERTURE RESIZING PARAMETERS --
+(ylevel = 0.05) Fraction of peak or intensity for resizing
+
+.KE
+.KS
+.V1
+ -- TRACE PARAMETERS --
+(t_step = 10) Tracing step
+(t_funct= spline3) Trace fitting function
+(t_order= 1) Trace fitting function order
+(t_niter= 1) Trace rejection iterations
+(t_low = 3.) Trace lower rejection sigma
+(t_high = 3.) Trace upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- APERTURE EXTRACTION PARAMETERS --
+(weights= none) Extraction weights (none|variance)
+(pfit = fit1d) Profile fitting algorithm (fit1d|fit2d)
+(lsigma = 3.) Lower rejection threshold
+(usigma = 3.) Upper rejection threshold
+
+.KE
+.KS
+.V1
+ -- BACKGROUND SUBTRACTION PARAMETERS --
+(backgro= fit) Background to subtract
+(b_funct= legendre) Background function
+(b_order= 1) Background function order
+(b_sampl= -10:-6,6:10) Background sample regions
+(b_naver= -100) Background average or median
+(b_niter= 1) Background rejection iterations
+(b_low = 3.) Background lower rejection sigma
+(b_high = 3.) Background upper rejection sigma
+
+.KE
+.KS
+.V1
+ -- ARC DISPERSION FUNCTION PARAMETERS --
+(coordli=linelists$idhenear.dat) Line list
+(match = -3.) Line list matching limit in Angstroms
+(fwidth = 4.) Arc line widths in pixels
+(cradius= 10.) Centering radius in pixels
+(i_funct= spline3) Coordinate function
+(i_order= 1) Order of dispersion function
+(i_niter= 0) Rejection iterations
+(i_low = 3.) Lower rejection sigma
+(i_high = 3.) Upper rejection sigma
+(refit = yes) Refit coordinate function when reidentifying?
+(addfeat= no) Add features when reidentifying?
+
+.KE
+.KS
+.V1
+ -- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+(select = interp) Selection method for reference spectra
+(sort = jd) Sort key
+(group = ljd) Group key
+(time = no) Is sort key a time?
+(timewra= 17.) Time wrap point for time sorting
+
+.KE
+.KS
+.V1
+ -- DISPERSION CORRECTION PARAMETERS --
+(lineari= yes) Linearize (interpolate) spectra?
+(log = no) Logarithmic wavelength scale?
+(flux = yes) Conserve flux?
+
+.KE
+.KS
+.V1
+ -- SENSITIVITY CALIBRATION PARAMETERS --
+(s_funct= spline3) Fitting function
+(s_order= 1) Order of sensitivity function
+(fnu = no) Create spectra having units of FNU?
+
+.KE
+.V2
+.NH 2
+Aperture Definitions
+.LP
+The first operation is to define the extraction apertures, which include the
+aperture width, background regions, and position dependence with
+wavelength, for the input slit spectra and, if flux calibration is
+selected, the standard star spectra. This is done only for spectra which
+do not have previously defined apertures unless the \f(CWredo\fR option is
+set to force all definitions to be redone. Thus, apertures may be
+defined separately using the \fBapextract\fR tasks. This is particularly
+useful if one needs to use reference images to define apertures for very
+weak spectra which are not well centered or traced by themselves.
+.LP
+Initially a single spectrum is found and a default aperture defined
+automatically. If the \f(CWresize\fR parameter is set the aperture width is
+adjusted to a specified point on the spectrum profile (see
+\fBapresize\fR). If not in "quicklook" mode (set by the \f(CWquicklook\fR
+parameter) a query is printed to select whether to inspect and modify the
+aperture and background aperture definitions using the commands described
+for \fBapedit\fR. This option allows adding
+apertures for other objects on the slit and adjusting
+background regions to avoid contaminating objects. The query may be
+answered in lower case for a single spectrum or in upper case to
+permanently set the response for the duration of the task execution. This
+convention for query responses is used throughout the task. It is
+recommended that quicklook only be used for initial quick extractions and
+calibration and that for final reductions one at least review the aperture
+definitions and traces.
+.LP
+The initial spectrum finding and aperture definitions are done at a specified
+line or column. The positions of the spectrum at a set of other lines or
+columns are then determined and a smooth function is fit to define the aperture
+centers at all points in the image. In non-quicklook mode the user has the
+option to review and adjust the function fitting parameters and delete bad
+position determinations. As with the initial aperture review there is a
+query which may be answered either in lower or upper case.
+.LP
+The above steps are all performed using tasks from the \fBapextract\fR
+package and parameters from the \fBsparams\fR parameter set. As a quick
+summary, the dispersion direction of the spectra is determined from the
+package \fBdispaxis\fR parameter if not defined in the image header. The default
+line or column for finding the object position on the slit and the number
+of image lines or columns to sum are set by the \f(CWline\fR and \f(CWnsum\fR
+parameters. A line of INDEF (the default) selects the middle of the image.
+The automatic finding algorithm is described for the task
+\fBapfind\fR and basically finds the strongest peak. The default
+aperture size, background parameters, and resizing are described in
+the tasks \fBapdefault\fR and \fBapresize\fR and the
+parameters used are also described there.
+The tracing is done as described in \fBaptrace\fR and consists of
+stepping along the image using the specified \f(CWt_step\fR parameter. The
+function fitting uses the \fBicfit\fR commands with the other parameters
+from the tracing section.
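+.LP
+These parameters can also be adjusted directly from the CL before running
+the task; the values below are purely illustrative:
+.V1
+
+cl> sparams.nsum = 20        # sum 20 lines at each finding/tracing step
+cl> sparams.t_step = 20      # step 20 pixels between trace points
+cl> sparams.t_order = 3      # use three spline pieces in the trace fit
+
+.V2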
+.NH 2
+Extraction
+.LP
+The actual extraction of the spectra is done by summing across the
+fixed width apertures at each point along the dispersion.
+The default is to simply sum the pixels using
+partial pixels at the ends. There is an option to weight the
+sum based on a Poisson variance model using the \f(CWreadnoise\fR and
+\f(CWgain\fR detector parameters. Note that if the \f(CWclean\fR
+option is selected the variance weighted extraction is used regardless
+of the \f(CWweights\fR parameter. The sigma thresholds for cleaning
+are also set in the \fBsparams\fR parameter set.
+.LP
+The cleaning and variance weighting options require knowing the effective
+(i.e. accounting for any image combining) read out noise and gain. These
+numbers need to be adjusted if the image has been processed such that the
+intensity scale has a different origin (such as applying a separate
+background subtraction operation) or scaling (such as caused by
+unnormalized flat fielding). These options also require using background
+subtraction if the profile does not go to zero. For optimal extraction and
+cleaning to work it is recommended that any flat fielding be done using
+normalized flat fields (as is done in \fBccdproc\fR) and using background
+subtraction if there is any appreciable sky. For further discussion of
+cleaning and variance weighted extraction see \fBapvariance\fR and
+\fBapprofiles\fR as well as \fBapsum\fR.
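+.LP
+For example, cleaning with the noise parameters taken from the image headers
+might be requested as follows (the image lists are hypothetical and the
+keyword names must match those actually present in the headers):
+.V1
+
+cl> doslit obj* arcs=arc* clean+ readnoise=rdnoise gain=gain
+
+.V2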
+.LP
+Background sky subtraction is done during the extraction based on
+background regions and parameters defined by the default parameters or
+changed during the interactive setting of the apertures. The background
+subtraction options are to do no background subtraction, subtract the
+average, median, or minimum of the pixels in the background regions, or to
+fit a function and subtract the function from under the extracted object
+pixels. The background regions are specified in pixels from
+the aperture center and follow changes in center of the spectrum along the
+dispersion. The syntax is colon separated ranges with multiple ranges
+separated by a comma or space. The background fitting uses the \fBicfit\fR
+routines which include medians, iterative rejection of deviant points, and
+a choice of function types and orders. Note that it is important to use a
+method which rejects cosmic rays such as using either medians over all the
+background regions (\f(CWbackground\fR = "median") or median samples during
+fitting (\f(CWb_naverage\fR < -1). The background subtraction algorithm and
+options are described in greater detail in \fBapsum\fR and
+\fBapbackground\fR.
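+.LP
+As a sketch, a cosmic ray resistant background could be selected with
+settings such as the following (the values are illustrative only):
+.V1
+
+cl> sparams.background = "median"
+cl> sparams.b_sample = "-15:-8,8:15"
+
+.V2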
+.NH 2
+Dispersion Correction
+.LP
+If dispersion correction is not selected, \f(CWdispcor\fR=no, then the object
+spectra are simply extracted. The extracted spectra may be plotted
+by setting the \f(CWsplot\fR option. This produces a query and uses
+the interactive \fBsplot\fR task in non-quicklook mode and uses the
+noninteractive \fBbplot\fR task in quicklook mode.
+.LP
+Dispersion corrections are applied to the extracted spectra if the
+\f(CWdispcor\fR processing parameter is set. There are three basic steps
+involved; determining the dispersion functions relating pixel position to
+wavelength, assigning the appropriate dispersion function to a particular
+observation, and either storing the nonlinear dispersion function in the
+image headers or resampling the spectra to evenly spaced pixels in
+wavelength.
+.LP
+The first arc spectrum in the arc list is used to define the reference
+dispersion solution. It is extracted at the middle of the image with no
+tracing. Note that extractions of arc spectra are not background subtracted.
+The task \fBautoidentify\fR attempts to define the dispersion function
+automatically using the \fIcrval\fR and \fIcdelt\fR parameters. Whether or
+not it is successful the user is presented with the interactive
+identification graph. The automatic identifications can be reviewed and a
+new solution or corrections to the automatic solution may be performed.
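+.LP
+For example, an approximate central wavelength and dispersion might be
+supplied on the command line (the numbers are purely illustrative and depend
+on the instrument setup):
+.V1
+
+cl> doslit obj* arcs=arc* crval=5500. cdelt=2.7
+
+.V2
+Image header keyword names may also be given for these two parameters.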
+.LP
+The arc dispersion function parameters are for \fBautoidentify\fR and its
+related partner \fBreidentify\fR. The parameters define a line list for
+use in automatically assigning wavelengths to arc lines, a centering width
+(which should match the line widths at the base of the lines), the
+dispersion function type and orders, parameters to exclude bad lines from
+function fits, and defining whether to refit the dispersion function as
+opposed to simply determining a zero point shift. The defaults should
+generally be adequate and the dispersion function fitting parameters may be
+altered interactively. One should consult the help for the two tasks for
+additional details of these parameters and the interactive operation of
+\fBautoidentify\fR.
+.LP
+The extracted reference arc spectrum is then dispersion corrected.
+If the spectra are to be linearized, as set by the \f(CWlinearize\fR
+parameter, the default linear wavelength parameters are printed and
+you have the option to adjust them. The dispersion system defined at
+this point will be applied automatically to all other spectra as they
+are dispersion corrected.
+.LP
+Once the reference dispersion function is defined other arc spectra are
+extracted as required by the object spectra. The assignment of arcs is
+done either explicitly with an arc assignment table (parameter
+\f(CWarctable\fR) or based on a header parameter such as a time.
+These assignments are made by the task
+\fBrefspectra\fR. When two arcs are assigned to an object spectrum an
+interpolation is done between the two dispersion functions. This makes an
+approximate correction for steady drifts in the dispersion.
+.LP
+The tasks \fBsetjd\fR and \fBsetairmass\fR are automatically run on all
+spectra. These tasks compute and add the header parameters for the Julian date
+(JD), the local Julian day number (LJD), the universal time (UTMIDDLE), and
+the air mass at the middle of the exposure. The default arc assignment is
+to use the Julian date grouped by the local Julian day number. The
+grouping allows multiple nights of data to be correctly assigned at the
+same time.
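+.LP
+These keywords can be examined with the usual header tools, for instance
+(the image list is hypothetical):
+.V1
+
+cl> hselect obj0* $I,jd,ljd,utmiddle,airmass yes
+
+.V2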
+.LP
+The assigned arc spectra are then extracted using the object aperture
+definitions (but without background subtraction or cleaning) so that the
+same pixels on the detector are used. The extracted arc spectra are then
+reidentified automatically against the reference arc spectrum. Some
+statistics of the reidentification are printed (if not in batch mode) and
+the user has the option of examining the lines and fits interactively if
+not in quicklook mode. The task which does the reidentification is called
+\fBreidentify\fR.
+.LP
+The last step of dispersion correction is setting the dispersion
+of the object image from the arc images. There are two choices here.
+If the \f(CWlinearize\fR parameter is not set the nonlinear dispersion
+function is stored in the image header. Other IRAF tasks interpret
+this information when dispersion coordinates are needed for plotting
+or analysis. This has the advantage of not requiring the spectra
+to be interpolated and the disadvantage that the dispersion
+information is only understood by IRAF tasks and cannot be readily
+exported to other analysis software.
+.LP
+If the \f(CWlinearize\fR parameter is set then the spectra are resampled to a
+linear dispersion relation either in wavelength or the log of the
+wavelength using the dispersion coordinate system defined previously
+for the arc reference spectrum.
+.LP
+The linearization algorithm parameters allow selecting the interpolation
+function type, whether to conserve flux per pixel by integrating across the
+extent of the final pixel, and whether to linearize to equal linear or
+logarithmic intervals. The latter may be appropriate for radial velocity
+studies. The default is to use a fifth order polynomial for interpolation,
+to conserve flux, and to not use logarithmic wavelength bins. These
+parameters are described fully in the help for the task \fBdispcor\fR which
+performs the correction.
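+.LP
+For example, logarithmic binning for radial velocity work and a different
+interpolator could be selected with the following illustrative settings:
+.V1
+
+cl> sparams.log = yes
+cl> specred.interp = "spline3"
+
+.V2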
+.NH 2
+Flux Calibration
+.LP
+Flux calibration consists of an extinction correction and an instrumental
+sensitivity calibration. The extinction correction only depends on the
+extinction function defined by the package parameter \f(CWextinct\fR and
+determination of the airmass from the header parameters (the air mass is
+computed by \fBsetairmass\fR as mentioned earlier). The sensitivity
+calibration depends on a sensitivity calibration spectrum determined from
+standard star observations for which there are tabulated absolute fluxes.
+The task that applies both the extinction correction and sensitivity
+calibration to each extracted object spectrum is \fBcalibrate\fR. Consult
+the manual page for this task for more information.
+.LP
+Generation of the sensitivity calibration spectrum is done before
+processing any object spectra since it has two interactive steps and
+requires all the standard star observations. The first step is tabulating
+the observed fluxes over the same bandpasses as the calibrated absolute
+fluxes. The standard star tabulations are done after each standard star is
+extracted and dispersion corrected. You are asked for the name of the
+standard star as tabulated in the absolute flux data files in the directory
+\f(CWcaldir\fR defined by the package parameters.
+The tabulation of the standard star
+observations over the standard bandpasses is done by the task
+\fBstandard\fR. The tabulated data is stored in the file \f(CWstd\fR. Note
+that if the \f(CWredo\fR flag is not set any new standard stars specified in
+subsequent executions of \fBdoslit\fR are added to the previous data in
+the data file, otherwise the file is first deleted. Modification of the
+tabulated standard star data, such as by adding new stars, will cause any
+spectra in the input list which have been previously calibrated to be
+reprocessed if the \f(CWupdate\fR flag is set.
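+.LP
+The flux tables and extinction file are selected with package parameters,
+for example (the particular directory and file are assumptions that depend
+on the standard stars observed and on the site):
+.V1
+
+cl> specred.caldir = "onedstds$spec50cal/"
+cl> specred.extinction = "onedstds$kpnoextinct.dat"
+
+.V2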
+.LP
+After the standard star calibration bandpass fluxes are tabulated the
+information from all the standard stars is combined to produce a
+sensitivity function for use by \fBcalibrate\fR. The sensitivity function
+determination is interactive and uses the task \fBsensfunc\fR. This task
+allows fitting a smooth sensitivity function to the ratio of the observed
+to calibrated fluxes versus wavelength. The types of manipulations one
+needs to do include deleting bad observations, possibly removing variable
+extinction (for poor data), and possibly deriving a revised extinction
+function. This is a complex operation and one should consult the manual
+page for \fBsensfunc\fR. The sensitivity function is saved as a one
+dimensional spectrum with the name \f(CWsens\fR. Deletion of this image
+will also cause reprocessing to occur if the \f(CWupdate\fR flag is set.
+.NH
+References
+.NH 2
+IRAF Introductory References
+.LP
+Work is underway on a new introductory guide to IRAF. Currently, the
+work below is the primary introduction.
+.IP
+P. Shames and D. Tody, \fIA User's Introduction to the IRAF Command
+Language\fR, Central Computer Services, NOAO, 1986.
+.NH 2
+CCD Reductions
+.IP
+F. Valdes, \fIThe IRAF CCD Reduction Package -- CCDRED\fR, Central
+Computer Services, NOAO, 1987.
+.IP
+F. Valdes, \fIUser's Guide to the CCDRED Package\fR, Central
+Computer Services, NOAO, 1988. Also on-line as \f(CWhelp ccdred.guide\fR.
+.IP
+P. Massey, \fIA User's Guide to CCD Reductions with IRAF\fR, Central
+Computer Services, NOAO, 1989.
+.NH 2
+Aperture Extraction Package
+.IP
+F. Valdes, \fIThe IRAF APEXTRACT Package\fR, Central Computer Services,
+NOAO, 1987 (out-of-date).
+.NH 2
+DOSLIT Task
+.IP
+P. Massey, \fIUser's Guide to Slit Spectra Reductions\fR,
+Central Computer Services, NOAO, 1992.
+.NH 2
+Task Help References
+.LP
+Each task in the \fBspecred\fR package and each task used by \fBdoslit\fR has a
+help page describing the parameters and the task in some detail. To get
+on-line help type
+.V1
+
+cl> help \fItaskname\fR
+
+.V2
+The output of this command can be piped to \fBlprint\fR to make a printed
+copy.
+
+.V1
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+      apmask - Create an IRAF pixel list mask of the apertures
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+autoidentify - Automatically identify arc lines and a dispersion function
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Extinction and flux calibrate spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction correction
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ fitprofs - Fit gaussian profiles
+ identify - Identify features in spectrum for dispersion solution
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically reidentify features in spectra
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Compute instrumental sensitivity from standard stars
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sfit - Fit spectra and output fit, ratio, or difference
+ skysub - Sky subtract extracted multispec spectra
+ slist - List spectrum header parameters
+ specplot - Scale, stack, and plot multiple spectra
+ splot - Preliminary spectral plot/analysis
+ standard - Tabulate standard star counts and fluxes
+
+ doslit - Process slit spectra
+ demos - Demonstrations and tests
+
+ Additional help topics
+
+ onedspec.package - Package parameters and general description of package
+ apextract.package - Package parameters and general description of package
+ approfiles - Profile determination algorithms
+ apvariance - Extractions, variance weighting, cleaning, and noise model
+ center1d - One dimensional centering algorithm
+ icfit - Interactive one dimensional curve fitting
+.V2
+.SH
+Appendix A: DOSLIT Parameters
+.LP
+.nr PS 8
+.nr VS 10
+objects
+.LS
+List of object images to be processed. Previously processed spectra are
+ignored unless the \f(CWredo\fR flag is set or the \f(CWupdate\fR flag is set
+and dependent calibration data has changed. If the images contain the
+keyword IMAGETYP then only those with a value of "object" or "OBJECT"
+are used and those with a value of "comp" or "COMPARISON" are added
+to the list of arcs. Extracted spectra are ignored.
+.LE
+arcs = "" (at least one if dispersion correcting)
+.LS
+List of arc calibration spectra. These spectra are used to define
+the dispersion functions. The first spectrum is used to mark lines
+and set the dispersion function interactively and dispersion functions
+for all other arc spectra are derived from it. If the images contain
+the keyword IMAGETYP then only those with a value of "comp" or
+"COMPARISON" are used. All others are ignored as are extracted spectra.
+.LE
+arctable = "" (optional) (refspectra)
+.LS
+Table defining which arc spectra are to be assigned to which object
+spectra (see \fBrefspectra\fR). If not specified an assignment based
+on a header parameter, \f(CWsparams.sort\fR, such as the Julian date
+is made.
+.LE
+standards = "" (at least one if flux calibrating)
+.LS
+List of standard star spectra. The standard stars must have entries in
+the calibration database (package parameter \f(CWcaldir\fR).
+.LE
+
+readnoise = "rdnoise", gain = "gain" (apsum)
+.LS
+Read out noise in photons and detector gain in photons per data value.
+This parameter defines the minimum noise sigma and the conversion between
+photon Poisson statistics and the data number statistics. Image header
+keywords (case insensitive) may be specified to obtain the values from the
+image header.
+.LE
+datamax = INDEF (apsum.saturation)
+.LS
+The maximum data value which is not a cosmic ray.
+When cleaning cosmic rays and/or using variance weighted extraction
+very strong cosmic rays (pixel values much larger than the data) can
+cause these operations to behave poorly. If a value other than INDEF
+is specified then all data pixels in excess of this value will be
+excluded and the algorithms will yield improved results.
+This applies only to the object spectra and not the standard star or
+arc spectra. For more
+on this see the discussion of the saturation parameter in the
+\fBapextract\fR package.
+.LE
+width = 5. (apedit)
+.LS
+Approximate full width of the spectrum profiles. This parameter is used
+to define a width and error radius for the profile centering algorithm.
+.LE
+crval = INDEF, cdelt = INDEF (autoidentify)
+.LS
+These parameters specify an approximate central wavelength and dispersion.
+They may be specified as numerical values, INDEF, or image header keyword
+names whose values are to be used.
+If both these parameters are INDEF then the automatic identification will
+not be done.
+.LE
+
+dispcor = yes
+.LS
+Dispersion correct spectra? This may involve either defining a nonlinear
+dispersion coordinate system in the image header or resampling the
+spectra to uniform linear wavelength coordinates as selected by
+the parameter \f(CWsparams.linearize\fR.
+.LE
+extcor = no
+.LS
+Extinction correct the spectra?
+.LE
+fluxcal = no
+.LS
+Flux calibrate the spectra using standard star observations?
+.LE
+resize = no (apresize)
+.LS
+Resize the default aperture for each object based on the spectrum profile?
+.LE
+clean = no (apsum)
+.LS
+Detect and correct for bad pixels during extraction? This is the same
+as the clean option in the \fBapextract\fR package. If yes this also
+implies variance weighted extraction. In addition the datamax parameter
+can be useful.
+.LE
+splot = no
+.LS
+Plot the final spectra with the task \fBsplot\fR? In quicklook mode
+this is automatic and in non-quicklook mode it is queried.
+.LE
+redo = no
+.LS
+Redo operations previously done? If no then previously processed spectra
+in the object list will not be processed unless required by the
+update option.
+.LE
+update = no
+.LS
+Update processing of previously processed spectra if the
+dispersion reference image or standard star calibration data are changed?
+.LE
+quicklook = no
+.LS
+Extract and calibrate spectra with minimal interaction? In quicklook mode
+only the initial dispersion function solution and standard star setup are
+done interactively. Normally the \f(CWsplot\fR option is set in this mode to
+produce an automatic final spectrum plot for each object. It is
+recommended that this mode not be used for final reductions.
+.LE
+batch = yes
+.LS
+Process spectra as a background or batch job provided there are no interactive
+steps remaining.
+.LE
+listonly = no
+.LS
+List processing steps but don't process?
+.LE
+
+sparams = "" (pset)
+.LS
+Name of parameter set containing additional processing parameters. This
+parameter is only for indicating the link to the parameter set
+\fBsparams\fR and should not be given a value. The parameter set may be
+examined and modified in the usual ways (typically with "eparam sparams"
+or ":e sparams" from the parameter editor). The parameters are
+described below.
+.LE
+
+.ce
+-- GENERAL PARAMETERS --
+
+line = INDEF, nsum = 10
+.LS
+The dispersion line (line or column perpendicular to the dispersion
+axis) and number of adjacent lines (half before and half after unless
+at the end of the image) used in finding, resizing,
+editing, and tracing operations. A line of INDEF selects the middle of the
+image along the dispersion axis.
+.LE
+extras = no (apsum)
+.LS
+Include raw unweighted and uncleaned spectra, the background spectra, and
+the estimated sigmas in a three dimensional output image format.
+See the discussion in the \fBapextract\fR package for further information.
+.LE
+
+.ce
+-- DEFAULT APERTURE LIMITS --
+
+lower = -3., upper = 3. (apdefault)
+.LS
+Default lower and upper aperture limits relative to the aperture center.
+These limits are used when the apertures are first defined.
+.LE
+
+.ce
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --
+
+ylevel = 0.05 (apresize)
+.LS
+Fraction of the peak to set aperture limits during automatic resizing.
+.LE
+
+.ce
+-- TRACE PARAMETERS --
+
+t_step = 10 (aptrace)
+.LS
+Step along the dispersion axis between determination of the spectrum
+positions. Note the \f(CWnsum\fR parameter is also used to enhance the
+signal-to-noise at each step.
+.LE
+t_function = "spline3", t_order = 1 (aptrace)
+.LS
+Default trace fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of terms in the
+polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+t_niterate = 1, t_low = 3., t_high = 3. (aptrace)
+.LS
+Default number of rejection iterations and rejection sigma thresholds.
+.LE
+
+.ce
+-- APERTURE EXTRACTION PARAMETERS --
+
+weights = "none" (apsum) (none|variance)
+.LS
+Type of extraction weighting. Note that if the \f(CWclean\fR parameter is
+set then the weights used are "variance" regardless of the weights
+specified by this parameter. The choices are:
+
+"none"
+.LS
+The pixels are summed without weights except for partial pixels at the
+ends.
+.LE
+"variance"
+.LS
+The extraction is weighted by the variance based on the data values
+and a Poisson/CCD noise model using the \f(CWgain\fR and \f(CWreadnoise\fR
+parameters.
+.LE
+.LE
+pfit = "fit1d" (apsum and approfile) (fit1d|fit2d)
+.LS
+Type of profile fitting algorithm to use. The "fit1d" algorithm is
+preferred except in cases of extreme tilt.
+.LE
+lsigma = 3., usigma = 3. (apsum)
+.LS
+Lower and upper rejection thresholds, given as a number of times the
+estimated sigma of a pixel, for cleaning.
+.LE
+
+.ce
+-- DEFAULT BACKGROUND PARAMETERS --
+
+background = "fit" (apsum) (none|average|median|minimum|fit)
+.LS
+Type of background subtraction. The choices are "none" for no background
+subtraction, "average" to average the background within the background
+regions, "median" to use the median in the background regions, "minimum" to
+use the minimum in the background regions, or "fit" to fit across the
+dispersion using the background within the background regions. Note that
+the "average" option does not do any medianing or bad pixel checking,
+both of which are generally recommended. The fitting option is slower than the
+other options and requires additional fitting parameters.
+.LE
+b_function = "legendre", b_order = 1 (apsum)
+.LS
+Default background fitting function and order. The fitting function types are
+"chebyshev" polynomial, "legendre" polynomial, "spline1" linear spline, and
+"spline3" cubic spline. The order refers to the number of
+terms in the polynomial functions or the number of spline pieces in the spline
+functions.
+.LE
+b_sample = "-10:-6,6:10" (apsum)
+.LS
+Default background sample. The sample is given by a set of colon separated
+ranges each separated by either whitespace or commas. The string "*" refers
+to all points. Note that the background coordinates are relative to the
+aperture center and not image pixel coordinates so the endpoints need not
+be integer. It is recommended that the background regions be examined
+and set interactively with the 'b' key in the interactive aperture
+definition mode. This requires \f(CWquicklook\fR to be no.
+.LE
+b_naverage = -100 (apsum)
+.LS
+Default number of points to average or median. Positive numbers
+average that number of sequential points to form a fitting point.
+Negative numbers median that number, in absolute value, of sequential
+points. A value of 1 does no averaging and each data point is used in the
+fit.
+.LE
+b_niterate = 1 (apsum)
+.LS
+Default number of rejection iterations. If greater than zero the fit is
+used to detect deviant fitting points and reject them before repeating the
+fit. The number of iterations of this process is given by this parameter.
+.LE
+b_low_reject = 3., b_high_reject = 3. (apsum)
+.LS
+Default background lower and upper rejection sigmas. If greater than zero
+points deviating from the fit below and above the fit by more than this
+number of times the sigma of the residuals are rejected before refitting.
+.LE
+
+.ce
+-- ARC DISPERSION FUNCTION PARAMETERS --
+
+threshold = 10. (autoidentify/reidentify)
+.LS
+In order for a feature center to be determined the range of pixel intensities
+around the feature must exceed this threshold.
+.LE
+coordlist = "linelists$idhenear.dat" (autoidentify)
+.LS
+Arc line list consisting of an ordered list of wavelengths.
+Some standard line lists are available in the directory "linelists$".
+.LE
+match = -3. (autoidentify)
+.LS
+The maximum difference for a match between the dispersion function computed
+value and a wavelength in the coordinate list.
+.LE
+fwidth = 4. (autoidentify)
+.LS
+Approximate full base width (in pixels) of arc lines.
+.LE
+cradius = 10. (reidentify)
+.LS
+Radius from previous position to reidentify arc line.
+.LE
+i_function = "spline3", i_order = 1 (autoidentify)
+.LS
+The default function and order to be fit to the arc wavelengths as a
+function of the pixel coordinate. The functions choices are "chebyshev",
+"legendre", "spline1", or "spline3".
+.LE
+i_niterate = 0, i_low = 3.0, i_high = 3.0 (autoidentify)
+.LS
+Number of rejection iterations and sigma thresholds for rejecting arc
+lines from the dispersion function fits.
+.LE
+refit = yes (reidentify)
+.LS
+Refit the dispersion function? If yes and there is more than 1 line
+and a dispersion function was defined in the initial arc reference then a new
+dispersion function of the same type as in the reference image is fit
+using the new pixel positions. Otherwise only a zero point shift is
+determined for the revised fitted coordinates without changing the
+form of the dispersion function.
+.LE
+addfeatures = no (reidentify)
+.LS
+Add new features from a line list during each reidentification?
+This option can be used to compensate for lost features from the
+reference solution. Care should be exercised that misidentified features
+are not introduced.
+.LE
+
+.ce
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --
+
+select = "interp" (refspectra)
+.LS
+Selection method for assigning wavelength calibration spectra.
+Note that an arc assignment table may be used to override the selection
+method and explicitly assign arc spectra to object spectra.
+The automatic selection methods are:
+
+average
+.LS
+Average two reference spectra without regard to any
+sort or group parameters.
+If only one reference spectrum is specified then it is assigned with a
+warning. If more than two reference spectra are specified then only the
+first two are used and a warning is given. There is no checking of the
+group values.
+.LE
+following
+.LS
+Select the nearest following spectrum in the reference list based on the
+sort and group parameters. If there is no following spectrum use the
+nearest preceding spectrum.
+.LE
+interp
+.LS
+Interpolate between the preceding and following spectra in the reference
+list based on the sort and group parameters. If there is not both a preceding
+and a following spectrum then the nearest spectrum is used. The interpolation is weighted
+by the relative distances of the sorting parameter (see cautions in
+DESCRIPTION section).
+.LE
+match
+.LS
+Match each input spectrum with the reference spectrum list in order.
+This overrides any group values.
+.LE
+nearest
+.LS
+Select the nearest spectrum in the reference list based on the sort and
+group parameters.
+.LE
+preceding
+.LS
+Select the nearest preceding spectrum in the reference list based on the
+sort and group parameters. If there is no preceding spectrum use the
+nearest following spectrum.
+.LE
+.LE
+sort = "jd" (setjd and refspectra)
+.LS
+Image header keyword to be used as the sorting parameter for selection
+based on order. The header parameter must be numeric but otherwise may
+be anything. Common sorting parameters are times or positions.
+.LE
+group = "ljd" (setjd and refspectra)
+.LS
+Image header keyword to be used to group spectra. For those selection
+methods which use the group parameter the reference and object
+spectra must have identical values for this keyword. This can
+be anything but it must be constant within a group. Common grouping
+parameters are the date of observation "date-obs" (provided it does not
+change over a night) or the local Julian day number.
+.LE
+time = no, timewrap = 17. (refspectra)
+.LS
+Is the sorting parameter a 24 hour time? If so then the time origin
+for the sorting is specified by the timewrap parameter. This time
+should precede the first observation and follow the last observation
+in a 24 hour cycle.
+.LE
+
+.ce
+-- DISPERSION CORRECTION PARAMETERS --
+
+linearize = yes (dispcor)
+.LS
+Interpolate the spectra to a linear dispersion sampling? If yes the
+spectra will be interpolated to a linear or log linear sampling using
+the linear dispersion parameters specified by other parameters. If
+no the nonlinear dispersion function(s) from the dispersion function
+database are assigned to the input image world coordinate system
+and the spectral data is not interpolated. Note the interpolation
+function type is set by the package parameter \f(CWinterp\fR.
+.LE
+log = no (dispcor)
+.LS
+Use linear logarithmic wavelength coordinates? Linear logarithmic
+wavelength coordinates have wavelength intervals which are constant
+in the logarithm of the wavelength.
+.LE
+flux = yes (dispcor)
+.LS
+Conserve the total flux during interpolation? If \f(CWno\fR the output
+spectrum is interpolated from the input spectrum at each output
+wavelength coordinate. If \f(CWyes\fR the input spectrum is integrated
+over the extent of each output pixel. This is slower than
+simple interpolation.
+.LE
+
+.ce
+-- SENSITIVITY CALIBRATION PARAMETERS --
+
+s_function = "spline3", s_order = 1 (sensfunc)
+.LS
+Function and order used to fit the sensitivity data. The function types
+are "chebyshev" polynomial, "legendre" polynomial, "spline3" cubic spline,
+and "spline1" linear spline. The order corresponds to the number of
+polynomial terms or the number of
+spline pieces. The default values may be changed interactively.
+.LE
+fnu = no (calibrate)
+.LS
+The default calibration is into units of F-lambda. If \f(CWfnu\fR = yes then
+the calibrated spectrum will be in units of F-nu.
+.LE
+
+.ce
+PACKAGE PARAMETERS
+
+The following package parameters are used by this task. The default values
+may vary depending on the package.
+
+dispaxis = 2
+.LS
+Default dispersion axis. The dispersion axis is 1 for dispersion
+running along image lines and 2 for dispersion running along image
+columns. If the image header parameter DISPAXIS is defined it has
+precedence over this parameter. The default value defers to the
+package parameter of the same name.
+.LE
+extinction (standard, sensfunc, calibrate)
+.LS
+Extinction file for a site. There are two extinction files in the
+NOAO standards library, onedstds$, for KPNO and CTIO. These extinction
+files are used for extinction and flux calibration.
+.LE
+caldir (standard)
+.LS
+Standard star calibration directory. A directory containing standard
+star data files. Note that the directory name must end with '/'.
+There are a number of standard star calibrations directories in the NOAO
+standards library, onedstds$.
+.LE
+observatory = "observatory" (observatory)
+.LS
+The default observatory to use for latitude dependent computations.
+If the OBSERVAT keyword is defined in the image header it takes precedence over
+this parameter.
+.LE
+interp = "poly5" (nearest|linear|poly3|poly5|spline3|sinc) (dispcor)
+.LS
+Spectrum interpolation type used when spectra are resampled. The choices are:
+
+.V1
+ nearest - nearest neighbor
+ linear - linear
+ poly3 - 3rd order polynomial
+ poly5 - 5th order polynomial
+ spline3 - cubic spline
+ sinc - sinc function
+.V2
+.LE
+database = "database"
+.LS
+Database name used by various tasks. This is a directory which is created
+if necessary.
+.LE
+verbose = no
+.LS
+Verbose output? If set then almost all the information written to the
+logfile is also written to the terminal except when the task is a
+background or batch process.
+.LE
+logfile = "logfile"
+.LS
+If specified detailed text log information is written to this file.
+.LE
+plotfile = ""
+.LS
+If specified metacode plots are recorded in this file for later review.
+Since plot information can become large this should be used only if
+really desired.
+.LE
+
+.ce
+ENVIRONMENT PARAMETERS
+.LP
+The environment parameter \fIimtype\fR is used to determine the extension
+of the images to be processed and created. This allows use with any
+supported image extension. For STF images the extension has to be exact;
+for example "d1h".
diff --git a/noao/imred/specred/doc/msresp1d.hlp b/noao/imred/specred/doc/msresp1d.hlp
new file mode 100644
index 00000000..cc3dfed2
--- /dev/null
+++ b/noao/imred/specred/doc/msresp1d.hlp
@@ -0,0 +1,191 @@
+.help msresp1d Feb92 noao.imred.specred
+.ih
+NAME
+msresp1d -- Create 1D aperture response from flat and throughput data
+.ih
+USAGE
+msresp1d flat throughput apreference response
+.ih
+PARAMETERS
+.ls flat
+Flat field image to extract and normalize to create a one dimensional
+aperture response image. If no flat field is specified then a throughput
+image or file must be specified and only a throughput correction will be
+created. Note that the two dimensional unextracted image is specified.
+If an extracted image of the same name with the ".ms" extension is present
+it is used without reextraction though the unextracted image must also
+be present.
+.le
+.ls throughput
+Throughput file or image. If an image is specified, typically a blank sky
+observation, the total flux through each aperture is used to correct for
+the aperture throughput. If a file consisting of lines with the aperture
+number and relative throughput is specified then the aperture throughput
+will be generated by those values. If neither is specified but a flat
+field image is given the flat field is used to compute the throughput.
+Note that the image is a two dimensional unextracted image. If an
+extracted image of the same name with the ".ms" extension is present
+it is used without reextraction though the unextracted image must also
+be present.
+.le
+.ls apreference
+Aperture reference spectrum. If not specified the apertures are defined
+using the flat field or throughput images. If only a throughput file
+is used then an aperture reference spectrum must be specified to define
+the apertures and dimensions of the final response image.
+.le
+.ls response
+Response spectrum to be created.
+.le
+
+.ls recenter = no
+Recenter throughput image apertures?
+.le
+.ls edit = yes
+Edit and review apertures?
+.le
+.ls trace = no
+Trace spectra?
+.le
+.ls clean = no
+Detect and replace bad pixels?
+.le
+.ls fitflat = yes
+Fit and ratio flat field spectrum?
+.le
+.ls interactive = yes
+Interactive flat field fit?
+.le
+.ls function = "spline3", order = 20
+Flat field fitting function and order. The functions may be one of
+"chebyshev", "legendre", "spline1" (linear spline), or "spline3" (cubic spline).
+The order is either the number of polynomial terms or the number of spline
+pieces.
+.le
+.ih
+OTHER PARAMETERS
+The package parameters control logging of the operations performed and
+the verbose option allows printing of some progress information. The
+graphics use the device defined by the STDGRAPH variable and cursor
+input is with the parameter \fIcl.gcur\fR.
+
+Aperture extraction is done using the task \fBapall\fR and any parameters
+not overridden by task parameters will be used; for example the detector
+noise parameters.
+.ih
+DESCRIPTION
+For multiaperture or multifiber spectra a throughput aperture correction
+must be applied to extracted object spectra. Also it is often better to
+divide by a one dimensional flat field than a two dimensional one. This
+is valid provided the pixels sampled by the flat field and object are
+essentially the same. The advantages are that interspectrum pixels where
+there is little signal are not used and small shifts (fractions of a pixel)
+can be tolerated. The task \fBmsresp1d\fR creates a multiaperture image
+containing one dimensional flat field and throughput corrections which
+can be directly divided into extracted object spectra.
+
+If a one dimensional flat field is to be determined the flat field spectra
+are extracted unless an extracted image having the specified flat field
+name with the ".ms" extension is present. If the \fIfitflat\fR parameter
+is set then all the spectra are averaged and a smooth function is fit to
+this composite flat field spectrum. The smooth fit is divided into the
+individual flat field spectra. This removes the mean flat field spectrum
+shape, thus avoiding introducing the inverse of the flat field spectrum
+into the object spectra and changing the approximate count levels in the
+object. This procedure is recommended. Note that it does not matter if
+the individual fibers have differing spectral shapes (such as might happen
+with a combination of fibers with differing spectral throughput) because
+only a common function is used. The fitting is done using the \fBfit1d\fR
+task based on the \fBicfit\fR function fitting routines. When the
+\fIinteractive\fR flag is set the fitting may be done interactively
+allowing iteration on the fitting function and other fitting parameters.
+Note that the function fit should follow the overall shape using a fairly
+high order.
+
+If no throughput image or file is specified the relative strengths
+of the flat field spectra define a throughput correction. If a
+separate throughput image or file is given then the individual
+flat field spectra are normalized to unity and then scaled by the
+throughput determined from the image or file.
+
+If a throughput image, such as a blank sky observation, is specified it is
+extracted if needed. The extracted sky spectra are divided by the flat
+field which is not yet corrected for throughput variations. The total flux
+through each aperture is then found to define the relative throughputs of
+the apertures. If a flat field was also specified the throughput values
+are multiplied into the normalized flat field; otherwise the response image
+will consist of constant spectra with the relative throughputs derived from
+the image.
+
+If a throughput file is specified the throughput values for each aperture
+are defined from this file. The file consists of lines with two columns,
+the aperture number and the relative throughput. All apertures should
+be represented. If a flat field was also specified the throughput values
+are multiplied into the normalized flat field. If no flat field
+is given then the aperture reference image must be specified and it
+will be extracted, if necessary, to provide the template for the response
+image having constant values for each aperture spectrum.
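+
+For example, a throughput file might look like the following (aperture
+number followed by relative throughput; the values here are arbitrary):
+
+.nf
+    1  0.98
+    2  1.03
+    3  0.87
+    4  1.00
+.fi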
+
+It is an error if neither a flat field nor a throughput image or file
+is specified.
+
+The last stage is to normalize the response spectra over
+all apertures to a global unit mean. Because of this step the throughput
+values derived from the flat field, throughput image, or throughput
+file need only be relative. Log information is recorded and printed
+which includes the final relative throughput values.
+
+Aperture extraction is done using the task \fBapall\fR and any parameters
+not overridden by task parameters will be used; for example the detector
+noise parameters. Task parameters control whether recentering,
+aperture review, and tracing are done. If no aperture reference is
+specified the apertures will be defined as the task is run.
+The aperture reference, if defined, is often the same as the flat field.
+.ih
+EXAMPLES
+1. To make a flat field response and apply it to an extracted object:
+
+.nf
+ ms> msred.verbose=yes
+ ms> msresp1d flat005 "" "" resp005.ms
+ Extract flat field flat005
+ Searching aperture database ...
+ Sep 7 14:36: DATABASE - 44 apertures read for flat005.
+ Resize apertures for flat005? (yes): n
+ Edit apertures for flat005? (yes): n
+ Extract aperture spectra for flat005? (yes):
+ Review extracted spectra from flat005? (yes): n
+ Extracting apertures ...
+ Sep 7 14:37: EXTRACT - Aperture 1 from flat005 --> flat005.ms
+ Sep 7 14:37: EXTRACT - Aperture 2 from flat005 --> flat005.ms
+ Sep 7 14:37: EXTRACT - Aperture 3 from flat005 --> flat005.ms
+ Sep 7 14:37: EXTRACT - Aperture 4 from flat005 --> flat005.ms
+ Sep 7 14:37: EXTRACT - Aperture 5 from flat005 --> flat005.ms
+ <etc>
+ Fit and ratio flat field flat005
+ <Interactive fitting of average extracted flat field>
+ Create the normalized response resp005.ms
+ Sep 7 14:38 BSCALE: image = resp005.ms
+ bzero=0. bscale=1.0 mean=1.0 median=1.02386 mode=1.07141
+ Average fiber response:
+ 1 0.8049859
+ 2 0.6428247
+ 3 0.9014022
+ 4 0.7955039
+ 5 0.9898984
+ <etc>
+ ms> imarith obj006.ms / resp005.ms obj006.ms
+.fi
+
+Of course the extracted object spectra must be the same in terms of apertures,
+wavelength coverage, etc.
+
+2. To make only a throughput correction:
+
+.nf
+ ms> msresp1d "" obj005 "" resp005
+.fi
+.ih
+SEE ALSO
+icfit, fit1d, apflatten, apnormalize, dofibers
+.endhelp
diff --git a/noao/imred/specred/doc/skysub.hlp b/noao/imred/specred/doc/skysub.hlp
new file mode 100644
index 00000000..75392822
--- /dev/null
+++ b/noao/imred/specred/doc/skysub.hlp
@@ -0,0 +1,98 @@
+.help skysub Mar94 noao.imred.specred
+.ih
+NAME
+skysub -- Sky subtract extracted multispec spectra
+.ih
+USAGE
+skysub input
+.ih
+PARAMETERS
+.ls input
+List of input multispec spectra to sky subtract.
+.le
+.ls output = ""
+List of output sky subtracted spectra. If no output is specified then
+the output replaces the input spectra.
+.le
+.ls objaps = "", objbeams = ""
+Object aperture and beam numbers. An empty list selects all aperture
+or beam numbers. Only the selected apertures are sky subtracted.
+Other apertures are left unmodified. Note that it is valid to include
+the sky apertures in the object selection which results in residual
+sky spectra after subtraction by a mean sky.
+.le
+.ls skyaps = "", skybeams = ""
+Sky aperture and beam numbers. An empty list selects all aperture or
+beam numbers.
+.le
+.ls skyedit = yes
+Edit the sky spectra? If yes the sky spectra are graphed using the
+task \fBspecplot\fR and the user may delete contaminated sky spectra with
+the 'd' key and exit with 'q'.
+.le
+.ls combine = "average" (average|median|sum)
+Option for combining pixels at the same dispersion coordinate after any
+rejection operation. The options are to compute the "average", "median",
+or "sum" of the pixels. The median uses the average of the two central
+values when the number of pixels is even.
+.le
+.ls reject = "none" (none|minmax|avsigclip)
+Type of rejection operation performed on the pixels which overlap at each
+dispersion coordinate. The algorithms are discussed in the
+DESCRIPTION section. The rejection choices are:
+
+.nf
+ none - No rejection
+ minmax - Reject the nlow and nhigh pixels
+ avsigclip - Reject pixels using an averaged sigma clipping algorithm
+.fi
+
+.le
+.ls scale = no
+Scale the sky spectra by the mode?
+.le
+.ls saveskys = yes
+Save the sky spectra? If no then the mean sky spectra will be deleted after
+sky subtraction is completed. Otherwise a one dimensional image with
+the prefix "sky" and then the output name is created.
+.le
+.ls logfile = ""
+Logfile for making a record of the sky subtraction operation.
+.le
+.ih
+DESCRIPTION
+This task selects a subset of aperture spectra from a multispec
+format image, called sky spectra though they could be anything,
+and combines them into a master spectrum which is subtracted
+from another subset of spectra called the objects. Options include
+saving the master sky spectrum and reviewing the selected sky spectra
+graphically and deleting some of them.
+
+The sky apertures are selected using the aperture and beam numbers
+defined during extraction (see the \fBapextract\fR package). In
+some applications the beam numbers are used to code object and sky
+apertures and selection by beam number is quite easy. Otherwise one
+must list the aperture numbers explicitly.
+
+The object apertures are also selected using an aperture and beam
+number list. Spectra not selected to be objects are not modified
+by the sky subtraction. Note that it is perfectly valid to include
+the sky spectra in the object list to produce residual sky spectra.
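+
+For example, if the sky fibers were extracted with beam number 0 and the
+object fibers with beam number 1 (a common convention but not a requirement),
+the selection might look like:
+
+.nf
+    ms> skysub obj010.ms output=skysub010.ms skybeams="0" objbeams="1"
+.fi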
+
+When interactively editing the sky spectra the task \fBspecplot\fR
+is used. To delete a spectrum type 'd'. To undelete the last deleted
+spectrum type 'e'. When finished type 'q'.
+
+The sky spectra are combined using one of the combining and rejection options from
+the task \fBscombine\fR except for the option "none".
+.ih
+EXAMPLES
+1. To median and subtract apertures 1,10,15,20 from all apertures:
+
+.nf
+ ms> skysub obj010.ms out=skysub010.ms skyaps="1,10,15,20"
+.fi
+.ih
+SEE ALSO
+specplot, scombine
+.endhelp
diff --git a/noao/imred/specred/dofibers.cl b/noao/imred/specred/dofibers.cl
new file mode 100644
index 00000000..90a10b7e
--- /dev/null
+++ b/noao/imred/specred/dofibers.cl
@@ -0,0 +1,74 @@
+# DOFIBERS -- Process fiber spectra from 2D to wavelength calibrated 1D.
+#
+# The task PROC does all of the interactive work and BATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
+
+procedure dofibers (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+file throughput = "" {prompt="Throughput file or image (optional)"}
+string arcs1 = "" {prompt="List of arc spectra"}
+string arcs2 = "" {prompt="List of shift arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)\n"}
+
+string readnoise = "0." {prompt="Read out noise sigma (photons)"}
+string gain = "1." {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int fibers = 97 {prompt="Number of fibers"}
+real width = 12. {prompt="Width of profiles (pixels)"}
+real minsep = 8. {prompt="Minimum separation between fibers (pixels)"}
+real maxsep = 15. {prompt="Maximum separation between fibers (pixels)"}
+file apidtable = "" {prompt="Aperture identifications"}
+string crval = "INDEF" {prompt="Approximate central wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion"}
+string objaps = "" {prompt="Object apertures"}
+string skyaps = "" {prompt="Sky apertures"}
+string arcaps = "" {prompt="Arc apertures"}
+string objbeams = "0,1" {prompt="Object beam numbers"}
+string skybeams = "0" {prompt="Sky beam numbers"}
+string arcbeams = "" {prompt="Arc beam numbers\n"}
+
+bool scattered = no {prompt="Subtract scattered light?"}
+bool fitflat = yes {prompt="Fit and ratio flat field spectrum?"}
+bool clean = yes {prompt="Detect and replace bad pixels?"}
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool skyalign = no {prompt="Align sky lines?"}
+bool savearcs = yes {prompt="Save simultaneous arc apertures?"}
+bool skysubtract = yes {prompt="Subtract sky?"}
+bool skyedit = yes {prompt="Edit the sky spectra?"}
+bool saveskys = yes {prompt="Save sky spectra?"}
+bool splot = no {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = yes {prompt="Update spectra if cal data changes?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset params = "" {prompt="Algorithm parameters"}
+
+begin
+ apscript.readnoise = readnoise
+ apscript.gain = gain
+ apscript.nfind = fibers
+ apscript.width = width
+ apscript.t_width = width
+ apscript.minsep = minsep
+ apscript.maxsep = maxsep
+ apscript.radius = minsep
+ apscript.clean = clean
+ proc.datamax = datamax
+
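+    # Hand the processing options to PROC, which does the interactive work;
+    # any remaining noninteractive processing may be submitted as a batch job below.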
+ proc (objects, apref, flat, throughput, arcs1, arcs2, "",
+ arctable, fibers, apidtable, crval, cdelt, objaps, skyaps,
+ arcaps, objbeams, skybeams, arcbeams, scattered, fitflat, no,
+ no, no, no, clean, dispcor, savearcs, skyalign, skysubtract,
+ skyedit, saveskys, splot, redo, update, batch, listonly)
+
+ if (proc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("batch&batch") | cl
+ }
+end
diff --git a/noao/imred/specred/dofibers.par b/noao/imred/specred/dofibers.par
new file mode 100644
index 00000000..cbe2e2a0
--- /dev/null
+++ b/noao/imred/specred/dofibers.par
@@ -0,0 +1,42 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+flat,f,h,"",,,"Flat field spectrum"
+throughput,f,h,"",,,"Throughput file or image (optional)"
+arcs1,s,h,"",,,"List of arc spectra"
+arcs2,s,h,"",,,"List of shift arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)
+"
+readnoise,s,h,"0.",,,"Read out noise sigma (photons)"
+gain,s,h,"1.",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+fibers,i,h,97,,,"Number of fibers"
+width,r,h,12.,,,"Width of profiles (pixels)"
+minsep,r,h,8.,,,"Minimum separation between fibers (pixels)"
+maxsep,r,h,15.,,,"Maximum separation between fibers (pixels)"
+apidtable,f,h,"",,,"Aperture identifications"
+crval,s,h,INDEF,,,"Approximate central wavelength"
+cdelt,s,h,INDEF,,,"Approximate dispersion"
+objaps,s,h,"",,,"Object apertures"
+skyaps,s,h,"",,,"Sky apertures"
+arcaps,s,h,"",,,"Arc apertures"
+objbeams,s,h,"0,1",,,"Object beam numbers"
+skybeams,s,h,"0",,,"Sky beam numbers"
+arcbeams,s,h,"",,,"Arc beam numbers
+"
+scattered,b,h,no,,,"Subtract scattered light?"
+fitflat,b,h,yes,,,"Fit and ratio flat field spectrum?"
+clean,b,h,yes,,,"Detect and replace bad pixels?"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+skyalign,b,h,no,,,"Align sky lines?"
+savearcs,b,h,yes,,,"Save simultaneous arc apertures?"
+skysubtract,b,h,yes,,,"Subtract sky?"
+skyedit,b,h,yes,,,"Edit the sky spectra?"
+saveskys,b,h,yes,,,"Save sky spectra?"
+splot,b,h,no,,,"Plot the final spectrum?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,yes,,,"Update spectra if cal data changes?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+params,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/specred/msresp1d.cl b/noao/imred/specred/msresp1d.cl
new file mode 100644
index 00000000..1083c64d
--- /dev/null
+++ b/noao/imred/specred/msresp1d.cl
@@ -0,0 +1,234 @@
+# MSRESP1D -- Make an aperture response spectrum using a flat field
+# and a throughput file or image.
+
+procedure msresp1d (flat, throughput, apreference, response)
+
+string flat {prompt="Flat field spectrum"}
+string throughput {prompt="Throughput file or image"}
+string apreference {prompt="Aperture reference spectrum"}
+string response {prompt="Response spectrum"}
+
+bool recenter = no {prompt="Recenter apertures?"}
+bool edit = yes {prompt="Edit/review apertures?"}
+bool trace = no {prompt="Trace spectra?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool fitflat = no {prompt="Fit and ratio flat field spectrum?"}
+bool interactive = yes {prompt="Fit flat field interactively?"}
+string function = "spline3" {prompt="Fitting function",
+ enum="spline3|legendre|chebyshev|spline1"}
+int order = 20 {prompt="Fitting function order", min=1}
+
+begin
+ file flat2d, skyflat2d, apref, resp
+ file temp1, temp2, log1, log2
+ string imtype, mstype
+ int i, n, ap, naxis
+ real respval
+
+ flat2d = flat
+ skyflat2d = throughput
+ apref = apreference
+ resp = response
+ temp1 = mktemp ("tmp")
+ temp2 = mktemp ("tmp")
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ # Check required input and output.
+ if (resp == "" || resp == flat2d || resp == skyflat2d)
+ error (1, "Bad response image name")
+ if (flat2d == "" && skyflat2d == "")
+ error (1, "No flat field or throughput specified")
+
+ if (flat2d != "") {
+ i = strlen (flat2d)
+ if (i > n && substr (flat2d, i-n+1, i) == imtype)
+ flat2d = substr (flat2d, 1, i-n)
+ if (!access (flat2d // imtype))
+ error (1, "Flat field spectrum not found - " // flat2d)
+ }
+ if (skyflat2d != "") {
+ i = strlen (skyflat2d)
+ if (i > n && substr (skyflat2d, i-n+1, i) == imtype)
+ skyflat2d = substr (skyflat2d, 1, i-n)
+ if (!access (skyflat2d // imtype)) {
+ if (!access (skyflat2d))
+ error (1,
+ "Throughput file or image not found - " // skyflat2d)
+
+ if (flat2d == "") {
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+ if (!access (apref // imtype))
+ error (1, "Aperture reference image required")
+ }
+ }
+ }
+
+ # Set logging
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # If using a flat field extract it if necessary and possibly fit it
+ # and ratio the individual apertures by an overall smooth function
+
+ if (flat2d != "") {
+ if (!access (flat2d // mstype)) {
+ print ("Extract flat field ", flat2d) | tee (log1)
+ if (flat2d != apref)
+ apall (flat2d, output=resp, references=apref, profiles="",
+ interactive=yes, find=yes, recenter=recenter,
+ resize=no, edit=edit, trace=trace, fittrace=yes,
+ extract=yes, review=no, background="none", clean=clean,
+ extras=no)
+ else
+ apall (flat2d, output=resp, references=apref, profiles="",
+ interactive=no, find=yes, recenter=no, resize=no,
+ edit=edit, trace=no, fittrace=yes, extract=yes,
+ review=no, background="none", clean=clean, extras=no)
+ } else
+ imcopy (flat2d//".ms", resp, verbose=no)
+
+ if (fitflat) {
+ print ("Fit and ratio flat field ", flat2d) | tee (log1)
+ blkavg (resp, temp1, option="average", b1=1, b2=10000)
+ imcopy (temp1//"[*,1]", temp1, verbose=no)
+ fit1d (temp1, temp1, "fit", axis=1, interactive=interactive,
+ sample="*", naverage=1, function=function, order=order,
+ low_reject=0., high_reject=0., niterate=1, grow=0.,
+ graphics="stdgraph")
+ sarith (resp, "/", temp1, resp, w1=INDEF, w2=INDEF,
+ apertures="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=yes, format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp1, verify=no)
+ }
+ }
+
+ # If using a throughput image extract it if necessary.
+ # Apply it to the flat field if given and otherwise only
+ # compute the throughput through each aperture.
+
+ if (skyflat2d != "") {
+ if (access (skyflat2d // imtype)) {
+ if (!access (skyflat2d // mstype)) {
+ print ("Extract throughput image ", skyflat2d) | tee (log1)
+ apall (skyflat2d, output=temp1, references=apref,
+ profiles="", interactive=yes, find=yes,
+ recenter=recenter, resize=no, edit=edit,
+ trace=trace, fittrace=yes, extract=yes, review=no,
+ background="none", clean=clean, extras=no)
+ temp2 = temp1
+ } else
+ temp2 = skyflat2d // ".ms"
+
+ if (flat2d != "") {
+ print ("Correct flat field to throughput image") |
+ tee (log1)
+ sarith (temp2, "/", resp, temp1, w1=INDEF, w2=INDEF,
+ apertures="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ fit1d (temp1, temp1, type="fit", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+ sarith (resp, "*", temp1, resp, w1=INDEF, w2=INDEF,
+ apertures="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=yes, format="multispec", renumber=no,
+ offset=0, clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp1, verify=no)
+ } else {
+ print ("Compute aperture throughput from image") |
+ tee (log1)
+ fit1d (temp2, resp, type="fit", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+ if (temp2 == temp1)
+ imdelete (temp2, verify=no)
+ }
+
+ # If a flat field and throughput file are used scale the average
+ # flat field in each aperture to those values
+
+ } else if (flat2d != "") {
+ print ("Correct flat field with throughput file ", skyflat2d) |
+ tee (log1)
+ fit1d (resp, resp, type="ratio", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+
+ list = skyflat2d
+ while (fscan (list, ap, respval) != EOF) {
+ sarith (resp, "*", respval, resp, w1=INDEF, w2=INDEF,
+ apertures=ap, beams="", apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec", renumber=no,
+ offset=0, clobber=yes, merge=yes, errval=0.,
+ verbose=no)
+ }
+ list = ""
+
+ # If only a throughput file is given create the response from the
+ # aperture reference and set the aperture response to the specified
+ # values.
+
+ } else {
+ print ("Set aperture throughput using ", skyflat2d) | tee (log1)
+ if (!access (apref // mstype)) {
+ apall (apref, output=resp, references=apref,
+ profiles="", interactive=no, find=yes, recenter=no,
+ resize=no, edit=edit, trace=no, fittrace=yes,
+ extract=yes, review=no, background="none", clean=no,
+ extras=no)
+ sarith (resp, "replace", "0", resp, w1=INDEF, w2=INDEF,
+ apertures="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec", renumber=no,
+ offset=0, clobber=yes, merge=yes, errval=0., verbose=no)
+ } else
+ sarith (apref//".ms", "replace", "0", resp, w1=INDEF,
+ w2=INDEF, apertures="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=yes,
+ errval=0., verbose=no)
+
+ list = skyflat2d
+ while (fscan (list, ap, respval) != EOF) {
+ sarith (resp, "replace", respval, resp, w1=INDEF, w2=INDEF,
+ apertures=ap, beams="", apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec", renumber=no,
+ offset=0, clobber=yes, merge=yes, errval=0.,
+ verbose=no)
+ }
+ list = ""
+ }
+ }
+
+ # The final response is normalized to overall unit mean and the
+ # average aperture response is printed.
+
+ print ("Create the normalized response ", resp) | tee (log1)
+ bscale (resp, resp, bzero="0.", bscale="mean", section="",
+ step=1, upper=INDEF, lower=INDEF, verbose=yes) | tee (log1, >log2)
+ blkavg (resp, temp1, option="average", b1=10000, b2=1)
+ print ("Average aperture response:") | tee (log1, >log2)
+ hselect (temp1, "naxis", yes, > temp2)
+ list = temp2; ap = fscan (list, naxis)
+ if (naxis == 1)
+ listpixels (temp1) | tee (log1, >log2)
+ else
+ listpixels (temp1//"[1,*]") | tee (log1, >log2)
+ list = ""; delete (temp2, verify=no)
+ imdelete (temp1, verify=no)
+end
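
The throughput-file branches above read a plain text file of "aperture value" pairs through the CL builtin `list` parameter and fscan, then scale or replace each matching aperture with SARITH.  A minimal sketch of that read loop, assuming a hypothetical file thruput.dat (the file name and numbers are illustrative only):

    # thruput.dat might contain one "aperture  throughput" pair per line:
    #   1  0.98
    #   2  1.03
    int  ap
    real respval

    list = "thruput.dat"
    while (fscan (list, ap, respval) != EOF)
        print ("aperture ", ap, " throughput ", respval)
    list = ""
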
diff --git a/noao/imred/specred/msresp1d.par b/noao/imred/specred/msresp1d.par
new file mode 100644
index 00000000..9a59eb46
--- /dev/null
+++ b/noao/imred/specred/msresp1d.par
@@ -0,0 +1,13 @@
+flat,s,a,,,,"Flat field spectrum"
+throughput,s,a,,,,"Throughput file or image"
+apreference,s,a,,,,"Aperture reference spectrum"
+response,s,a,,,,"Response spectrum"
+recenter,b,h,no,,,"Recenter apertures?"
+edit,b,h,yes,,,"Edit/review apertures?"
+trace,b,h,no,,,"Trace spectra?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+fitflat,b,h,no,,,"Fit and ratio flat field spectrum?"
+interactive,b,h,yes,,,"Fit flat field interactively?"
+function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+order,i,h,20,1,,"Fitting function order"
+mode,s,h,"ql",,,
diff --git a/noao/imred/specred/params.par b/noao/imred/specred/params.par
new file mode 100644
index 00000000..fab14009
--- /dev/null
+++ b/noao/imred/specred/params.par
@@ -0,0 +1,67 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+order,s,h,"decreasing","increasing|decreasing",,"Order of apertures"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-5.,,,"Lower aperture limit relative to center"
+upper,r,h,5.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,3,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- SCATTERED LIGHT PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,Upper rejection threshold
+nsubaps,i,h,1,1,,"Number of subapertures
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,yes,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,10,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,2,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SKY SUBTRACTION PARAMETERS --"
+combine,s,h,"average","average|median",,Type of combine operation
+reject,s,h,"avsigclip","none|minmax|avsigclip",,"Sky rejection option"
+scale,s,h,"none","none|mode|median|mean",,"Sky scaling option"
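
The parameter files in this package all use the standard IRAF seven-field .par layout: name, data type (b, i, r, s, f, pset, *struct), mode (h hidden, q query, a automatic), default value, minimum or enumerated choices, maximum, and prompt.  Section banners such as "-- TRACE PARAMETERS --" are simply embedded in quoted prompt strings that span lines, so they appear as dividers when the pset is edited with EPAR.  One line annotated for reference (the comment line is only illustrative; .par files accept # comments, as specred.par below shows):

    # name,type,mode,default,min|enum,max,"prompt"
    t_order,i,h,3,1,,"Trace fitting function order"
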
diff --git a/noao/imred/specred/sparams.par b/noao/imred/specred/sparams.par
new file mode 100644
index 00000000..1cc001d8
--- /dev/null
+++ b/noao/imred/specred/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE PARAMETERS -- "
+lower,r,h,-3.,,,Lower aperture limit relative to center
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,1,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none",,,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- BACKGROUND SUBTRACTION PARAMETERS --"
+background,s,h,"fit","none|average|median|minimum|fit",,Background to subtract
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,"Background function"
+b_order,i,h,1,,,"Background function order"
+b_sample,s,h,"-10:-6,6:10",,,"Background sample regions"
+b_naverage,i,h,-100,,,"Background average or median"
+b_niterate,i,h,1,0,,"Background rejection iterations"
+b_low,r,h,3.,0.,,"Background lower rejection sigma"
+b_high,r,h,3.,0.,,"Background upper rejection sigma
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,1,1,,"Order of dispersion function"
+i_niterate,i,h,1,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
diff --git a/noao/imred/specred/specred.cl b/noao/imred/specred/specred.cl
new file mode 100644
index 00000000..ab859853
--- /dev/null
+++ b/noao/imred/specred/specred.cl
@@ -0,0 +1,103 @@
+#{ SPECRED package definition
+
+proto # bscale
+
+s1 = envget ("min_lenuserarea")
+if (s1 == "")
+ reset min_lenuserarea = 100000
+else if (int (s1) < 100000)
+ reset min_lenuserarea = 100000
+
+package specred
+
+# Slitproc
+cl < doslit$doslittasks.cl
+task sparams = "specred$sparams.par"
+
+# Dofibers
+task dofibers = "specred$dofibers.cl"
+task params = "specred$params.par"
+
+task proc = "srcfibers$proc.cl"
+task fibresponse = "srcfibers$fibresponse.cl"
+task arcrefs = "srcfibers$arcrefs.cl"
+task doarcs = "srcfibers$doarcs.cl"
+task doalign = "srcfibers$doalign.cl"
+task skysub = "srcfibers$skysub.cl"
+task batch = "srcfibers$batch.cl"
+task getspec = "srcfibers$getspec.cl"
+task listonly = "srcfibers$listonly.cl"
+task apscript = "srcfibers$x_apextract.e"
+
+# Generic fiber reduction tasks
+task msresp1d = "specred$msresp1d.cl"
+
+# Onedspec tasks
+task autoidentify,
+ calibrate,
+ continuum,
+ deredden,
+ dispcor,
+ dopcor,
+ fitprofs,
+ identify,
+ odcombine,
+ refspectra,
+ reidentify,
+ sapertures,
+ sarith,
+ sensfunc,
+ sfit,
+ sflip,
+ slist,
+ skytweak,
+ specplot,
+ specshift,
+ splot,
+ standard,
+ telluric = "onedspec$x_onedspec.e"
+task scombine = "onedspec$scombine/x_scombine.e"
+task aidpars = "onedspec$aidpars.par"
+task bplot = "onedspec$bplot.cl"
+task scopy = "onedspec$scopy.cl"
+task dispcor1 = "onedspec$dispcor1.par"
+
+# Apextract tasks
+task apall,
+ apedit,
+ apfind,
+ apfit,
+ apflatten,
+ apmask,
+ apnormalize,
+ aprecenter,
+ apresize,
+ apscatter,
+ apsum,
+ aptrace = "apextract$x_apextract.e"
+task apparams = "apextract$apparams.par"
+task apall1 = "apextract$apall1.par"
+task apfit1 = "apextract$apfit1.par"
+task apflat1 = "apextract$apflat1.par"
+task apnorm1 = "apextract$apnorm1.par"
+task apdefault = "apextract$apdefault.par"
+task apscat1 = "apextract$apscat1.par"
+task apscat2 = "apextract$apscat2.par"
+
+# Longslit tasks
+task illumination,
+ lscombine,
+ response,
+ transform = "twodspec$longslit/x_longslit.e"
+task background = "generic$background.cl"
+
+# Astutil tasks
+task setairmass,
+ setjd = "astutil$x_astutil.e"
+
+# Hide tasks from the user
+hidetask apparams, apall1, apfit1, apflat1, apnorm1, apscat1, apscat2, dispcor1
+hidetask sparams, params, doalign
+hidetask apscript, proc, batch, arcrefs, doarcs, getspec, listonly, fibresponse
+
+clbye
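
The package script above follows the usual CL package idiom: `task name = "path$file"` binds a task to a logical path, and the file extension decides how it runs (a .cl script, a .par pset-only task, or an entry point in a compiled .e executable, where a comma-separated list of names shares one binary).  `hidetask` then keeps the helper scripts out of the package menu.  A stripped-down sketch of the same pattern for a hypothetical package (all names illustrative):

    package mypkg

    task mytask   = "mypkg$mytask.cl"        # CL script task
    task myparams = "mypkg$myparams.par"     # parameter-set (pset) task
    task ta, tb   = "mypkg$x_mypkg.e"        # two tasks sharing one executable
    hidetask myparams

    clbye
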
diff --git a/noao/imred/specred/specred.hd b/noao/imred/specred/specred.hd
new file mode 100644
index 00000000..94f4609b
--- /dev/null
+++ b/noao/imred/specred/specred.hd
@@ -0,0 +1,11 @@
+# Help directory for the SPECRED package.
+
+$doc = "./doc/"
+$doslit = "noao$imred/src/doslit/"
+
+dofibers hlp=doc$dofibers.hlp, src=dofibers.cl
+doslit hlp=doc$doslit.hlp, src=doslit$doslit.cl
+msresp1d hlp=doc$msresp1d.hlp, src=msresp1d.cl
+skysub hlp=doc$skysub.hlp, src=srcfibers$skysub.cl
+
+revisions sys=Revisions
diff --git a/noao/imred/specred/specred.men b/noao/imred/specred/specred.men
new file mode 100644
index 00000000..3e2b0130
--- /dev/null
+++ b/noao/imred/specred/specred.men
@@ -0,0 +1,51 @@
+ apall - Extract 1D spectra (all parameters in one task)
+ apdefault - Set the default aperture parameters and apidtable
+ apedit - Edit apertures interactively
+ apfind - Automatically find spectra and define apertures
+ apfit - Fit 2D spectra and output the fit, difference, or ratio
+ apflatten - Remove overall spectral and profile shapes from flat fields
+       apmask - Create an IRAF pixel list mask of the apertures
+ apnormalize - Normalize 2D apertures by 1D functions
+ aprecenter - Recenter apertures
+ apresize - Resize apertures
+ apscatter - Fit and subtract scattered light
+ apsum - Extract 1D spectra
+ aptrace - Trace positions of spectra
+
+ background - Fit and subtract a line or column background
+ bplot - Batch plot of spectra with SPLOT
+ calibrate - Extinction and flux calibrate spectra
+ continuum - Fit the continuum in spectra
+ deredden - Apply interstellar extinction correction
+ dispcor - Dispersion correct spectra
+ dopcor - Doppler correct spectra
+ fitprofs - Fit gaussian profiles
+ identify - Identify features in spectrum for dispersion solution
+ illumination - Determine illumination calibration
+ lscombine - Combine longslit spectra
+ msresp1d - Create 1D response spectra from flat field and sky spectra
+ odcombine - Combine spectra (new version)
+ refspectra - Assign wavelength reference spectra to other spectra
+ reidentify - Automatically reidentify features in spectra
+ response - Determine response calibration
+ sapertures - Set or change aperture header information
+ sarith - Spectrum arithmetic
+ scombine - Combine spectra
+ scopy - Select and copy apertures in different spectral formats
+ sensfunc - Compute instrumental sensitivity from standard stars
+ setairmass - Compute effective airmass and middle UT for an exposure
+ setjd - Compute and set Julian dates in images
+ sfit - Fit spectra and output fit, ratio, or difference
+ sflip - Flip data and/or dispersion coordinates in spectra
+ skysub - Sky subtract extracted multispec spectra
+ skytweak - Sky subtract 1D spectra after tweaking sky spectra
+ slist - List spectrum header parameters
+ specplot - Scale, stack, and plot multiple spectra
+ specshift - Shift spectral dispersion coordinate systems
+ splot - Preliminary spectral plot/analysis
+ standard - Tabulate standard star counts and fluxes
+ telluric - Remove telluric features from 1D spectra
+ transform - Resample longslit spectra
+
+ dofibers - Process fiber spectra
+ doslit - Process slit spectra
diff --git a/noao/imred/specred/specred.par b/noao/imred/specred/specred.par
new file mode 100644
index 00000000..15471412
--- /dev/null
+++ b/noao/imred/specred/specred.par
@@ -0,0 +1,15 @@
+# SPECRED parameter file
+extinction,s,h,onedstds$kpnoextinct.dat,,,Extinction file
+caldir,s,h,onedstds$spec16redcal/,,,Standard star calibration directory
+observatory,s,h,"observatory",,,Observatory of data
+interp,s,h,"poly5","nearest|linear|poly3|poly5|spline3|sinc",,Interpolation type
+dispaxis,i,h,2,1,3,Image axis for 2D/3D images
+nsum,s,h,"1",,,"Number of lines/columns/bands to sum for 2D/3D images
+"
+database,f,h,"database",,,Database
+verbose,b,h,no,,,Verbose output?
+logfile,s,h,"logfile",,,Log file
+plotfile,s,h,"",,,"Plot file
+"
+records,s,h,"",,,Record number extensions
+version,s,h,"SPECRED V3: April 1992"
diff --git a/noao/imred/src/doecslit/Revisions b/noao/imred/src/doecslit/Revisions
new file mode 100644
index 00000000..c4e6ee16
--- /dev/null
+++ b/noao/imred/src/doecslit/Revisions
@@ -0,0 +1,93 @@
+.help revisions Dec94 noao.imred.src.doecslit
+.nf
+
+=======
+V2.12.3
+=======
+
+doecslit$sdoarcs.cl
+ The sparams.refit parameter was being ignored and the ecreidentify
+ step has refit=yes hardwired. The parameter reference is now used.
+ (11/14/05, Valdes)
+
+doecslit$sbatch.cl
+doecslit$sproc.cl
+doecslit$fibresponse.cl
+ Error messages now hint to check imtype setting.
+ (4/15/05, Valdes)
+
+========
+V2.12.2b
+========
+
+doecslit$sproc.cl
+ Modified code to eliminate goto. This is for use with pyraf.
+ (11/21/00, Valdes)
+
+========
+V2.11.3a
+========
+
+doecslit$sproc.cl
+ The arcref and arcrefs variables were not initialized if dispcor=no
+ and if the user goes directly to batch mode there is an undefined
+ local variable error. Added initialization. (1/27/98, Valdes)
+
+=======
+V2.11.1
+=======
+
+doecslit$sarcrefs.cl
+doecslit$sbatch.cl
+doecslit$sfluxcal.cl
+doecslit$sgetspec.cl
+doecslit$slistonly.cl
+doecslit$sproc.cl
+doecslit$sdoarcs.cl
+ Any additional qualifiers in the imtype string are stripped.
+ (8/14/97, Valdes)
+
+doecslit$sgetspec.cl
+ Added the field parameter to the RENAME call. (8/12/97, Valdes)
+
+=========
+V2.11Beta
+=========
+
+doecslit$sbatch.cl
+ Fixed typo bugs in this script. (10/3/96, Valdes)
+
+doecslit$apslitproc.par
+ Made changes for the new aperture selection option. (9/5/96, Valdes)
+
+=======
+V2.10.4
+=======
+
+doecslit$sgetspec.cl
+doecslit$doecslit.cl
+ The arc table will now be checked for arc spectra. (5/1/95, Valdes)
+
+doecslit$sparams.par
+doecslit$sdoarcs.cl
+doecslit$sarcrefs.cl
+ Added "threshold" as a user parameter. (1/16/95, Valdes)
+
+doecslit$sproc.cl
+doecslit$sbatch.cl
+doecslit$sfluxcal.cl
+doecslit$sproc.par
+doecslit$sbatch.par
+doecslit$sfluxcal.par
+ SETAIRMASS and SETJD are only called if the required keywords are
+ present. Errors from missing airmass or JD are then reported from
+ the task that actually uses them. (12/31/94, Valdes)
+
+doecslit$sgetspec.cl
+doecslit$sgetspec.par
+ Added warning and query for missing CCDPROC keyword. (12/31/94, Valdes)
+
+=======
+V2.10.3
+=======
+.endhelp
diff --git a/noao/imred/src/doecslit/apslitproc.par b/noao/imred/src/doecslit/apslitproc.par
new file mode 100644
index 00000000..3233960a
--- /dev/null
+++ b/noao/imred/src/doecslit/apslitproc.par
@@ -0,0 +1,145 @@
+# APSCRIPT
+
+input,s,a,,,,List of input images
+output,s,h,"",,,List of output spectra
+apertures,s,h,"",,,Apertures
+scatter,s,h,"",,,List of scattered light images (optional)
+references,s,h,"",,,List of aperture reference images
+profiles,s,h,"",,,"List of aperture profile images
+"
+interactive,b,h,yes,,,Run task interactively?
+find,b,h,yes,,,Find apertures?
+recenter,b,h,yes,,,Recenter apertures?
+resize,b,h,yes,,,Resize apertures?
+edit,b,h,yes,,,Edit apertures?
+trace,b,h,yes,,,Trace apertures?
+fittrace,b,h,yes,,,Fit the traced points interactively?
+extract,b,h,yes,,,Extract spectra?
+review,b,h,yes,,,Review extractions?
+subtract,b,h,yes,,,Subtract scattered light?
+smooth,b,h,yes,,,Smooth scattered light along the dispersion?
+fitscatter,b,h,yes,,,Fit scattered light interactively?
+fitsmooth,b,h,yes,,,"Smooth the scattered light interactively?
+"
+line,i,h,)sparams.line,,,>sparams.line
+nsum,i,h,)sparams.nsum,,,>sparams.nsum
+buffer,r,h,)sparams.buffer,,,">sparams.buffer
+
+# OUTPUT PARAMETERS
+"
+format,s,h,"echelle",,,Extracted spectra format
+extras,b,h,)sparams.extras,,,"Extract sky, sigma, etc.?"
+dbwrite,s,h,"YES",,,Write to database?
+initialize,b,h,no,,,Initialize answers?
+verbose,b,h,)_.verbose,,,"Verbose output?
+
+# DEFAULT APERTURE PARAMETERS
+"
+lower,r,h,,,,Lower aperture limit relative to center
+upper,r,h,,,,Upper aperture limit relative to center
+apidtable,s,h,"",,,"Aperture ID table (optional)
+
+# DEFAULT BACKGROUND PARAMETERS
+"
+b_function,s,h,)sparams.b_function,,,>sparams.b_function
+b_order,i,h,)sparams.b_order,,,>sparams.b_order
+b_sample,s,h,)sparams.b_sample,,,>sparams.b_sample
+b_naverage,i,h,)sparams.b_naverage,,,>sparams.b_naverage
+b_niterate,i,h,)sparams.b_niterate,,,>sparams.b_niterate
+b_low_reject,r,h,)sparams.b_low,,,>sparams.b_low
+b_high_reject,r,h,)sparams.b_high,,,>sparams.b_high
+b_grow,r,h,0.,0.,,"Background rejection growing radius
+
+# APERTURE CENTERING PARAMETERS
+"
+width,r,h,,,,Profile centering width
+radius,r,h,,,,Profile centering radius
+threshold,r,h,10.,0.,,"Detection threshold for profile centering
+
+# AUTOMATIC FINDING AND ORDERING PARAMETERS
+"
+nfind,i,h,,,,Number of apertures to be found automatically
+minsep,r,h,,,,Minimum separation between spectra
+maxsep,r,h,100000.,,,Maximum separation between spectra
+order,s,h,"increasing",,,"Order of apertures
+
+# RECENTERING PARAMETERS
+"
+aprecenter,s,h,"",,,Apertures for recentering calculation
+npeaks,r,h,INDEF,,,Select brightest peaks
+shift,b,h,yes,,,"Use average shift instead of recentering?
+
+# RESIZING PARAMETERS
+"
+llimit,r,h,INDEF,,,Lower aperture limit relative to center
+ulimit,r,h,INDEF,,,Upper aperture limit relative to center
+ylevel,r,h,)sparams.ylevel,,,>sparams.ylevel
+peak,b,h,yes,,,Is ylevel a fraction of the peak?
+bkg,b,h,yes,,,Subtract background in automatic width?
+r_grow,r,h,0.,,,Grow limits by this factor
+avglimits,b,h,no,,,"Average limits over all apertures?
+
+# EDITING PARAMETERS
+"
+e_output,s,q,,,,Output spectra rootname
+e_profiles,s,q,,,,"Profile reference image
+
+# TRACING PARAMETERS
+"
+t_nsum,i,h,)sparams.nsum,,,>sparams.nsum
+t_step,i,h,)sparams.t_step,,,>sparams.t_step
+t_nlost,i,h,3,1,,Number of consecutive times profile is lost before quitting
+t_width,r,h,)sparams.width,,,>sparams.width
+t_function,s,h,)sparams.t_function,,,>sparams.t_function
+t_sample,s,h,"*",,,Trace sample regions
+t_order,i,h,)sparams.t_order,,,>sparams.t_order
+t_naverage,i,h,1,,,Trace average or median
+t_niterate,i,h,)sparams.t_niterate,,,>sparams.t_niterate
+t_low_reject,r,h,)sparams.t_low,,,>sparams.t_low
+t_high_reject,r,h,)sparams.t_high,,,>sparams.t_high
+t_grow,r,h,0.,0.,,"Trace rejection growing radius
+
+# EXTRACTION PARAMETERS
+"
+background,s,h,,,,Background to subtract
+skybox,i,h,1,,,Box car smoothing length for sky
+weights,s,h,)sparams.weights,,,>sparams.weights
+pfit,s,h,)sparams.pfit,,,>sparams.pfit
+clean,b,h,no,,,Detect and replace bad pixels?
+nclean,r,h,0.5,,,Maximum number of pixels to clean
+niterate,i,h,5,0,,Number of profile fitting iterations
+saturation,r,h,INDEF,,,Saturation level
+readnoise,s,h,,,,Read out noise sigma (photons)
+gain,s,h,,,,Photon gain (photons/data number)
+lsigma,r,h,)sparams.lsigma,,,>sparams.lsigma
+usigma,r,h,)sparams.usigma,,,>sparams.usigma
+polysep,r,h,0.95,0.1,0.95,Marsh algorithm polynomial spacing
+polyorder,i,h,10,1,,Marsh algorithm polynomial order
+nsubaps,i,h,1,1,,"Number of subapertures per aperture
+
+# ANSWER PARAMETERS
+"
+ansclobber,s,h,"NO",,," "
+ansclobber1,s,h,"NO",,," "
+ansdbwrite,s,h,"YES",,," "
+ansdbwrite1,s,h,"NO",,," "
+ansedit,s,h,"NO",,," "
+ansextract,s,h,"NO",,," "
+ansfind,s,h,"NO",,," "
+ansfit,s,h,"NO",,," "
+ansfitscatter,s,h,"NO",,," "
+ansfitsmooth,s,h,"NO",,," "
+ansfitspec,s,h,"NO",,," "
+ansfitspec1,s,h,"NO",,," "
+ansfittrace,s,h,"NO",,," "
+ansfittrace1,s,h,"NO",,," "
+ansflat,s,h,"NO",,," "
+ansnorm,s,h,"NO",,," "
+ansrecenter,s,h,"NO",,," "
+ansresize,s,h,"NO",,," "
+ansreview,s,h,"NO",,," "
+ansreview1,s,h,"NO",,," "
+ansscat,s,h,"NO",,," "
+ansskyextract,s,h,"NO",,," "
+anssmooth,s,h,"NO",,," "
+anstrace,s,h,"NO",,," "
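
Many of the defaults above, such as `)sparams.line`, use CL parameter indirection: a value beginning with ")" makes the parameter fetch its value from another task's parameter at run time, here from the SPARAMS pset, which is how one pset drives this hidden extraction script.  The ">sparams.line" text in the prompt field is only the display convention for such redirected parameters.  A two-line illustration with a hypothetical pset and parameter:

    # order takes its value from mypset.order whenever the task runs
    order,i,h,)mypset.order,,,>mypset.order
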
diff --git a/noao/imred/src/doecslit/doecslit.cl b/noao/imred/src/doecslit/doecslit.cl
new file mode 100644
index 00000000..a3675416
--- /dev/null
+++ b/noao/imred/src/doecslit/doecslit.cl
@@ -0,0 +1,106 @@
+# DOECSLIT -- Process Echelle slit spectra from 2D to wavelength calibrated
+# and flux calibrated 1D spectra.
+#
+# The task SPROC does all of the interactive work and SBATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
+
+procedure doecslit (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+string arcs = "" {prompt="List of arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)"}
+string standards = "" {prompt="List of standard star spectra\n"}
+
+string readnoise = "0." {prompt="Read out noise sigma (photons)"}
+string gain = "1." {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int norders = 10 {prompt="Number of orders"}
+real width = 5. {prompt="Width of profiles (pixels)\n"}
+
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool extcor = no {prompt="Extinction correct spectra?"}
+bool fluxcal = no {prompt="Flux calibrate spectra?"}
+bool resize = no {prompt="Resize object apertures?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool trace = yes {prompt="Trace object spectra?"}
+string background = "none" {prompt="Background to subtract",
+ enum="none|scattered|average|median|minimum|fit"}
+bool splot = no {prompt="Plot the final spectra?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = no {prompt="Update spectra if cal data changes?"}
+bool quicklook = no {prompt="Approximate quicklook reductions?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset sparams = "" {prompt="Algorithm parameters"}
+
+begin
+ bool recenter, arcap, tr, scat
+
+ int i, j
+ file obj, arc, std
+
+ # Expand image lists
+ obj = mktemp ("tmp$iraf")
+ arc = mktemp ("tmp$iraf")
+ std = mktemp ("tmp$iraf")
+ sgetspec (objects, arcs, arctable, standards, obj, arc, std)
+
+ # Remove any leading whitespace from parameters that might be null.
+ if (logfile != "") {
+ j = strlen (logfile)
+ for (i=1; i<=j && substr(logfile,i,i)==" "; i+=1);
+ logfile = substr (logfile, i, j)
+ }
+ if (arctable != "") {
+ j = strlen (arctable)
+ for (i=1; i<=j && substr(arctable,i,i)==" "; i+=1);
+ arctable = substr (arctable, i, j)
+ }
+
+ apslitproc.readnoise = readnoise
+ apslitproc.gain = gain
+ apslitproc.nfind = norders
+ apslitproc.width = width
+ apslitproc.lower = -width / 2.
+ apslitproc.upper = width / 2.
+ apslitproc.b_sample = \
+ str(-2*width)//":"//str(-width)//","//str(width)//":"//str(2*width)
+ apslitproc.t_width = width
+ apslitproc.radius = width
+ apslitproc.minsep = width
+ apslitproc.clean = clean
+ if (background == "scattered") {
+ scat = yes
+ apslitproc.background = "none"
+ } else {
+ scat = no
+ apslitproc.background = background
+ }
+ sproc.datamax = datamax
+
+ recenter = yes
+ tr = trace
+ arcap = yes
+ if (quicklook) {
+ tr = no
+ scat = no
+ arcap = no
+ }
+
+ sproc (obj, apref, arc, arctable, std, recenter,
+ resize, quicklook, tr, scat, arcap, dispcor,
+ extcor, fluxcal, splot, redo, update, batch, listonly)
+ delete (std, verify=no)
+
+ if (sproc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("sbatch&batch") | cl
+ } else {
+ delete (obj, verify=no)
+ delete (arc, verify=no)
+ }
+end
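
The closing block of DOECSLIT hands the remaining reductions to SBATCH with `print ("sbatch&batch") | cl`: the command string is piped into the CL, which then submits SBATCH with the trailing "&" qualifier for background (batch) execution, so the non-interactive extractions do not tie up the session.  This is also why the work is split between an interactive SPROC pass and a batch SBATCH pass.  A minimal sketch of the same idiom, with a hypothetical task and flag:

    # dobatch and mytask are hypothetical; the quoted string is run by a new CL.
    if (dobatch)
        print ("mytask&batch") | cl
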
diff --git a/noao/imred/src/doecslit/doecslit.par b/noao/imred/src/doecslit/doecslit.par
new file mode 100644
index 00000000..1d68f729
--- /dev/null
+++ b/noao/imred/src/doecslit/doecslit.par
@@ -0,0 +1,28 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+arcs,s,h,"",,,"List of arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)"
+standards,s,h,"",,,"List of standard star spectra
+"
+readnoise,s,h,"0.",,,"Read out noise sigma (photons)"
+gain,s,h,"1.",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+norders,i,h,10,,,"Number of orders"
+width,r,h,5.,,,"Width of profiles (pixels)
+"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+extcor,b,h,no,,,"Extinction correct spectra?"
+fluxcal,b,h,no,,,"Flux calibrate spectra?"
+resize,b,h,no,,,"Resize object apertures?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+trace,b,h,yes,,,"Trace object spectra?"
+background,s,h,"none",none|scattered|average|median|minimum|fit,,"Background to subtract"
+splot,b,h,no,,,"Plot the final spectra?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,no,,,"Update spectra if cal data changes?"
+quicklook,b,h,no,,,"Approximate quicklook reductions?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+sparams,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doecslit/sarcrefs.cl b/noao/imred/src/doecslit/sarcrefs.cl
new file mode 100644
index 00000000..907b446f
--- /dev/null
+++ b/noao/imred/src/doecslit/sarcrefs.cl
@@ -0,0 +1,77 @@
+# SARCREFS -- Determine dispersion relation for reference arc.
+
+procedure sarcrefs (arcref, done, log1, log2)
+
+file arcref
+file done
+file log1
+file log2
+
+struct *fd
+
+begin
+ string arcrefec, arcec, temp
+ int i, dc
+ bool log
+
+ temp = mktemp ("tmp$iraf")
+
+ # Extract the primary arc reference spectrum. Determine the
+	# dispersion function with ECIDENTIFY.  Set the wavelength
+	# parameters with DISPCOR.
+
+ arcrefec = arcref // ".ec." // envget ("imtype")
+ i = stridx (",", arcrefec)
+ if (i > 0)
+ arcrefec = substr (arcrefec, 1, i-1)
+ if (!access (arcrefec)) {
+ print ("Extract arc reference image ", arcref) | tee (log1)
+ apslitproc (arcref, background="none", clean=no, weights="none")
+ }
+
+ # Get the dispersion parameters from the header. These are
+ # used for all further spectra and also flag whether this
+ # spectrum has been processed. If the parameters are missing
+ # the spectrum needs to have the dispersion function and
+ # wavelength scale determined. The HEDIT is needed because
+ # in some cases the user may exit ECIDENTIFY without updating
+ # the database (if the image was deleted but the database
+ # entry was not).
+
+ hselect (arcrefec, "dc-flag", yes, > temp)
+ fd = temp
+ dc = -1
+ i = fscan (fd, dc)
+ fd = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("Determine dispersion solution for ", arcref) | tee (log1)
+ #delete (database//"/ec"//arcref//".ec*", verify=no)
+ ecidentify (arcrefec, database=database,
+ coordlist=sparams.coordlist, match=sparams.match,
+ maxfeatures=100, zwidth=10., ftype="emission",
+ fwidth=sparams.fwidth, cradius=sparams.cradius,
+ threshold=sparams.threshold, minsep=2.,
+ function=sparams.i_function, xorder=sparams.i_xorder,
+ yorder=sparams.i_yorder, niterate=sparams.i_niterate,
+ lowreject=sparams.i_low, highreject=sparams.i_high,
+ autowrite=yes)
+ hedit (arcrefec, "refspec1", arcref // ".ec", add=yes,
+ show=no, verify=no, update=yes)
+ }
+
+	# Dispersion correct the reference arc.  This step is required
+	# to set the wavelength scale for all further spectra.
+
+ if (i < 1) {
+ dispcor (arcrefec, "", linearize=sparams.linearize,
+ database=database, table="", w1=INDEF, w2=INDEF, dw=INDEF,
+ nw=INDEF, log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, ignoreaps=no, confirm=no, listonly=no, verbose=yes,
+ logfile=log1, > log2)
+ hedit (arcrefec, "dc-flag", 0, add=yes, show=no,
+ verify=no, update=yes)
+ sproc.newdisp = yes
+ }
+
+ print (arcref, >> done)
+end
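
SARCREFS decides whether the reference arc still needs a dispersion solution by probing the DC-FLAG keyword: HSELECT writes the value to a temporary file, fscan reads it back, and a scan count below one means the keyword is absent and the step must be done.  The same probe pattern recurs throughout these scripts.  A condensed sketch, with a hypothetical image name:

    temp = mktemp ("tmp$iraf")
    hselect ("arcref.ec", "dc-flag", yes, > temp)    # arcref.ec is hypothetical
    fd = temp
    dc = -1
    i = fscan (fd, dc)
    fd = ""; delete (temp, verify=no)
    if (i < 1)
        print ("No dispersion solution yet for arcref.ec")
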
diff --git a/noao/imred/src/doecslit/sarcrefs.par b/noao/imred/src/doecslit/sarcrefs.par
new file mode 100644
index 00000000..38b81646
--- /dev/null
+++ b/noao/imred/src/doecslit/sarcrefs.par
@@ -0,0 +1,6 @@
+arcref,f,a,"",,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+fd,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/doecslit/sbatch.cl b/noao/imred/src/doecslit/sbatch.cl
new file mode 100644
index 00000000..062ac3e5
--- /dev/null
+++ b/noao/imred/src/doecslit/sbatch.cl
@@ -0,0 +1,216 @@
+# SBATCH -- Process spectra in batch.
+# This task is called in batch mode. It only processes objects
+# not previously processed unless the update or redo flags are set.
+
+procedure sbatch ()
+
+file objects {prompt="Object spectra"}
+real datamax {prompt="Max data value / cosmic ray threshold"}
+
+file arcs {prompt="List of arc spectra"}
+file arcref {prompt="Arc reference for dispersion solution"}
+string arcrefs {prompt="Arc references\n"}
+
+file done {prompt="File of spectra already done"}
+file logfile {prompt="Logfile"}
+bool redo {prompt="Redo operations?"}
+bool update {prompt="Update spectra?\n"}
+
+bool scattered {prompt="Subtract scattered light?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool extcor {prompt="Extinction correct spectra?"}
+bool fluxcal1 {prompt="Flux calibrate spectra?"}
+
+bool newaps, newdisp, newsens, newarcs
+
+struct *fd1, *fd2, *fd3
+
+begin
+ file temp, spec, specec, arc
+ bool reextract, extract, scat, disp, ext, flux, log, disperr
+ string imtype, ectype, str1, str2, str3, str4
+ int i
+ str1 = ""
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+
+ temp = mktemp ("tmp$iraf")
+
+ reextract = redo || (update && (newaps || newdisp))
+
+ fd1 = objects
+ while (fscan (fd1, spec) != EOF) {
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ print ("Object spectrum not found - " // spec // imtype,
+ >> logfile)
+ print ("Check setting of imtype", >> logfile)
+ next
+ }
+ specec = spec // ectype
+
+ scat = no
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp)
+ fd2 = temp
+ if (fscan (fd2, str1) < 1)
+ scat = yes
+ fd2 = ""; delete (temp, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat))
+ extract = yes
+ else {
+ hselect (specec, "dc-flag", yes, > temp)
+ hselect (specec, "ex-flag", yes, >> temp)
+ hselect (specec, "ca-flag", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ if (extract) {
+ if (access (specec))
+ imdelete (specec, verify=no)
+
+ if (scat) {
+ print ("Subtract scattered light in ", spec, >> logfile)
+ apslitproc (spec, ansextract="NO", ansscat="YES")
+ }
+
+ print ("Extract object spectrum ", spec, >> logfile)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp)
+ fd3 = temp
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ }
+ fd3 = ""; delete (temp, verify=no)
+ apslitproc (spec, saturation=datamax, verbose=no)
+ }
+
+ disperr = no
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ fd2 = arcs
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp)
+ fd3 = temp
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> logfile)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> logfile)
+ }
+ fd3 = ""; delete (temp, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec, >> logfile)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no,
+ >> logfile)
+
+ sdoarcs (spec, arcref, reextract, arcap, logfile, yes)
+
+ hselect (specec, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec, >> logfile)
+ disperr = yes
+ } else {
+ print ("Dispersion correct ", spec, >> logfile)
+ dispcor (specec, "", linearize=sparams.linearize,
+ database=database, table=arcref//ectype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, ignoreaps=no, confirm=no, listonly=no,
+ logfile=logfile, > "dev$null")
+ hedit (specec, "dc-flag", 0, add=yes, show=no,
+ verify=no, update=yes)
+ }
+ }
+
+ if (!disperr && (extract || disp)) {
+ if (ext)
+ print ("Extinction correct ", spec, >> logfile)
+ if (flux)
+ print ("Flux calibrate ", spec, >> logfile)
+ if (flux || ext)
+ calibrate (specec, "", extinct=extcor, flux=fluxcal1,
+ extinction=extinction, observatory=observatory,
+ ignoreaps=no, sensitivity="sens", fnu=sparams.fnu,
+ >> logfile)
+ }
+ }
+ fd1 = ""
+ delete (objects, verify=no)
+ delete (arcs, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+
+ flprcache (0)
+end
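
SBATCH skips anything finished in the interactive pass by checking each object against the "done" file: the inner fscan loop breaks on a match and `next` moves on, so only unprocessed spectra are extracted.  A condensed sketch of that bookkeeping, with hypothetical list-file names:

    fd1 = "objects.lst"                  # hypothetical list of object spectra
    while (fscan (fd1, spec) != EOF) {
        if (access ("done.lst")) {       # hypothetical list of finished spectra
            fd2 = "done.lst"
            while (fscan (fd2, specec) != EOF)
                if (spec == specec)
                    break
            if (spec == specec)
                next
            fd2 = ""
        }
        print ("would process ", spec)
    }
    fd1 = ""
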
diff --git a/noao/imred/src/doecslit/sbatch.par b/noao/imred/src/doecslit/sbatch.par
new file mode 100644
index 00000000..9bb5239d
--- /dev/null
+++ b/noao/imred/src/doecslit/sbatch.par
@@ -0,0 +1,24 @@
+objects,f,h,"",,,"Object spectra"
+datamax,r,h,,,,"Max data value / cosmic ray threshold"
+arcs,f,h,"",,,"List of arc spectra"
+arcref,f,h,"",,,"Arc reference for dispersion solution"
+arcrefs,s,h,,,,"Arc references
+"
+done,f,h,"",,,"File of spectra already done"
+logfile,f,h,"",,,"Logfile"
+redo,b,h,,,,"Redo operations?"
+update,b,h,,,,"Update spectra?
+"
+scattered,b,h,,,,"Subtract scattered light?"
+arcap,b,h,,,,"Use object apertures for arcs?"
+dispcor,b,h,,,,"Dispersion correct spectra?"
+extcor,b,h,,,,"Extinction correct spectra?"
+fluxcal1,b,h,,,,"Flux calibrate spectra?"
+newaps,b,h,,,,
+newdisp,b,h,,,,
+newsens,b,h,,,,
+newarcs,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doecslit/sdoarcs.cl b/noao/imred/src/doecslit/sdoarcs.cl
new file mode 100644
index 00000000..76ccaab8
--- /dev/null
+++ b/noao/imred/src/doecslit/sdoarcs.cl
@@ -0,0 +1,102 @@
+# SDOARCS -- Determine dispersion relation for spectrum based on reference arcs.
+
+procedure sdoarcs (spec, arcref, reextract, arcap, logfile, batch)
+
+file spec
+file arcref
+bool reextract
+bool arcap
+file logfile
+bool batch
+
+struct *fd
+
+begin
+ string imtype, ectype
+ int i, j, k, n
+ file temp, arc1, arc2, str1, str2, arctype, apref, arc, arcec, logs
+ file specec, specarc
+ bool verbose1
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ if (batch)
+ verbose1 = no
+ else
+ verbose1 = verbose
+ if (verbose1)
+ logs = logfile//",STDOUT"
+ else
+ logs = logfile
+
+ for (j=1; j<=2; j+=1) {
+ # The reference spectra refer initially to the 2D image. At the
+ # end we will reset them to refer to the 1D spectra.
+
+ hselect (spec, "refspec"//j, yes, > temp)
+ fd = temp
+ k = fscan (fd, arc1, str1)
+ fd = ""; delete (temp, verify=no)
+ if (k < 1)
+ break
+
+ # Strip possible image extension.
+ i = strlen (arc1)
+ if (i > n && substr (arc1, i-n+1, i) == imtype)
+ arc1 = substr (arc1, 1, i-n)
+
+ # Set extraction output and aperture reference depending on whether
+	# the arcs are to be reextracted using recentered or retraced object
+ # apertures.
+
+ if (arcap) {
+ arc2 = spec // arc1
+ apref = spec
+ if (access (arc2//ectype))
+ imdelete (arc2//ectype, verify=no)
+ delete (database//"/ec"//arc2//".ec*", verify = no)
+ } else {
+ arc2 = arc1
+ apref = apslitproc.references
+ if (reextract && access (arc2//ectype)) {
+ if (arc2 != arcref)
+ imdelete (arc2//ectype, verify=no)
+ }
+ }
+
+ # Extract and determine dispersion function if necessary.
+ if (!access (arc2//ectype)) {
+ delete (database//"/ec"//arc2//".ec*", verify = no)
+ if (!batch)
+ print ("Extract and reidentify arc spectrum ", arc1)
+ print ("Extract and reidentify arc spectrum ", arc1, >> logfile)
+ apslitproc (arc1, output=arc2//".ec", references=apref,
+ background="none", clean=no, weights="none",
+ verbose=verbose1)
+ ecreidentify (arc2//".ec", arcref//".ec", shift=0.,
+ cradius=sparams.cradius, threshold=sparams.threshold,
+ refit=sparams.refit, database=database, logfiles=logs)
+
+ # If not reextracting arcs based on object apertures
+ # then save the extracted arc to avoid doing it again.
+
+ if (arc1 != arc2)
+ imdelete (arc2//".ec", verify=no)
+ }
+
+ # Set the REFSPEC parameters for echelle spectrum.
+ if (k == 1)
+ hedit (spec//".ec", "refspec"//j, arc2//".ec", add=yes,
+ verify=no, show=no, update=yes)
+ else
+ hedit (spec//".ec", "refspec"//j, arc2//".ec "//str1, add=yes,
+ verify=no, show=no, update=yes)
+ }
+end
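
After extraction SDOARCS repoints the REFSPECn header keywords from the 2D arc images to the extracted ".ec" spectra (preserving any weight string), since DISPCOR later reads those keywords to find the dispersion solution for each object.  A one-line sketch of the rewrite, with hypothetical image names:

    # Point the object's first reference-spectrum keyword at the extracted arc.
    hedit ("obj0001.ec", "refspec1", "arc0001.ec", add=yes,
        verify=no, show=no, update=yes)
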
diff --git a/noao/imred/src/doecslit/sdoarcs.par b/noao/imred/src/doecslit/sdoarcs.par
new file mode 100644
index 00000000..648bacaf
--- /dev/null
+++ b/noao/imred/src/doecslit/sdoarcs.par
@@ -0,0 +1,8 @@
+spec,f,a,"",,,
+arcref,f,a,"",,,
+reextract,b,a,,,,
+arcap,b,a,,,,
+logfile,f,a,"",,,
+batch,b,a,,,,
+fd,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/doecslit/sfluxcal.cl b/noao/imred/src/doecslit/sfluxcal.cl
new file mode 100644
index 00000000..b8b7fd80
--- /dev/null
+++ b/noao/imred/src/doecslit/sfluxcal.cl
@@ -0,0 +1,214 @@
+# SFLUXCAL -- Extract standard stars and determine sensitivity function.
+# If flux calibrating, extract and dispersion correct the standard star
+# spectra. Compile the standard star fluxes from the calibration
+# directory. The user is queried for the star name but the band passes
+# are not allowed to change interactively.  Next compute the sensitivity
+# function using SENSFUNC. This is interactive. Once the sensitivity
+# function images are created, flux and extinction calibrate the standard
+# stars. This is done in such a way that if new standard stars are added
+# in a later execution only the new stars are added and then a new
+# sensitivity function is computed. If the update flag is set all
+# spectra which are specified are reprocessed if they were previously
+# processed. In a redo the "std" file is deleted, otherwise additions
+# are appended to this file.
+
+procedure sfluxcal (stds, arcs, arcref, arcrefs, redo, update,
+ scattered, arcap, extcor, done, log1, log2)
+
+file stds
+file arcs
+file arcref
+string arcrefs
+bool redo
+bool update
+bool scattered
+bool arcap
+bool extcor
+file done
+file log1
+file log2
+
+struct *fd1, *fd2, *fd3
+
+begin
+ string imtype, ectype
+ string spec, specec, arc, str1, str2, str3, str4
+ file temp1, temp2
+ int i, j
+ bool reextract, log, scat
+ str1 = ""
+ str2 = ""
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+
+ reextract = redo || (update && (sproc.newaps || sproc.newdisp))
+ sproc.newsens = no
+
+ if (redo && access ("std"))
+ delete ("std", verify=no)
+
+ fd1 = stds
+ while (fscan (fd1, spec) != EOF) {
+ specec = spec // ectype
+
+ scat = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp1)
+ fd2 = temp1
+ if (fscan (fd2, str1) < 1)
+ scat = yes
+ fd2 = ""; delete (temp1, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat)) {
+ if (access (specec))
+ imdelete (specec, verify=no)
+
+ if (scat) {
+ print ("Subtract scattered light in ", spec) | tee (log1)
+ apslitproc (spec, ansextract="NO", ansscat="YES")
+ }
+
+ print ("Extract standard star spectrum ", spec) | tee (log1)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp1)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp1)
+ fd2 = temp1
+ if (fscan (fd2, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd2, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd2 = ""; delete (temp1, verify=no)
+ apslitproc (spec)
+ }
+
+ hselect (specec, "dc-flag,std-flag", yes, > temp1)
+ fd2 = temp1
+ j = fscan (fd2, str1, str2)
+ fd2 = ""; delete (temp1, verify=no)
+ if (j < 1) {
+ # Fix arc headers if necessary.
+ if (sproc.newarcs) {
+ fd2 = arcs
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ sproc.newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ sdoarcs (spec, arcref, reextract, arcap, log1, no)
+
+ hselect (specec, "refspec1", yes, > temp1)
+ fd2 = temp1
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp1, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ next
+ } else {
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specec, "", linearize=sparams.linearize,
+ database=database, table=arcref//ectype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF, log=sparams.log,
+ flux=sparams.flux, global=no, ignoreaps=no, confirm=no,
+ listonly=no, logfile=logfile)
+ hedit (specec, "dc-flag", 0, add=yes, show=no,
+ verify=no, update=yes)
+ }
+ }
+
+ if (j < 2 || !access ("std")) {
+ print ("Compile standard star fluxes for ", spec) | tee (log1)
+ standard (specec, output="std", samestar=yes, beam_switch=no,
+ apertures="", bandwidth=sparams.bandwidth,
+ bandsep=sparams.bandsep, fnuzero=3.68E-20,
+ extinction=extinction, caldir=caldir,
+ observatory=observatory, interact=sparams.s_interact)
+ hedit (specec, "std-flag", "yes", add=yes, verify=no,
+ show=no, update=yes)
+ print (specec, >> temp2)
+ sproc.newsens = yes
+ }
+ }
+ fd1 = ""
+
+ sections ("sens.????"//imtype, option="nolist")
+ if (sproc.newsens || sections.nimages == 0) {
+ if (!access ("std")) {
+ print ("No standard star data") | tee (log1)
+ sproc.fluxcal1 = no
+ } else {
+ imdelete ("sens.????"//imtype, verify=no)
+ print ("Compute sensitivity function") | tee (log1)
+ sensfunc ("std", "sens", apertures="", ignoreaps=no,
+ logfile=logfile, extinction=extinction,
+ newextinction="extinct.dat", observatory=observatory,
+ function=sparams.s_function, order=sparams.s_order,
+ interactive=yes, graphs="sr", marks="plus cross box")
+ sproc.newsens = yes
+ }
+ }
+
+ # Note that if new standard stars are added the old standard
+ # stars are not recalibrated unless the redo flag is used.
+
+ if (sproc.fluxcal1 && sproc.newsens) {
+ print ("Flux and/or extinction calibrate standard stars") |
+ tee (log1)
+ calibrate ("@"//temp2, "", extinct=extcor, flux=sproc.fluxcal1,
+ extinction=extinction, observatory=observatory, ignoreaps=no,
+ sensitivity="sens", fnu=sparams.fnu) | tee (log1, > log2)
+ if (sproc.splot1) {
+ print ("Standard stars:")
+ str1 = sproc.anssplot
+ if (str1 == "NO" || str1 == "YES")
+ sproc.splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ sproc.splot2 = no
+ else
+ sproc.splot2 = yes
+ }
+ if (sproc.splot2)
+ splot ("@"//temp2)
+ sections (temp2, option="fullname", >> done)
+ delete (temp2, verify=no)
+ }
+end
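
The flux calibration branch above is the standard ONEDSPEC chain: STANDARD accumulates the observed counts and tabulated fluxes of each standard star into the "std" file, SENSFUNC fits the sensitivity images ("sens.????"), and CALIBRATE applies them and the extinction table to the spectra.  A bare-bones sketch of the chain, restricted to parameters that appear above, with a hypothetical standard-star image:

    standard ("hd84937.ec", output="std", extinction=extinction,
        caldir=caldir, observatory=observatory)
    sensfunc ("std", "sens", extinction=extinction,
        observatory=observatory, interactive=yes)
    calibrate ("hd84937.ec", "", extinct=yes, flux=yes,
        sensitivity="sens", observatory=observatory)
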
diff --git a/noao/imred/src/doecslit/sfluxcal.par b/noao/imred/src/doecslit/sfluxcal.par
new file mode 100644
index 00000000..b750d265
--- /dev/null
+++ b/noao/imred/src/doecslit/sfluxcal.par
@@ -0,0 +1,16 @@
+stds,s,a,,,,
+arcs,s,a,,,,
+arcref,f,a,"",,,
+arcrefs,s,a,,,,
+redo,b,a,,,,
+update,b,a,,,,
+scattered,b,a,,,,
+arcap,b,a,,,,
+extcor,b,a,,,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/doecslit/sgetspec.cl b/noao/imred/src/doecslit/sgetspec.cl
new file mode 100644
index 00000000..7038dcb2
--- /dev/null
+++ b/noao/imred/src/doecslit/sgetspec.cl
@@ -0,0 +1,177 @@
+# SGETSPEC -- Get spectra which are CCD processed and not extracted.
+# It also recognizes the arc spectra in the object list and arc table,
+# and strips the image type extension.
+
+procedure sgetspec (objects, arcs, arctable, standards, obj, arc, std)
+
+string objects {prompt="List of object images"}
+string arcs {prompt="List of arc images"}
+file arctable {prompt="Arc table"}
+string standards {prompt="List of standard images"}
+file obj {prompt="File of object images"}
+file arc {prompt="File of arc images"}
+file std {prompt="File of standard images"}
+bool ccdproc {prompt="Add CCDPROC keyword and continue?",
+ mode="q"}
+struct *fd1, *fd2
+
+begin
+ string imtype, temp, image, itype
+ int n, n1, narcs
+
+ imtype = "." // envget ("imtype")
+ n = stridx (",", imtype)
+ if (n > 0)
+ imtype = substr (imtype, 1, n-1)
+ n1 = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ # Initialize files
+ set clobber=yes
+ sleep (> obj)
+ sleep (> arc)
+ sleep (> std)
+ set clobber=no
+
+ # Do arcs
+ narcs = 0
+ sections (arcs, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "comp"
+ if (itype != "comp" && itype != "COMPARISON" &&
+ itype != "comparison" && itype != "COMP")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ # Do arc table.
+ if (arctable != "") {
+ fd2 = arctable
+ while (fscan (fd2, image, image) != EOF) {
+ if (nscan() != 2)
+ next
+ sections (image, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "comp"
+ if (itype != "comp" && itype != "COMPARISON" &&
+ itype != "comparison" && itype != "COMP")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ fd1 = ""; delete (temp, verify=no)
+ }
+ }
+
+ # Do standards
+ sections (standards, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ print (image, >> std)
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ # Do objects
+ sections (objects, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "object"
+
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ if (itype == "object" || itype == "OBJECT")
+ print (image, >> obj)
+ else if (itype == "comp" || itype == "COMPARISON" ||
+ itype == "comparison" || itype == "COMP") {
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ if (narcs > 0) {
+ sort (arc, column=0, ignore=yes, numeric=no, reverse=no, > temp)
+ delete (arc, verify=no)
+ rename (temp, arc, field="all")
+ itype = ""
+ fd1 = arc
+ while (fscan (fd1, image, narcs) != EOF) {
+ if (image != itype)
+ printf ("%s %02d\n", image, narcs, >> temp)
+ itype = image
+ }
+ delete (arc, verify=no)
+ sort (temp, column=2, ignore=yes, numeric=yes, reverse=no) |
+ fields ("STDIN", "1", lines="1-99", > arc)
+ delete (temp, verify=no)
+ }
+end
diff --git a/noao/imred/src/doecslit/sgetspec.par b/noao/imred/src/doecslit/sgetspec.par
new file mode 100644
index 00000000..1f5387cc
--- /dev/null
+++ b/noao/imred/src/doecslit/sgetspec.par
@@ -0,0 +1,11 @@
+objects,s,a,,,,"List of object images"
+arcs,s,a,,,,"List of arc images"
+arctable,f,a,"",,,"Arc table"
+standards,s,a,,,,"List of standard images"
+obj,f,a,"",,,"File of object images"
+arc,f,a,"",,,"File of arc images"
+std,f,a,"",,,"File of standard images"
+ccdproc,b,q,,,,"Add CCDPROC keyword and continue?"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doecslit/slistonly.cl b/noao/imred/src/doecslit/slistonly.cl
new file mode 100644
index 00000000..f765af36
--- /dev/null
+++ b/noao/imred/src/doecslit/slistonly.cl
@@ -0,0 +1,241 @@
+# SLISTONLY -- List processing to be done.
+#
+# This follows pretty much the same logic as the full procedure but doesn't
+# do anything but list the operations.
+
+procedure slistonly (objects, apref, arcs, standards, scattered, dispcor,
+ extcor, fluxcal, redo, update)
+
+string objects
+file apref
+string arcs
+string standards
+
+bool scattered
+bool dispcor
+bool extcor
+bool fluxcal
+bool redo
+bool update
+
+struct *fd1
+struct *fd2
+
+begin
+ string imtype, ectype
+ string spec, arcref
+ string specec, arcrefec
+	string	temp1, temp2, done, str, str1, str2, s1
+ bool newaps, newdisp, newsens
+ bool extract, disp, ext, flux, scat, reextract, fluxcal1, stdfile
+ int i, j, n
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ newaps = no
+ newdisp = no
+ newsens = no
+ fluxcal1 = fluxcal
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref)) {
+ print ("Set reference aperture for ", apref)
+ newaps = yes
+ }
+
+ scat = no
+ if (scattered) {
+ hselect (apref, "apscatte", yes, > temp1)
+ fd1 = temp1
+ if (fscan (fd1, str1) < 1)
+ scat = yes
+ fd1 = ""; delete (temp1, verify=no)
+ }
+ if (scat)
+	    print ("Subtract scattered light in ", apref)
+
+ if (dispcor) {
+ hselect (arcs, "$I,wat0_001", yes, > temp1)
+ fd1 = temp1; s1 = ""
+ i = fscanf (fd1, "%s\tsystem=%s", arcref, s1)
+ if (i < 1 || (i == 2 && (s1 == "equispec" || s1 == "multispec")))
+ error (1, "No reference arcs")
+ fd1 = ""; delete (temp1, verify=no)
+ i = strlen (arcref)
+ if (i > n && substr (arcref, i-n+1, i) == imtype)
+ arcref = substr (arcref, 1, i-n)
+ arcrefec = arcref // ectype
+
+ reextract = redo || (update && newaps)
+ if (reextract || !access (arcrefec)) {
+ print ("Extract arc reference image ", arcref)
+ print ("Determine dispersion solution for ", arcref)
+ newdisp = yes
+ } else {
+ hselect (arcrefec, "refspec1,dc-flag", yes, > temp1)
+ fd1 = temp1
+ i = fscan (fd1, str, j)
+ fd1 = ""; delete (temp1, verify=no)
+ if (i < 1) {
+ print ("Determine dispersion solution for ", arcref)
+ newdisp = yes
+ }
+ }
+ print (arcref, > done)
+
+ if (fluxcal1) {
+ stdfile = access ("std")
+ if (redo && stdfile)
+ stdfile = no
+
+ reextract = redo || (update && (newaps || newdisp))
+ hselect (standards, "$I,ctype1", yes, >temp1)
+ fd1 = temp1
+ while (fscan (fd1, spec, s1) != EOF) {
+ if (nscan() == 2 && s1 == "MULTISPE")
+ next
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+ specec = spec // ectype
+
+ scat = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat)) {
+ if (scat)
+ print ("Subtract scattered light from ", spec)
+ print ("Extract standard star spectrum ", spec)
+ print ("Dispersion correct ", spec)
+ print ("Compile standard star fluxes for ", spec)
+ stdfile = yes
+ newsens = yes
+ } else {
+ hselect (specec, "dc-flag,std-flag", yes, > temp2)
+ fd2 = temp2
+ i = fscan (fd2, str1, str2)
+ fd2 = ""; delete (temp2, verify=no)
+ if (i < 1)
+ print ("Dispersion correct ", spec)
+ if (i < 2) {
+ print ("Compile standard star fluxes for ", spec)
+ stdfile = yes
+ newsens = yes
+ }
+ }
+ print (spec, >> done)
+ }
+ fd1 = ""; delete (temp1, verify=no)
+
+ sections ("sens.????"//imtype, option="nolist")
+ if (newsens || sections.nimages == 0) {
+ if (!stdfile) {
+ print ("No standard stars")
+ fluxcal1 = no
+ } else {
+ print ("Compute sensitivity function")
+ newsens = yes
+ }
+ }
+
+ if (fluxcal1 && newsens)
+ print ("Flux and/or extinction calibrate standard stars")
+ }
+ }
+
+ reextract = redo || (update && (newaps || newdisp))
+ hselect (objects, "$I,ctype1", yes, > temp1)
+ fd1 = temp1
+ while (fscan (fd1, spec, s1) != EOF) {
+ if (nscan() == 2 && s1 == "MULTISPE")
+ next
+	    i = strlen (spec)
+	    if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+
+ specec = spec // ectype
+
+ scat = no
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat)) {
+ extract = yes
+ } else {
+ hselect (specec, "dc-flag", yes, > temp2)
+ hselect (specec, "ex-flag", yes, >> temp2)
+ hselect (specec, "ca-flag", yes, >> temp2)
+ fd2 = temp2
+ extract = update && newaps
+ if (fscan (fd2, str1) == 1)
+ extract = update && newdisp
+ else
+ disp = yes
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp2, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ if (scat)
+ print ("Subtract scattered light from ", spec)
+ if (extract)
+ print ("Extract object spectrum ", spec)
+ if (disp)
+ print ("Dispersion correct ", spec)
+ if (ext)
+ print ("Extinction correct ", spec)
+ if (flux)
+ print ("Flux calibrate ", spec)
+ }
+ fd1 = ""; delete (temp1, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/doecslit/slistonly.par b/noao/imred/src/doecslit/slistonly.par
new file mode 100644
index 00000000..f0986d61
--- /dev/null
+++ b/noao/imred/src/doecslit/slistonly.par
@@ -0,0 +1,13 @@
+objects,s,a,,,,
+apref,f,a,"",,,
+arcs,s,a,,,,
+standards,s,a,,,,
+scattered,b,a,,,,
+dispcor,b,a,,,,
+extcor,b,a,,,,
+fluxcal,b,a,,,,
+redo,b,a,,,,
+update,b,a,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/doecslit/slittasks.cl b/noao/imred/src/doecslit/slittasks.cl
new file mode 100644
index 00000000..ada92452
--- /dev/null
+++ b/noao/imred/src/doecslit/slittasks.cl
@@ -0,0 +1,19 @@
+#{ ECSLITPROC tasks
+
+task doecslit = "doecslit$doecslit.cl"
+task sproc = "doecslit$sproc.cl"
+task sarcrefs = "doecslit$sarcrefs.cl"
+task sdoarcs = "doecslit$sdoarcs.cl"
+task sfluxcal = "doecslit$sfluxcal.cl"
+task sbatch = "doecslit$sbatch.cl"
+task slistonly = "doecslit$slistonly.cl"
+task sgetspec = "doecslit$sgetspec.cl"
+
+task sparams = "doecslit$sparams.par"
+
+task apslitproc = "doecslit$x_apextract.e"
+
+hidetask sproc, sbatch, sarcrefs, sdoarcs, sfluxcal, slistonly, sgetspec
+hidetask sparams, apslitproc
+
+keep
diff --git a/noao/imred/src/doecslit/sparams.par b/noao/imred/src/doecslit/sparams.par
new file mode 100644
index 00000000..62868ed8
--- /dev/null
+++ b/noao/imred/src/doecslit/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,2,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- BACKGROUND AND SCATTERED LIGHT PARAMETERS --"
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,Background function
+b_order,i,h,1,,,Background function order
+b_naverage,i,h,-100,,,Background average or median
+b_niterate,i,h,0,0,,Background rejection iterations
+b_low,r,h,3.,0.,,Background lower rejection sigma
+b_high,r,h,3.,0.,,Background upper rejection sigma
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$thar.dat,,,"Line list"
+match,r,h,1.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,"Centering radius in pixels"
+i_function,s,h,"legendre","legendre|chebyshev",,"Echelle coordinate function"
+i_xorder,i,h,3,1,,Order of coordinate function along dispersion
+i_yorder,i,h,3,1,,Order of coordinate function across dispersion
+i_niterate,i,h,3,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+bandwidth,r,h,10.,,,Bandpass widths
+bandsep,r,h,10.,,,Bandpass separation
+s_interact,b,h,yes,,,Graphic interaction to examine/define bandpasses
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
diff --git a/noao/imred/src/doecslit/sproc.cl b/noao/imred/src/doecslit/sproc.cl
new file mode 100644
index 00000000..8caeadd9
--- /dev/null
+++ b/noao/imred/src/doecslit/sproc.cl
@@ -0,0 +1,490 @@
+# SPROC -- Process echelle slit spectra
+# This program combines all the operations of scattered light
+# subtraction, extraction, dispersion correction, extinction correction,
+# and flux calibration in as simple and noninteractive a manner as
+# possible. The data must all share the same position on the 2D image
+# and the same dispersion solution apart from small instrumental changes
+# which can be followed automatically.
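+# SPROC is normally run from DOECSLIT, which supplies the parameters and,
+# when requested, submits the remaining spectra to SBATCH as a background job.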
+
+procedure sproc (objects, apref, arcs, arctable, standards, recenter,
+ resize, quicklook, trace, scattered, arcap, dispcor, extcor,
+ fluxcal, splot, redo, update, batch, listonly)
+
+file objects {prompt="List of object spectra"}
+
+file apref {prompt="Aperture reference spectrum"}
+file arcs {prompt="List of arc spectra"}
+file arctable {prompt="Arc assignment table (optional)"}
+file standards {prompt="List of standard star spectra\n"}
+
+bool recenter {prompt="Recenter object apertures?"}
+bool resize {prompt="Resize object apertures?"}
+bool quicklook {prompt="Edit/review object apertures?"}
+bool trace {prompt="Trace object spectra?"}
+bool scattered {prompt="Subtract scattered light?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool extcor {prompt="Extinction correct spectra?"}
+bool fluxcal {prompt="Flux calibrate spectra?"}
+bool splot {prompt="Plot the final spectrum?"}
+bool redo {prompt="Redo operations if previously done?"}
+bool update {prompt="Update spectra if cal data changes?"}
+bool batch {prompt="Extract objects in batch?"}
+bool listonly {prompt="List steps but don't process?\n"}
+
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+
+string anssplot = "yes" {prompt="Splot spectrum?", mode="q",
+ enum="no|yes|NO|YES"}
+bool newaps, newdisp, newsens, newarcs
+bool fluxcal1, splot1, splot2
+bool dobatch
+
+struct *fd1, *fd2, *fd3
+
+begin
+ string imtype, ectype
+ string arcref, spec, arc
+ string arcrefec, specec, arcec
+ string temp, done
+ string str1, str2, str3, str4, arcrefs, log1, log2
+ bool reextract, extract, scat, disp, ext, flux, log, disperr
+ int i, j, n
+ struct err
+ str1 = ""
+
+ # Call a separate task to do the listing to minimize the size of
+	# this script and improve its readability.
+
+ dobatch = no
+ if (listonly) {
+ slistonly (objects, apref, arcs, standards, scattered,
+ dispcor, extcor, fluxcal, redo, update)
+ bye
+ }
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ # Temporary files used repeatedly in this script. Under some
+ # abort circumstances these files may be left behind.
+
+ temp = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ # Rather than always have switches on the logfile and verbose flags
+ # we use TEE and set a file to "dev$null" if output is not desired.
+ # We must check for the null string to signify no logfile.
+
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # If the update switch is used changes in the calibration data
+ # can cause images to be reprocessed (if they are in the object
+ # list). Possible changes are in the aperture definitions,
+ # dispersion solution, and sensitivity function. The newarcs
+ # flag is used to only go through the arc image headers once
+ # setting the reference spectrum, airmass, and UT.
+
+ newaps = no
+ newdisp = no
+ newsens = no
+ newarcs = yes
+ fluxcal1 = fluxcal
+
+ # Check if there are aperture definitions in the database and
+ # define them if needed. This is usually somewhat interactive.
+ # Set the newaps flag in case an update is desired.
+
+ # Initialize APSCRIPT for aperture reference.
+ apslitproc.saturation = INDEF
+ apslitproc.references = ""
+ apslitproc.ansfind = "YES"
+ if (recenter)
+ apslitproc.ansrecenter = "YES"
+ else
+ apslitproc.ansrecenter = "NO"
+ if (resize)
+ apslitproc.ansresize = "YES"
+ else
+ apslitproc.ansresize = "NO"
+ apslitproc.ansedit = "yes"
+ apslitproc.anstrace = "YES"
+ apslitproc.ansfittrace = "yes"
+ apslitproc.ansextract = "NO"
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref)) {
+ if (!access (apref // imtype)) {
+ printf ("Aperture reference spectrum not found - %s%s\n",
+ apref, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ scat = no
+ if (scattered) {
+ hselect (apref, "apscatte", yes, > temp)
+ fd1 = temp
+ if (fscan (fd1, str1) < 1)
+ scat = yes
+ fd1 = ""; delete (temp, verify=no)
+ }
+
+ print ("Set reference aperture for ", apref) | tee (log1)
+ delete (database//"/ap"//apref, verify=no, >& "dev$null")
+ apslitproc (apref)
+ newaps = yes
+ }
+
+ # Initialize APSCRIPT for aperture definitions.
+ if (quicklook) {
+ apslitproc.ansedit = "NO"
+ apslitproc.ansfittrace = "NO"
+ }
+ if (trace) {
+ apslitproc.anstrace = "yes"
+ } else {
+ apslitproc.anstrace = "NO"
+ }
+ apslitproc.ansextract = "NO"
+ apslitproc.ansscat = "NO"
+
+ print ("Define object apertures", >> log1)
+ if (redo)
+ apslitproc ("@"//objects, references=apref)
+ else
+ apslitproc ("@"//objects, references="NEW"//apref)
+ if (dispcor && fluxcal1) {
+ if (redo)
+ apslitproc ("@"//standards, references=apref)
+ else
+ apslitproc ("@"//standards, references="NEW"//apref)
+ }
+
+ # Initialize APSCRIPT for extraction and SPLOT.
+ apslitproc.ansrecenter = "NO"
+ apslitproc.ansresize = "NO"
+ apslitproc.ansedit = "NO"
+ apslitproc.anstrace = "NO"
+ apslitproc.ansextract = "YES"
+ apslitproc.ansreview = "NO"
+ apslitproc.ansscat = "NO"
+ apslitproc.anssmooth = "YES"
+
+ if (splot && !quicklook) {
+ splot1 = yes
+ splot2 = yes
+ } else {
+ splot1 = no
+ splot2 = no
+ }
+
+	# The next step is to set up the scattered light correction if needed.
+	# We use the aperture reference image for the interactive setting.
+	# If this image has been scattered light corrected we assume the
+	# scattered light function parameters are correctly set.
+
+ scat = no
+ if (scattered) {
+ hselect (apref, "apscatte", yes, > temp)
+ fd1 = temp
+ if (fscan (fd1, str1) < 1)
+ scat = yes
+ fd1 = ""; delete (temp, verify=no)
+ }
+ if (scat) {
+ print ("Setup and do scattered light subtraction in ", apref) |
+ tee (log1)
+ apslitproc.ansfitscatter = "yes"
+ apslitproc.ansfitsmooth = "yes"
+ apslitproc (apref, ansextract="NO", ansscat="YES")
+ apslitproc.ansfitscatter = "NO"
+ apslitproc.ansfitsmooth = "NO"
+ }
+
+ # If not dispersion correcting we can go directly to extracting
+ # the object spectra. The reference arcs are the first on
+ # the arc lists. The processing of the reference arcs is done
+ # by the task ARCREFS.
+
+ arcref = ""
+ arcrefs = ""
+ if (dispcor) {
+ if (arctable == "")
+ arcrefs = "@"//arcs
+ else
+ arcrefs = arctable
+
+ fd1 = arcs
+ if (fscan (fd1, arcref) == EOF)
+ error (1, "No reference arcs")
+ fd1 = ""
+ if (!access (arcref // imtype)) {
+ printf ("Arc reference spectrum not found - %s%s\n",
+ arcref, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ arcrefec = arcref // ectype
+ reextract = redo || (update && newaps)
+ if (reextract && access (arcrefec))
+ imdelete (arcrefec, verify=no)
+
+ apslitproc.references = apref
+ sarcrefs (arcref, done, log1, log2)
+ apslitproc.references = ""
+
+ if (fluxcal1)
+ sfluxcal (standards, arcs, arcref, arcrefs, redo, update,
+ scattered, arcap, extcor, done, log1, log2)
+ }
+
+ # Now we are ready to process the object spectra.
+
+ reextract = redo || (update && (newaps || newdisp))
+ fd1 = objects
+ while (fscan (fd1, spec) != EOF) {
+ # Check if previously done; i.e. arc.
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ printf ("Object spectrum not found - %s%s\n",
+ spec, imtype) | scan (err)
+ print (err) | tee (log1)
+ print ("Check setting of imtype")
+ next
+ }
+ specec = spec // ectype
+
+ # Determine required operations from the flags and image header.
+ scat = no
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp)
+ fd2 = temp
+ if (fscan (fd2, str1) < 1)
+ scat = yes
+ fd2 = ""; delete (temp, verify=no)
+ }
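+	    # Reuse an existing extraction unless the update switch and new
+	    # calibration data require reextraction.  The dc-flag, ex-flag,
+	    # and ca-flag keywords record which calibrations have been applied.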
+ if (reextract || !access (specec) || (update && scat))
+ extract = yes
+ else {
+ hselect (specec, "dc-flag", yes, > temp)
+ hselect (specec, "ex-flag", yes, >> temp)
+ hselect (specec, "ca-flag", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ # If fully processed go to the next object.
+ if (!extract && !disp && !extcor && !flux)
+ next
+
+ # If not interactive and the batch flag is set submit rest to batch.
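+	    # The SBATCH pset is loaded with the current processing state so
+	    # that the calling task can submit the remaining spectra as a
+	    # background job.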
+ if (batch && !splot1 && !splot2) {
+ fd1 = ""
+ flprcache
+ sbatch.objects = objects
+ sbatch.datamax = datamax
+ sbatch.arcs = arcs
+ sbatch.arcref = arcref
+ sbatch.arcrefs = arcrefs
+ sbatch.done = done
+ sbatch.logfile = log1
+ sbatch.redo = reextract
+ sbatch.update = update
+ sbatch.scattered = scattered
+ sbatch.arcap = arcap
+ sbatch.dispcor = dispcor
+ sbatch.fluxcal1 = fluxcal1
+ sbatch.extcor = extcor
+ sbatch.newaps = newaps
+ sbatch.newdisp = newdisp
+ sbatch.newsens = newsens
+ sbatch.newarcs = newarcs
+ dobatch = yes
+ return
+ }
+
+ # Process the spectrum in foreground.
+ if (extract) {
+ if (access (specec))
+ imdelete (specec, verify=no)
+
+ if (scat) {
+ print ("Subtract scattered light in ", spec) | tee (log1)
+ apslitproc (spec, ansextract="NO", ansscat="YES")
+ }
+
+ print ("Extract object spectrum ", spec) | tee (log1)
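+	        # Compute JD keywords, and effective airmass when coordinates
+	        # are available, so they are recorded before extraction.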
+ hselect (spec, "date-obs,ut,exptime", yes, > temp)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd2, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ apslitproc (spec, saturation=datamax)
+ }
+
+ disperr = no
+ if (disp) {
+ # Fix arc headers if necessary.
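+		# Add Julian date, airmass, and self-referencing REFSPEC1
+		# keywords to each arc the first time through.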
+ if (newarcs) {
+ fd2 = arcs
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp)
+ fd3 = temp
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ sdoarcs (spec, arcref, reextract, arcap, log1, no)
+
+ hselect (specec, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ disperr = yes
+ } else {
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specec, "", linearize=sparams.linearize,
+ database=database, table=arcref//ectype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, confirm=no, ignoreaps=no, listonly=no,
+ logfile=logfile)
+ hedit (specec, "dc-flag", 0, add=yes, show=no,
+ verify=no, update=yes)
+ }
+ }
+
+ if (!disperr && (extract || disp)) {
+ if (ext)
+ print ("Extinction correct ", spec) | tee (log1)
+ if (flux)
+ print ("Flux calibrate ", spec) | tee (log1)
+ if (flux || ext)
+ calibrate (specec, "", extinct=extcor, flux=fluxcal1,
+ extinction=extinction, observatory=observatory,
+ ignoreaps=no, sensitivity="sens", fnu=sparams.fnu) |
+ tee (log1, > log2)
+ }
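+	    # Optionally plot the result.  A lowercase answer to the query
+	    # applies to this spectrum only; an uppercase answer (YES/NO)
+	    # fixes the behavior for the remaining spectra.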
+ if (extract || disp || ext || flux) {
+ if (splot1) {
+ print (specec, ":")
+ str1 = anssplot
+ if (str1 == "NO" || str1 == "YES")
+ splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ splot2 = no
+ else
+ splot2 = yes
+ }
+ if (splot2)
+ splot (specec)
+ else if (splot && quicklook) {
+ if (disp) {
+ print ("q") |
+ specplot (specec, apertures="", autolayout=no,
+ scale=1., offset=0., step=0., sysid=yes,
+ yscale=yes, xmin=INDEF, xmax=INDEF, ymin=INDEF,
+ ymax=INDEF, logfile="", graphics="stdgraph",
+ cursor="STDIN")
+ } else {
+ print ("q") |
+ specplot (specec, apertures="", autolayout=yes,
+ autoscale=no, scale=1., offset=0., step=0.,
+ sysid=yes, yscale=no, xmin=INDEF, xmax=INDEF,
+ ymin=INDEF, ymax=INDEF, logfile="",
+ graphics="stdgraph", cursor="STDIN")
+ }
+ }
+ }
+ print (spec, >> done)
+ }
+ fd1 = ""
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/doecslit/sproc.par b/noao/imred/src/doecslit/sproc.par
new file mode 100644
index 00000000..a0ecbd0c
--- /dev/null
+++ b/noao/imred/src/doecslit/sproc.par
@@ -0,0 +1,35 @@
+objects,f,a,"",,,"List of object spectra"
+apref,f,a,"",,,"Aperture reference spectrum"
+arcs,f,a,"",,,"List of arc spectra"
+arctable,f,a,"",,,"Arc assignment table (optional)"
+standards,f,a,"",,,"List of standard star spectra
+"
+recenter,b,a,,,,"Recenter object apertures?"
+resize,b,a,,,,"Resize object apertures?"
+quicklook,b,a,,,,"Edit/review object apertures?"
+trace,b,a,,,,"Trace object spectra?"
+scattered,b,a,,,,"Subtract scattered light?"
+arcap,b,a,,,,"Use object apertures for arcs?"
+dispcor,b,a,,,,"Dispersion correct spectra?"
+extcor,b,a,,,,"Extinction correct spectra?"
+fluxcal,b,a,,,,"Flux calibrate spectra?"
+splot,b,a,,,,"Plot the final spectrum?"
+redo,b,a,,,,"Redo operations if previously done?"
+update,b,a,,,,"Update spectra if cal data changes?"
+batch,b,a,,,,"Extract objects in batch?"
+listonly,b,a,,,,"List steps but don\'t process?
+"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+anssplot,s,q,"yes",no|yes|NO|YES,,"Splot spectrum?"
+newaps,b,h,,,,
+newdisp,b,h,,,,
+newsens,b,h,,,,
+newarcs,b,h,,,,
+fluxcal1,b,h,,,,
+splot1,b,h,,,,
+splot2,b,h,,,,
+dobatch,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/dofoe/Revisions b/noao/imred/src/dofoe/Revisions
new file mode 100644
index 00000000..25357644
--- /dev/null
+++ b/noao/imred/src/dofoe/Revisions
@@ -0,0 +1,47 @@
+.help revisions Jan95 noao.imred.src.dofoe
+.nf
+dofoe$batch.cl
+dofoe$proc.cl
+dofoe$response.cl
+ Error messages now hint to check imtype setting.
+ (4/15/05, Valdes)
+
+========
+V2.11.3b
+========
+
+dofoe$proc.cl
+ Modified code to eliminate goto. This is for use with pyraf.
+ (11/21/00, Valdes)
+
+========
+V2.11.3a
+========
+
+dofoe$arcrefs.cl
+dofoe$batch.cl
+dofoe$doarcs.cl
+dofoe$listonly.cl
+dofoe$proc.cl
+dofoe$response.cl
+ Any additional qualifiers in the imtype string are stripped.
+ (8/14/97, Valdes)
+
+=========
+V2.11BETA
+=========
+
+dofoe$apscript.par
+ Made changes for the new aperture selection option. (9/5/96, Valdes)
+
+dofoe$params.par
+dofoe$arcrefs.cl
+dofoe$doarcs.cl
+ Added "threshold" as a user parameter. (1/16/95, Valdes)
+
+dofoe$proc.cl
+dofoe$batch.cl
+dofoe$listonly.cl
+ The test for extracted spectra based on the system keyword was failing
+ and so it was removed. (1/16/95, Valdes)
+.endhelp
diff --git a/noao/imred/src/dofoe/apscript.par b/noao/imred/src/dofoe/apscript.par
new file mode 100644
index 00000000..c316829d
--- /dev/null
+++ b/noao/imred/src/dofoe/apscript.par
@@ -0,0 +1,145 @@
+# APSCRIPT
+
+input,s,a,,,,List of input images
+output,s,h,"",,,List of output spectra
+apertures,s,h,"",,,Apertures
+scatter,s,h,"",,,List of scattered light images (optional)
+references,s,h,"",,,List of aperture reference images
+profiles,s,h,"",,,"List of aperture profile images
+"
+interactive,b,h,yes,,,Run task interactively?
+find,b,h,yes,,,Find apertures?
+recenter,b,h,yes,,,Recenter apertures?
+resize,b,h,yes,,,Resize apertures?
+edit,b,h,yes,,,Edit apertures?
+trace,b,h,yes,,,Trace apertures?
+fittrace,b,h,yes,,,Fit the traced points interactively?
+extract,b,h,yes,,,Extract spectra?
+review,b,h,yes,,,Review extractions?
+subtract,b,h,yes,,,Subtract scattered light?
+smooth,b,h,yes,,,Smooth scattered light along the dispersion?
+fitscatter,b,h,yes,,,Fit scattered light interactively?
+fitsmooth,b,h,yes,,,"Smooth the scattered light interactively?
+"
+line,i,h,)params.line,,,>params.line
+nsum,i,h,)params.nsum,,,>params.nsum
+buffer,r,h,)params.buffer,,,">params.buffer
+
+# OUTPUT PARAMETERS
+"
+format,s,h,"echelle",,,Extracted spectra format
+extras,b,h,)params.extras,,,>params.extras
+dbwrite,s,h,"YES",,,Write to database?
+initialize,b,h,no,,,Initialize answers?
+verbose,b,h,)_.verbose,,,"Verbose output?
+
+# DEFAULT APERTURE PARAMETERS
+"
+lower,r,h,)params.lower,,,>params.lower
+upper,r,h,)params.upper,,,>params.upper
+apidtable,s,h,"",,,"Aperture ID table (optional)
+
+# DEFAULT BACKGROUND PARAMETERS
+"
+b_function,s,h,)params.b_function,,,>params.b_function
+b_order,i,h,)params.b_order,,,>params.b_order
+b_sample,s,h,)params.b_sample,,,>params.b_sample
+b_naverage,i,h,)params.b_naverage,,,>params.b_naverage
+b_niterate,i,h,)params.b_niterate,,,>params.b_niterate
+b_low_reject,r,h,)params.b_low,,,>params.b_low
+b_high_reject,r,h,)params.b_high,,,>params.b_high
+b_grow,r,h,)params.b_grow,,,">params.b_grow
+
+# APERTURE CENTERING PARAMETERS
+"
+width,r,h,,0.,,Profile centering width
+radius,r,h,,,,Profile centering radius
+threshold,r,h,10.,0.,,"Detection threshold for profile centering
+
+# AUTOMATIC FINDING AND ORDERING PARAMETERS
+"
+nfind,i,h,,,,Number of apertures to be found automatically
+minsep,r,h,1.,,,Minimum separation between spectra
+maxsep,r,h,100000.,,,Maximum separation between spectra
+order,s,h,"increasing","increasing|decreasing",,"Order of apertures
+
+# RECENTERING PARAMETERS
+"
+aprecenter,s,h,"",,,Apertures for recentering calculation
+npeaks,r,h,0.5,,,Select brightest peaks
+shift,b,h,yes,,,"Use average shift instead of recentering?
+
+# RESIZING PARAMETERS
+"
+llimit,r,h,INDEF,,,Lower aperture limit relative to center
+ulimit,r,h,INDEF,,,Upper aperture limit relative to center
+ylevel,r,h,)params.ylevel,,,>params.ylevel
+peak,b,h,yes,,,Is ylevel a fraction of the peak?
+bkg,b,h,yes,,,"Subtract background in automatic width?"
+r_grow,r,h,0.,,,"Grow limits by this factor"
+avglimits,b,h,no,,,"Average limits over all apertures?
+
+# EDITING PARAMETERS
+"
+e_output,s,q,,,,Output spectra rootname
+e_profiles,s,q,,,,"Profile reference image
+
+# TRACING PARAMETERS
+"
+t_nsum,i,h,)params.nsum,,,>params.nsum
+t_step,i,h,)params.t_step,,,>params.t_step
+t_nlost,i,h,3,1,,Number of consecutive times profile is lost before quitting
+t_width,r,h,,0.,,Profile centering width
+t_function,s,h,)params.t_function,,,>params.t_function
+t_sample,s,h,"*",,,Trace sample regions
+t_order,i,h,)params.t_order,,,>params.t_order
+t_naverage,i,h,1,,,Trace average or median
+t_niterate,i,h,)params.t_niterate,,,>params.t_niterate
+t_low_reject,r,h,)params.t_low,,,>params.t_low
+t_high_reject,r,h,)params.t_high,,,>params.t_high
+t_grow,r,h,0.,0.,,"Trace rejection growing radius
+
+# EXTRACTION PARAMETERS
+"
+background,s,h,,"none|average|median|minimum|fit",,Background to subtract
+skybox,i,h,)params.b_smooth,,,>params.b_smooth
+weights,s,h,)params.weights,,,>params.weights
+pfit,s,h,)params.pfit,,,>params.pfit
+clean,b,h,,,,Detect and replace bad pixels?
+nclean,r,h,0.5,,,Maximum number of pixels to clean
+niterate,i,h,5,0,,Number of profile fitting iterations
+saturation,r,h,INDEF,,,Saturation level
+readnoise,s,h,,,,Read out noise sigma (photons)
+gain,s,h,,,,Photon gain (photons/data number)
+lsigma,r,h,)params.lsigma,,,>params.lsigma
+usigma,r,h,)params.usigma,,,>params.usigma
+polysep,r,h,0.95,0.1,0.95,Marsh algorithm polynomial spacing
+polyorder,i,h,10,1,,Marsh algorithm polynomial order
+nsubaps,i,h,1,1,,"Number of subapertures per aperture
+
+# ANSWER PARAMETERS
+"
+ansclobber,s,h,"NO",,," "
+ansclobber1,s,h,"NO",,," "
+ansdbwrite,s,h,"YES",,," "
+ansdbwrite1,s,h,"NO",,," "
+ansedit,s,h,"NO",,," "
+ansextract,s,h,"NO",,," "
+ansfind,s,h,"NO",,," "
+ansfit,s,h,"NO",,," "
+ansfitscatter,s,h,"NO",,," "
+ansfitsmooth,s,h,"NO",,," "
+ansfitspec,s,h,"NO",,," "
+ansfitspec1,s,h,"NO",,," "
+ansfittrace,s,h,"NO",,," "
+ansfittrace1,s,h,"NO",,," "
+ansflat,s,h,"NO",,," "
+ansnorm,s,h,"NO",,," "
+ansrecenter,s,h,"NO",,," "
+ansresize,s,h,"NO",,," "
+ansreview,s,h,"NO",,," "
+ansreview1,s,h,"NO",,," "
+ansscat,s,h,"NO",,," "
+ansskyextract,s,h,"NO",,," "
+anssmooth,s,h,"NO",,," "
+anstrace,s,h,"NO",,," "
diff --git a/noao/imred/src/dofoe/arcrefs.cl b/noao/imred/src/dofoe/arcrefs.cl
new file mode 100644
index 00000000..fa8f950a
--- /dev/null
+++ b/noao/imred/src/dofoe/arcrefs.cl
@@ -0,0 +1,106 @@
+# ARCREFS -- Determine dispersion relation for reference arc.
+
+procedure arcrefs (arcref, arcaps, arcbeams, response, done, log1, log2)
+
+file arcref
+string arcaps
+string arcbeams
+file response
+file done
+file log1
+file log2
+
+struct *fd
+
+begin
+ string arcrefec, arcec, temp, str, imtype
+ int i, dc
+ bool log
+
+ temp = mktemp ("tmp$iraf")
+
+ # Extract the primary arc reference spectrum. Determine the
+ # dispersion function with ECIDENTIFY/ECREIDENTIFY. Set the wavelength
+ # parameters with ECDISPCOR.
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ arcrefec = arcref // ".ec"
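+	# When simultaneous arc fibers are used (arcaps/arcbeams specified),
+	# the arc apertures are split into a separate "arc.ec" spectrum so
+	# they can be reidentified against the reference dispersion solution.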
+ if (arcaps != "" || arcbeams != "")
+ arcec = arcref // "arc.ec"
+ else
+ arcec = ""
+ if (!access (arcrefec//imtype)) {
+ print ("Extract arc reference image ", arcref) | tee (log1)
+ apscript (arcref, ansrecenter="NO", ansresize="NO", ansedit="NO",
+ anstrace="NO", background="none", clean=no, weights="none")
+ if (response != "")
+ sarith (arcrefec, "/", response, arcrefec, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ if (arcec != "") {
+ scopy (arcrefec, arcec, w1=INDEF, w2=INDEF, apertures=arcaps,
+ bands="", beams=arcbeams, apmodulus=0, offset=0,
+ format="multispec", clobber=yes, merge=no, renumber=yes,
+ verbose=no)
+ scopy (arcrefec, "", w1=INDEF, w2=INDEF, apertures="!"//arcaps,
+ bands="", beams=arcbeams, apmodulus=0, offset=0,
+ format="multispec", clobber=yes, merge=no, renumber=yes,
+ verbose=no)
+ }
+ }
+
+ # Get the dispersion parameters from the header. These are
+ # used for all further spectra and also flag whether this
+ # spectrum has been processed. If the parameters are missing
+ # the spectrum needs to have the dispersion function and
+ # wavelength scale determined. The HEDIT is needed because
+ # in some cases the user may exit IDENTIFY without updating
+ # the database (if the image was deleted but the database
+ # entry was not).
+
+ hselect (arcrefec, "dc-flag", yes, > temp)
+ fd = temp
+ dc = -1
+ i = fscan (fd, dc)
+ fd = ""; delete (temp, verify=no)
+ if (dc == -1) {
+ print ("Determine dispersion solution for ", arcref) | tee (log1)
+ delete (database//"/ec"//arcref//".ec*", verify=no)
+ ecidentify (arcrefec, database=database,
+ coordlist=params.coordlist, match=params.match,
+ maxfeatures=100, zwidth=10., ftype="emission",
+ fwidth=params.fwidth, cradius=params.cradius,
+ threshold=params.threshold, minsep=2.,
+ function=params.i_function, xorder=params.i_xorder,
+ yorder=params.i_yorder, niterate=params.i_niterate,
+ lowreject=params.i_low, highreject=params.i_high,
+ autowrite=yes)
+ if (arcec != "") {
+ ecreidentify (arcec, arcrefec, shift=0., cradius=params.cradius,
+ threshold=params.threshold, refit=yes, database=database,
+ logfiles=log1//","//log2)
+ imdelete (arcec, verify=no)
+ }
+ hedit (arcrefec, "refspec1", arcref // ".ec", add=yes,
+ show=no, verify=no, update=yes)
+ }
+
+ # Dispersion correct the reference arc. Set the newdisp flag.
+
+ if (dc == -1) {
+ dispcor (arcrefec, "", linearize=params.linearize,
+ database=database, table="", w1=INDEF, w2=INDEF, dw=INDEF,
+ nw=INDEF, log=params.log, flux=params.flux, samedisp=no,
+ global=no, ignoreaps=no, confirm=no, listonly=no, verbose=yes,
+ logfile=log1, > log2)
+ hedit (arcrefec, "dc-flag", 0, add=yes, verify=no,
+ show=no, update=yes)
+ proc.newdisp = yes
+ }
+
+ print (arcref, >> done)
+end
diff --git a/noao/imred/src/dofoe/arcrefs.par b/noao/imred/src/dofoe/arcrefs.par
new file mode 100644
index 00000000..5aa35c57
--- /dev/null
+++ b/noao/imred/src/dofoe/arcrefs.par
@@ -0,0 +1,9 @@
+arcref,f,a,"",,,
+arcaps,s,a,,,,
+arcbeams,s,a,,,,
+response,f,a,"",,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+fd,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/dofoe/batch.cl b/noao/imred/src/dofoe/batch.cl
new file mode 100644
index 00000000..6adcbb04
--- /dev/null
+++ b/noao/imred/src/dofoe/batch.cl
@@ -0,0 +1,207 @@
+# BATCH -- Process spectra in batch.
+# This task is called in batch mode. It only processes objects
+# not previously processed unless the update or redo flags are set.
+
+procedure batch ()
+
+string objects {prompt="Object spectra"}
+real datamax {prompt="Max data value / cosmic ray threshold"}
+
+file response {prompt="Response spectrum"}
+string arcs {prompt="List of arc spectra"}
+file arcref {prompt="Arc reference for dispersion solution"}
+string arcrefs {prompt="Arc references"}
+
+string objaps {prompt="Object apertures"}
+string arcaps {prompt="Arc apertures"}
+string objbeams {prompt="Object beam numbers"}
+string arcbeams {prompt="Arc beam numbers\n"}
+
+file done {prompt="File of spectra already done"}
+file logfile {prompt="Logfile"}
+
+bool redo {prompt="Redo operations?"}
+bool update {prompt="Update spectra?"}
+bool scattered {prompt="Subtract scattered light?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+
+bool newaps, newresp, newdisp, newarcs
+
+struct *fd1, *fd2
+
+begin
+ file temp1, temp2, spec, specec, arc, arcec
+ bool reextract, extract, scat, disp, log
+ string imtype, ectype, str
+ int i, n
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+
+ # Initialize extraction to be noninteractive.
+ if (apscript.ansrecenter == "yes")
+ apscript.ansrecenter = "YES"
+ else if (apscript.ansrecenter == "no")
+ apscript.ansrecenter = "NO"
+ apscript.ansedit = "NO"
+ if (apscript.anstrace == "yes") {
+ apscript.anstrace = "YES"
+ apscript.ansfittrace = "NO"
+ } else if (apscript.anstrace == "no")
+ apscript.anstrace = "NO"
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+
+ hselect (objects, "$I", yes, > temp1)
+ #sections (objects, option="fullname", > temp1)
+ fd1 = temp1
+ while (fscan (fd1, spec) != EOF) {
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+	    printf ("Object spectrum not found - %s%s\nCheck setting of imtype\n",
+		spec, imtype, >> logfile)
+ next
+ }
+ specec = spec // ectype
+
+ scat = no
+ extract = no
+ disp = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
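+	    # Reuse a previous extraction when possible; the dc-flag keyword
+	    # indicates that dispersion correction has already been done.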
+ if (reextract || !access (specec) || (update && scat))
+ extract = yes
+ else {
+ hselect (specec, "dc-flag", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+ fd2 = ""; delete (temp2, verify=no)
+ }
+
+ if (extract)
+ disp = dispcor
+
+ if (extract) {
+ if (access (specec))
+ imdelete (specec, verify=no)
+ if (scat) {
+ print ("Subtract scattered light in ", spec, >> logfile)
+ apscript (spec, output="", ansextract="NO",
+ ansscat="YES", anssmooth="YES", verbose=no)
+ }
+ print ("Extract object spectrum ", spec, >> logfile)
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ apscript (spec, saturation=datamax, verbose=no)
+ if (response != "")
+ sarith (specec, "/", response, specec, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ }
+
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ sections (arcs, option="fullname", >temp2)
+ setjd ("@"//temp2, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ setairmass ("@"//temp2, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ delete (temp2, verify=no)
+ hselect (arcs, "$I", yes, >temp2)
+ fd2 = temp2
+ while (fscan (fd2, arc) != EOF) {
+ i = strlen (arc)
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "henear", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp2, verify=no)
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec, >> logfile)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=params.select, sort=params.sort,
+ group=params.group, time=params.time,
+ timewrap=params.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no,
+ >> logfile)
+
+ doarcs (spec, response, arcref, arcaps, arcbeams, reextract,
+ arcap, logfile, yes)
+
+ hselect (specec, "refspec1", yes, > temp2)
+ fd2 = temp2
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp2, verify=no)
+ if (i < 1)
+ print ("No arc reference assigned for ", spec, >> logfile)
+ else {
+ print ("Dispersion correct ", spec, >> logfile)
+ dispcor (specec, "", linearize=params.linearize,
+ database=database, table=arcref//ectype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=params.log, samedisp=no, flux=params.flux,
+ global=no, ignoreaps=no, confirm=no, listonly=no,
+ verbose=no, logfile=logfile)
+ hedit (specec, "dc-flag", 0, add=yes, verify=no,
+ show=no, update=yes)
+ disp = no
+ }
+ }
+ }
+ fd1 = ""; delete (temp1, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+
+ flprcache (0)
+end
diff --git a/noao/imred/src/dofoe/batch.par b/noao/imred/src/dofoe/batch.par
new file mode 100644
index 00000000..81b8c8ae
--- /dev/null
+++ b/noao/imred/src/dofoe/batch.par
@@ -0,0 +1,25 @@
+objects,s,h,,,,"Object spectra"
+datamax,r,h,,,,"Max data value / cosmic ray threshold"
+response,f,h,"",,,"Response spectrum"
+arcs,s,h,,,,"List of arc spectra"
+arcref,f,h,"",,,"Arc reference for dispersion solution"
+arcrefs,s,h,,,,"Arc references"
+objaps,s,h,,,,"Object apertures"
+arcaps,s,h,,,,"Arc apertures"
+objbeams,s,h,,,,"Object beam numbers"
+arcbeams,s,h,,,,"Arc beam numbers
+"
+done,f,h,"",,,"File of spectra already done"
+logfile,f,h,"",,,"Logfile"
+redo,b,h,,,,"Redo operations?"
+update,b,h,,,,"Update spectra?"
+scattered,b,h,,,,"Subtract scattered light?"
+arcap,b,h,,,,"Use object apertures for arcs?"
+dispcor,b,h,,,,"Dispersion correct spectra?"
+newaps,b,h,,,,
+newresp,b,h,,,,
+newdisp,b,h,,,,
+newarcs,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/dofoe/doarcs.cl b/noao/imred/src/dofoe/doarcs.cl
new file mode 100644
index 00000000..653146b1
--- /dev/null
+++ b/noao/imred/src/dofoe/doarcs.cl
@@ -0,0 +1,167 @@
+# DOARCS -- Determine dispersion relation for spectrum based on reference arcs.
+
+procedure doarcs (spec, response, arcref, arcaps, arcbeams, reextract,
+ arcap, logfile, batch)
+
+file spec
+file response
+file arcref
+string arcaps
+string arcbeams
+bool reextract
+bool arcap
+file logfile
+bool batch
+
+struct *fd
+
+begin
+ string imtype, ectype
+ int i, j, k, n
+ file temp, arc1, arc2, str1, str2, arctype, apref, arc, arcec, logs
+ file specec, specarc
+ bool verbose1
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ if (batch)
+ verbose1 = no
+ else
+ verbose1 = verbose
+ if (verbose1)
+ logs = logfile//",STDOUT"
+ else
+ logs = logfile
+
+ # Separate simultaneous arc from object.
+ specec = spec // ".ec"
+ if (arcaps != "" || arcbeams != "")
+ specarc = spec // "arc1.ec"
+ else
+ specarc = ""
+ if (specarc != "") {
+ scopy (specec, specarc, w1=INDEF, w2=INDEF, apertures=arcaps,
+ bands="", beams="", apmodulus=0, format="multispec",
+ renumber=yes, offset=0, clobber=yes, merge=no, verbose=no)
+ scopy (specec, "", w1=INDEF, w2=INDEF, apertures="!"//arcaps,
+ bands="", beams="", apmodulus=0, format="multispec",
+ renumber=yes, offset=0, clobber=yes, merge=no, verbose=no)
+ }
+
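+	# Up to two reference arcs (REFSPEC1 and REFSPEC2) may be assigned
+	# to a spectrum; process each in turn.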
+ for (j=1; j<=2; j+=1) {
+ # The reference spectra refer initially to the 2D image. At the
+ # end we will reset them to refer to the 1D spectra.
+
+ hselect (spec, "refspec"//j, yes, > temp)
+ fd = temp
+ k = fscan (fd, arc1, str1)
+ fd = ""; delete (temp, verify=no)
+ if (k < 1)
+ break
+
+ # Strip possible image extension.
+ i = strlen (arc1)
+ if (i > n && substr (arc1, i-n+1, i) == imtype)
+ arc1 = substr (arc1, 1, i-n)
+
+ # Set extraction output and aperture reference depending on whether
+ # the arcs are to be rextracted using recentered or retraced object
+ # apertures.
+
+ if (arcap &&
+ (apscript.ansrecenter=="yes" || apscript.anstrace=="yes" ||
+ apscript.ansrecenter=="YES" || apscript.anstrace=="YES")) {
+ arc2 = spec // arc1
+ apref = spec
+ if (access (arc2//ectype))
+ imdelete (arc2//ectype, verify=no)
+ delete (database//"/ec"//arc2//".ec*", verify = no)
+ } else {
+ arc2 = arc1
+ apref = apscript.references
+ }
+
+ # Arcs are reidentified using the user refit option.
+ # Also internal arcs are checked if HENEAR.
+
+ hselect (arc1, "arctype", yes, > temp)
+ fd = temp
+ i = fscan (fd, arctype)
+ fd = ""; delete (temp, verify=no)
+
+ # Extract and determine dispersion function if necessary.
+ if (!access (arc2//ectype)) {
+ if (!batch)
+ print ("Extract and reidentify arc spectrum ", arc1)
+ print ("Extract and reidentify arc spectrum ", arc1, >> logfile)
+ apscript (arc1, output=arc2//".ec", references=apref,
+ ansrecenter="NO", ansresize="NO", ansedit="NO",
+ anstrace="NO", background="none",
+ clean=no, weights="none", verbose=verbose1)
+ if (response != "")
+ sarith (arc2//".ec", "/", response, arc2//".ec", w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no, errval=0,
+ verbose=no)
+
+ if (arcaps != "") {
+ scopy (arc2//".ec", arc2//"arc.ec", w1=INDEF, w2=INDEF,
+ apertures=arcaps, bands="", beams="", apmodulus=0,
+ format="multispec", renumber=yes, offset=0,
+ clobber=yes, merge=no, verbose=no)
+ scopy (arc2//".ec", "", w1=INDEF, w2=INDEF,
+ apertures="!"//arcaps, bands="", beams="",
+ apmodulus=0, format="multispec", renumber=yes, offset=0,
+ clobber=yes, merge=no, verbose=no)
+ ecreidentify (arc2//"arc.ec", arcref//"arc.ec", shift=0.,
+ cradius=params.cradius, threshold=params.threshold,
+ refit=yes, database=database, logfiles=logs)
+ imdelete (arc2//"arc.ec", verify=no)
+ }
+ ecreidentify (arc2//".ec", arcref//".ec", shift=0.,
+ cradius=params.cradius, threshold=params.threshold,
+ refit=yes, database=database, logfiles=logs)
+
+ # If not reextracting arcs based on object apertures
+ # then save the extracted arc to avoid doing it again.
+
+ if (arc1 != arc2)
+ imdelete (arc2//".ec", verify=no)
+ }
+
+ # Set the REFSPEC parameters for echelle spectrum.
+ if (k == 1)
+ hedit (specec, "refspec"//j, arc2//".ec", add=yes,
+ verify=no, show=no, update=yes)
+ else
+ hedit (specec, "refspec"//j, arc2//".ec "//str1,
+ add=yes, verify=no, show=no, update=yes)
+
+ # Check for arc fibers in object spectra.
+ if (specarc != "") {
+ if (!batch)
+ print ("Reidentify arc fibers in ", spec,
+ " with respect to ", arc1)
+ print ("Reidentify arc fibers in ", spec,
+ " with respect to ", arc1, >> logfile)
+ delete (database//"/ec"//specarc, verify = no, >& "dev$null")
+ ecreidentify (specarc, arc2//"arc.ec", shift=0.,
+ cradius=params.cradius, threshold=params.threshold,
+ refit=no, database=database, logfiles=logs)
+ hedit (specec, "refshft"//j, specarc,
+ add=yes, verify=no, show=no, update=yes)
+ imrename (specarc, spec//"arc"//j+1//".ec", verbose=no)
+ specarc = spec // "arc" // j+1 // ".ec"
+ }
+ }
+ if (specarc != "")
+ imdelete (specarc, verify=no)
+end
diff --git a/noao/imred/src/dofoe/doarcs.par b/noao/imred/src/dofoe/doarcs.par
new file mode 100644
index 00000000..e399380b
--- /dev/null
+++ b/noao/imred/src/dofoe/doarcs.par
@@ -0,0 +1,11 @@
+spec,f,a,"",,,
+response,f,a,"",,,
+arcref,f,a,"",,,
+arcaps,s,a,,,,
+arcbeams,s,a,,,,
+reextract,b,a,,,,
+arcap,b,a,,,,
+logfile,f,a,"",,,
+batch,b,a,,,,
+fd,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/dofoe/dofoe.cl b/noao/imred/src/dofoe/dofoe.cl
new file mode 100644
index 00000000..ae1c2ca8
--- /dev/null
+++ b/noao/imred/src/dofoe/dofoe.cl
@@ -0,0 +1,89 @@
+# DOFOE -- Process FOE spectra from 2D to wavelength calibrated 1D.
+#
+# The task PROC does all of the interactive work and BATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
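+#
+# For example, a typical call (with illustrative image names) might be:
+#
+#	cl> dofoe obj001 apref=flat001 flat=flat001 arcs=arc001 norders=24
+#
+# where the flat field is also used as the aperture reference.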
+
+procedure dofoe (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+string arcs = "" {prompt="List of arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)\n"}
+
+string readnoise = "0." {prompt="Read out noise sigma (photons)"}
+string gain = "1." {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+int norders = 12 {prompt="Number of orders"}
+real width = 4. {prompt="Width of profiles (pixels)"}
+string arcaps = "2x2" {prompt="Arc apertures\n"}
+
+bool fitflat = yes {prompt="Fit and ratio flat field spectrum?"}
+string background = "none" {prompt="Background to subtract",
+ enum="none|scattered|average|median|minimum|fit"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = no {prompt="Update spectra if cal data changes?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset params = "" {prompt="Algorithm parameters"}
+
+begin
+ int i, j
+ bool scattered
+
+ # Remove any leading whitespace from parameters that might be null.
+ if (logfile != "") {
+ j = strlen (logfile)
+ for (i=1; i<=j && substr(logfile,i,i)==" "; i+=1);
+ logfile = substr (logfile, i, j)
+ }
+ if (flat != "") {
+ j = strlen (flat)
+ for (i=1; i<=j && substr(flat,i,i)==" "; i+=1);
+ flat = substr (flat, i, j)
+ }
+ if (arctable != "") {
+ j = strlen (arctable)
+ for (i=1; i<=j && substr(arctable,i,i)==" "; i+=1);
+ arctable = substr (arctable, i, j)
+ }
+ if (arcaps != "") {
+ j = strlen (arcaps)
+ for (i=1; i<=j && substr(arcaps,i,i)==" "; i+=1);
+ arcaps = substr (arcaps, i, j)
+ }
+
+ apscript.readnoise = readnoise
+ apscript.gain = gain
+ if (arcaps != "")
+ i = 2 * norders
+ else
+ i = norders
+ apscript.nfind = i
+ apscript.width = width
+ apscript.t_width = width
+ apscript.radius = width
+ apscript.clean = clean
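+	# Scattered light subtraction is done as a separate 2D step, so it
+	# is not also applied as an extraction background.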
+ if (background == "scattered") {
+ scattered = yes
+ apscript.background = "none"
+ } else {
+ scattered = no
+ apscript.background = background
+ }
+ proc.datamax = datamax
+
+ proc (objects, apref, flat, arcs, arctable, i, "", arcaps,
+ "", "", fitflat, yes, scattered, no, no, no, clean, dispcor,
+ no, redo, update, batch, listonly)
+
+ if (proc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("batch&batch") | cl
+ }
+end
diff --git a/noao/imred/src/dofoe/dofoe.par b/noao/imred/src/dofoe/dofoe.par
new file mode 100644
index 00000000..9853b7e5
--- /dev/null
+++ b/noao/imred/src/dofoe/dofoe.par
@@ -0,0 +1,24 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,h,"",,,"Aperture reference spectrum"
+flat,f,h,"",,,"Flat field spectrum"
+arcs,s,h,"",,,"List of arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)
+"
+readnoise,s,h,"0.",,,"Read out noise sigma (photons)"
+gain,s,h,"1.",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+norders,i,h,12,,,"Number of orders"
+width,r,h,4.,,,"Width of profiles (pixels)"
+arcaps,s,h,"2x2",,,"Arc apertures
+"
+fitflat,b,h,yes,,,"Fit and ratio flat field spectrum?"
+background,s,h,"none",none|scattered|average|median|minimum|fit,,"Background to subtract"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,no,,,"Update spectra if cal data changes?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+params,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/dofoe/dofoetasks.cl b/noao/imred/src/dofoe/dofoetasks.cl
new file mode 100644
index 00000000..4c602be0
--- /dev/null
+++ b/noao/imred/src/dofoe/dofoetasks.cl
@@ -0,0 +1,19 @@
+#{ DOFOE tasks
+
+task dofoe = "dofoe$dofoe.cl"
+task params = "dofoe$params.par"
+
+task proc = "dofoe$proc.cl"
+task response = "dofoe$response.cl"
+task arcrefs = "dofoe$arcrefs.cl"
+task doarcs = "dofoe$doarcs.cl"
+task batch = "dofoe$batch.cl"
+task listonly = "dofoe$listonly.cl"
+
+task apscript = "dofoe$x_apextract.e"
+
+# Hide tasks from the user
+hidetask apscript
+hidetask params, proc, batch, arcrefs, doarcs, listonly, response
+
+keep
diff --git a/noao/imred/src/dofoe/listonly.cl b/noao/imred/src/dofoe/listonly.cl
new file mode 100644
index 00000000..bae8aff8
--- /dev/null
+++ b/noao/imred/src/dofoe/listonly.cl
@@ -0,0 +1,167 @@
+# LISTONLY -- List processing to be done.
+#
+# This follows pretty much the same logic as the full procedure but doesn't
+# do anything but list the operations.
+
+procedure listonly (objects, apref, flat, arcs, scattered, dispcor,
+ redo, update)
+
+string objects = "" {prompt="List of object spectra"}
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+string arcs = "" {prompt="List of arc spectra"}
+
+bool scattered {prompt="Subtract scattered light?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool redo {prompt="Redo operations if previously done?"}
+bool update {prompt="Update spectra if cal data changes?"}
+
+struct *fd1
+struct *fd2
+
+begin
+ string imtype, ectype
+ string spec, arcref
+ string specec, arcrefec, response
+ string temp1, temp2, done, str
+ bool reextract, newaps, newresp, newdisp, extract, disp, scat
+ int i, j, n
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ newaps = no
+ newresp = no
+ newdisp = no
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref)) {
+ print ("Set reference aperture for ", apref)
+ newaps = yes
+ }
+
+ if (flat != "") {
+ response = flat
+ i = strlen (response)
+ if (i > n && substr (response, i-n+1, i) == imtype)
+ response = substr (response, 1, i-n)
+ response = response // "norm.ec"
+
+ reextract = redo || (update && newaps)
+ scat = no
+ if (scattered) {
+ hselect (flat, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+ if (reextract || !access (response // imtype) || (update && scat)) {
+ if (scat)
+ print ("Subtract scattered light from ", flat)
+ print ("Create response function ", response)
+ newresp = yes
+ }
+ }
+
+ if (dispcor) {
+ hselect (arcs, "$I", yes, > temp1)
+ #sections (arcs, option="fullname", > temp1)
+ fd1 = temp1
+ i = fscan (fd1, arcref)
+ if (i < 1)
+ error (1, "No reference arcs")
+ fd1 = ""; delete (temp1, verify=no)
+ i = strlen (arcref)
+ if (i > n && substr (arcref, i-n+1, i) == imtype)
+ arcref = substr (arcref, 1, i-n)
+ arcrefec = arcref // ectype
+
+ reextract = redo || (update && newaps)
+ if (reextract || !access (arcrefec)) {
+ print ("Extract arc reference image ", arcref)
+ print ("Determine dispersion solution for ", arcref)
+ newdisp = yes
+ } else {
+ hselect (arcrefec, "refspec1,dc-flag", yes, > temp1)
+ fd1 = temp1
+ i = fscan (fd1, str, j)
+ fd1 = ""; delete (temp1, verify=no)
+ if (i < 1) {
+ print ("Determine dispersion solution for ", arcref)
+ newdisp = yes
+ }
+ }
+ print (arcref, > done)
+ }
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+ hselect (objects, "$I", yes, > temp1)
+ #sections (objects, option="fullname", > temp1)
+ fd1 = temp1
+	while (fscan (fd1, spec) != EOF) {
+	    i = strlen (spec)
+	    if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+
+ specec = spec // ectype
+
+ scat = no
+ extract = no
+ disp = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat)) {
+ extract = yes
+ } else {
+ hselect (specec, "dc-flag", yes, > temp2)
+ fd2 = temp2
+ extract = update && newaps
+ if (fscan (fd2, str) == 1)
+ extract = update && newdisp
+ else
+ disp = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+
+ if (extract)
+ disp = dispcor
+
+ if (scat)
+ print ("Subtract scattered light from ", spec)
+ if (extract)
+ print ("Extract object spectrum ", spec)
+ if (disp)
+ print ("Dispersion correct ", spec)
+ }
+ fd1 = ""; delete (temp1, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/dofoe/listonly.par b/noao/imred/src/dofoe/listonly.par
new file mode 100644
index 00000000..a05b8f94
--- /dev/null
+++ b/noao/imred/src/dofoe/listonly.par
@@ -0,0 +1,11 @@
+objects,s,a,"",,,"List of object spectra"
+apref,f,a,"",,,"Aperture reference spectrum"
+flat,f,a,"",,,"Flat field spectrum"
+arcs,s,a,"",,,"List of arc spectra"
+scattered,b,a,,,,"Subtract scattered light?"
+dispcor,b,a,,,,"Dispersion correct spectra?"
+redo,b,a,,,,"Redo operations if previously done?"
+update,b,a,,,,"Update spectra if cal data changes?"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"q",,,
diff --git a/noao/imred/src/dofoe/params.par b/noao/imred/src/dofoe/params.par
new file mode 100644
index 00000000..fafb71f7
--- /dev/null
+++ b/noao/imred/src/dofoe/params.par
@@ -0,0 +1,69 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-3.,,,"Lower aperture limit relative to center"
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,2,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- DEFAULT BACKGROUND PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,Fitting parameters along the dispersion
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,Background function
+b_order,i,h,2,,,Background function order
+b_sample,s,h,"-10:-6,6:10",,,Background sample regions
+b_naverage,i,h,-3,,,Background average or median
+b_niterate,i,h,0,0,,Background rejection iterations
+b_low,r,h,3.,0.,,Background lower rejection sigma
+b_high,r,h,3.,0.,,Background upper rejection sigma
+b_grow,r,h,0.,0.,,Background rejection growing radius
+b_smooth,i,h,10,,,"Background smoothing length
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,no,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,20,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$thar.dat,,,"Line list"
+match,r,h,1.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,4.,,,Centering radius in pixels
+i_function,s,h,"chebyshev","legendre|chebyshev",,"Echelle coordinate function"
+i_xorder,i,h,3,1,,Order of coordinate function along dispersion
+i_yorder,i,h,3,1,,Order of coordinate function across dispersion
+i_niterate,i,h,3,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?"
diff --git a/noao/imred/src/dofoe/proc.cl b/noao/imred/src/dofoe/proc.cl
new file mode 100644
index 00000000..75740bda
--- /dev/null
+++ b/noao/imred/src/dofoe/proc.cl
@@ -0,0 +1,464 @@
+# PROC -- Process echelle fiber spectra
+# This program combines the operations of extraction, flat fielding, and
+# dispersion correction in as simple and noninteractive a way as possible.
+# It supports a second simultaneous arc fiber. The data must all share
+# the same position on the 2D image and the same dispersion solution
+# apart from small instrumental changes which can be tracked
+# automatically. The apertures must be identified sequentially and must
+# be properly paired if an arc fiber is used.
+#
+# If ever needed, one could add sky subtraction (with a sky fiber) and
+# fluxing, following the model of the multifiber packages.
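+#
+# (Added, illustrative note: the default arcaps="2x2" in dofoe.par suggests the
+# pairing convention object fiber = odd apertures, arc fiber = even apertures;
+# the aperture numbering here is an assumption for illustration only.)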
+
+procedure proc (objects, apref, flat, arcs, arctable, naps, objaps, arcaps,
+ objbeams, arcbeams, fitflat, recenter, scattered, edit, trace, arcap,
+ clean, dispcor, splot, redo, update, batch, listonly)
+
+string objects {prompt="List of object spectra"}
+
+file apref {prompt="Aperture reference spectrum"}
+file flat {prompt="Flat field spectrum"}
+string arcs {prompt="List of arc spectra"}
+file arctable {prompt="Arc assignment table (optional)\n"}
+
+int naps {prompt="Number of apertures"}
+string objaps {prompt="Object apertures"}
+string arcaps {prompt="Arc apertures"}
+string objbeams {prompt="Object beam numbers"}
+string arcbeams {prompt="Arc beam numbers\n"}
+
+bool fitflat {prompt="Fit and ratio flat field spectrum?"}
+bool recenter {prompt="Recenter object apertures?"}
+bool scattered {prompt="Subtract scattered light?"}
+bool edit {prompt="Edit/review object apertures?"}
+bool trace {prompt="Trace object spectra?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool clean {prompt="Detect and replace bad pixels?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool splot {prompt="Plot the final spectrum?"}
+bool redo {prompt="Redo operations if previously done?"}
+bool update {prompt="Update spectra if cal data changes?"}
+bool batch {prompt="Extract objects in batch?"}
+bool listonly {prompt="List steps but don't process?\n"}
+
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+
+bool newaps, newresp, newdisp, newarcs, dobatch
+
+string anssplot = "yes" {prompt="Splot spectrum?", mode="q",
+ enum="no|yes|NO|YES"}
+
+struct *fd1, *fd2
+
+begin
+ string imtype, ectype
+ string arcref, spec, arc
+ string arcrefec, specec, arcec, response
+ string temp1, temp2, done
+ string str1, objs, arcrefs, log1, log2
+ bool reextract, extract, scat, disp, disperr, log
+ bool splot1, splot2
+ int i, j, n, nspec
+ struct err
+
+ # Call a separate task to do the listing to minimize the size of
+	# this script and improve its readability.
+
+ dobatch = no
+ if (listonly) {
+ listonly (objects, apref, flat, arcs, scattered, dispcor,
+ redo, update)
+ bye
+ }
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ ectype = ".ec" // imtype
+ n = strlen (imtype)
+
+ # Get query parameter.
+ objs = objects
+ if (arctable == "")
+ arcrefs = arcs
+ else
+ arcrefs = arctable
+ arcref = ""
+
+ # Temporary files used repeatedly in this script. Under some
+ # abort circumstances these files may be left behind.
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ # Rather than always have switches on the logfile and verbose flags
+ # we use TEE and set a file to "dev$null" if output is not desired.
+ # We must check for the null string to signify no logfile.
+
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # If the update switch is used changes in the calibration data
+ # can cause images to be reprocessed (if they are in the object
+ # list). Possible changes are in the aperture definitions,
+ # response function, dispersion solution, and sensitivity
+	# function. The newarcs flag is used to go through the arc image
+	# headers only once, setting the reference spectrum, airmass, and
+	# UT.
+
+ newaps = no
+ newresp = no
+ newdisp = no
+ newarcs = yes
+
+ # Check if there are aperture definitions in the database and
+ # define them if needed. This is usually somewhat interactive.
+ # Delete the database entry to start fresh if we enter this
+ # because of a redo. Set the newaps flag in case an update is
+ # desired.
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ # Initialize
+ apscript.saturation = INDEF
+ apscript.references = apref
+ apscript.profiles = ""
+ apscript.nfind = naps
+ apscript.clean = clean
+ if (splot) {
+ splot1 = yes
+ splot2 = yes
+ } else {
+ splot1 = no
+ splot2 = no
+ }
+
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref)) {
+ if (!access (apref // imtype)) {
+ printf ("Aperture reference spectrum not found - %s%s\n",
+ apref, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ print ("Set reference apertures for ", apref) | tee (log1)
+ if (access (database // "/ap" // apref))
+ delete (database // "/ap" // apref, verify=no)
+ apscript.ansresize = "yes"
+ apscript.ansedit = "YES"
+ apscript.ansfittrace = "yes"
+ apscript (apref, references="", ansfind="YES", ansrecenter="NO",
+ anstrace="YES", ansextract="NO")
+ newaps = yes
+ }
+
+ if (recenter)
+ apscript.ansrecenter = "YES"
+ else
+ apscript.ansrecenter = "NO"
+ apscript.ansresize = "NO"
+ if (edit)
+ apscript.ansedit = "yes"
+ else
+ apscript.ansedit = "NO"
+ if (trace)
+ apscript.anstrace = "YES"
+ else
+ apscript.anstrace = "NO"
+ apscript.ansfittrace = "NO"
+ apscript.ansextract = "YES"
+ apscript.ansreview = "NO"
+
+	# The next step is to set up the scattered light correction if needed.
+	# We use the flat field image for the interactive setting unless one
+	# is not used, in which case we use the aperture reference.
+	# If these images have been scattered light corrected we assume the
+	# scattered light function parameters are correctly set.
+
+ i = strlen (flat)
+ if (i > n && substr (flat, i-n+1, i) == imtype)
+ flat = substr (flat, 1, i-n)
+
+ if (flat != "")
+ spec = flat
+ else
+ spec = apref
+
+ scat = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp1)
+ fd1 = temp1
+ if (fscan (fd1, str1) < 1)
+ scat = yes
+ fd1 = ""; delete (temp1, verify=no)
+ }
+ if (scat) {
+ print ("Subtract scattered light in ", spec) | tee (log1)
+ apscript.ansfitscatter = "yes"
+ apscript.ansfitsmooth = "yes"
+ apscript (spec, output="", ansextract="NO", ansscat="YES",
+ anssmooth="YES")
+ apscript.ansfitscatter = "NO"
+ apscript.ansfitsmooth = "NO"
+ }
+
+ response = ""
+ if (flat != "") {
+ response = flat // "norm.ec"
+ reextract = redo || (update && newaps)
+ if (reextract || !access (response // imtype) || (update && scat)) {
+ print ("Create response function ", response) | tee (log1)
+
+ if (access (response // imtype))
+ imdelete (response, verify=no)
+ if (access (flat //ectype))
+ imdelete (flat//ectype, verify=no)
+
+ response (flat, apref, response, recenter=recenter,
+ edit=edit, trace=trace, clean=clean, fitflat=fitflat,
+ interactive=params.f_interactive,
+ function=params.f_function, order=params.f_order)
+
+ newresp = yes
+ }
+ }
+
+ # If not dispersion correcting we can go directly to extracting
+ # the object spectra. The reference arcs are the first on
+ # the arc lists. The processing of the reference arcs is done
+ # by the task ARCREFS.
+
+ if (dispcor) {
+ hselect (arcs, "$I", yes, >temp1)
+ fd1 = temp1
+ i = fscan (fd1, arcref)
+ if (i < 1)
+ error (1, "No reference arcs")
+ fd1 = ""; delete (temp1, verify=no)
+ i = strlen (arcref)
+ if (i > n && substr (arcref, i-n+1, i) == imtype)
+ arcref = substr (arcref, 1, i-n)
+ if (!access (arcref // imtype)) {
+ printf ("Arc reference spectrum not found - %s%s\n",
+ arcref, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ arcrefec = arcref // ectype
+ reextract = redo || (update && newaps)
+ if (reextract && access (arcrefec))
+ imdelete (arcrefec, verify=no)
+
+ arcrefs (arcref, arcaps, arcbeams, response, done, log1, log2)
+ }
+
+ # Now we are ready to process the object spectra.
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+ hselect (objs, "$I", yes, > temp1)
+ fd1 = temp1
+ while (fscan (fd1, spec) != EOF) {
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+
+	    # Check if previously done; i.e., an arc that was already processed.
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specec) != EOF)
+ if (spec == specec)
+ break
+ if (spec == specec)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ printf ("Object spectrum not found - %s%s\n",
+ spec, imtype) | scan (err)
+ print (err) | tee (log1)
+ print ("Check setting of imtype")
+ next
+ }
+ specec = spec // ectype
+
+ # Determine required operations from the flags and image header.
+ scat = no
+ extract = no
+ disp = no
+ if (scattered) {
+ hselect (spec, "apscatte", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str1) < 1)
+ scat = yes
+ fd2 = ""; delete (temp2, verify=no)
+ }
+ if (reextract || !access (specec) || (update && scat))
+ extract = yes
+ else {
+ hselect (specec, "dc-flag", yes, > temp2)
+ fd2 = temp2
+ if (fscan (fd2, str1) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+
+ fd2 = ""; delete (temp2, verify=no)
+ }
+
+ if (extract)
+ disp = dispcor
+
+ # If fully processed go to the next object.
+ if (!extract && !disp)
+ next
+
+	    # If not interactive and the batch flag is set, submit the rest to batch.
+ if (batch && !splot1 && !splot2 && apscript.ansedit == "NO") {
+ fd1 = ""; delete (temp1, verify=no)
+ flprcache
+ batch.objects = objs
+ batch.datamax = datamax
+ batch.response = response
+ batch.arcs = arcs
+ batch.arcref = arcref
+ batch.arcrefs = arcrefs
+ batch.objaps = objaps
+ batch.arcaps = arcaps
+ batch.objbeams = objbeams
+ batch.arcbeams = arcbeams
+ batch.done = done
+ batch.logfile = log1
+ batch.redo = reextract
+ batch.update = update
+ batch.scattered = scattered
+ batch.arcap = arcap
+ batch.dispcor = dispcor
+ batch.newaps = newaps
+ batch.newresp = newresp
+ batch.newdisp = newdisp
+ batch.newarcs = newarcs
+ dobatch = yes
+ return
+ }
+
+ # Process the spectrum in foreground.
+ if (extract) {
+ if (access (specec))
+ imdelete (specec, verify=no)
+
+ if (scat) {
+ print ("Subtract scattered light in ", spec) | tee (log1)
+ apscript (spec, output="", ansextract="NO",
+ ansscat="YES", anssmooth="YES")
+ }
+
+ print ("Extract object spectrum ", spec) | tee (log1)
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ apscript (spec, saturation=datamax)
+ if (response != "")
+ imarith (specec, "/", response, specec)
+ }
+
+ disperr = no
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ sections (arcs, option="fullname", >temp2)
+ setjd ("@"//temp2, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ setairmass ("@"//temp2, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ delete (temp2, verify=no)
+ hselect (arcs, "$I", yes, >temp2)
+ fd2 = temp2
+ while (fscan (fd2, arc) != EOF) {
+ i = strlen (arc)
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "henear", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp2, verify=no)
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=params.select, sort=params.sort,
+ group=params.group, time=params.time,
+ timewrap=params.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ doarcs (spec, response, arcref, arcaps, arcbeams, reextract,
+ arcap, log1, no)
+
+ hselect (specec, "refspec1", yes, > temp2)
+ fd2 = temp2
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp2, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ disperr = yes
+ } else {
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specec, "", linearize=params.linearize,
+ database=database, table=arcref//ectype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=params.log, samedisp=no, flux=params.flux,
+ global=no, ignoreaps=no, confirm=no, listonly=no,
+ verbose=verbose, logfile=logfile)
+ hedit (specec, "dc-flag", 0, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ }
+
+ if (!disperr && (extract || disp)) {
+ if (splot1) {
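+		# An uppercase answer (NO/YES) to the query below applies to
+		# all remaining spectra and stops further queries; a lowercase
+		# answer applies only to this spectrum.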
+ print (specec, ":")
+ str1 = anssplot
+ if (str1 == "NO" || str1 == "YES")
+ splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ splot2 = no
+ else
+ splot2 = yes
+ }
+ if (splot2)
+ splot (specec)
+ }
+
+ print (spec, >> done)
+ }
+ fd1 = ""; delete (temp1, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/dofoe/proc.par b/noao/imred/src/dofoe/proc.par
new file mode 100644
index 00000000..f74d6651
--- /dev/null
+++ b/noao/imred/src/dofoe/proc.par
@@ -0,0 +1,36 @@
+objects,s,a,,,,"List of object spectra"
+apref,f,a,"",,,"Aperture reference spectrum"
+flat,f,a,"",,,"Flat field spectrum"
+arcs,s,a,,,,"List of arc spectra"
+arctable,f,a,"",,,"Arc assignment table (optional)
+"
+naps,i,a,,,,"Number of apertures"
+objaps,s,a,,,,"Object apertures"
+arcaps,s,a,,,,"Arc apertures"
+objbeams,s,a,,,,"Object beam numbers"
+arcbeams,s,a,,,,"Arc beam numbers
+"
+fitflat,b,a,,,,"Fit and ratio flat field spectrum?"
+recenter,b,a,,,,"Recenter object apertures?"
+scattered,b,a,,,,"Subtract scattered light?"
+edit,b,a,,,,"Edit/review object apertures?"
+trace,b,a,,,,"Trace object spectra?"
+arcap,b,a,,,,"Use object apertures for arcs?"
+clean,b,a,,,,"Detect and replace bad pixels?"
+dispcor,b,a,,,,"Dispersion correct spectra?"
+splot,b,a,,,,"Plot the final spectrum?"
+redo,b,a,,,,"Redo operations if previously done?"
+update,b,a,,,,"Update spectra if cal data changes?"
+batch,b,a,,,,"Extract objects in batch?"
+listonly,b,a,,,,"List steps but don\'t process?
+"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+newaps,b,h,,,,
+newresp,b,h,,,,
+newdisp,b,h,,,,
+newarcs,b,h,,,,
+dobatch,b,h,,,,
+anssplot,s,q,"yes",no|yes|NO|YES,,"Splot spectrum?"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/dofoe/response.cl b/noao/imred/src/dofoe/response.cl
new file mode 100644
index 00000000..a59c5ea3
--- /dev/null
+++ b/noao/imred/src/dofoe/response.cl
@@ -0,0 +1,99 @@
+# RESPONSE -- Make a fiber response spectrum using a flat field and sky flat.
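+#
+# Illustrative call (added note, not part of the original script): PROC above
+# invokes this task roughly as
+#
+#	response (flat, apref, flat//"norm.ec", recenter=recenter, edit=edit,
+#	    trace=trace, clean=clean, fitflat=fitflat,
+#	    interactive=params.f_interactive, function=params.f_function,
+#	    order=params.f_order)
+#
+# where flat and apref are the flat field and aperture reference images.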
+
+procedure response (flat, apreference, response)
+
+string flat {prompt="Flat field spectrum"}
+string apreference {prompt="Aperture reference spectrum"}
+string response {prompt="Response spectrum"}
+
+bool recenter = no {prompt="Recenter sky apertures?"}
+bool edit = no {prompt="Edit/review sky apertures?"}
+bool trace = no {prompt="Trace sky spectra?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool fitflat = no {prompt="Fit and ratio flat field spectrum?"}
+bool interactive = yes {prompt="Fit flat field interactively?"}
+string function = "spline3" {prompt="Fitting function",
+ enum="spline3|legendre|chebyshev|spline1"}
+int order = 20 {prompt="Fitting function order", min=1}
+
+begin
+ string imtype
+ file log1, log2, flat2d, flatec, resp
+ int i, n
+ struct err
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ n = strlen (imtype)
+
+ flat2d = flat
+ resp = response
+
+ if (flat2d == "")
+ error (1, "No flat field defined")
+ if (flat2d != "") {
+ i = strlen (flat2d)
+ if (i > n && substr (flat2d, i-n+1, i) == imtype)
+ flat2d = substr (flat2d, 1, i-n)
+ flatec = flat2d // ".ec"
+ if (!access (flat2d // imtype)) {
+ printf ("Flat field spectrum not found - %s%s\n",
+ flat2d, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ }
+
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # Initialize APALL
+ apscript.references = apreference
+ if (recenter)
+ apscript.ansrecenter = "YES"
+ else
+ apscript.ansrecenter = "NO"
+ apscript.ansresize = "NO"
+ if (edit)
+ apscript.ansedit = "yes"
+ else
+ apscript.ansedit = "NO"
+ if (trace)
+ apscript.anstrace = "YES"
+ else
+ apscript.anstrace = "NO"
+ apscript.ansextract = "YES"
+
+ print ("Extract flat field ", flat2d) | tee (log1)
+ if (flat2d == apscript.references)
+ apscript (flat2d, ansrecenter="NO", ansedit="NO", anstrace="NO",
+ background="none", clean=clean, extras=no)
+ else
+ apscript (flat2d, clean=clean, extras=no)
+
+ if (fitflat) {
+ print ("Fit and ratio flat field ", flat2d) | tee (log1)
+ fit1d (flatec, resp, "fit", axis=1, interactive=interactive,
+ sample="*", naverage=1, function=function, order=order,
+ low_reject=0., high_reject=0., niterate=1, grow=0.,
+ graphics="stdgraph")
+ sarith (flatec, "/", resp, resp, w1=INDEF, w2=INDEF, apertures="",
+ bands="", beams="", apmodulus=0, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0, clobber=yes,
+ merge=no, errval=0, verbose=no)
+ imdelete (flatec, verify=no)
+ } else
+ imrename (flatec, resp, verbose=no)
+
+ print ("Create the normalized response ", resp) | tee (log1)
+ bscale (resp, resp, bzero="0.", bscale="mean", section="",
+ step=1, upper=INDEF, lower=INDEF, verbose=yes) | tee (log1, >log2)
+end
diff --git a/noao/imred/src/dofoe/response.par b/noao/imred/src/dofoe/response.par
new file mode 100644
index 00000000..2bff1f0f
--- /dev/null
+++ b/noao/imred/src/dofoe/response.par
@@ -0,0 +1,12 @@
+flat,s,a,,,,"Flat field spectrum"
+apreference,s,a,,,,"Aperture reference spectrum"
+response,s,a,,,,"Response spectrum"
+recenter,b,h,no,,,"Recenter sky apertures?"
+edit,b,h,no,,,"Edit/review sky apertures?"
+trace,b,h,no,,,"Trace sky spectra?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+fitflat,b,h,no,,,"Fit and ratio flat field spectrum?"
+interactive,b,h,yes,,,"Fit flat field interactively?"
+function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+order,i,h,20,1,,"Fitting function order"
+mode,s,h,"q",,,
diff --git a/noao/imred/src/doslit/Revisions b/noao/imred/src/doslit/Revisions
new file mode 100644
index 00000000..5e219369
--- /dev/null
+++ b/noao/imred/src/doslit/Revisions
@@ -0,0 +1,129 @@
+.help revisions Dec94 noao.imred.src.doslit
+.nf
+
+=======
+V2.12.3
+=======
+
+doslit$sbatch.cl
+doslit$sproc.cl
+ Error messages now hint to check imtype setting.
+ (4/15/05, Valdes)
+
+========
+V2.12.2b
+========
+
+=======
+V2.12.1
+=======
+
+doslit$sproc.cl
+doslit$sbatch.cl
+doslit$sarcrefs.cl
+doslit$sfluxcal.cl
+    Added a flpr after dispcor to work around a bug with the FITS kernel
+ header caching. (6/24/02, Valdes)
+
+=====
+V2.12
+=====
+
+doslit$sproc.cl
+ Modified code to eliminate goto. This is for use with pyraf.
+ (11/21/00, Valdes)
+
+========
+V2.11.3a
+========
+
+doslit$sarcrefs.cl
+ The test for INDEF values on CRVAL/CDELT did not work correctly.
+ (9/24/99, Valdes)
+
+doslit$doslit.cl
+doslit$doslit.par
+doslit$sarcrefs.cl
+doslit$sarcrefs.par
+doslit$sproc.cl
+doslit$sproc.par
+ No change made though dates were modified. (9/24/99, Valdes)
+
+=======
+V2.11.2
+=======
+
+doslit$sarcrefs.cl
+ The test on CRVAL and CDELT would not work with header keywords.
+ (9/22/98, Valdes)
+
+doslit$sarcrefs.cl
+doslit$sbatch.cl
+doslit$sdoarcs.cl
+doslit$sfluxcal.cl
+doslit$sgetspec.cl
+doslit$slistonly.cl
+doslit$sproc.cl
+ Any additional qualifiers in the imtype string are stripped.
+ (8/14/97, Valdes)
+
+doslit$sgetspec.cl
+ Added the field parameter to the RENAME call. (8/12/97, Valdes)
+
+=========
+V2.11Beta
+=========
+
+doslit$sarcrefs.cl
+ If both crval and cdelt are INDEF then the autoidentify option is not
+ used. (5/2/97, Valdes)
+
+doslit$apslitproc.par
+ Made changes for the new aperture selection option. (9/5/96, Valdes)
+
+doslit.cl
+doslit.par
+sproc.cl
+sproc.par
+sarcrefs.cl
+sarcrefs.par
+sparams.par
+ Modified to use autoidentify. (4/5/96, Valdes)
+
+doslit$sproc.cl
+doslit$sproc.par
+ Added missing parameter declaration. (5/25/95, Valdes)
+
+doslit$sgetspec.cl
+doslit$doslit.cl
+ The arc table will now be checked for arc spectra. (5/1/95, Valdes)
+
+doslit$sparams.par
+doslit$sdoarcs.cl
+doslit$sarcrefs.cl
+ Added "threshold" as a user parameter. (1/16/95, Valdes)
+
+doslit$sproc.cl
+doslit$sbatch.cl
+doslit$sfluxcal.cl
+doslit$sproc.par
+doslit$sbatch.par
+doslit$sfluxcal.par
+ SETAIRMASS and SETJD are only called if the required keywords are
+ present. Errors from missing airmass or JD are then reported from
+ the task that actually uses them. (12/31/94, Valdes)
+
+doslit$sgetspec.cl
+doslit$sgetspec.par
+ Added warning and query for missing CCDPROC keyword. (12/31/94, Valdes)
+
+doslit$sproc.cl
+doslit$sarcrefs.cl
+    1.  If the object apertures used for the reference arc contain more than
+	one aperture, then after extraction all apertures but the first
+	are removed. This leaves a single reference arc aperture for the
+	master dispersion solution.
+ 2. Set ignoreaps=yes so that any new apertures added to the objects
+ will inherit the wavelength scale of the reference arc.
+ (10/12/94, Valdes)
+.endhelp
diff --git a/noao/imred/src/doslit/apslitproc.par b/noao/imred/src/doslit/apslitproc.par
new file mode 100644
index 00000000..bab5b58e
--- /dev/null
+++ b/noao/imred/src/doslit/apslitproc.par
@@ -0,0 +1,145 @@
+# APSCRIPT
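+#
+# Added note (not in the original file): parameter values of the form
+# ")sparams.name" are IRAF parameter redirections, so these entries track
+# the corresponding entries in the SPARAMS pset and can be edited in one place.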
+
+input,s,a,,,,List of input images
+output,s,h,"",,,List of output spectra
+apertures,s,h,"",,,Apertures
+scatter,s,h,"",,,List of scattered light images (optional)
+references,s,h,"",,,List of aperture reference images
+profiles,s,h,"",,,"List of aperture profile images
+"
+interactive,b,h,yes,,,Run task interactively?
+find,b,h,yes,,,Find apertures?
+recenter,b,h,yes,,,Recenter apertures?
+resize,b,h,yes,,,Resize apertures?
+edit,b,h,yes,,,Edit apertures?
+trace,b,h,yes,,,Trace apertures?
+fittrace,b,h,yes,,,Fit the traced points interactively?
+extract,b,h,yes,,,Extract spectra?
+review,b,h,yes,,,Review extractions?
+subtract,b,h,yes,,,Subtract scattered light?
+smooth,b,h,yes,,,Smooth scattered light along the dispersion?
+fitscatter,b,h,yes,,,Fit scattered light interactively?
+fitsmooth,b,h,yes,,,"Smooth the scattered light interactively?
+"
+line,i,h,)sparams.line,,,>sparams.line
+nsum,i,h,)sparams.nsum,,,>sparams.nsum
+buffer,r,h,)sparams.buffer,,,">sparams.buffer
+
+# OUTPUT PARAMETERS
+"
+format,s,h,"multispec",,,Extracted spectra format
+extras,b,h,)sparams.extras,,,"Extract sky, sigma, etc.?"
+dbwrite,s,h,"YES",,,Write to database?
+initialize,b,h,no,,,Initialize answers?
+verbose,b,h,)_.verbose,,,"Verbose output?
+
+# DEFAULT APERTURE PARAMETERS
+"
+lower,r,h,)sparams.lower,,,>sparams.lower
+upper,r,h,)sparams.upper,,,>sparams.upper
+apidtable,s,h,"",,,"Aperture ID table (optional)
+
+# DEFAULT BACKGROUND PARAMETERS
+"
+b_function,s,h,)sparams.b_function,,,>sparams.b_function
+b_order,i,h,)sparams.b_order,,,>sparams.b_order
+b_sample,s,h,)sparams.b_sample,,,>sparams.b_sample
+b_naverage,i,h,)sparams.b_naverage,,,>sparams.b_naverage
+b_niterate,i,h,)sparams.b_niterate,,,>sparams.b_niterate
+b_low_reject,r,h,)sparams.b_low,,,>sparams.b_low
+b_high_reject,r,h,)sparams.b_high,,,>sparams.b_high
+b_grow,r,h,0.,0.,,"Background rejection growing radius
+
+# APERTURE CENTERING PARAMETERS
+"
+width,r,h,,,,Profile centering width
+radius,r,h,,,,Profile centering radius
+threshold,r,h,10.,0.,,"Detection threshold for profile centering
+
+# AUTOMATIC FINDING AND ORDERING PARAMETERS
+"
+nfind,i,h,1,,,Number of apertures to be found automatically
+minsep,r,h,1.,,,Minimum separation between spectra
+maxsep,r,h,100000.,,,Maximum separation between spectra
+order,s,h,"increasing",,,"Order of apertures
+
+# RECENTERING PARAMETERS
+"
+aprecenter,s,h,"",,,Apertures for recentering calculation
+npeaks,r,h,INDEF,,,Select brightest peaks
+shift,b,h,yes,,,"Use average shift instead of recentering?
+
+# RESIZING PARAMETERS
+"
+llimit,r,h,INDEF,,,Lower aperture limit relative to center
+ulimit,r,h,INDEF,,,Upper aperture limit relative to center
+ylevel,r,h,)sparams.ylevel,,,>sparams.ylevel
+peak,b,h,yes,,,Is ylevel a fraction of the peak?
+bkg,b,h,yes,,,Subtract background in automatic width?
+r_grow,r,h,0.,,,Grow limits by this factor
+avglimits,b,h,no,,,"Average limits over all apertures?
+
+# EDITING PARAMETERS
+"
+e_output,s,q,,,,Output spectra rootname
+e_profiles,s,q,,,,"Profile reference image
+
+# TRACING PARAMETERS
+"
+t_nsum,i,h,)sparams.nsum,,,>sparams.nsum
+t_step,i,h,)sparams.t_step,,,>sparams.t_step
+t_nlost,i,h,3,1,,Number of consecutive times profile is lost before quitting
+t_width,r,h,)sparams.width,,,>sparams.width
+t_function,s,h,)sparams.t_function,,,>sparams.t_function
+t_sample,s,h,"*",,,Trace sample regions
+t_order,i,h,)sparams.t_order,,,>sparams.t_order
+t_naverage,i,h,1,,,Trace average or median
+t_niterate,i,h,)sparams.t_niterate,,,>sparams.t_niterate
+t_low_reject,r,h,)sparams.t_low,,,>sparams.t_low
+t_high_reject,r,h,)sparams.t_high,,,>sparams.t_high
+t_grow,r,h,0.,0.,,"Trace rejection growing radius
+
+# EXTRACTION PARAMETERS
+"
+background,s,h,)sparams.background,,,>sparams.background
+skybox,i,h,1,,,Box car smoothing length for sky
+weights,s,h,)sparams.weights,,,>sparams.weights
+pfit,s,h,)sparams.pfit,,,>sparams.pfit
+clean,b,h,no,,,Detect and replace bad pixels?
+nclean,r,h,0.5,,,Maximum number of pixels to clean
+niterate,i,h,5,0,,Number of profile fitting iterations
+saturation,r,h,INDEF,,,Saturation level
+readnoise,s,h,,,,Read out noise sigma (photons)
+gain,s,h,,,,Photon gain (photons/data number)
+lsigma,r,h,)sparams.lsigma,,,>sparams.lsigma
+usigma,r,h,)sparams.usigma,,,>sparams.usigma
+polysep,r,h,0.95,0.1,0.95,Marsh algorithm polynomial spacing
+polyorder,i,h,10,1,,Marsh algorithm polynomial order
+nsubaps,i,h,1,1,,"Number of subapertures per aperture
+
+# ANSWER PARAMETERS
+"
+ansclobber,s,h,"NO",,," "
+ansclobber1,s,h,"NO",,," "
+ansdbwrite,s,h,"YES",,," "
+ansdbwrite1,s,h,"NO",,," "
+ansedit,s,h,"NO",,," "
+ansextract,s,h,"NO",,," "
+ansfind,s,h,"NO",,," "
+ansfit,s,h,"NO",,," "
+ansfitscatter,s,h,"NO",,," "
+ansfitsmooth,s,h,"NO",,," "
+ansfitspec,s,h,"NO",,," "
+ansfitspec1,s,h,"NO",,," "
+ansfittrace,s,h,"NO",,," "
+ansfittrace1,s,h,"NO",,," "
+ansflat,s,h,"NO",,," "
+ansnorm,s,h,"NO",,," "
+ansrecenter,s,h,"NO",,," "
+ansresize,s,h,"NO",,," "
+ansreview,s,h,"NO",,," "
+ansreview1,s,h,"NO",,," "
+ansscat,s,h,"NO",,," "
+ansskyextract,s,h,"NO",,," "
+anssmooth,s,h,"NO",,," "
+anstrace,s,h,"NO",,," "
diff --git a/noao/imred/src/doslit/demologfile b/noao/imred/src/doslit/demologfile
new file mode 100644
index 00000000..a5a245c2
--- /dev/null
+++ b/noao/imred/src/doslit/demologfile
@@ -0,0 +1 @@
+Define object apertures
diff --git a/noao/imred/src/doslit/doslit.cl b/noao/imred/src/doslit/doslit.cl
new file mode 100644
index 00000000..56458435
--- /dev/null
+++ b/noao/imred/src/doslit/doslit.cl
@@ -0,0 +1,64 @@
+# DOSLIT -- Process slit spectra from 2D to wavelength calibrated 1D.
+#
+# The task SPROC does all of the interactive work and SBATCH does the
+# background work. This procedure is organized this way to minimize the
+# dictionary space when the background task is submitted.
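+#
+# Illustrative usage (added note, not part of the original script; image names
+# are hypothetical, parameters as in doslit.par):
+#
+#	cl> doslit obj001 arcs=arc001 standards=std001 fluxcal+ splot+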
+
+procedure doslit (objects)
+
+string objects = "" {prompt="List of object spectra"}
+
+string arcs = "" {prompt="List of arc spectra"}
+file arctable {prompt="Arc assignment table (optional)"}
+string standards = "" {prompt="List of standard star spectra\n"}
+
+string readnoise = "rdnoise" {prompt="Read out noise sigma (photons)"}
+string gain = "gain" {prompt="Photon gain (photons/data number)"}
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+real width = 5. {prompt="Width of profiles (pixels)"}
+string crval = "INDEF" {prompt="Approximate wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion\n"}
+
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool extcor = no {prompt="Extinction correct spectra?"}
+bool fluxcal = no {prompt="Flux calibrate spectra?"}
+bool resize = no {prompt="Automatically resize apertures?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool splot = no {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = no {prompt="Update spectra if cal data changes?"}
+bool quicklook = no {prompt="Minimally interactive quick-look?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+pset sparams = "" {prompt="Algorithm parameters"}
+
+begin
+ file obj, arc, std
+
+ # Expand image lists
+ obj = mktemp ("tmp$iraf")
+ arc = mktemp ("tmp$iraf")
+ std = mktemp ("tmp$iraf")
+ sgetspec (objects, arcs, arctable, standards, obj, arc, std)
+
+ apslitproc.readnoise = readnoise
+ apslitproc.gain = gain
+ apslitproc.width = width
+ apslitproc.t_width = width
+ apslitproc.radius = width
+ apslitproc.clean = clean
+ sproc.datamax = datamax
+
+ sproc (obj, arc, arctable, std, crval, cdelt, dispcor, extcor, fluxcal,
+ resize, clean, splot, redo, update, quicklook, batch, listonly)
+ delete (std, verify=no)
+
+ if (sproc.dobatch) {
+ print ("-- Do remaining spectra as a batch job --")
+ print ("sbatch&batch") | cl
+ } else {
+ delete (obj, verify=no)
+ delete (arc, verify=no)
+ }
+end
diff --git a/noao/imred/src/doslit/doslit.par b/noao/imred/src/doslit/doslit.par
new file mode 100644
index 00000000..6e4119f6
--- /dev/null
+++ b/noao/imred/src/doslit/doslit.par
@@ -0,0 +1,26 @@
+objects,s,a,"",,,"List of object spectra"
+arcs,s,h,"",,,"List of arc spectra"
+arctable,f,h,"",,,"Arc assignment table (optional)"
+standards,s,h,"",,,"List of standard star spectra
+"
+readnoise,s,h,"rdnoise",,,"Read out noise sigma (photons)"
+gain,s,h,"gain",,,"Photon gain (photons/data number)"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+width,r,h,5.,,,"Width of profiles (pixels)"
+crval,s,h,"INDEF",,,"Approximate wavelength"
+cdelt,s,h,"INDEF",,,"Approximate dispersion
+"
+dispcor,b,h,yes,,,"Dispersion correct spectra?"
+extcor,b,h,no,,,"Extinction correct spectra?"
+fluxcal,b,h,no,,,"Flux calibrate spectra?"
+resize,b,h,no,,,"Automatically resize apertures?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+splot,b,h,no,,,"Plot the final spectrum?"
+redo,b,h,no,,,"Redo operations if previously done?"
+update,b,h,no,,,"Update spectra if cal data changes?"
+quicklook,b,h,no,,,"Minimally interactive quick-look?"
+batch,b,h,no,,,"Extract objects in batch?"
+listonly,b,h,no,,,"List steps but don\'t process?
+"
+sparams,pset,h,"",,,"Algorithm parameters"
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/doslittasks.cl b/noao/imred/src/doslit/doslittasks.cl
new file mode 100644
index 00000000..a7b635a9
--- /dev/null
+++ b/noao/imred/src/doslit/doslittasks.cl
@@ -0,0 +1,17 @@
+#{ DOSLIT tasks
+
+task doslit = "doslit$doslit.cl"
+task sproc = "doslit$sproc.cl"
+task sarcrefs = "doslit$sarcrefs.cl"
+task sdoarcs = "doslit$sdoarcs.cl"
+task sfluxcal = "doslit$sfluxcal.cl"
+task sbatch = "doslit$sbatch.cl"
+task slistonly = "doslit$slistonly.cl"
+task sgetspec = "doslit$sgetspec.cl"
+
+task apslitproc = "doslit$x_apextract.e"
+
+hidetask sproc, sbatch, sarcrefs, sdoarcs, sfluxcal, slistonly, sgetspec
+hidetask apslitproc
+
+keep
diff --git a/noao/imred/src/doslit/sarcrefs.cl b/noao/imred/src/doslit/sarcrefs.cl
new file mode 100644
index 00000000..304983f1
--- /dev/null
+++ b/noao/imred/src/doslit/sarcrefs.cl
@@ -0,0 +1,118 @@
+# SARCREFS -- Determine dispersion relation for reference arcs.
+
+procedure sarcrefs (arcref1, crval, cdelt, done, log1, log2)
+
+file arcref1
+string crval = "INDEF"
+string cdelt = "INDEF"
+file done
+file log1
+file log2
+bool newdisp = no
+
+struct *fd
+
+begin
+ string arcref, arcrefms, arc, arcms, temp, str1, str2
+ int i, dc, nspec
+ bool log
+ struct str3
+
+ temp = mktemp ("tmp$iraf")
+
+ # Extract the primary arc reference spectrum. Determine the
+	# dispersion function with IDENTIFY/AUTOIDENTIFY. Set the wavelength
+	# parameters with DISPCOR.
+
+ newdisp = no
+ arcref = arcref1
+ arcrefms = arcref1 // ".ms." // envget ("imtype")
+ i = stridx (",", arcrefms)
+ if (i > 0)
+ arcrefms = substr (arcrefms, 1, i-1)
+ if (!access (arcrefms)) {
+ print ("Extract arc reference image ", arcref) | tee (log1)
+ if (apslitproc.reference == "") {
+ delete (database//"/ap"//arcref, verify=no, >& "dev$null")
+ apslitproc (arcref, nfind=-1, ansfind="YES",
+ background="none", clean=no, weights="none")
+ } else
+ apslitproc (arcref, background="none", clean=no, weights="none")
+
+ nspec = 1
+ hselect (arcrefms, "naxis2", yes) | scan (nspec)
+ if (nspec > 1)
+ scopy (arcrefms//"[*,1]", arcrefms, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ format="multispec", renumber=no, offset=0, clobber=yes,
+ merge=no, rebin=yes, verbose=no)
+ }
+
+ # Check for dispersion correction. If missing determine the
+ # dispersion function and dispersion correct. Dispersion
+ # correction is required to define the dispersion parameters
+ # for the objects.
+
+ hselect (arcrefms, "dispcor", yes, > temp)
+ fd = temp
+ dc = -1
+ i = fscan (fd, dc)
+ fd = ""; delete (temp, verify=no)
+ if (i < 1 || dc == -1) {
+ print ("Determine dispersion solution for ", arcref) | tee (log1)
+ #delete (database//"/id"//arcref//".ms*", verify=no)
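+	    # Concatenate crval and cdelt so a single test detects when both
+	    # are "INDEF"; in that case IDENTIFY is run interactively,
+	    # otherwise AUTOIDENTIFY is used with the approximate wavelength
+	    # and dispersion.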
+ printf ("%s %s\n", crval, cdelt) | scan (str3)
+ if (str3 == "INDEF INDEF")
+ identify (arcrefms, section="middle line", database=database,
+ coordlist=sparams.coordlist, nsum=1, match=sparams.match,
+ maxfeatures=50, zwidth=100., ftype="emission",
+ fwidth=sparams.fwidth, cradius=sparams.cradius,
+ threshold=sparams.threshold, minsep=2.,
+ function=sparams.i_function, order=sparams.i_order,
+ sample="*", niterate=sparams.i_niterate,
+ low_reject=sparams.i_low, high_reject=sparams.i_high,
+ grow=0., autowrite=yes)
+ else
+ autoidentify (arcrefms, crval, cdelt,
+ coordlist=sparams.coordlist,
+ interactive="YES", section="middle line", nsum="1",
+ ftype="emission", fwidth=sparams.fwidth,
+ cradius=sparams.cradius, threshold=sparams.threshold,
+ minsep=2., match=sparams.match, function=sparams.i_function,
+ order=sparams.i_order, sample="*",
+ niterate=sparams.i_niterate, low_reject=sparams.i_low,
+ high_reject=sparams.i_high, grow=0., dbwrite="YES",
+ overwrite=yes, database="database", verbose=yes,
+ logfile=logfile, plotfile=plotfile,
+ reflist="", refspec="", crpix="INDEF", cddir="unknown",
+ crsearch="-0.5", cdsearch="INDEF", aidpars="")
+
+ hedit (arcrefms, "refspec1", arcref // ".ms", add=yes,
+ show=no, verify=no)
+
+ dispcor (arcrefms, "", linearize=sparams.linearize,
+ database=database, table="", w1=INDEF, w2=INDEF, dw=INDEF,
+ nw=INDEF, log=sparams.log, flux=sparams.flux, samedisp=yes,
+ global=no, ignoreaps=yes, confirm=yes, verbose=no, listonly=no,
+ logfile=logfile)
+ flpr
+
+ hedit (arcrefms, "dispcor", 0, add=yes, verify=no,
+ show=no, update=yes)
+ newdisp = yes
+
+# if (sproc.splot1) {
+# print (arcrefms, ":")
+# str1 = sproc.anssplot
+# if (str1 == "NO" || str1 == "YES")
+# sproc.splot1 = no
+# if (str1 == "no" || str1 == "NO")
+# sproc.splot2 = no
+# else
+# sproc.splot2 = yes
+# }
+# if (sproc.splot2)
+# splot (arcrefms)
+ }
+ print (arcref, >> done)
+end
diff --git a/noao/imred/src/doslit/sarcrefs.par b/noao/imred/src/doslit/sarcrefs.par
new file mode 100644
index 00000000..012dcaf7
--- /dev/null
+++ b/noao/imred/src/doslit/sarcrefs.par
@@ -0,0 +1,9 @@
+arcref1,f,a,"",,,
+crval,s,a,"INDEF",,,
+cdelt,s,a,"INDEF",,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+newdisp,b,h,no,,,
+fd,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/sbatch.cl b/noao/imred/src/doslit/sbatch.cl
new file mode 100644
index 00000000..8e01b09e
--- /dev/null
+++ b/noao/imred/src/doslit/sbatch.cl
@@ -0,0 +1,199 @@
+# SBATCH -- Process spectra in batch.
+# This task is called in batch mode. It only processes objects
+# not previously processed unless the update or redo flags are set.
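+#
+# Added note (not part of the original comment): DOSLIT submits this task to
+# the background with the CL command "sbatch&batch", as shown at the end of
+# doslit.cl.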
+
+procedure sbatch ()
+
+file objects {prompt="List of object spectra"}
+real datamax {prompt="Max data value / cosmic ray threshold"}
+file arcs1 {prompt="List of arc spectra"}
+file arcref1 {prompt="Arc reference for dispersion solution"}
+string arcrefs {prompt="Arc references\n"}
+
+file done {prompt="File of spectra already done"}
+file logfile {prompt="Logfile"}
+
+bool redo {prompt="Redo operations?"}
+bool update {prompt="Update spectra?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool extcor {prompt="Extinction correct spectra?"}
+bool fluxcal1 {prompt="Flux calibrate spectra?"}
+
+bool newdisp, newsens, newarcs
+
+struct *fd1, *fd2, *fd3
+
+begin
+ file temp, spec, specms, arc, arcms
+ bool reextract, extract, disp, ext, flux, log
+ string imtype, mstype, str1, str2, str3, str4
+ int i
+ struct err
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ temp = mktemp ("tmp$iraf")
+
+ reextract = redo || (update && newdisp)
+
+ fd1 = objects
+ while (fscan (fd1, spec) != EOF) {
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ printf ("Object spectrum not found - %s%s\n",
+ spec, imtype) | scan (err)
+ print (err, >> logfile)
+ print ("Check setting of imtype", >> logfile)
+ next
+ }
+ specms = spec // mstype
+
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (reextract || !access (specms))
+ extract = yes
+ else {
+ hselect (specms, "dispcor", yes, > temp)
+ hselect (specms, "ex-flag", yes, >> temp)
+ hselect (specms, "ca-flag", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ if (extract) {
+ if (access (specms))
+ imdelete (specms, verify=no)
+ print ("Extract object spectrum ", spec, >> logfile)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ if (fscan (fd2, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ apslitproc (spec, saturation=datamax, verbose=no)
+ }
+
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ fd2 = arcs1
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp)
+ fd3 = temp
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> logfile)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> logfile)
+ }
+ fd3 = ""; delete (temp, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec, >> logfile)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no,
+ >> logfile)
+
+ sdoarcs (spec, arcref1, reextract, logfile, yes)
+
+ hselect (specms, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1)
+ print ("No arc reference assigned for ", spec, >> logfile)
+ else {
+ print ("Dispersion correct ", spec, >> logfile)
+ dispcor (specms, "", linearize=sparams.linearize,
+ database=database, table=arcref1//mstype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, ignoreaps=no, confirm=no, listonly=no,
+ verbose=no, logfile=logfile)
+ flpr
+ hedit (specms, "dispcor", 0, add=yes, verify=no,
+ show=no, update=yes)
+ disp = no
+ }
+ }
+
+ if (!disp) {
+ if (ext)
+ print ("Extinction correct ", spec, >> logfile)
+ if (flux)
+ print ("Flux calibrate ", spec, >> logfile)
+ if (flux || ext)
+ calibrate (specms, "", extinct=extcor, flux=fluxcal1,
+ extinction=extinction, observatory=observatory,
+ ignoreaps=yes, sensitivity="sens", fnu=sparams.fnu,
+ >> logfile)
+ }
+ }
+ fd1 = ""
+ delete (objects, verify=no)
+ delete (arcs1, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+
+ flprcache (0)
+end
diff --git a/noao/imred/src/doslit/sbatch.par b/noao/imred/src/doslit/sbatch.par
new file mode 100644
index 00000000..582bdf03
--- /dev/null
+++ b/noao/imred/src/doslit/sbatch.par
@@ -0,0 +1,20 @@
+objects,f,h,"",,,"List of object spectra"
+datamax,r,h,,,,"Max data value / cosmic ray threshold"
+arcs1,f,h,"",,,"List of arc spectra"
+arcref1,f,h,"",,,"Arc reference for dispersion solution"
+arcrefs,s,h,,,,"Arc references
+"
+done,f,h,"",,,"File of spectra already done"
+logfile,f,h,"",,,"Logfile"
+redo,b,h,,,,"Redo operations?"
+update,b,h,,,,"Update spectra?"
+dispcor,b,h,,,,"Dispersion correct spectra?"
+extcor,b,h,,,,"Extinction correct spectra?"
+fluxcal1,b,h,,,,"Flux calibrate spectra?"
+newdisp,b,h,,,,
+newsens,b,h,,,,
+newarcs,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/sdoarcs.cl b/noao/imred/src/doslit/sdoarcs.cl
new file mode 100644
index 00000000..19b8f3af
--- /dev/null
+++ b/noao/imred/src/doslit/sdoarcs.cl
@@ -0,0 +1,101 @@
+# SDOARCS -- Determine dispersion relation for spectrum based on reference arcs.
+
+procedure sdoarcs (spec, arcref1, reextract, logfile, batch)
+
+file spec
+file arcref1
+bool reextract
+file logfile
+bool batch
+
+struct *fd
+
+begin
+ file temp, arc1, arc2, str1
+ string imtype, mstype, reid
+ bool verbose1
+	int i, j, n
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ for (j=1; j<=2; j+=1) {
+
+ # Setup interactive/batch parameters
+ if (batch) {
+ verbose1 = no
+ reid = "no"
+ } else {
+ verbose1 = verbose
+ reidentify.answer.p_mode = "h"
+ reid = reidentify.answer
+ reidentify.answer.p_mode = "q"
+ if (reid == "no")
+ reid = "yes"
+ }
+
+ # The reference spectra refer initially to the 2D image. At the
+ # end we will reset them to refer to the 1D spectra.
+
+ hselect (spec, "refspec"//j, yes, > temp)
+ fd = temp
+ i = fscan (fd, arc1, str1)
+ fd = ""; delete (temp, verify=no)
+ if (nscan() < 1)
+ break
+
+ # Strip possible image extension.
+ i = strlen (arc1)
+ if (i > n && substr (arc1, i-n+1, i) == imtype)
+ arc1 = substr (arc1, 1, i-n)
+
+ # Set extraction output and aperture reference depending on whether
+	# the arcs are to be reextracted using recentered or retraced object
+ # apertures.
+
+ arc2 = spec // arc1
+ if (access (arc2//mstype))
+ imdelete (arc2//mstype, verify=no)
+ delete (database//"/id"//arc2//".ms*", verify = no, >& "dev$null")
+
+ # Extract and determine dispersion function if necessary.
+ if (!access (arc2//mstype)) {
+ if (!batch)
+ print ("Extract and reidentify arc spectrum ", arc1)
+ print ("Extract and reidentify arc spectrum ", arc1, >> logfile)
+ apslitproc (arc1, output=arc2//".ms", references=spec,
+ background="none", clean=no, weights="none",
+ verbose=verbose1)
+ delete (database//"/id"//arc2//".ms*", verify = no,
+ >& "dev$null")
+ reidentify (arcref1//".ms", arc2//".ms", interactive=reid,
+ section="middle line", shift=0., step=1, nsum=1,
+ cradius=sparams.cradius, threshold=sparams.threshold,
+ nlost=100, refit=sparams.refit, trace=no, override=no,
+ newaps=yes, addfeatures=sparams.addfeatures,
+ coordlist=sparams.coordlist, match=sparams.match,
+ maxfeatures=50, minsep=2., database=database,
+ plotfile=plotfile, logfiles=logfile, verbose=verbose1)
+
+ # If not reextracting arcs based on object apertures
+ # then save the extracted arc to avoid doing it again.
+
+ if (arc1 != arc2)
+ imdelete (arc2//".ms", verify=no)
+ }
+
+ # Set the REFSPEC parameters for multispec spectrum.
+ if (nscan() == 1)
+ hedit (spec//".ms", "refspec"//j, arc2//".ms", add=yes,
+ verify=no, show=no, update=yes)
+ else
+ hedit (spec//".ms", "refspec"//j, arc2//".ms "//str1,
+ add=yes, verify=no, show=no, update=yes)
+ }
+end
diff --git a/noao/imred/src/doslit/sdoarcs.par b/noao/imred/src/doslit/sdoarcs.par
new file mode 100644
index 00000000..cea554c2
--- /dev/null
+++ b/noao/imred/src/doslit/sdoarcs.par
@@ -0,0 +1,7 @@
+spec,f,a,"",,,
+arcref1,f,a,"",,,
+reextract,b,a,,,,
+logfile,f,a,"",,,
+batch,b,a,,,,
+fd,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/sfluxcal.cl b/noao/imred/src/doslit/sfluxcal.cl
new file mode 100644
index 00000000..e9399ac3
--- /dev/null
+++ b/noao/imred/src/doslit/sfluxcal.cl
@@ -0,0 +1,196 @@
+# SFLUXCAL -- Extract standard stars and determine sensitivity function.
+# If flux calibrating, extract and dispersion correct the standard star
+# spectra. Compile the standard star fluxes from the calibration
+# directory. The user is queried for the star name but the band passes
+# are not allowed to be changed interactively. Next compute the sensitivity
+# function using SENSFUNC. This is interactive. Once the sensitivity
+# function images are created, flux and extinction calibrate the standard
+# stars. This is done in such a way that if new standard stars are added
+# in a later execution only the new stars are added and then a new
+# sensitivity function is computed. If the update flag is set all
+# spectra which are specified are reprocessed if they were previously
+# processed. In a redo the "std" file is deleted, otherwise additions
+# are appended to this file.
+
+procedure sfluxcal (stds, arcs1, arcref1, arcrefs, redo, update, extcor,
+ done, log1, log2)
+
+file stds
+file arcs1
+file arcref1
+string arcrefs
+bool redo
+bool update
+bool extcor
+file done
+file log1
+file log2
+
+struct *fd1, *fd2, *fd3
+
+begin
+ string imtype, mstype
+ string spec, specms, arc, str1, str2, str3, str4
+ file temp1, temp2
+ int i, j
+ bool reextract, log
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+
+ reextract = redo || (update && sproc.newdisp)
+ sproc.newsens = no
+
+ if (redo && access ("std"))
+ delete ("std", verify=no)
+
+ fd1 = stds
+ while (fscan (fd1, spec) != EOF) {
+ specms = spec // mstype
+
+ if (reextract && access (specms))
+ imdelete (specms, verify=no)
+ if (!access (specms)) {
+ print ("Extract standard star spectrum ", spec) | tee (log1)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp1)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp1)
+ fd2 = temp1
+ if (fscan (fd2, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd2, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd2 = ""; delete (temp1, verify=no)
+ apslitproc (spec)
+ }
+
+ hselect (specms, "dispcor,std-flag", yes, > temp1)
+ fd2 = temp1
+ j = fscan (fd2, str1, str2)
+ fd2 = ""; delete (temp1, verify=no)
+ if (j < 1) {
+ # Fix arc headers if necessary.
+ if (sproc.newarcs) {
+ fd2 = arcs1
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ sproc.newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ sdoarcs (spec, arcref1, reextract, log1, no)
+
+ hselect (specms, "refspec1", yes, > temp1)
+ fd2 = temp1
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp1, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ next
+ } else {
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specms, "", linearize=sparams.linearize,
+ database=database, table=arcref1//mstype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, ignoreaps=no, confirm=no, listonly=no,
+ logfile=logfile, > log2)
+ flpr
+ hedit (specms, "dispcor", 0, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ }
+
+ if (j < 2 || !access ("std")) {
+ print ("Compile standard star fluxes for ", spec) | tee (log1)
+ standard (specms, output="std", samestar=no, beam_switch=no,
+ apertures="", bandwidth=INDEF, bandsep=INDEF,
+ fnuzero=3.68E-20, extinction=extinction, caldir=caldir,
+ observatory=observatory, interact=no)
+ hedit (specms, "std-flag", "yes", add=yes, verify=no,
+ show=no, update=yes)
+ print (specms, >> temp2)
+ sproc.newsens = yes
+ }
+ }
+ fd1 = ""
+
+ if (sproc.newsens || !access ("sens"//imtype)) {
+ if (!access ("std")) {
+ print ("No standard star data") | tee (log1)
+ sproc.fluxcal1 = no
+ } else {
+ imdelete ("sens"//imtype, verify=no, >& "dev$null")
+ print ("Compute sensitivity function") | tee (log1)
+ sensfunc ("std", "sens", apertures="", ignoreaps=yes,
+ logfile=logfile, extinction=extinction,
+ newextinction="extinct.dat", observatory=observatory,
+ function=sparams.s_function, order=sparams.s_order,
+ interactive=yes, graphs="sr", marks="plus cross box")
+ sproc.newsens = yes
+ }
+ }
+
+ # Note that if new standard stars are added the old standard
+ # stars are not recalibrated unless the redo flag is used.
+
+ if (sproc.fluxcal1 && sproc.newsens && access (temp2)) {
+ print ("Flux and/or extinction calibrate standard stars") |
+ tee (log1)
+ calibrate ("@"//temp2, "", extinct=extcor, flux=sproc.fluxcal1,
+ extinction=extinction, observatory=observatory, ignoreaps=yes,
+ sensitivity="sens", fnu=sparams.fnu) | tee (log1, > log2)
+ if (sproc.splot1) {
+ print ("Standard stars:")
+ str1 = sproc.anssplot
+ if (str1 == "NO" || str1 == "YES")
+ sproc.splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ sproc.splot2 = no
+ else
+ sproc.splot2 = yes
+ }
+ if (sproc.splot2)
+ splot ("@"//temp2)
+ sections (temp2, option="fullname", >> done)
+ delete (temp2, verify=no)
+ }
+end
diff --git a/noao/imred/src/doslit/sfluxcal.par b/noao/imred/src/doslit/sfluxcal.par
new file mode 100644
index 00000000..44474335
--- /dev/null
+++ b/noao/imred/src/doslit/sfluxcal.par
@@ -0,0 +1,14 @@
+stds,f,a,,,,
+arcs1,f,a,,,,
+arcref1,f,a,"",,,
+arcrefs,s,a,,,,
+redo,b,a,,,,
+update,b,a,,,,
+extcor,b,a,,,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/sgetspec.cl b/noao/imred/src/doslit/sgetspec.cl
new file mode 100644
index 00000000..1140e610
--- /dev/null
+++ b/noao/imred/src/doslit/sgetspec.cl
@@ -0,0 +1,178 @@
+# SGETSPEC -- Get spectra which are CCD processed and not extracted.
+# This task also recognizes arc spectra in the object list and arc table
+# and strips the image type extension.
+
+procedure sgetspec (objects, arcs, arctable, standards, obj, arc, std)
+
+string objects {prompt="List of object images"}
+string arcs {prompt="List of arc images"}
+file arctable {prompt="Arc table"}
+string standards {prompt="List of standard images"}
+file obj {prompt="File of object images"}
+file arc {prompt="File of arc images"}
+file std {prompt="File of standard images"}
+bool ccdproc {prompt="Add CCDPROC keyword and continue?",
+ mode="q"}
+struct *fd1, *fd2
+
+begin
+ string imtype, temp, image, itype
+ int n, n1, narcs
+
+ imtype = "." // envget ("imtype")
+ n = stridx (",", imtype)
+ if (n > 0)
+ imtype = substr (imtype, 1, n-1)
+ n1 = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ # Initialize files
+ set clobber=yes
+ sleep (> obj)
+ sleep (> arc)
+ sleep (> std)
+ set clobber=no
+
+ # Do arcs
+ itype = ""
+ narcs = 0
+ sections (arcs, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "comp"
+ if (itype != "comp" && itype != "COMPARISON" &&
+ itype != "comparison" && itype != "COMP")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ # Do arc table.
+ if (arctable != "") {
+ fd2 = arctable
+ while (fscan (fd2, image, image) != EOF) {
+ if (nscan() != 2)
+ next
+ sections (image, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "comp"
+ if (itype != "comp" && itype != "COMPARISON" &&
+ itype != "comparison" && itype != "COMP")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ fd1 = ""; delete (temp, verify=no)
+ }
+ }
+
+ # Do standards
+ sections (standards, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ print (image, >> std)
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ # Do objects
+ sections (objects, option="fullname", > temp)
+ fd1 = temp
+ while (fscan (fd1, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (itype)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DOSLIT", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", itype)
+ if (itype == "equispec" || itype == "multispec")
+ next
+ hselect (image, "imagetyp", yes) | scan (itype)
+ if (nscan() == 0)
+ itype = "object"
+
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ if (itype == "object" || itype == "OBJECT")
+ print (image, >> obj)
+ else if (itype == "comp" || itype == "COMPARISON" ||
+ itype == "comparison" || itype == "COMP") {
+ narcs = narcs + 1
+ printf ("%s %02d\n", image, narcs, >> arc)
+ }
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ if (narcs > 0) {
+ sort (arc, column=0, ignore=yes, numeric=no, reverse=no, > temp)
+ delete (arc, verify=no)
+ rename (temp, arc, field="all")
+ itype = ""
+ fd1 = arc
+ while (fscan (fd1, image, narcs) != EOF) {
+ if (image != itype)
+ printf ("%s %02d\n", image, narcs, >> temp)
+ itype = image
+ }
+ delete (arc, verify=no)
+ sort (temp, column=2, ignore=yes, numeric=yes, reverse=no) |
+ fields ("STDIN", "1", lines="1-99", > arc)
+ delete (temp, verify=no)
+ }
+end
diff --git a/noao/imred/src/doslit/sgetspec.par b/noao/imred/src/doslit/sgetspec.par
new file mode 100644
index 00000000..1f5387cc
--- /dev/null
+++ b/noao/imred/src/doslit/sgetspec.par
@@ -0,0 +1,11 @@
+objects,s,a,,,,"List of object images"
+arcs,s,a,,,,"List of arc images"
+arctable,f,a,"",,,"Arc table"
+standards,s,a,,,,"List of standard images"
+obj,f,a,"",,,"File of object images"
+arc,f,a,"",,,"File of arc images"
+std,f,a,"",,,"File of standard images"
+ccdproc,b,q,,,,"Add CCDPROC keyword and continue?"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/slistonly.cl b/noao/imred/src/doslit/slistonly.cl
new file mode 100644
index 00000000..71ee3b40
--- /dev/null
+++ b/noao/imred/src/doslit/slistonly.cl
@@ -0,0 +1,180 @@
+# SLISTONLY -- List processing to be done.
+#
+# This follows essentially the same logic as the full procedure but only
+# lists the operations rather than performing them.
+
+procedure slistonly (objects, arcs1, standards, dispcor, extcor, fluxcal,
+ redo, update)
+
+file objects
+file arcs1
+file standards
+
+bool dispcor
+bool extcor
+bool fluxcal
+bool redo
+bool update
+
+struct *fd1
+struct *fd2
+
+begin
+ string imtype, mstype
+ string spec, arcref1
+ string specms, arcref1ms
+ string temp, done, str1, str2
+ bool reextract, fluxcal1, stdfile
+ bool newdisp, newsens, extract, disp, ext, flux
+ int i, dc, sf
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ temp = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ newdisp = no
+ newsens = no
+ fluxcal1 = fluxcal
+
+ print ("Check and set new object aperture definitions")
+
+ if (dispcor) {
+ fd1 = arcs1
+ if (fscan (fd1, arcref1) == EOF)
+ error (1, "No reference arcs")
+ fd1 = ""
+ arcref1ms = arcref1 // mstype
+
+ if (redo || !access (arcref1ms)) {
+ print ("Extract arc reference image ", arcref1)
+ print ("Determine dispersion solution for ", arcref1)
+ newdisp = yes
+ } else {
+ hselect (arcref1ms, "dispcor", yes, > temp)
+ fd1 = temp
+ dc = -1
+ i = fscan (fd1, dc)
+ fd1 = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("Determine dispersion solution for ", arcref1)
+ newdisp = yes
+ }
+ }
+ print (arcref1, > done)
+
+ if (fluxcal1) {
+ stdfile = access ("std")
+ if (redo && stdfile)
+ stdfile = no
+
+ reextract = redo || (update && newdisp)
+ fd1 = standards
+ while (fscan (fd1, spec) != EOF) {
+ specms = spec // mstype
+ if (reextract || !access (specms)) {
+ print ("Extract standard star spectrum ", spec)
+ print ("Dispersion correct ", spec)
+ print ("Compile standard star fluxes for ", spec)
+ stdfile = yes
+ newsens = yes
+ } else {
+ hselect (specms, "dispcor,std-flag", yes, > temp)
+ fd2 = temp
+ dc = -1
+ sf = -1
+ i = fscan (fd2, dc, sf)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1)
+ print ("Dispersion correct ", spec)
+ if (i < 2) {
+ print ("Compile standard star fluxes for ", spec)
+ stdfile = yes
+ newsens = yes
+ }
+ }
+ print (spec, >> done)
+ }
+ fd1 = ""
+
+ sections ("sens.????" // imtype, option="nolist")
+ if (newsens || sections.nimages == 0) {
+ if (!stdfile) {
+ print ("No standard stars")
+ fluxcal1 = no
+ } else {
+ print ("Compute sensitivity function")
+ newsens = yes
+ }
+ }
+
+ if (fluxcal1 && newsens)
+ print ("Flux and/or extinction calibrate standard stars")
+ }
+ }
+
+ reextract = redo || (update && newdisp)
+ fd1 = objects
+ while (fscan (fd1, spec) != EOF) {
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+
+ specms = spec // mstype
+
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (reextract || !access (specms))
+ extract = yes
+ else {
+ hselect (specms, "dispcor", yes, > temp)
+ hselect (specms, "ex-flag", yes, >> temp)
+ hselect (specms, "ca-flag", yes, >> temp)
+ fd2 = temp
+ extract = update
+ if (fscan (fd2, str1) == 1)
+ extract = update && newdisp
+ else
+ disp = yes
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ if (extract)
+ print ("Extract object spectrum ", spec)
+ if (disp)
+ print ("Dispersion correct ", spec)
+ if (ext)
+ print ("Extinction correct ", spec)
+ if (flux)
+ print ("Flux calibrate ", spec)
+ }
+ fd1 = ""
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/doslit/slistonly.par b/noao/imred/src/doslit/slistonly.par
new file mode 100644
index 00000000..08d4e016
--- /dev/null
+++ b/noao/imred/src/doslit/slistonly.par
@@ -0,0 +1,12 @@
+objects,f,a,,,,
+arcs1,f,a,,,,
+standards,f,a,,,,
+dispcor,b,a,,,,
+extcor,b,a,,,,
+fluxcal,b,a,,,,
+redo,b,a,,,,
+update,b,a,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/doslit/sparams.par b/noao/imred/src/doslit/sparams.par
new file mode 100644
index 00000000..1cc001d8
--- /dev/null
+++ b/noao/imred/src/doslit/sparams.par
@@ -0,0 +1,65 @@
+line,i,h,INDEF,,,"Default dispersion line"
+nsum,i,h,10,,,"Number of dispersion lines to sum or median"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE PARAMETERS -- "
+lower,r,h,-3.,,,Lower aperture limit relative to center
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,1,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none",,,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,"Upper rejection threshold
+
+-- BACKGROUND SUBTRACTION PARAMETERS --"
+background,s,h,"fit","none|average|median|minimum|fit",,Background to subtract
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,"Background function"
+b_order,i,h,1,,,"Background function order"
+b_sample,s,h,"-10:-6,6:10",,,"Background sample regions"
+b_naverage,i,h,-100,,,"Background average or median"
+b_niterate,i,h,1,0,,"Background rejection iterations"
+b_low,r,h,3.,0.,,"Background lower rejection sigma"
+b_high,r,h,3.,0.,,"Background upper rejection sigma
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelists$idhenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"spline3","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,1,1,,"Order of dispersion function"
+i_niterate,i,h,1,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SENSITIVITY CALIBRATION PARAMETERS --"
+s_function,s,h,"spline3","chebyshev|legendre|spline3|spline1",,"Fitting function"
+s_order,i,h,1,1,,"Order of sensitivity function"
+fnu,b,h,no,,,"Create spectra having units of FNU?"
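The section banners above are embedded in the prompt strings of the preceding parameters, which is how grouped headings appear when this pset is edited with eparam. From the CL the values are read and set as sparams.<name>; a short example of tuning a few of them before a reduction (the values are only illustrative):

    sparams.coordlist = "linelists$idhenear.dat"   # arc line list
    sparams.i_order = 3                            # dispersion function order
    sparams.s_order = 6                            # sensitivity function order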
diff --git a/noao/imred/src/doslit/sproc.cl b/noao/imred/src/doslit/sproc.cl
new file mode 100644
index 00000000..0b13d71a
--- /dev/null
+++ b/noao/imred/src/doslit/sproc.cl
@@ -0,0 +1,404 @@
+# SPROC -- Process spectra from 2D to 1D
+#
+# This program combines all the operations of extraction, dispersion
+# correction, extinction correction, and flux calibration in as simple
+# and noninteractive a manner as possible.
+
+procedure sproc (objects, arcs1, arctable, standards, crval, cdelt, dispcor,
+ extcor, fluxcal, resize, clean, splot, redo, update, quicklook, batch,
+ listonly)
+
+file objects = "" {prompt="List of object spectra"}
+
+file arcs1 = "" {prompt="List of arc spectra"}
+file arctable = "" {prompt="Arc assignment table (optional)"}
+file standards = "" {prompt="List of standard star spectra\n"}
+
+string crval = "INDEF" {prompt="Approximate wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion\n"}
+
+bool dispcor = yes {prompt="Dispersion correct spectra?"}
+bool extcor = no {prompt="Extinction correct spectra?"}
+bool fluxcal = no {prompt="Flux calibrate spectra?"}
+bool resize = no {prompt="Automatically resize apertures?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool splot = no {prompt="Plot the final spectrum?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = no {prompt="Update spectra if cal data changes?"}
+bool quicklook = no {prompt="Minimally interactive quick-look?"}
+bool batch = no {prompt="Extract objects in batch?"}
+bool listonly = no {prompt="List steps but don't process?\n"}
+
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+
+string anssplot {prompt="Splot spectrum?", mode="q",
+ enum="no|yes|NO|YES"}
+bool newdisp, newsens, newarcs
+bool fluxcal1, splot1, splot2
+bool dobatch
+
+struct *fd1, *fd2, *fd3
+
+begin
+ file arcref1, spec, arc
+ file arcref1ms, specms, arcms
+ file temp, done
+ string imtype, mstype
+ string str1, str2, str3, str4, arcrefs, log1, log2
+ bool reextract, extract, disp, ext, flux, log
+ int i, j, n, nspec
+ struct err
+
+ # Call a separate task to do the listing to minimize the size of
+	# this script and improve its readability.
+
+ dobatch = no
+ if (listonly) {
+ slistonly (objects, arcs1, standards, dispcor, extcor,
+ fluxcal, redo, update)
+ bye
+ }
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ # Temporary files used repeatedly in this script. Under some
+ # abort circumstances these files may be left behind.
+
+ temp = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ # Get query parameters
+ if (arctable == "")
+ arcrefs = "@"//arcs1
+ else
+ arcrefs = arctable
+ arcref1 = ""
+
+	# Rather than always having switches on the logfile and verbose flags,
+ # we use TEE and set a file to "dev$null" if output is not desired.
+ # We must check for the null string to signify no logfile.
+
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # If the update switch is used changes in the calibration data can
+ # cause images to be reprocessed (if they are in the object list).
+ # Possible changes are in the dispersion solution and sensitivity
+ # function. The newarcs flag is used to only go through the arc
+ # image headers once setting the reference spectrum, airmass, and UT.
+
+ newdisp = no
+ newsens = no
+ newarcs = yes
+ fluxcal1 = fluxcal
+
+ # Check if there are aperture definitions in the database and
+ # define them if needed. This is interactive.
+
+ print ("Define object apertures", >> log1)
+ if (resize)
+ apslitproc.ansresize = "YES"
+ else
+ apslitproc.ansresize = "NO"
+ if (quicklook) {
+ apslitproc.ansedit = "NO"
+ apslitproc.ansfittrace = "NO"
+ } else {
+ apslitproc.ansedit = "yes"
+ apslitproc.ansfittrace = "yes"
+ }
+ if (redo) {
+ delete (database//"/ap//@"//objects, verify=no, >& "dev$null")
+ apslitproc ("@"//objects, references="", ansfind="YES",
+ ansrecenter="NO", anstrace="YES", ansextract="NO")
+ } else
+ apslitproc ("@"//objects, references="NEW", ansfind="YES",
+ ansrecenter="NO", anstrace="YES", ansextract="NO")
+ if (dispcor && fluxcal1) {
+ if (redo) {
+ delete (database//"/ap//@"//standards, verify=no, >& "dev$null")
+ apslitproc ("@"//standards, references="", ansfind="YES",
+ ansrecenter="NO", anstrace="YES", ansextract="NO")
+ } else
+ apslitproc ("@"//standards, references="NEW", ansfind="YES",
+ ansrecenter="NO", anstrace="YES", ansextract="NO")
+ }
+
+ # Initialize APSLITPROC.
+ apslitproc.saturation = INDEF
+ apslitproc.references = ""
+ apslitproc.profiles = ""
+ apslitproc.ansrecenter = "NO"
+ apslitproc.ansresize = "NO"
+ apslitproc.ansedit = "NO"
+ apslitproc.anstrace = "NO"
+ apslitproc.ansfittrace = "NO"
+ apslitproc.ansextract = "YES"
+ apslitproc.ansreview = "NO"
+
+ # Initialize REIDENTIFY
+ if (quicklook)
+ reidentify.answer = "NO"
+ else
+ reidentify.answer = "yes"
+
+ if (splot && !quicklook) {
+ splot1 = yes
+ splot2 = yes
+ } else {
+ splot1 = no
+ splot2 = no
+ }
+
+ # If not dispersion correcting we can go directly to extracting
+ # the object spectra. The reference arcs are the first on
+ # the arc lists. The processing of the reference arcs is done
+ # by the task SARCREFS.
+
+ if (dispcor) {
+ fd1 = arcs1
+ fd2 = objects
+ if (fscan (fd1, arcref1) == EOF)
+ error (1, "No reference arcs")
+ fd1 = ""
+ if (fscan (fd2, spec) == EOF)
+ error (1, "No object spectra for arc reference")
+ fd2 = ""
+ i = strlen (arcref1)
+ if (!access (arcref1 // imtype)) {
+ printf ("Arc reference spectrum not found - %s%s\n",
+ arcref1, imtype) | scan (err)
+ error (1, err // "\nCheck setting of imtype")
+ }
+ arcref1ms = arcref1 // mstype
+ if (redo && access (arcref1ms))
+ imdelete (arcref1ms, verify=no)
+ apslitproc.references = spec
+ sarcrefs (arcref1, crval, cdelt, done, log1, log2)
+ apslitproc.references = ""
+
+ if (fluxcal1)
+ sfluxcal (standards, arcs1, arcref1, arcrefs,
+ redo, update, extcor, done, log1, log2)
+ }
+
+ # Now we are ready to process the object spectra.
+
+ reextract = redo || (update && newdisp)
+ fd1 = objects
+ while (fscan (fd1, spec) != EOF) {
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+
+	# Check if previously done; i.e., an arc.
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ print ("Object spectrum not found - " // spec // imtype) |
+ tee (log1)
+ print ("Check setting of imtype")
+ next
+ }
+ specms = spec // mstype
+
+ # Determine required operations from the flags and image header.
+ extract = no
+ disp = no
+ ext = no
+ flux = no
+ if (reextract || !access (specms))
+ extract = yes
+ else {
+ hselect (specms, "dispcor", yes, > temp)
+ hselect (specms, "ex-flag", yes, >> temp)
+ hselect (specms, "ca-flag", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1) == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && !extcor
+ else
+ ext = extcor
+ if (fscan (fd2, str1) == 1)
+ extract = update && (!fluxcal1 || newsens)
+ else
+ flux = fluxcal1
+ fd2 = ""; delete (temp, verify=no)
+ }
+
+ if (extract) {
+ disp = dispcor
+ ext = extcor
+ flux = fluxcal1
+ }
+
+ # If fully processed go to the next object.
+ if (!extract && !disp && !extcor && !flux)
+ next
+
+	# If not interactive and the batch flag is set, submit the rest to batch.
+ if (batch && !splot1 && !splot2) {
+ fd1 = ""
+ flprcache
+ sbatch.objects = objects
+ sbatch.datamax = datamax
+ sbatch.arcs1 = arcs1
+ sbatch.arcref1 = arcref1
+ sbatch.arcrefs = arcrefs
+ sbatch.done = done
+ sbatch.logfile = log1
+ sbatch.redo = reextract
+ sbatch.update = update
+ sbatch.dispcor = dispcor
+ sbatch.fluxcal1 = fluxcal1
+ sbatch.extcor = extcor
+ sbatch.newdisp = newdisp
+ sbatch.newsens = newsens
+ sbatch.newarcs = newarcs
+ dobatch = yes
+ return
+ }
+
+ # Process the spectrum in foreground.
+ if (extract) {
+ if (access (specms))
+ imdelete (specms, verify=no)
+ print ("Extract object spectrum ", spec) | tee (log1)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp)
+ fd2 = temp
+ if (fscan (fd2, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd2, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ apslitproc (spec, saturation=datamax)
+ }
+
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ fd2 = arcs1
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp)
+ fd3 = temp
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=sparams.select, sort=sparams.sort,
+ group=sparams.group, time=sparams.time,
+ timewrap=sparams.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ sdoarcs (spec, arcref1, reextract, log1, no)
+
+ hselect (specms, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1)
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ else {
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specms, "", linearize=sparams.linearize,
+ database=database, table=arcref1//mstype,
+ w1=INDEF, w2=INDEF, dw=INDEF, nw=INDEF,
+ log=sparams.log, flux=sparams.flux, samedisp=no,
+ global=no, ignoreaps=yes, confirm=no, listonly=no,
+ verbose=verbose, logfile=logfile)
+ flpr
+ hedit (specms, "dispcor", 0, add=yes, verify=no,
+ show=no, update=yes)
+ disp = no
+ }
+ }
+
+ if (!disp) {
+ if (ext)
+ print ("Extinction correct ", spec) | tee (log1)
+ if (flux)
+ print ("Flux calibrate ", spec) | tee (log1)
+ if (flux || ext)
+ calibrate (specms, "", extinct=extcor, flux=fluxcal1,
+ extinction=extinction, observatory=observatory,
+ ignoreaps=yes, sensitivity="sens", fnu=sparams.fnu) |
+ tee (log1, > log2)
+ }
+ if (extract || disp || ext || flux) {
+ if (splot1) {
+ print (specms, ":")
+ str1 = anssplot
+ if (str1 == "NO" || str1 == "YES")
+ splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ splot2 = no
+ else
+ splot2 = yes
+ }
+ if (splot2)
+ splot (specms)
+ else if (splot && quicklook)
+ bplot (specms, apertures="1", band=1, graphics="stdgraph",
+ cursor="onedspec$gcurval")
+ }
+ print (spec, >> done)
+ }
+ fd1 = ""
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/doslit/sproc.par b/noao/imred/src/doslit/sproc.par
new file mode 100644
index 00000000..536e05e4
--- /dev/null
+++ b/noao/imred/src/doslit/sproc.par
@@ -0,0 +1,33 @@
+objects,f,a,"",,,"List of object spectra"
+arcs1,f,a,"",,,"List of arc spectra"
+arctable,f,a,"",,,"Arc assignment table (optional)"
+standards,f,a,"",,,"List of standard star spectra
+"
+crval,s,a,"INDEF",,,"Approximate wavelength"
+cdelt,s,a,"INDEF",,,"Approximate dispersion
+"
+dispcor,b,a,yes,,,"Dispersion correct spectra?"
+extcor,b,a,no,,,"Extinction correct spectra?"
+fluxcal,b,a,no,,,"Flux calibrate spectra?"
+resize,b,a,no,,,"Automatically resize apertures?"
+clean,b,a,no,,,"Detect and replace bad pixels?"
+splot,b,a,no,,,"Plot the final spectrum?"
+redo,b,a,no,,,"Redo operations if previously done?"
+update,b,a,no,,,"Update spectra if cal data changes?"
+quicklook,b,a,no,,,"Minimally interactive quick-look?"
+batch,b,a,no,,,"Extract objects in batch?"
+listonly,b,a,no,,,"List steps but don\'t process?
+"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+anssplot,s,q,,no|yes|NO|YES,,"Splot spectrum?"
+newdisp,b,h,,,,
+newsens,b,h,,,,
+newarcs,b,h,,,,
+fluxcal1,b,h,,,,
+splot1,b,h,,,,
+splot2,b,h,,,,
+dobatch,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/Revisions b/noao/imred/src/fibers/Revisions
new file mode 100644
index 00000000..a21892a2
--- /dev/null
+++ b/noao/imred/src/fibers/Revisions
@@ -0,0 +1,223 @@
+.help revisions Nov90 noao.imred.src.fibers
+.nf
+
+srcfibers$proc.cl
+srcfibers$listonly.cl
+ For reasons that are now lost in the mists of time, the shortening of
+ filenames can cause problems. This has been removed.
+ (2/11/11, Valdes) (See bug log 577)
+
+=======
+V2.15.1
+=======
+
+=======
+V2.12.3
+=======
+
+srcfibers$batch.cl
+srcfibers$proc.cl
+srcfibers$fibresponse.cl
+ Error messages now hint to check imtype setting.
+ (4/15/05, Valdes)
+
+
+=======
+V2.12.2
+=======
+
+srcfibers$proc.cl
+srcfibers$batch.cl
+	Because dispcor.samedisp=yes when doing the objects, any dispersion change
+ applied to the reference arc was not being applied to the objects.
+ (5/21/03, Valdes)
+
+=======
+V2.12.1
+=======
+
+srcfibers$skysub.cl
+ Added a flpr to workaround a FITS kernel caching problem.
+ (6/21/02, Valdes)
+
+=====
+V2.12
+=====
+
+srcfibers$proc.cl
+ Modified code to eliminate goto. This is for use with pyraf.
+ (11/21/00, Valdes)
+
+========
+V2.11.3a
+========
+
+srcfibers$arcrefs.cl
+ The test for crval/cdelt both INDEF was not working. (9/24/99, Valdes)
+
+srcfibers$mkfibers.cl
+ Had to define a dummy variable 'ar' to get rid of ambiguous parameter
+ error. (9/14/99, Valdes)
+
+=======
+V2.11.2
+=======
+
+doslit$arcrefs.cl
+ The test on CRVAL and CDELT would not work with header keywords.
+ (9/22/98, Valdes)
+
+srcfibers$arcrefs.cl
+srcfibers$batch.cl
+srcfibers$doarcs.cl
+srcfibers$fibresponse.cl
+srcfibers$getspec.cl
+srcfibers$listonly.cl
+srcfibers$mkfibers.cl
+srcfibers$skysub.cl
+srcfibers$proc.cl
+ Any additional qualifiers in the imtype string are stripped.
+ (8/14/97, Valdes)
+
+=========
+V2.11BETA
+=========
+
+arcrefs.cl
+ If both crval and cdelt are INDEF then the autoidentify option is
+ not used. (5/2/97, Valdes)
+
+apscript.par
+ Made changes for the new aperture selection option. (9/5/96, Valdes)
+
+skysub.cl
+ Added package name to calls of "match", "sort", "uniq" to avoid
+ possible conflicts. (5/6/96, Valdes)
+
+proc.cl
+proc.par
+arcrefs.cl
+arcrefs.par
+params.par
+ Modified to use autoidentify. (4/5/96, Valdes)
+
+srcfibers$proc.cl
+srcfibers$batch.cl
+ When using subapertures the subapertures were not wavelength
+ calibrated correctly because the reference arc spectrum which
+ provides the wavelength scale does not contain the subapertures
+ and DISPCOR does not use samedisp=yes. Changed the value of
+ samedisp to yes. (10/27/95, Valdes)
+
+srcfibers$mkfibers.cl
+ The calls to mk1dspec did not specify the header file which would
+ then default to the task parameter which might be invalid.
+ (10/17/95, Valdes)
+
+srcfibers$proc.cl
+	Needed to initialize arcref2 in order to work in batch when no dispersion
+ correction is requested. (10/16/95, Valdes)
+
+srcfibers$mkfibers.cl
+ The calls to MK1DSPEC were changed in accordance with parameter changes
+ to that task.
+ (7/28/95, Valdes)
+
+srcfibers$proc.cl
+ Any image extension is stripped from the apidtable parameter.
+ (7/24/95, Valdes)
+
+srcfibers$doalign.cl +
+srcfibers$doalign.par +
+srcfibers$proc.cl
+srcfibers$batch.cl
+ Added the sky alignment option. (7/19/95, Valdes)
+
+srcfibers$proc.cl
+srcfibers$batch.cl
+srcfibers$arcrefs.cl
+ The wrong range syntax is used with subapertures in SARITH/SCOPY.
+ Changed all -999 to 1-999. (6/14/95, Valdes)
+
+=======
+V2.10.4
+=======
+
+srcfibers$proc.cl
+srcfibers$fibresponse.cl
+ 1. Need to add check for the throughput being a file rather than
+ an image when checking whether to apply a scattered light
+ correction.
+ 2. Removed a warning message when using a throughput file containing
+ fiber values which are not in the flat field (for example, if a fiber
+ is broken).
+ (1/25/95, Valdes)
+
+srcfibers$params.par
+srcfibers$doarcs.cl
+srcfibers$arcrefs.cl
+ Added "threshold" as a user parameter. (1/16/95, Valdes)
+
+srcfibers$response.cl -> imred$src/fibers/fibresponse.cl
+srcfibers$response.par -> imred$src/fibers/fibresponse.par
+srcfibers$proc.par
+ Changed the fiber response task name from "response" to "fibresponse"
+ to avoid conflict with longslit.response. (12/31/94, Valdes)
+
+srcfibers$proc.cl
+ The check for arcs2 = " " was done incorrectly. (9/12/94, Valdes)
+
+srcfibers$proc.cl
+srcfibers$batch.cl
+srcfibers$doarcs.cl
+ A check was needed on whether the arc spectra were extracted during
+ the current execution to avoid reextracting the same arc multiple
+ times during a "redo" or the initial time. In both those cases
+	the reextract flag is set, causing spectra to be reextracted if they
+ exist. Previously doarcs could not tell if the arc exists because
+ of a previous run or during the current run with the same arc
+ used multiple times. (5/18/94, Valdes)
+
+===========
+V2.10.3beta
+===========
+
+srcfibers$skysub.cl
+imred$specred/doc/skysub.hlp
+ 1. The combine option was being ignored.
+	2.  The help did not mention the reject option and was otherwise
+ out of date.
+ (3/31/94, Valdes)
+
+srcfibers$proc.cl
+ The scattered light correction is now queried for all images and may
+ be turned off with NO. (9/1/93, Valdes)
+
+===========
+V2.10.3beta
+===========
+
+srcfibers$arcrefs.cl
+	Modified to use shift=INDEF in REIDENTIFY.
+ (2/18/93, Valdes)
+
+srcfibers$*.cl
+ Modified to use the "imtype" environment variable to define the
+ extension type.
+ (2/18/93, Valdes)
+
+=======
+V2.10.2
+=======
+
+srcfibers$proc.cl
+ The aperture reference is redone when a new aperture ID file is seen.
+ (1/11/93, Valdes)
+
+srcfibers$*
+ Updated for new ONEDSPEC. (7/24/91, Valdes)
+
+srcfibers$*
+ All occurrences of latitude replaced by observatory as required by
+ recent changes to setairmass, etc. (11/20/90, Valdes)
+.endhelp
diff --git a/noao/imred/src/fibers/apscript.par b/noao/imred/src/fibers/apscript.par
new file mode 100644
index 00000000..e52248de
--- /dev/null
+++ b/noao/imred/src/fibers/apscript.par
@@ -0,0 +1,145 @@
+# APSCRIPT
+
+input,s,a,,,,List of input images
+output,s,h,"",,,List of output spectra
+apertures,s,h,"",,,Apertures
+scatter,s,h,"",,,List of scattered light images (optional)
+references,s,h,"",,,List of aperture reference images
+profiles,s,h,"",,,"List of aperture profile images
+"
+interactive,b,h,yes,,,Run task interactively?
+find,b,h,yes,,,Find apertures?
+recenter,b,h,yes,,,Recenter apertures?
+resize,b,h,yes,,,Resize apertures?
+edit,b,h,yes,,,Edit apertures?
+trace,b,h,yes,,,Trace apertures?
+fittrace,b,h,yes,,,Fit the traced points interactively?
+extract,b,h,yes,,,Extract spectra?
+review,b,h,yes,,,Review extractions?
+subtract,b,h,yes,,,Subtract scattered light?
+smooth,b,h,yes,,,Smooth scattered light along the dispersion?
+fitscatter,b,h,yes,,,Fit scattered light interactively?
+fitsmooth,b,h,yes,,,"Smooth the scattered light interactively?
+"
+line,i,h,)params.line,,,>params.line
+nsum,i,h,)params.nsum,,,>params.nsum
+buffer,r,h,)params.buffer,,,">params.buffer
+
+# OUTPUT PARAMETERS
+"
+format,s,h,"multispec",,,Extracted spectra format
+extras,b,h,)params.extras,,,"Extract sky, sigma, etc.?"
+dbwrite,s,h,"YES",,,Write to database?
+initialize,b,h,no,,,Initialize answers?
+verbose,b,h,no,,,"Verbose output?
+
+# DEFAULT APERTURE PARAMETERS
+"
+lower,r,h,)params.lower,,,>params.lower
+upper,r,h,)params.upper,,,>params.upper
+apidtable,s,h,"",,,"Aperture ID table (optional)
+
+# DEFAULT BACKGROUND PARAMETERS
+"
+b_function,s,h,"legendre","chebyshev|legendre|spline1|spline3",,Background function
+b_order,i,h,1,,,Background function order
+b_sample,s,h,"-10:-6,6:10",,,Background sample regions
+b_naverage,i,h,-3,,,Background average or median
+b_niterate,i,h,0,0,,Background rejection iterations
+b_low_reject,r,h,3.,0.,,Background lower rejection sigma
+b_high_reject,r,h,3.,0.,,Background upper rejection sigma
+b_grow,r,h,0.,0.,,"Background rejection growing radius
+
+# APERTURE CENTERING PARAMETERS
+"
+width,r,h,5.,0.,,Profile centering width
+radius,r,h,10.,,,Profile centering radius
+threshold,r,h,10.,0.,,"Detection threshold for profile centering
+
+# AUTOMATIC FINDING AND ORDERING PARAMETERS
+"
+nfind,i,h,,,,Number of apertures to be found automatically
+minsep,r,h,1.,,,Minimum separation between spectra
+maxsep,r,h,100000.,,,Maximum separation between spectra
+order,s,h,)params.order,,,"Order of apertures
+
+# RECENTERING PARAMETERS
+"
+aprecenter,s,h,"",,,Apertures to use in recentering
+npeaks,r,h,0.5,,,Select brightest peaks
+shift,b,h,yes,,,"Use average shift instead of recentering?
+
+# RESIZING PARAMETERS
+"
+llimit,r,h,INDEF,,,Lower aperture limit relative to center
+ulimit,r,h,INDEF,,,Upper aperture limit relative to center
+ylevel,r,h,)params.ylevel,,,>params.ylevel
+peak,b,h,yes,,,Is ylevel a fraction of the peak?
+bkg,b,h,yes,,,"Subtract background in automatic width?"
+r_grow,r,h,0.,,,Grow limits by this factor
+avglimits,b,h,no,,,"Average limits over all apertures?
+
+# EDITING PARAMETERS
+"
+e_output,s,q,,,,Output spectra rootname
+e_profiles,s,q,,,,"Profile reference image
+
+# TRACING PARAMETERS
+"
+t_nsum,i,h,)params.nsum,,,>params.nsum
+t_step,i,h,)params.t_step,,,>params.t_step
+t_nlost,i,h,3,1,,Number of consecutive times profile is lost before quitting
+t_width,r,h,5.,0.,,Profile centering width
+t_function,s,h,)params.t_function,,,>params.t_function
+t_sample,s,h,"*",,,Trace sample regions
+t_order,i,h,)params.t_order,,,>params.t_order
+t_naverage,i,h,1,,,Trace average or median
+t_niterate,i,h,)params.t_niterate,,,>params.t_niterate
+t_low_reject,r,h,)params.t_low,,,>params.t_low
+t_high_reject,r,h,)params.t_high,,,>params.t_high
+t_grow,r,h,0.,0.,,"Trace rejection growing radius
+
+# EXTRACTION PARAMETERS
+"
+background,s,h,"none","none|average|median|minimum|fit",,Background to subtract
+skybox,i,h,1,,,Box car smoothing length for sky
+weights,s,h,)params.weights,,,>params.weights
+pfit,s,h,)params.pfit,,,>params.pfit
+clean,b,h,no,,,Detect and replace bad pixels?
+nclean,r,h,0.5,,,Maximum number of pixels to clean
+niterate,i,h,5,0,,Number of profile fitting iterations
+saturation,r,h,INDEF,,,Saturation level
+readnoise,s,h,"0.",,,Read out noise sigma (photons)
+gain,s,h,"1.",,,Photon gain (photons/data number)
+lsigma,r,h,)params.lsigma,,,>params.lsigma
+usigma,r,h,)params.usigma,,,>params.usigma
+polysep,r,h,0.95,0.1,0.95,Marsh algorithm polynomial spacing
+polyorder,i,h,10,1,,Marsh algorithm polynomial order
+nsubaps,i,h,1,1,,"Number of subapertures per aperture
+
+# ANSWER PARAMETERS
+"
+ansclobber,s,h,"NO",,," "
+ansclobber1,s,h,"NO",,," "
+ansdbwrite,s,h,"YES",,," "
+ansdbwrite1,s,h,"NO",,," "
+ansedit,s,h,"NO",,," "
+ansextract,s,h,"NO",,," "
+ansfind,s,h,"NO",,," "
+ansfit,s,h,"NO",,," "
+ansfitscatter,s,h,"NO",,," "
+ansfitsmooth,s,h,"NO",,," "
+ansfitspec,s,h,"NO",,," "
+ansfitspec1,s,h,"NO",,," "
+ansfittrace,s,h,"NO",,," "
+ansfittrace1,s,h,"NO",,," "
+ansflat,s,h,"NO",,," "
+ansnorm,s,h,"NO",,," "
+ansrecenter,s,h,"NO",,," "
+ansresize,s,h,"NO",,," "
+ansreview,s,h,"NO",,," "
+ansreview1,s,h,"NO",,," "
+ansscat,s,h,"NO",,," "
+ansskyextract,s,h,"NO",,," "
+anssmooth,s,h,"NO",,," "
+anstrace,s,h,"NO",,," "
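Most of the defaults above of the form )params.<name> use IRAF parameter indirection: APSCRIPT takes those values from the corresponding parameters of the PARAMS pset, so editing the pset in one place propagates to the extraction script. A minimal sketch of the behavior, assuming the package defining both tasks is loaded:

    # The ")params.nsum" default means apscript.nsum resolves to params.nsum.
    params.nsum = 20
    print (apscript.nsum)        # prints 20, resolved through the indirection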
diff --git a/noao/imred/src/fibers/arcrefs.cl b/noao/imred/src/fibers/arcrefs.cl
new file mode 100644
index 00000000..d0d6fcf1
--- /dev/null
+++ b/noao/imred/src/fibers/arcrefs.cl
@@ -0,0 +1,326 @@
+# ARCREFS -- Determine dispersion relation for reference arcs.
+
+procedure arcrefs (arcref1, arcref2, extn, arcreplace, apidtable, response,
+ crval, cdelt, done, log1, log2)
+
+file arcref1
+file arcref2
+string extn
+file arcreplace
+file apidtable
+file response
+string crval = "INDEF"
+string cdelt = "INDEF"
+file done
+file log1
+file log2
+
+struct *fd
+
+begin
+ string imtype
+ string arcref, arcrefms, arc, arcms
+ string temp, temp1, temp2, str1, str2
+ int i, n, nspec, dc
+ real w
+ bool log
+ struct str3
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ n = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+
+ # Extract the primary arc reference spectrum. Extract and replace
+ # any replacement arcs defined in the arcreplace file. Determine the
+ # dispersion function with IDENTIFY/REIDENTIFY. Set the wavelength
+ # parameters with MSDISPCOR.
+
+ arcref = arcref1
+ arcrefms = arcref1 // extn
+ if (!access (arcrefms//imtype)) {
+ print ("Extract arc reference image ", arcref) | tee (log1)
+ apscript (arcref, output=arcrefms, ansrecenter="NO",
+ ansresize="NO", ansedit="NO", anstrace="NO",
+ nsubaps=params.nsubaps, background="none", clean=no,
+ weights="none")
+ sapertures (arcrefms, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF, w1=INDEF,
+ dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (arcrefms, "/", response, arcrefms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (arcrefms, "/", temp, arcrefms, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=yes, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+
+ if (arcreplace != "") {
+ if (!access (arcreplace))
+ error (1, "Can't access file "//arcreplace)
+ fd = arcreplace
+ while (fscan (fd, arc, str1, str2) != EOF) {
+ i = strlen (arc)
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ if (arc != arcref)
+ next
+ arc = str1
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ arcms = arc // extn
+
+ if (access (arcms//imtype))
+ imdelete (arcms, verify=no)
+
+ print ("Extract arc reference image ", arc) | tee (log1)
+ apscript (arc, output=arcms, ansrecenter="NO",
+ ansresize="NO", ansedit="NO", anstrace="NO",
+ nsubaps=params.nsubaps, background="none", clean=no,
+ weights="none")
+ sapertures (arcms, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF,
+ w1=INDEF, dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF,
+ title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (arcms, "/", response, arcms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=no,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (arcms, "/", temp, arcms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ scopy (arcms, arcrefms, w1=INDEF, w2=INDEF, apertures=str2,
+ bands="", beams="", apmodulus=1000, offset=0,
+ format="multispec", clobber=yes, merge=yes, renumber=no,
+ verbose=yes) | tee (log1, > log2)
+ imdelete (arcms, verify=no)
+ }
+ fd = ""
+ }
+ }
+
+ # Get the dispersion parameters from the header. These are
+ # used for all further spectra and also flag whether this
+ # spectrum has been processed. If the parameters are missing
+ # the spectrum needs to have the dispersion function and
+ # wavelength scale determined. The HEDIT is needed because
+ # in some cases the user may exit IDENTIFY without updating
+ # the database (if the image was deleted but the database
+ # entry was not).
+
+ hselect (arcrefms, "dclog1", yes) | scan (str1)
+ if (nscan () != 1) {
+ print ("Determine dispersion solution for ", arcref) | tee (log1)
+ #delete (database//"/id"//arcrefms//"*", verify=no)
+ printf ("%s %s\n", crval, cdelt) | scan (str3)
+ if (str3 == "INDEF INDEF")
+ identify (arcrefms, section="middle line", database=database,
+ coordlist=params.coordlist, nsum=1, match=params.match,
+ maxfeatures=50, zwidth=100., ftype="emission",
+ fwidth=params.fwidth, cradius=params.cradius,
+ threshold=params.threshold, minsep=2.,
+ function=params.i_function, order=params.i_order,
+ sample="*", niterate=params.i_niterate,
+ low_reject=params.i_low, high_reject=params.i_high,
+ grow=0., autowrite=yes)
+ else
+ autoidentify (arcrefms, crval, cdelt,
+ coordlist=params.coordlist,
+ interactive="YES", section="middle line", nsum="1",
+ ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., match=params.match, function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., dbwrite="YES",
+ overwrite=yes, database="database", verbose=yes,
+ logfile=logfile, plotfile=plotfile,
+ reflist="", refspec="", crpix="INDEF", cddir="unknown",
+ crsearch="-0.5", cdsearch="INDEF", aidpars="")
+
+ hedit (arcrefms, "refspec1", arcrefms, add=yes,
+ show=no, verify=no)
+
+ nspec = 1
+ hselect (arcrefms, "naxis2", yes) | scan (nspec)
+ if (nspec > 1)
+ reidentify (arcrefms, "", interactive=yes,
+ section="middle line", shift=0., step=1, nsum=1,
+ cradius=params.cradius, threshold=params.threshold,
+ nlost=100, refit=params.refit, trace=no, override=yes,
+ addfeatures=params.addfeatures, newaps=no,
+ coordlist=params.coordlist, match=params.match,
+ maxfeatures=50, minsep=2., database=database,
+ plotfile=plotfile, logfiles=logfile, verbose=yes)
+
+ # Dispersion correct the reference arc. This step is required to
+ # use the confirm option of MSDISPCOR to set the wavelength scale
+ # for all further spectra. Set the newdisp flag.
+
+ print ("Dispersion correct ", arcref) | tee (log1)
+ dispcor (arcrefms, "", linearize=params.linearize,
+ database=database, table="", w1=INDEF, w2=INDEF, dw=INDEF,
+ nw=INDEF, log=params.log, flux=params.flux, samedisp=yes,
+ global=no, ignoreaps=no, confirm=yes, listonly=no, verbose=no,
+ logfile=logfile)
+ if (params.nsubaps > 1) {
+ imrename (arcrefms, temp, verbose=no)
+ scopy (temp, arcrefms, w1=INDEF, w2=INDEF, apertures="1-999",
+ bands="", beams="", apmodulus=0, offset=0,
+ format="multispec", clobber=no, merge=no, renumber=no,
+ verbose=no)
+ blkavg (temp, temp, 1, params.nsubaps, option="sum")
+ imcopy (temp, arcrefms//"[*,*]", verbose=no)
+ imdelete (temp, verify=no)
+ }
+ proc.newdisp = yes
+ }
+ if (extn == ".ms")
+ print (arcref, >> done)
+
+ # Extract the alternate shift arc reference. Transfer the dispersion
+ # function from the primary arc reference and then identify shift
+ # lines.
+
+ if (arcref2 != "") {
+ arcref = arcref2
+ arcrefms = arcref2 // extn
+ if (proc.newdisp && access (arcrefms//imtype))
+ imdelete (arcrefms, verify=no)
+ if (!access (arcrefms)) {
+ print ("Extract arc reference image ", arcref) | tee (log1)
+ apscript (arcref, output=arcrefms, ansrecenter="NO",
+ ansresize="NO", ansedit="NO", anstrace="NO",
+ nsubaps=params.nsubaps, background="none", clean=no,
+ weights="none")
+ sapertures (arcrefms, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF, w1=INDEF,
+ dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (arcrefms, "/", response, arcrefms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=no,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (arcrefms, "/", temp, arcrefms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ }
+
+ hselect (arcrefms, "dclog1", yes) | scan (str1)
+ if (nscan () != 1) {
+ print ("Determine dispersion solution for ", arcref) |
+ tee (log1)
+ #delete (database//"/id"//arcrefms//"*", verify=no)
+
+ print (":r ", arcref1//extn, "\na\nd") |
+ identify (arcrefms, section="middle line", database=database,
+ coordlist="", nsum=1, match=params.match, maxfeatures=50,
+ zwidth=100., ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., autowrite=yes,
+ cursor="STDIN", >G "dev$null", >& "dev$null")
+ identify (arcrefms, section="middle line", database=database,
+ coordlist="", nsum=1, match=params.match, maxfeatures=50,
+ zwidth=100., ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., autowrite=yes)
+ print (":feat ", temp) |
+ identify (arcrefms, section="middle line", database=database,
+ coordlist="", nsum=1, match=params.match, maxfeatures=50,
+ zwidth=100., ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., autowrite=yes,
+ cursor="STDIN", >G "dev$null", >& "dev$null")
+ print (":r ", arcref1//extn, "\na\nd", > temp1)
+ fd = temp
+ while (fscan (fd, i, w, w, w) != EOF) {
+ if (nscan() == 4) {
+ print (w, 1, 1, "m", >> temp1)
+ print (w, >> temp2)
+ }
+ }
+ print ("g", >> temp1)
+ fd = ""; delete (temp, verify=no)
+
+ nspec = 1
+ hselect (arcrefms, "naxis2", yes) | scan (nspec)
+ for (i = 1; i <= nspec; i+=1)
+ identify (arcrefms, section="line "//i,
+ database=database, coordlist="", nsum=1,
+ match=params.match, maxfeatures=50, zwidth=100.,
+ ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate,
+ low_reject=params.i_low, high_reject=params.i_high,
+ grow=0., autowrite=yes, cursor=temp1, < temp2,
+ >G "dev$null", >>& temp)
+ delete (temp1, verify=no); delete (temp2, verify=no)
+ system.match ("Coordinate shift", temp, stop=no, print_file_n=yes,
+ metacharacte=yes) | tee (log1, > log2)
+ delete (temp, verify=no)
+
+ dispcor (arcrefms, "", linearize=params.linearize,
+ database=database, table="", w1=INDEF, w2=INDEF,
+ dw=INDEF, nw=INDEF, log=params.log, flux=params.flux,
+ samedisp=yes, global=no, ignoreaps=no, confirm=no,
+ listonly=no, verbose=yes, logfile=logfile, > log2)
+ if (params.nsubaps > 1) {
+ imrename (arcrefms, temp, verbose=no)
+ scopy (temp, arcrefms, w1=INDEF, w2=INDEF, apertures="1-999",
+ bands="", beams="", apmodulus=0, offset=0,
+ format="multispec", clobber=no, merge=no, renumber=no,
+ verbose=no)
+ blkavg (temp, temp, 1, params.nsubaps, option="sum")
+ imcopy (temp, arcrefms//"[*,*]", verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ if (extn == ".ms")
+ print (arcref, >> done)
+ }
+end
diff --git a/noao/imred/src/fibers/arcrefs.par b/noao/imred/src/fibers/arcrefs.par
new file mode 100644
index 00000000..56f69145
--- /dev/null
+++ b/noao/imred/src/fibers/arcrefs.par
@@ -0,0 +1,13 @@
+arcref1,f,a,"",,,
+arcref2,f,a,"",,,
+extn,s,a,,,,
+arcreplace,f,a,"",,,
+apidtable,f,a,"",,,
+response,f,a,"",,,
+crval,s,a,INDEF,,,
+cdelt,s,a,INDEF,,,
+done,f,a,"",,,
+log1,f,a,"",,,
+log2,f,a,"",,,
+fd,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/batch.cl b/noao/imred/src/fibers/batch.cl
new file mode 100644
index 00000000..84b0a7b7
--- /dev/null
+++ b/noao/imred/src/fibers/batch.cl
@@ -0,0 +1,297 @@
+# BATCH -- Process spectra in batch.
+# This task is called in batch mode. It only processes objects
+# not previously processed unless the update or redo flags are set.
+
+procedure batch ()
+
+string objects {prompt="Object spectra"}
+real datamax {prompt="Max data value / cosmic ray threshold"}
+
+file response {prompt="Response spectrum"}
+string arcs1 {prompt="List of arc spectra"}
+string arcs2 {prompt="List of shift arc spectra"}
+file arcref1 {prompt="Arc reference for dispersion solution"}
+file arcref2 {prompt="Arc reference for dispersion solution"}
+file arcreplace {prompt="Special aperture replacements"}
+string arcrefs {prompt="Arc references"}
+string extn {prompt="Extraction extension"}
+
+file apidtable {prompt="Aperture identifications"}
+string objaps {prompt="Object apertures"}
+string skyaps {prompt="Sky apertures"}
+string arcaps {prompt="Arc apertures"}
+string objbeams {prompt="Object beam numbers"}
+string skybeams {prompt="Sky beam numbers"}
+string arcbeams {prompt="Arc beam numbers\n"}
+
+file done {prompt="File of spectra already done"}
+file logfile {prompt="Logfile"}
+
+bool redo {prompt="Redo operations?"}
+bool update {prompt="Update spectra?"}
+bool	scattered	{prompt="Subtract scattered light?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool savearcs {prompt="Save internal arcs?"}
+bool skyalign {prompt="Align sky lines?"}
+bool skysubtract {prompt="Subtract sky?"}
+bool saveskys {prompt="Save sky spectra?\n"}
+
+bool newaps, newresp, newdisp, newarcs
+
+struct *fd1, *fd2, *fd3
+
+begin
+ file objs, temp, temp1, spec, specms, arc
+ bool reextract, extract, scat, disp, sky, log
+ string imtype, mstype, str, str2, str3, str4
+ int i
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+
+ objs = mktemp ("tmp$iraf")
+ temp = mktemp ("tmp$iraf")
+ temp1 = mktemp ("tmp$iraf")
+
+ # Initialize extraction to be noninteractive.
+ if (apscript.ansrecenter == "yes")
+ apscript.ansrecenter = "YES"
+ else if (apscript.ansrecenter == "no")
+ apscript.ansrecenter = "NO"
+ apscript.ansedit = "NO"
+ if (apscript.anstrace == "yes") {
+ apscript.anstrace = "YES"
+ apscript.ansfittrace = "NO"
+ } else if (apscript.anstrace == "no")
+ apscript.anstrace = "NO"
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+
+ getspec (objects, objs)
+ fd1 = objs
+ while (fscan (fd1, spec) != EOF) {
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ printf ("Object spectrum not found - %s%s\nCheck setting of imtype\n", spec, imtype, >> logfile)
+ next
+ }
+ specms = spec // mstype
+
+ scat = no
+ extract = no
+ disp = no
+ sky = no
+ if (scattered) {
+ if (redo && access (spec//"noscat"//imtype)) {
+ imdelete (spec, verify=no)
+ imrename (spec//"noscat", spec)
+ }
+ hselect (spec, "apscatte", yes) | scan (str)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (reextract || !access (specms) || (update && scat))
+ extract = yes
+ else {
+ hselect (specms, "dclog1", yes) | scan (str)
+ if (nscan () == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+
+ hselect (specms, "skysub", yes) | scan (str)
+ if (nscan() == 0)
+ sky = skysubtract
+ }
+
+ if (extract) {
+ disp = dispcor
+ sky = skysubtract
+ }
+
+ if (extract) {
+ if (access (specms))
+ imdelete (specms, verify=no)
+ if (scat) {
+ print ("Subtract scattered light from ", spec, >> logfile)
+ imrename (spec, spec//"noscat")
+ apscript (spec//"noscat", output=spec, ansextract="NO",
+ ansscat="YES", anssmooth="YES", verbose=no)
+ }
+ print ("Extract object spectrum ", spec, >> logfile)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp1)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ if (fscan (fd3, str, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ apscript (spec, nsubaps=params.nsubaps, saturation=datamax,
+ verbose=no)
+ sapertures (specms, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF, w1=INDEF,
+ dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (specms, "/", response, specms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=no,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (specms, "/", temp, specms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ }
+
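+	    # Dispersion correction: update the arc image headers the first
+	    # time through, assign arc references with REFSPECTRA, determine
+	    # the dispersion functions with DOARCS, and apply them with
+	    # DISPCOR (optionally aligning sky lines first).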
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ getspec (arcs1, temp)
+ fd2 = temp
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str, str2, str3) == 3) {
+ setjd (arc, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> logfile)
+ if (fscan (fd3, str, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> logfile)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "henear", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ getspec (arcs2, temp)
+ fd2 = temp
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> logfile)
+ if (fscan (fd3, str, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> logfile)
+
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "shift", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec, >> logfile)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=params.select, sort=params.sort,
+ group=params.group, time=params.time,
+ timewrap=params.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no,
+ >> logfile)
+
+ doarcs (spec, response, arcref1, arcref2, extn, arcreplace,
+ apidtable, arcaps, arcbeams, savearcs, reextract, arcap,
+ logfile, yes, done)
+
+ hselect (specms, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1)
+ print ("No arc reference assigned for ", spec, >> logfile)
+ else {
+ if (skyalign)
+ doalign (spec, specms, "align"//extn//imtype,
+ arcref1//extn, logfile, yes)
+ print ("Dispersion correct ", spec, >> logfile)
+ dispcor (specms, "", linearize=params.linearize,
+ database=database, table=arcref1//extn, w1=INDEF,
+ w2=INDEF, dw=INDEF, nw=INDEF, log=params.log,
+ flux=params.flux, samedisp=no, global=no,
+ ignoreaps=no, confirm=no, listonly=no, verbose=no,
+ logfile=logfile)
+ if (params.nsubaps > 1) {
+ imrename (specms, temp, verbose=no)
+ scopy (temp, specms, w1=INDEF, w2=INDEF,
+ apertures="1-999", bands="", beams="", apmodulus=0,
+ offset=0, format="multispec", clobber=no, merge=no,
+ renumber=no, verbose=no)
+ blkavg (temp, temp, 1, params.nsubaps, option="sum")
+ imcopy (temp, specms//"[*,*]", verbose=no)
+ imdelete (temp, verify=no)
+ }
+ disp = no
+ }
+ }
+
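+	    # Sky subtraction with SKYSUB using the specified sky apertures
+	    # or beams; the SKYSUB keyword records the operation in the
+	    # image header.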
+ if (sky && !disp) {
+ str = ""
+ if (skyaps != "")
+ str = "skyaps=" // skyaps
+ if (skybeams != "")
+ str = str // " skybeams=" // skybeams
+ print ("Sky subtract ", spec, ": ", str, >> logfile)
+ skysub (specms, output="", objaps=objaps, skyaps=skyaps,
+ objbeams=objbeams, skybeams=skybeams, skyedit=no,
+ combine=params.combine, reject=params.reject,
+ scale=params.scale, saveskys=saveskys, logfile=logfile)
+ hedit (specms, "skysub", str, add=yes, show=no, verify=no,
+ update=yes)
+ }
+ }
+ fd1 = ""; delete (objs, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+
+ flprcache (0)
+end
diff --git a/noao/imred/src/fibers/batch.par b/noao/imred/src/fibers/batch.par
new file mode 100644
index 00000000..54575594
--- /dev/null
+++ b/noao/imred/src/fibers/batch.par
@@ -0,0 +1,38 @@
+objects,s,h,,,,"Object spectra"
+datamax,r,h,,,,"Max data value / cosmic ray threshold"
+response,f,h,"",,,"Response spectrum"
+arcs1,s,h,,,,"List of arc spectra"
+arcs2,s,h,,,,"List of shift arc spectra"
+arcref1,f,h,"",,,"Arc reference for dispersion solution"
+arcref2,f,h,"",,,"Arc reference for dispersion solution"
+arcreplace,f,h,"",,,"Special aperture replacements"
+arcrefs,s,h,,,,"Arc references"
+extn,s,h,,,,"Extraction extension"
+apidtable,f,h,"",,,"Aperture identifications"
+objaps,s,h,,,,"Object apertures"
+skyaps,s,h,,,,"Sky apertures"
+arcaps,s,h,,,,"Arc apertures"
+objbeams,s,h,,,,"Object beam numbers"
+skybeams,s,h,,,,"Sky beam numbers"
+arcbeams,s,h,,,,"Arc beam numbers
+"
+done,f,h,"",,,"File of spectra already done"
+logfile,f,h,"",,,"Logfile"
+redo,b,h,,,,"Redo operations?"
+update,b,h,,,,"Update spectra?"
+scattered,b,h,,,,"Subtract scattered light?"
+arcap,b,h,,,,"Use object apertures for arcs?"
+dispcor,b,h,,,,"Dispersion correct spectra?"
+savearcs,b,h,,,,"Save internal arcs?"
+skyalign,b,h,,,,"Align sky lines?"
+skysubtract,b,h,,,,"Subtract sky?"
+saveskys,b,h,,,,"Save sky spectra?
+"
+newaps,b,h,,,,
+newresp,b,h,,,,
+newdisp,b,h,,,,
+newarcs,b,h,,,,
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/doalign.cl b/noao/imred/src/fibers/doalign.cl
new file mode 100644
index 00000000..e3b8d4db
--- /dev/null
+++ b/noao/imred/src/fibers/doalign.cl
@@ -0,0 +1,78 @@
+# DOALIGN -- Align sky lines in objects.
+# If there is no database of features for alignment, have the user identify
+# them interactively.
+
+procedure doalign (spec, specms, align, table, logfile, batch)
+
+file spec
+file specms
+file align
+file table
+file logfile
+bool batch
+
+begin
+ file temp
+ bool log, verbose1
+
+ if (batch)
+ verbose1 = no
+ else
+ verbose1 = verbose
+
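+	# If the alignment reference spectrum does not exist, create it from
+	# the current spectrum with DISPCOR (without linearizing), identify
+	# the alignment features interactively, and then reidentify them in
+	# all of its apertures.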
+ if (!access (align)) {
+ print ("Identify alignment features")
+ dispcor (specms, align, linearize=no,
+ database=database, table=table, w1=INDEF,
+ w2=INDEF, dw=INDEF, nw=INDEF, log=params.log,
+ flux=params.flux, samedisp=no, global=no,
+ ignoreaps=no, confirm=no, listonly=no,
+ verbose=no, logfile="")
+ identify (align, section="middle line", database=database,
+ coordlist="", nsum=1, match=params.match, maxfeatures=50,
+ zwidth=100., ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., autowrite=yes)
+ print ("g") |
+ identify (align, section="middle line", database=database,
+ coordlist="", nsum=1, match=params.match, maxfeatures=50,
+ zwidth=100., ftype="emission", fwidth=params.fwidth,
+ cradius=params.cradius, threshold=params.threshold,
+ minsep=2., function=params.i_function,
+ order=params.i_order, sample="*",
+ niterate=params.i_niterate, low_reject=params.i_low,
+ high_reject=params.i_high, grow=0., autowrite=yes,
+ cursor="STDIN", >G "dev$null", >& "dev$null")
+ reidentify (align, "",
+ interactive=no, section="middle line", shift=0.,
+ step=1, nsum=1, cradius=params.cradius,
+ threshold=params.threshold, nlost=100, newaps=no,
+ refit=no, trace=no, override=yes, addfeatures=no,
+ database=database, plotfile=plotfile,
+ logfiles=logfile, verbose=verbose1)
+ }
+
+ # Set arc dispersion function in image header.
+ if (!batch)
+ print ("Identify alignment features in ", spec)
+ print ("Identify alignment features in ", spec, >> logfile)
+ dispcor (specms, "", linearize=no,
+ database=database, table=table, w1=INDEF,
+ w2=INDEF, dw=INDEF, nw=INDEF, log=params.log,
+ flux=params.flux, samedisp=no, global=no,
+ ignoreaps=no, confirm=no, listonly=no,
+ verbose=no, logfile="")
+ hedit (specms, "refspec1", align, add=yes,
+ verify=no, show=no, update=yes)
+ delete (database//"/id"//spec//".ms", verify=no, >& "dev$null")
+ reidentify (align, specms,
+ interactive=no, section="middle line", shift=0.,
+ step=1, nsum=1, cradius=params.cradius,
+ threshold=params.threshold, nlost=100, newaps=no,
+ refit=no, trace=no, override=no, addfeatures=no,
+ database=database, plotfile=plotfile,
+ logfiles=logfile, verbose=verbose1)
+end
diff --git a/noao/imred/src/fibers/doalign.par b/noao/imred/src/fibers/doalign.par
new file mode 100644
index 00000000..0ddd375f
--- /dev/null
+++ b/noao/imred/src/fibers/doalign.par
@@ -0,0 +1,7 @@
+spec,f,a,"",,,
+specms,f,a,"",,,
+align,f,a,"",,,
+table,f,a,"",,,
+logfile,f,a,"",,,
+batch,b,a,,,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/doarcs.cl b/noao/imred/src/fibers/doarcs.cl
new file mode 100644
index 00000000..bd4e3748
--- /dev/null
+++ b/noao/imred/src/fibers/doarcs.cl
@@ -0,0 +1,264 @@
+# DOARCS -- Determine dispersion relation for spectrum based on reference arcs.
+# This procedure is complicated by:
+#    1.  The need to reextract arcs if the object spectra are being
+# recentered or retraced.
+# 2. The use of shift spectra to track shifts in the dispersion from
+# the reference arc spectrum.
+# 3. The use of multiple exposures to correct for illumination problems
+# in taking the arcs.
+
+procedure doarcs (spec, response, arcref1, arcref2, extn, arcreplace, apidtable,
+ arcaps, arcbeams, savearcs, reextract, arcap, logfile, batch, done)
+
+file spec
+file response
+file arcref1
+file arcref2
+string extn
+file arcreplace
+file apidtable
+string arcaps
+string arcbeams
+bool savearcs
+bool reextract
+bool arcap
+file logfile
+bool batch
+file done
+
+struct *fd
+
+begin
+ string imtype
+ int i, j, k, n
+ file temp, arc1, arc2, str1, str2, arctype, apref, arc, arcms
+ bool verbose1
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ n = strlen (imtype)
+
+ temp = mktemp ("tmp$iraf")
+
+ if (batch)
+ verbose1 = no
+ else
+ verbose1 = verbose
+
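+	# Loop over the (at most two) reference arcs assigned in the image
+	# header via the REFSPEC keywords.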
+ for (j=1; j<=2; j+=1) {
+ # The reference spectra refer initially to the 2D image. At the
+ # end we will reset them to refer to the 1D spectra.
+
+ hselect (spec, "refspec"//j, yes, > temp)
+ fd = temp
+ k = fscan (fd, arc1, str1)
+ fd = ""; delete (temp, verify=no)
+ if (k < 1)
+ break
+
+ # Strip possible image extension.
+ i = strlen (arc1)
+ if (i > n && substr (arc1, i-n+1, i) == imtype)
+ arc1 = substr (arc1, 1, i-n)
+
+ # Set extraction output and aperture reference depending on whether
+	# the arcs are to be reextracted using recentered or retraced object
+ # apertures.
+
+ if (arcap &&
+ (apscript.ansrecenter=="yes" || apscript.anstrace=="yes" ||
+ apscript.ansrecenter=="YES" || apscript.anstrace=="YES")) {
+ arc2 = spec // arc1 // ".ms"
+ apref = spec
+ if (access (arc2//imtype))
+ imdelete (arc2//imtype, verify=no)
+ delete (database//"/id"//arc2//"*", verify = no)
+ } else {
+ arc2 = arc1 // extn
+ apref = apscript.references
+ if (reextract && access (arc2//imtype)) {
+ if (arc2 != arcref1 // extn && arc2 != arcref2 // extn) {
+ if (access (done)) {
+ fd = done
+ while (fscan (fd, arcms) != EOF)
+ if (arcms == arc2)
+ break
+ fd = ""
+ } else
+ arcms = ""
+ if (arcms != arc2)
+ imdelete (arc2, verify=no)
+ }
+ }
+ }
+
+ # SHIFT arcs are reidentified with only a shift.
+ # HENEAR arcs are reidentified using the user refit option.
+ # Also internal arcs are checked if HENEAR.
+
+ hselect (arc1, "arctype", yes, > temp)
+ fd = temp
+ i = fscan (fd, arctype)
+ fd = ""; delete (temp, verify=no)
+
+ # Extract and determine dispersion function if necessary.
+ if (!access (arc2//imtype)) {
+ delete (database//"/id"//arc2//"*", verify = no)
+ if (!batch)
+ print ("Extract and reidentify arc spectrum ", arc1)
+ print ("Extract and reidentify arc spectrum ", arc1, >> logfile)
+ apscript (arc1, output=arc2, references=apref,
+ ansrecenter="NO", ansresize="NO", ansedit="NO",
+ anstrace="NO", nsubaps=params.nsubaps, background="none",
+ clean=no, weights="none", verbose=verbose1)
+ sapertures (arc2, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF, w1=INDEF,
+ dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (arc2, "/", response, arc2, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (arc2, "/", temp, arc2, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=yes, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ print (arc2, >> done)
+
+ if (arctype == "shift") {
+ reidentify (arcref2//extn, arc2,
+ interactive=no, section="middle line", shift=0.,
+ step=1, nsum=1, cradius=params.cradius,
+ threshold=params.threshold, nlost=100, newaps=no,
+ refit=no, trace=no, override=no, addfeatures=no,
+ database=database, plotfile=plotfile,
+ logfiles=logfile, verbose=verbose1)
+ } else {
+ if (arcreplace != "") {
+ fd = arcreplace
+ while (fscan (fd, arc, arcms, str2) != EOF) {
+ i = strlen (arc)
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ if (arc != arc1)
+ next
+ arc = arcms
+ if (i > n && substr (arc, i-n+1, i) == imtype)
+ arc = substr (arc, 1, i-n)
+ arcms = arc // extn // imtype
+
+ if (access (arcms))
+ imdelete (arcms, verify=no)
+
+ if (!batch)
+ print ("Extract arc spectrum ", arc)
+ print ("Extract arc spectrum ", arc, >> logfile)
+ apscript (arc, references=apref,
+ ansrecenter="NO", ansresize="NO", ansedit="NO",
+ anstrace="NO", nsubaps=params.nsubaps,
+ background="none", clean=no,
+ weights="none", verbose=verbose1)
+ sapertures (arcms, apertures="",
+ apidtable=apidtable, wcsreset=no, verbose=no,
+ beam=INDEF, dtype=INDEF, w1=INDEF, dw=INDEF,
+ z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+				sarith (arcms, "/", response, arcms,
+ w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="",
+ apmodulus=0, reverse=no,
+ ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes,
+ merge=no, errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+				sarith (arcms, "/", temp, arcms,
+ w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="",
+ apmodulus=0, reverse=no,
+ ignoreaps=yes, format="multispec",
+ renumber=no, offset=0, clobber=yes,
+ merge=no, errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ scopy (arcms, arc2, w1=INDEF, w2=INDEF,
+ apertures=str2, bands="", beams="",
+ apmodulus=1000, offset=0, format="multispec",
+ clobber=yes, merge=yes, renumber=no,
+ verbose=yes, >> logfile)
+ imdelete (arcms, verify=no)
+ }
+ fd = ""
+ }
+ reidentify (arcref1//extn, arc2,
+ interactive=!batch, section="middle line",
+ shift=0., step=1, nsum=1, cradius=params.cradius,
+ threshold=params.threshold, nlost=100,
+ refit=params.refit, trace=no, override=no,
+ addfeatures=params.addfeatures,
+ coordlist=params.coordlist, match=params.match,
+ maxfeatures=50, minsep=2., database=database,
+ plotfile=plotfile, logfiles=logfile,
+ verbose=verbose1)
+ }
+
+ # If not reextracting arcs based on object apertures
+ # then save the extracted arc to avoid doing it again.
+
+ if (arc1//extn != arc2)
+ imdelete (arc2, verify=no)
+ }
+
+ # Set the REFSPEC parameters for multispec spectrum.
+ if (k == 1)
+ hedit (spec//".ms", "refspec"//j, arc2, add=yes,
+ verify=no, show=no, update=yes)
+ else
+ hedit (spec//".ms", "refspec"//j, arc2//" "//str1,
+ add=yes, verify=no, show=no, update=yes)
+
+ # Check for arc fibers in object spectra.
+ if (arctype != "shift" && (arcaps != "" || arcbeams != "")) {
+ scopy (spec//".ms", spec//"arc.ms", w1=INDEF, w2=INDEF,
+ apertures=arcaps, bands="", beams=arcbeams, apmodulus=1000,
+ offset=0, format="multispec", clobber=yes, merge=no,
+ renumber=no, verbose=no, >& "dev$null")
+ if (access (spec//"arc.ms"//imtype)) {
+ if (!batch)
+ print ("Reidentify arc fibers in ", spec,
+ " with respect to ", arc1)
+ print ("Reidentify arc fibers in ", spec,
+ " with respect to ", arc1, >> logfile)
+ delete (database//"/id"//spec//"arc.ms*", verify = no)
+ reidentify (arc2, spec//"arc.ms", interactive=no,
+ section="middle line", shift=0., step=1, nsum=1,
+ cradius=params.cradius, threshold=params.threshold,
+ nlost=100, refit=no, trace=no, override=no,
+ addfeatures=no, database=database,
+ plotfile=plotfile, logfiles=logfile,
+ verbose=verbose1)
+ imdelete (spec//"arc.ms", verify=no)
+ hedit (spec//".ms", "refshft"//j, spec//"arc.ms interp",
+ add=yes, verify=no, show=no, update=yes)
+ if (!savearcs)
+ scopy (spec//".ms", "", w1=INDEF, w2=INDEF,
+ apertures="!"//arcaps, bands="", beams=arcbeams,
+ apmodulus=1000, offset=0, format="multispec",
+ clobber=yes, merge=no, renumber=no,
+ verbose=yes, >> logfile)
+ }
+ }
+ }
+end
diff --git a/noao/imred/src/fibers/doarcs.par b/noao/imred/src/fibers/doarcs.par
new file mode 100644
index 00000000..a93b16c6
--- /dev/null
+++ b/noao/imred/src/fibers/doarcs.par
@@ -0,0 +1,17 @@
+spec,f,a,"",,,
+response,f,a,"",,,
+arcref1,f,a,"",,,
+arcref2,f,a,"",,,
+extn,s,a,,,,
+arcreplace,f,a,"",,,
+apidtable,f,a,"",,,
+arcaps,s,a,,,,
+arcbeams,s,a,,,,
+savearcs,b,a,,,,
+reextract,b,a,,,,
+arcap,b,a,,,,
+logfile,f,a,"",,,
+batch,b,a,,,,
+done,f,a,"",,,
+fd,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/fibresponse.cl b/noao/imred/src/fibers/fibresponse.cl
new file mode 100644
index 00000000..2379fcc4
--- /dev/null
+++ b/noao/imred/src/fibers/fibresponse.cl
@@ -0,0 +1,261 @@
+# FIBRESPONSE -- Make an aperture response spectrum using a flat field
+# and a throughput file or image.
+
+procedure fibresponse (flat, throughput, apreference, response)
+
+string flat {prompt="Flat field spectrum"}
+string throughput {prompt="Throughput file or image"}
+string apreference {prompt="Aperture reference spectrum"}
+string response {prompt="Response spectrum"}
+
+bool recenter = no {prompt="Recenter apertures?"}
+bool edit = yes {prompt="Edit/review apertures?"}
+bool trace = no {prompt="Trace spectra?"}
+bool clean = no {prompt="Detect and replace bad pixels?"}
+bool fitflat = no {prompt="Fit and ratio flat field spectrum?"}
+bool interactive = yes {prompt="Fit flat field interactively?"}
+string function = "spline3" {prompt="Fitting function",
+ enum="spline3|legendre|chebyshev|spline1"}
+int order = 20 {prompt="Fitting function order", min=1}
+
+begin
+ string imtype, mstype
+ file flat2d, skyflat2d, apref, resp
+ file temp1, temp2, log1, log2
+	int i, n, ap, naxis
+ real respval
+ struct err
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ flat2d = flat
+ skyflat2d = throughput
+ apref = apreference
+ resp = response
+ temp1 = mktemp ("tmp")
+ temp2 = mktemp ("tmp")
+
+ # Check required input and output.
+ if (resp == "" || resp == flat2d || resp == skyflat2d)
+ error (1, "Bad response image name")
+ if (flat2d == "" && skyflat2d == "")
+ error (1, "No flat field or throughput specified")
+
+ if (flat2d != "") {
+ i = strlen (flat2d)
+ if (i > n && substr (flat2d, i-n+1, i) == imtype)
+ flat2d = substr (flat2d, 1, i-n)
+ if (!access (flat2d // imtype)) {
+ printf ("Flat field spectrum not found - %s%s\n",
+ flat2d, imtype) | scan (err)
+	    error (1, err // "\nCheck setting of imtype")
+ }
+ }
+ if (skyflat2d != "") {
+ i = strlen (skyflat2d)
+ if (i > n && substr (skyflat2d, i-n+1, i) == imtype)
+ skyflat2d = substr (skyflat2d, 1, i-n)
+ if (!access (skyflat2d // imtype)) {
+ if (!access (skyflat2d)) {
+ printf ("Throughput file or image not found - %s%s\n",
+ skyflat2d, imtype) | scan (err)
+		error (1, err // "\nCheck setting of imtype")
+ }
+
+ if (flat2d == "") {
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+ if (!access (apref // imtype))
+ error (1, "Aperture reference image required")
+ }
+ }
+ }
+
+ # Set logging
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+ # Initialize APSCRIPT
+ apscript.references = apref
+ if (recenter)
+ apscript.ansrecenter = "YES"
+ else
+ apscript.ansrecenter = "NO"
+ apscript.ansresize = "NO"
+ if (edit)
+ apscript.ansedit = "yes"
+ else
+ apscript.ansedit = "NO"
+ if (trace)
+ apscript.anstrace = "YES"
+ else
+ apscript.anstrace = "NO"
+ apscript.ansextract = "YES"
+
+	# If using a flat field, extract it if necessary and possibly fit it
+	# and ratio the individual apertures by an overall smooth function.
+
+ if (flat2d != "") {
+ if (!access (flat2d // mstype)) {
+ print ("Extract flat field ", flat2d) | tee (log1)
+ if (flat2d != apref)
+ apscript (flat2d, output=resp, background="none",
+ clean=clean, extras=no)
+ else
+		    apscript (flat2d, output=resp, ansrecenter="NO",
+			ansresize="NO", ansedit="NO",
+ anstrace="NO", background="none",
+ clean=clean, extras=no)
+ } else
+ imcopy (flat2d//".ms", resp, verbose=no)
+
+ if (fitflat) {
+ print ("Fit and ratio flat field ", flat2d) | tee (log1)
+ blkavg (resp, temp1, option="average", b1=1, b2=10000)
+ imcopy (temp1//"[*,1]", temp1, verbose=no)
+ fit1d (temp1, temp1, "fit", axis=1, interactive=interactive,
+ sample="*", naverage=1, function=function, order=order,
+ low_reject=0., high_reject=0., niterate=1, grow=0.,
+ graphics="stdgraph")
+ sarith (resp, "/", temp1, resp, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0, reverse=no,
+ ignoreaps=yes, format="multispec", renumber=no,
+ offset=0, clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp1, verify=no)
+ }
+ }
+
+	# If using a throughput image, extract it if necessary.
+ # Apply it to the flat field if given and otherwise only
+ # compute the throughput through each aperture.
+
+ if (skyflat2d != "") {
+ if (access (skyflat2d // imtype)) {
+ if (!access (skyflat2d // mstype)) {
+ print ("Extract throughput image ", skyflat2d) | tee (log1)
+ apscript (skyflat2d, output=temp1, background="none",
+ clean=clean, extras=no)
+ temp2 = temp1
+ } else
+ temp2 = skyflat2d // ".ms"
+
+ if (flat2d != "") {
+ print ("Correct flat field to throughput image") |
+ tee (log1)
+ sarith (temp2, "/", resp, temp1, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ fit1d (temp1, temp1, type="fit", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+ sarith (resp, "*", temp1, resp, w1=INDEF, w2=INDEF,
+ apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=yes, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=no,
+ errval=0, verbose=no)
+ imdelete (temp1, verify=no)
+ } else {
+ print ("Compute aperture throughput from image") |
+ tee (log1)
+ fit1d (temp2, resp, type="fit", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+ if (temp2 == temp1)
+ imdelete (temp2, verify=no)
+ }
+
+	    # If a flat field and a throughput file are used, scale the
+	    # average flat field in each aperture to those values.
+
+ } else if (flat2d != "") {
+ print ("Correct flat field with throughput file ", skyflat2d) |
+ tee (log1)
+ fit1d (resp, resp, type="ratio", axis=1,
+ interactive=no, sample="*", naverage=1,
+ function="legendre", order=1, niterate=0)
+
+ list = skyflat2d
+ while (fscan (list, ap, respval) != EOF) {
+ sarith (resp, "*", respval, resp, w1=INDEF, w2=INDEF,
+ apertures=ap, bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=yes,
+ errval=0., verbose=no, >& "dev$null")
+ }
+ list = ""
+
+	    # If only a throughput file is given, create the response from the
+ # aperture reference and set the aperture response to the specified
+ # values.
+
+ } else {
+ print ("Set aperture throughput using ", skyflat2d) | tee (log1)
+ if (!access (apref // mstype)) {
+		apscript (apref, output=resp, ansrecenter="NO",
+		    ansresize="NO", ansedit="NO",
+ anstrace="NO", background="none",
+ clean=no, extras=no)
+ sarith (resp, "replace", "0", resp, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=yes,
+ errval=0., verbose=no)
+ } else
+ sarith (apref//".ms", "replace", "0", resp, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=yes,
+ errval=0., verbose=no)
+
+ list = skyflat2d
+ while (fscan (list, ap, respval) != EOF) {
+ sarith (resp, "replace", respval, resp, w1=INDEF, w2=INDEF,
+ apertures=ap, bands="", beams="", apmodulus=0,
+ reverse=no, ignoreaps=no, format="multispec",
+ renumber=no, offset=0, clobber=yes, merge=yes,
+ errval=0., verbose=no)
+ }
+ list = ""
+ }
+ }
+
+ # The final response is normalized to overall unit mean and the
+ # average aperture response is printed.
+
+ print ("Create the normalized response ", resp) | tee (log1)
+ bscale (resp, resp, bzero="0.", bscale="mean", section="",
+ step=1, upper=INDEF, lower=INDEF, verbose=yes) | tee (log1, >log2)
+ blkavg (resp, temp1, option="average", b1=10000, b2=1)
+ print ("Average aperture response:") | tee (log1, >log2)
+ naxis = 5
+ #hselect (temp1, "naxis", yes) | scan (naxis)
+ #if (naxis == 1)
+ # listpixels (temp1) | tee (log1, >log2)
+ #else
+ # listpixels (temp1//"[1,*]") | tee (log1, >log2)
+
+ hselect (temp1, "naxis", yes, > temp2)
+ list = temp2; ap = fscan (list, naxis)
+ if (naxis == 1)
+ listpixels (temp1) | tee (log1, >log2)
+ else
+ listpixels (temp1//"[1,*]") | tee (log1, >log2)
+ list = ""; delete (temp2, verify=no)
+
+ imdelete (temp1, verify=no)
+end
diff --git a/noao/imred/src/fibers/fibresponse.par b/noao/imred/src/fibers/fibresponse.par
new file mode 100644
index 00000000..9a59eb46
--- /dev/null
+++ b/noao/imred/src/fibers/fibresponse.par
@@ -0,0 +1,13 @@
+flat,s,a,,,,"Flat field spectrum"
+throughput,s,a,,,,"Throughput file or image"
+apreference,s,a,,,,"Aperture reference spectrum"
+response,s,a,,,,"Response spectrum"
+recenter,b,h,no,,,"Recenter apertures?"
+edit,b,h,yes,,,"Edit/review apertures?"
+trace,b,h,no,,,"Trace spectra?"
+clean,b,h,no,,,"Detect and replace bad pixels?"
+fitflat,b,h,no,,,"Fit and ratio flat field spectrum?"
+interactive,b,h,yes,,,"Fit flat field interactively?"
+function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+order,i,h,20,1,,"Fitting function order"
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/getspec.cl b/noao/imred/src/fibers/getspec.cl
new file mode 100644
index 00000000..84ce9a1c
--- /dev/null
+++ b/noao/imred/src/fibers/getspec.cl
@@ -0,0 +1,49 @@
+# GETSPEC -- Get spectra which have been processed but not yet extracted.
+# Strip the imtype extension.
+
+procedure getspec (images, output)
+
+string images {prompt="List of images"}
+file output {prompt="Output file of images"}
+bool ccdproc {prompt="Add CCDPROC keyword and continue?",
+ mode="q"}
+struct *fd
+
+begin
+ string imtype, temp, image, system=""
+ int n, n1
+
+ imtype = "." // envget ("imtype")
+ n = stridx (",", imtype)
+ if (n > 0)
+ imtype = substr (imtype, 1, n-1)
+ n1 = strlen (imtype)
+
+ # Initialize files
+ set clobber=yes
+ sleep (> output)
+ set clobber=no
+
+ temp = mktemp ("tmp$iraf")
+ sections (images, option="fullname", > temp)
+ fd = temp
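+	# For each image require the CCDPROC keyword (querying whether to add
+	# it), skip spectra that already have a spectral WCS, and strip the
+	# imtype extension before writing the name to the output list.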
+ while (fscan (fd, image) != EOF) {
+ hselect (image, "ccdproc", yes) | scan (system)
+ if (nscan() == 0) {
+ printf ("%s: CCDPROC keyword not found.\n", image)
+ printf (" Either run CCDPROC or add CCDPROC keyword with HEDIT.\n")
+ if (!ccdproc)
+ error (1, "Exit")
+ hedit (image, "ccdproc", "DONE", add=yes, update=yes,
+ verify=no, show=no)
+ }
+ hselect (image, "wat0_001", yes) | scanf ("system=%s", system)
+ if (system=="equispec" || system=="multispec")
+ next
+ n = strlen (image)
+ if (n > n1 && substr (image, n-n1+1, n) == imtype)
+ image = substr (image, 1, n-n1)
+ print (image, >> output)
+ }
+ fd = ""; delete (temp, verify=no)
+end
diff --git a/noao/imred/src/fibers/getspec.par b/noao/imred/src/fibers/getspec.par
new file mode 100644
index 00000000..e676c18e
--- /dev/null
+++ b/noao/imred/src/fibers/getspec.par
@@ -0,0 +1,5 @@
+images,s,a,,,,"List of images"
+output,f,a,,,,"Output file of images"
+ccdproc,b,q,,,,"Add CCDPROC keyword and continue?"
+fd,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/listonly.cl b/noao/imred/src/fibers/listonly.cl
new file mode 100644
index 00000000..ac215892
--- /dev/null
+++ b/noao/imred/src/fibers/listonly.cl
@@ -0,0 +1,237 @@
+# LISTONLY -- List processing to be done.
+#
+# This follows essentially the same logic as the full procedure but only
+# lists the operations without performing them.
+
+procedure listonly (objects, apidtable, apref, flat, throughput, arcs1, arcs2,
+ scattered, dispcor, skysubtract, redo, update)
+
+string objects = "" {prompt="List of object spectra"}
+file apidtable = "" {prompt="Aperture ID table"}
+file apref = "" {prompt="Aperture reference spectrum"}
+file flat = "" {prompt="Flat field spectrum"}
+file throughput = "" {prompt="Throughput file or image"}
+string arcs1 = "" {prompt="List of arc spectra"}
+string arcs2 = "" {prompt="List of shift arc spectra"}
+
+bool scattered {prompt="Subtract scattered light?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool skysubtract {prompt="Subtract sky?"}
+bool redo = no {prompt="Redo operations if previously done?"}
+bool update = yes {prompt="Update spectra if cal data changes?"}
+
+struct *fd1
+struct *fd2
+
+begin
+ string imtype, mstype, extn
+ string spec, arcref1, arcref2
+ string specms, arcref1ms, arcref2ms, response
+ string objs, temp, done, str
+ bool reextract, newaps, newresp, newdisp, scat, extract, disp, sky
+ int i, j, n, dc
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ objs = mktemp ("tmp$iraf")
+ temp = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
+ if (apidtable != "") {
+ j = strlen (apidtable)
+ for (i=1; i<=j && substr(apidtable,i,i)==" "; i+=1);
+ apidtable = substr (apidtable, i, j)
+ }
+ i = strlen (apidtable)
+ if (i == 0)
+ extn = ".ms"
+ else {
+ extn = apidtable
+ while (yes) {
+ i = stridx ("/$]", extn)
+ if (i == 0)
+ break
+ j = strlen (extn)
+ extn = substr (extn, i+1, j)
+ }
+ extn = extn // ".ms"
+ }
+
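+	# Flags indicating which calibrations would be recreated; they drive
+	# the update logic for the object spectra below.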
+ newaps = no
+ newresp = no
+ newdisp = no
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref // extn)) {
+ print ("Set reference aperture for ", apref)
+ newaps = yes
+ }
+
+ i = strlen (flat)
+ if (i > n && substr (flat, i-n+1, i) == imtype)
+ flat = substr (flat, 1, i-n)
+ if (flat != "") {
+ scat = no
+ if (scattered) {
+ if (redo && access (flat//"noscat"//imtype))
+ hselect (flat//"noscat", "apscatte", yes) | scan (str)
+ else
+ hselect (flat, "apscatte", yes) | scan (str)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (scat)
+ print ("Subtract scattered light from ", flat)
+ }
+
+ spec = throughput
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+ if (spec != "") {
+ scat = no
+ if (scattered) {
+ if (redo && access (flat//"noscat"//imtype))
+ hselect (flat//"noscat", "apscatte", yes) | scan (str)
+ else
+ hselect (flat, "apscatte", yes) | scan (str)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (scat)
+ print ("Subtract scattered light from ", spec)
+ }
+
+ response = ""
+ if (flat != "" || spec != "") {
+ if (extn == ".ms")
+ response = flat // spec // "norm.ms"
+ else
+ response = flat // spec // extn
+
+ reextract = redo || (update && newaps)
+ if (reextract || !access (response // imtype) || (redo && scat)) {
+ print ("Create response function ", response)
+ newresp = yes
+ }
+ }
+
+ if (dispcor) {
+ getspec (arcs1, temp)
+ fd1 = temp
+ if (fscan (fd1, arcref1) == EOF)
+ error (1, "No reference arcs")
+ fd1 = ""; delete (temp, verify=no)
+ arcref1ms = arcref1 // extn
+
+ getspec (arcs2, temp)
+ fd1 = temp
+ if (fscan (fd1, arcref2) == EOF)
+ arcref2 = ""
+ fd1 = ""; delete (temp, verify=no)
+ arcref2ms = arcref2 // extn
+
+ reextract = redo || (update && newaps)
+ if (reextract || !access (arcref1ms//imtype)) {
+ print ("Extract arc reference image ", arcref1)
+ print ("Determine dispersion solution for ", arcref1)
+ newdisp = yes
+ } else {
+ hselect (arcref1ms, "dclog1", yes, > temp)
+ fd1 = temp
+ dc = -1
+ i = fscan (fd1, dc)
+ fd1 = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("Determine dispersion solution for ", arcref1)
+ newdisp = yes
+ }
+ }
+ print (arcref1, > done)
+
+ if (arcref2 != "") {
+ if (reextract || !access (arcref2ms//imtype) || newdisp) {
+ print ("Extract shift arc reference image ", arcref2)
+ print ("Determine dispersion solution for ", arcref2)
+ } else {
+ hselect (arcref2ms, "dclog1", yes, > temp)
+ fd1 = temp
+ dc = -1
+ i = fscan (fd1, dc)
+ fd1 = ""; delete (temp, verify=no)
+ if (i < 1)
+ print ("Determine dispersion solution for ", arcref2)
+ }
+ }
+ print (arcref2, >> done)
+ }
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+ getspec (objects, objs)
+ fd1 = objs
+ while (fscan (fd1, spec) != EOF) {
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+
+ specms = spec // mstype
+
+ scat = no
+ extract = no
+ disp = no
+ sky = no
+ if (scattered) {
+ if (redo && access (spec//"noscat"//imtype))
+ hselect (spec//"noscat", "apscatte", yes) | scan (str)
+ else
+ hselect (spec, "apscatte", yes) | scan (str)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (reextract || !access (specms) || (redo && scat))
+ extract = yes
+ else {
+ hselect (specms, "dclog1", yes) | scan (str)
+ if (nscan() == 0)
+ disp = yes
+ else
+ extract = update && newdisp
+ hselect (specms, "skysub", yes) | scan (str)
+ if (nscan() == 0)
+ sky = skysubtract
+ }
+
+ if (extract) {
+ disp = dispcor
+ sky = skysubtract
+ }
+
+ if (scat)
+ print ("Subtract scattered light from ", spec)
+ if (extract)
+ print ("Extract object spectrum ", spec)
+ if (disp)
+ print ("Dispersion correct ", spec)
+ if (sky)
+ print ("Sky subtract ", spec)
+ }
+ fd1 = ""; delete (objs, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/fibers/listonly.par b/noao/imred/src/fibers/listonly.par
new file mode 100644
index 00000000..aa52691e
--- /dev/null
+++ b/noao/imred/src/fibers/listonly.par
@@ -0,0 +1,15 @@
+objects,s,a,"",,,"List of object spectra"
+apidtable,f,a,"",,,"Aperture ID table"
+apref,f,a,"",,,"Aperture reference spectrum"
+flat,f,a,"",,,"Flat field spectrum"
+throughput,f,a,"",,,"Throughput file or image"
+arcs1,s,a,"",,,"List of arc spectra"
+arcs2,s,a,"",,,"List of shift arc spectra"
+scattered,b,a,,,,"Subtract scattered light?"
+dispcor,b,a,,,,"Dispersion correct spectra?"
+skysubtract,b,a,,,,"Subtract sky?"
+redo,b,a,no,,,"Redo operations if previously done?"
+update,b,a,yes,,,"Update spectra if cal data changes?"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/mkfibers.cl b/noao/imred/src/fibers/mkfibers.cl
new file mode 100644
index 00000000..b7ffdac2
--- /dev/null
+++ b/noao/imred/src/fibers/mkfibers.cl
@@ -0,0 +1,167 @@
+# MKFIBERS -- Make multifiber examples
+
+procedure mkfibers (image)
+
+file image {prompt="Image name"}
+string type="object" {prompt="Object type",
+ enum="object|objnosky|sky|flat|henear|ehenear|ohenear|mercury"}
+file fibers="" {prompt="Fiber data file"}
+string title="Multifiber artificial image" {prompt="Title"}
+file header="artdata$stdhdr.dat" {prompt="Header keyword file"}
+int ncols=400 {prompt="Number of columns"}
+int nlines=512 {prompt="Number of lines"}
+real wstart=4210. {prompt="Starting wavelength"}
+real wend=7362. {prompt="Ending wavelength"}
+int seed=1 {prompt="Noise seed"}
+
+begin
+ int i, ap, beam
+ real ar
+ file out, obj, sky, arc, dat
+	string htype, imtype
+	struct line		# rest of each record from the fiber data file
+
+ out = image
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ if (access (out) || access (out//imtype))
+ return
+
+ print ("Creating image ", out, " ...")
+
+ obj = mktemp ("art")
+ sky = mktemp ("art")
+ arc = mktemp ("art")
+ dat = mktemp ("art")
+
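+	# Build a list of template spectra and fiber positions appropriate to
+	# the requested image type, render it with MK2DSPEC, and add noise.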
+ list = fibers
+ if (type == "object") { # Object spectrum + sky
+ htype = "object"
+ mk1dspec (obj, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=1000., slope=0.,
+ temperature=7000., lines="", nlines=50, peak=-0.5,
+ profile="gaussian", gfwhm=24, seed=2, comments=no, header="")
+ mk1dspec (sky, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=1000., slope=0.,
+ temperature=5800., lines="", nlines=20, peak=1.,
+ profile="gaussian", gfwhm=12, seed=1, comments=no, header="")
+ imarith (obj, "+", sky, obj, verbose=no, noact=no)
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0.8, slope=0.,
+ temperature=0., lines="mkexamples$henear2.dat",
+ profile="gaussian", gfwhm=14, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF) {
+ if (beam == 0)
+ print (sky, " ", line, >> dat)
+ else if (beam == 1)
+ print (obj, " ", line, >> dat)
+ else if (beam == 2)
+ print (arc, " ", line, >> dat)
+ }
+ } else if (type == "objnosky") { # Object spectrum
+ htype = "object"
+ mk1dspec (obj, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=1000., slope=0.,
+ temperature=7000., lines="", nlines=50, peak=-0.5,
+ profile="gaussian", gfwhm=24, seed=2, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0.8, slope=0.,
+ temperature=0., lines="mkexamples$henear2.dat",
+ profile="gaussian", gfwhm=14, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF) {
+ if (beam == 1)
+ print (obj, " ", line, >> dat)
+ else if (beam == 2)
+ print (arc, " ", line, >> dat)
+ }
+ } else if (type == "sky") { # Sky only
+ htype = "object"
+ mk1dspec (sky, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=1000., slope=0.,
+ temperature=5800., lines="", nlines=20, peak=1.,
+ profile="gaussian", gfwhm=12, seed=1, comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF)
+ print (sky, " ", line, >> dat)
+ } else if (type == "flat") { # Flat field
+ htype = "flat"
+ mk1dspec (obj, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=10000., slope=0.,
+ temperature=8000., lines="", nlines=0, comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF)
+ print (obj, " ", line, >> dat)
+ } else if (type == "henear") { # HE-NE-AR
+ htype = "comp"
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0.8, slope=0.,
+ temperature=0., lines="mkexamples$henear2.dat",
+ profile="gaussian", gfwhm=14, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF)
+ print (arc, " ", line, >> dat)
+ } else if (type == "ehenear") { # HE-NE-AR Even fibers
+ htype = "comp"
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0.8, slope=0.,
+ temperature=0., lines="mkexamples$henear2.dat",
+ profile="gaussian", gfwhm=14, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF) {
+ if (mod (ap, 2) == 0) {
+ print (arc, " ", line, >> dat)
+ }
+ }
+ } else if (type == "ohenear") { # HE-NE-AR Odd fibers
+ htype = "comp"
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0.8, slope=0.,
+ temperature=0., lines="mkexamples$henear2.dat",
+ profile="gaussian", gfwhm=14, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF) {
+ if (mod (ap, 2) == 1) {
+ print (arc, " ", line, >> dat)
+ }
+ }
+ } else if (type == "mercury") { # Emission lines
+ htype = "comp"
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, ncols=nlines, naps=1,
+ wstart=wstart, wend=wend, continuum=0., slope=0.,
+ temperature=0., lines="", nlines=30, peak=10000.,
+ profile="gaussian", gfwhm=7, seed=i, comments=no, header="")
+ mk1dspec (arc, output="", ap=1, rv=0., z=no, continuum=20,
+ slope=0., temperature=0., lines="", nlines=0,
+ comments=no, header="")
+ while (fscan (list, ap, beam, line) != EOF) {
+ print (arc, " ", line, >> dat)
+ }
+ }
+ list = ""
+
+ mk2dspec (out, output="", model=dat, ncols=ncols, nlines=nlines,
+ title=title, header=header, comments=no)
+ hedit (out, "imagetyp", htype, update=yes, add=no, delete=no,
+ show=no, verify=no)
+
+
+ mknoise (out, output="", background=0., gain=1., rdnoise=3.,
+ poisson=yes, seed=seed, cosrays="", ncosrays=0, energy=30000.,
+ radius=0.5, ar=1., pa=0., comments=no)
+
+ imdelete (obj, verify=no, >& "dev$null")
+ imdelete (sky, verify=no, >& "dev$null")
+ imdelete (arc, verify=no, >& "dev$null")
+ delete (dat, verify=no, >& "dev$null")
+end
diff --git a/noao/imred/src/fibers/mkfibers.par b/noao/imred/src/fibers/mkfibers.par
new file mode 100644
index 00000000..9ba33353
--- /dev/null
+++ b/noao/imred/src/fibers/mkfibers.par
@@ -0,0 +1,11 @@
+image,f,a,"",,,"Image name"
+type,s,h,"object",object|objnosky|sky|flat|henear|ehenear|ohenear|mercury,,"Object type"
+fibers,f,h,"",,,"Fiber data file"
+title,s,h,"Multifiber artificial image",,,"Title"
+header,f,h,"artdata$stdhdr.dat",,,"Header keyword file"
+ncols,i,h,400,,,"Number of columns"
+nlines,i,h,512,,,"Number of lines"
+wstart,r,h,4210.,,,"Starting wavelength"
+wend,r,h,7362.,,,"Ending wavelength"
+seed,i,h,1,,,"Noise seed"
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/params.par b/noao/imred/src/fibers/params.par
new file mode 100644
index 00000000..bf41e8ec
--- /dev/null
+++ b/noao/imred/src/fibers/params.par
@@ -0,0 +1,75 @@
+line,i,h,INDEF,,,Default dispersion line
+nsum,i,h,10,,,Number of dispersion lines to sum or median
+width,r,h,,,,Width of profiles
+minsep,r,h,1.,,,Minimum separation between spectra
+maxsep,r,h,100000.,,,Maximum separation between spectra
+order,s,h,"increasing","increasing|decreasing",,"Order of apertures"
+extras,b,h,no,,,"Extract sky, sigma, etc.?
+
+-- DEFAULT APERTURE LIMITS --"
+lower,r,h,-3.,,,"Lower aperture limit relative to center"
+upper,r,h,3.,,,"Upper aperture limit relative to center
+
+-- AUTOMATIC APERTURE RESIZING PARAMETERS --"
+ylevel,r,h,0.05,,,"Fraction of peak or intensity for resizing"
+peak,b,h,yes,,,"Is ylevel a fraction of the peak?"
+bkg,b,h,yes,,,"Subtract background for resizing?"
+avglimits,b,h,no,,,"Average limits over all apertures?
+
+-- TRACE PARAMETERS --"
+t_step,i,h,10,,,"Tracing step"
+t_function,s,h,"spline3","chebyshev|legendre|spline1|spline3",,"Trace fitting function"
+t_order,i,h,2,,,"Trace fitting function order"
+t_niterate,i,h,1,0,,"Trace rejection iterations"
+t_low,r,h,3.,0.,,"Trace lower rejection sigma"
+t_high,r,h,3.,0.,,"Trace upper rejection sigma
+
+-- SCATTERED LIGHT PARAMETERS --"
+buffer,r,h,1.,0.,,Buffer distance from apertures
+apscat1,pset,h,"",,,Fitting parameters across the dispersion
+apscat2,pset,h,"",,,"Fitting parameters along the dispersion
+
+-- APERTURE EXTRACTION PARAMETERS --"
+weights,s,h,"none","none|variance",,Extraction weights (none|variance)
+pfit,s,h,"fit1d","fit1d|fit2d",,Profile fitting algorithm (fit1d|fit2d)
+readnoise,s,h,0.,,,Read out noise sigma (photons)
+gain,s,h,1.,,,Photon gain (photons/data number)
+lsigma,r,h,3.,,,Lower rejection threshold
+usigma,r,h,3.,,,Upper rejection threshold
+nsubaps,i,h,1,1,,"Number of subapertures
+
+-- FLAT FIELD FUNCTION FITTING PARAMETERS --"
+f_interactive,b,h,yes,,,"Fit flat field interactively?"
+f_function,s,h,"spline3",spline3|legendre|chebyshev|spline1,,"Fitting function"
+f_order,i,h,20,1,,"Fitting function order
+
+-- ARC DISPERSION FUNCTION PARAMETERS --"
+threshold,r,h,10.,0.,,"Minimum line contrast threshold"
+coordlist,f,h,linelist$idhenear.dat,,,"Line list"
+match,r,h,-3.,,,"Line list matching limit in Angstroms"
+fwidth,r,h,4.,,,"Arc line widths in pixels"
+cradius,r,h,10.,,,Centering radius in pixels
+i_function,s,h,"chebyshev","legendre|chebyshev|spline1|spline3",,"Coordinate function"
+i_order,i,h,3,1,,"Order of dispersion function"
+i_niterate,i,h,2,0,,"Rejection iterations"
+i_low,r,h,3.,0.,,"Lower rejection sigma"
+i_high,r,h,3.,0.,,"Upper rejection sigma"
+refit,b,h,yes,,,"Refit coordinate function when reidentifying?"
+addfeatures,b,h,no,,,"Add features when reidentifying?
+
+-- AUTOMATIC ARC ASSIGNMENT PARAMETERS --"
+select,s,h,"interp",,,"Selection method for reference spectra"
+sort,s,h,"jd",,,"Sort key"
+group,s,h,"ljd",,,"Group key"
+time,b,h,no,,,"Is sort key a time?"
+timewrap,r,h,17.,0.,24.,"Time wrap point for time sorting
+
+-- DISPERSION CORRECTION PARAMETERS --"
+linearize,b,h,yes,,,Linearize (interpolate) spectra?
+log,b,h,no,,,"Logarithmic wavelength scale?"
+flux,b,h,yes,,,"Conserve flux?
+
+-- SKY SUBTRACTION PARAMETERS --"
+combine,s,h,"average","average|median",,Type of combine operation
+reject,s,h,"avsigclip","none|minmax|avsigclip",,"Sky rejection option"
+scale,s,h,"none","none|mode|median|mean",,"Sky scaling option"
diff --git a/noao/imred/src/fibers/proc.cl b/noao/imred/src/fibers/proc.cl
new file mode 100644
index 00000000..a55039d8
--- /dev/null
+++ b/noao/imred/src/fibers/proc.cl
@@ -0,0 +1,707 @@
+# PROC -- Process spectra from 2D to wavelength calibrated 1D.
+# This program combines the operations of extraction, flat fielding,
+# fiber throughput correction, dispersion correction, and sky subtraction
+# in as simple and noninteractive a way as possible.  Certain assumptions
+# are made about the data and the output. A blank sky image, called a
+# sky flat, may be used to determine the instrument throughput. The data
+# must all share the same position on the 2D image and the same
+# dispersion solution apart from small instrumental changes which can be
+# tracked automatically.
+
+procedure proc (objects, apref, flat, throughput, arcs1, arcs2, arcreplace,
+ arctable, fibers, apidtable, crval, cdelt, objaps, skyaps, arcaps,
+ objbeams, skybeams, arcbeams, scattered, fitflat, recenter, edit,
+ trace, arcap, clean, dispcor, savearcs, skyalign, skysubtract,
+ skyedit, saveskys, splot, redo, update, batch, listonly)
+
+string objects {prompt="List of object spectra"}
+
+file apref {prompt="Aperture reference spectrum"}
+file flat {prompt="Flat field spectrum"}
+file throughput {prompt="Throughput file or image (optional)"}
+string arcs1 {prompt="List of arc spectra"}
+string arcs2 {prompt="List of shift arc spectra"}
+file arcreplace {prompt="Special aperture replacements"}
+file arctable {prompt="Arc assignment table (optional)\n"}
+
+int fibers {prompt="Number of fibers"}
+file apidtable {prompt="Aperture identifications"}
+string crval = "INDEF" {prompt="Approximate wavelength"}
+string cdelt = "INDEF" {prompt="Approximate dispersion"}
+string objaps {prompt="Object apertures"}
+string skyaps {prompt="Sky apertures"}
+string arcaps {prompt="Arc apertures"}
+string objbeams {prompt="Object beam numbers"}
+string skybeams {prompt="Sky beam numbers"}
+string arcbeams {prompt="Arc beam numbers\n"}
+
+bool scattered {prompt="Subtract scattered light?"}
+bool fitflat {prompt="Fit and ratio flat field spectrum?"}
+bool recenter {prompt="Recenter object apertures?"}
+bool edit {prompt="Edit/review object apertures?"}
+bool trace {prompt="Trace object spectra?"}
+bool arcap {prompt="Use object apertures for arcs?"}
+bool clean {prompt="Detect and replace bad pixels?"}
+bool dispcor {prompt="Dispersion correct spectra?"}
+bool savearcs {prompt="Save internal arcs?"}
+bool skyalign {prompt="Align sky lines?"}
+bool skysubtract {prompt="Subtract sky?"}
+bool skyedit {prompt="Edit the sky spectra?"}
+bool saveskys {prompt="Save sky spectra?"}
+bool splot {prompt="Plot the final spectrum?"}
+bool redo {prompt="Redo operations if previously done?"}
+bool update {prompt="Update spectra if cal data changes?"}
+bool batch {prompt="Extract objects in batch?"}
+bool listonly {prompt="List steps but don't process?\n"}
+
+real datamax = INDEF {prompt="Max data value / cosmic ray threshold"}
+
+string ansskyedit = "yes" {prompt="Edit the sky spectra?", mode="q"}
+bool newaps, newresp, newdisp, newarcs, dobatch
+
+string anssplot = "yes" {prompt="Splot spectrum?", mode="q",
+ enum="no|yes|NO|YES"}
+
+string extn = ".ms" {prompt="Extraction extension"}
+struct *fd1, *fd2, *fd3
+
+begin
+ string imtype, mstype
+ string arcref1, arcref2, spec, arc, align=""
+ string arcref1ms, arcref2ms, specms, arcms, response
+ string objs, temp, temp1, done
+ string str1, str2, str3, str4, arcrefs, log1, log2
+ bool scat, reextract, extract, disp, disperr, sky, log
+ bool skyedit1, skyedit2, splot1, splot2
+ int i, j, n, nspec
+ struct err
+
+ # Call a separate task to do the listing to minimize the size of
+	# this script and improve its readability.
+
+ dobatch = no
+ if (listonly) {
+ listonly (objects, apidtable, apref, flat, throughput, arcs1, arcs2,
+ scattered, dispcor, skysubtract, redo, update)
+ bye
+ }
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ mstype = ".ms" // imtype
+ n = strlen (imtype)
+
+ # Temporary files used repeatedly in this script. Under some
+ # abort circumstances these files may be left behind.
+
+ objs = mktemp ("tmp$iraf")
+ temp = mktemp ("tmp$iraf")
+ temp1 = mktemp ("tmp$iraf")
+ done = mktemp ("tmp$iraf")
+
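+	# Derive the extraction extension from the tail of the aperture ID
+	# table name; if no table is given the default ".ms" is used.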
+ if (apidtable != "") {
+ i = strlen (apidtable)
+ if (i > n && substr (apidtable, i-n+1, i) == imtype) {
+ apidtable = substr (apidtable, 1, i-n)
+ i = strlen (apidtable)
+ }
+ for (j=1; j<=i && substr(apidtable,j,j)==" "; j+=1);
+ apidtable = substr (apidtable, j, i)
+ }
+ i = strlen (apidtable)
+ if (i == 0)
+ extn = ".ms"
+ else {
+ extn = apidtable
+ while (yes) {
+ i = stridx ("/$]", extn)
+ if (i == 0)
+ break
+ j = strlen (extn)
+ extn = substr (extn, i+1, j)
+ }
+ extn = extn // ".ms"
+ }
+
+ # Get query parameter.
+ getspec (objects, objs)
+ if (arctable == "" || arctable == " ") {
+ if (arcs2 == "" || arcs2 == " ")
+ arcrefs = arcs1
+ else
+ arcrefs = arcs2
+ } else
+ arcrefs = arctable
+ arcref1 = ""
+ arcref2 = ""
+
+	# Rather than always switching on the logfile and verbose flags we
+	# use TEE and set a file to "dev$null" when output is not desired.
+	# A null string signifies no logfile and must be checked for.
+
+ tee.append = yes
+ if (logfile == "")
+ log1 = "dev$null"
+ else
+ log1 = logfile
+ if (verbose)
+ log2 = "STDOUT"
+ else
+ log2 = "dev$null"
+
+	# If the update switch is used, changes in the calibration data
+	# can cause images to be reprocessed (if they are in the object
+	# list).  Possible changes are in the aperture definitions,
+	# response function, dispersion solution, and sensitivity
+	# function.  The newarcs flag ensures the arc image headers are
+	# processed only once, setting the reference spectrum, airmass,
+	# and UT.
+
+ newaps = no
+ newresp = no
+ newdisp = no
+ newarcs = yes
+
+ # Check if there are aperture definitions in the database and
+ # define them if needed. This is usually somewhat interactive.
+ # Delete the database entry to start fresh if we enter this
+ # because of a redo. Set the newaps flag in case an update is
+ # desired.
+
+ i = strlen (apref)
+ if (i > n && substr (apref, i-n+1, i) == imtype)
+ apref = substr (apref, 1, i-n)
+
+ getspec (apref, temp)
+ fd1 = temp
+ if (fscan (fd1, apref) == EOF)
+ error (1, "No aperture reference")
+ fd1 = ""; delete (temp, verify=no)
+
+ # Initialize
+ apscript.saturation = INDEF
+ apscript.references = apref
+ apscript.profiles = ""
+ apscript.apidtable = apidtable
+ apscript.nfind = fibers
+ apscript.clean = clean
+ if (splot) {
+ splot1 = yes
+ splot2 = yes
+ } else {
+ splot1 = no
+ splot2 = no
+ }
+
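+	# Define the reference apertures if they are not already in the
+	# database (or if redoing); otherwise restore the saved definitions.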
+ reextract = redo
+ if (reextract || !access (database // "/ap" // apref // extn)) {
+ if (!access (apref // imtype)) {
+ printf ("Aperture reference spectrum not found - %s%s\n",
+ apref, imtype) | scan (err)
+	    error (1, err // "\nCheck setting of imtype")
+ }
+ print ("Set reference apertures for ", apref) | tee (log1)
+ if (access (database // "/ap" // apref))
+ delete (database // "/ap" // apref, verify=no)
+ if (access (database // "/ap" // apref//extn))
+ delete (database // "/ap" // apref // extn, verify=no)
+ apscript.ansresize = "yes"
+ apscript.ansedit = "yes"
+ apscript.ansfittrace = "yes"
+ apscript (apref, references="", ansfind="YES", ansrecenter="NO",
+ anstrace="YES", ansextract="NO")
+ newaps = yes
+ copy (database//"/ap"//apref, database//"/ap"//apref//extn,
+ verbose=no)
+ } else {
+ if (access (database // "/ap" // apref))
+ delete (database // "/ap" // apref, verify=no)
+ copy (database//"/ap"//apref//extn, database//"/ap"//apref,
+ verbose=no)
+ }
+
+ if (recenter)
+ apscript.ansrecenter = "YES"
+ else
+ apscript.ansrecenter = "NO"
+ apscript.ansresize = "NO"
+ if (edit)
+ apscript.ansedit = "yes"
+ else
+ apscript.ansedit = "NO"
+ if (trace)
+ apscript.anstrace = "YES"
+ else
+ apscript.anstrace = "NO"
+ if (scattered) {
+ apscript.ansfitscatter = "yes"
+ apscript.ansfitsmooth = "yes"
+ }
+ apscript.ansfittrace = "NO"
+ apscript.ansextract = "YES"
+ apscript.ansreview = "NO"
+ if (skyedit) {
+ skyedit1 = yes
+ skyedit2 = yes
+ } else {
+ skyedit1 = no
+ skyedit2 = no
+ }
+
+	# The next step is to set up the scattered light correction if needed.
+	# We use the flat field image for the interactive setting; if no flat
+	# field is used we use the aperture reference instead.  If these
+	# images have already been scattered light corrected we assume the
+	# scattered light function parameters are correctly set.
+
+ i = strlen (flat)
+ if (i > n && substr (flat, i-n+1, i) == imtype)
+ flat = substr (flat, 1, i-n)
+
+ if (flat != "")
+ spec = flat
+ else
+ spec = apref
+
+ getspec (spec, temp)
+ fd1 = temp
+ if (fscan (fd1, spec) == EOF)
+ error (1, "No flat field")
+ fd1 = ""; delete (temp, verify=no)
+
+ scat = no
+ if (scattered) {
+ if (redo && access (spec//"noscat"//imtype)) {
+ imdelete (spec, verify=no)
+ imrename (spec//"noscat", spec)
+ }
+ hselect (spec, "apscatte", yes) | scan (str1)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (scat) {
+ print ("Subtract scattered light from ", spec) | tee (log1)
+ #apscript.ansfitscatter = "yes"
+ #apscript.ansfitsmooth = "yes"
+ imrename (spec, spec//"noscat")
+ apscript (spec//"noscat", output=spec, ansextract="NO",
+ ansscat="YES", anssmooth="YES")
+ #apscript.ansfitscatter = "NO"
+ #apscript.ansfitsmooth = "NO"
+ }
+
+ # The next step is to process the flat field image which is used
+ # as a flat field and a throughput correction.
+
+ spec = throughput
+ i = strlen (spec)
+ if (i > n && substr (spec, i-n+1, i) == imtype)
+ spec = substr (spec, 1, i-n)
+ specms = spec // mstype
+
+ if (spec != "" && access (spec//imtype)) {
+ getspec (spec, temp)
+ fd1 = temp
+ if (fscan (fd1, spec) == EOF)
+ error (1, "No flat field")
+ fd1 = ""; delete (temp, verify=no)
+
+ scat = no
+ if (scattered) {
+ if (redo && access (spec//"noscat"//imtype)) {
+ imdelete (spec, verify=no)
+ imrename (spec//"noscat", spec)
+ }
+ hselect (spec, "apscatte", yes) | scan (str1)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (scat) {
+ print ("Subtract scattered light from ", spec) | tee (log1)
+ imrename (spec, spec//"noscat")
+ apscript (spec//"noscat", output=spec, ansextract="NO",
+ ansscat="YES", anssmooth="YES")
+ }
+ }
+
+ response = ""
+ if (flat != "" || spec != "") {
+ if (extn == ".ms")
+ response = flat // spec // "norm.ms"
+ else
+ response = flat // spec // extn
+ reextract = redo || (update && newaps)
+ if (reextract || !access (response // imtype) || (redo && scat)) {
+ print ("Create response function ", response) | tee (log1)
+
+ if (access (response // imtype))
+ imdelete (response, verify=no)
+ if (access (flat //mstype))
+ imdelete (flat//mstype, verify=no)
+ if (access (specms))
+ imdelete (specms, verify=no)
+
+ fibresponse (flat, spec, apref, response, recenter=recenter,
+ edit=edit, trace=trace, clean=clean,
+ fitflat=fitflat, interactive=params.f_interactive,
+ function=params.f_function, order=params.f_order)
+
+ newresp = yes
+ }
+ }
+
+ # If not dispersion correcting we can go directly to extracting
+ # the object spectra. The reference arcs are the first on
+ # the arc lists. The processing of the reference arcs is done
+ # by the task ARCREFS.
+
+ if (dispcor) {
+ getspec (arcs1, temp)
+ fd1 = temp
+ if (fscan (fd1, arcref1) == EOF)
+ error (1, "No reference arcs")
+ fd1 = ""; delete (temp, verify=no)
+ if (!access (arcref1 // imtype)) {
+ printf ("Arc reference spectrum not found - %s%s\n",
+ arcref1, imtype) | scan (err)
+	    error (1, err // "\nCheck setting of imtype")
+ }
+ arcref1ms = arcref1 // extn
+ reextract = redo || (update && newaps)
+ if (reextract && access (arcref1ms//imtype))
+ imdelete (arcref1ms, verify=no)
+
+ getspec (arcs2, temp)
+ fd1 = temp
+ if (fscan (fd1, arcref2) == EOF)
+ arcref2 = ""
+ else {
+ if (!access (arcref2 // imtype)) {
+ printf ("Arc reference spectrum not found - %s%s\n",
+ arcref2, imtype) | scan (err)
+		error (1, err // "\nCheck setting of imtype")
+ }
+ arcref2ms = arcref2 // extn
+ if (reextract && access (arcref2ms//imtype))
+ imdelete (arcref2ms, verify=no)
+ }
+ fd1 = ""; delete (temp, verify=no)
+
+ arcrefs (arcref1, arcref2, extn, arcreplace, apidtable, response,
+ crval, cdelt, done, log1, log2)
+
+ # Define alignment if needed.
+ if (skyalign) {
+ align = "align" // extn // imtype
+ if (reextract)
+ imdelete (align, verify=no, >& "dev$null")
+ }
+ }
+
+ # Now we are ready to process the object spectra.
+
+ reextract = redo || (update && (newaps || newresp || newdisp))
+ fd1 = objs
+ while (fscan (fd1, spec) != EOF) {
+ # Check if previously done; i.e. arc.
+ if (access (done)) {
+ fd2 = done
+ while (fscan (fd2, specms) != EOF)
+ if (spec == specms)
+ break
+ if (spec == specms)
+ next
+ fd2 = ""
+ }
+ if (!access (spec // imtype)) {
+ printf ("Object spectrum not found - %s%s\n",
+ spec, imtype) | tee (log1)
+	    print ("Check setting of imtype")
+ }
+ specms = spec // mstype
+
+ # Determine required operations from the flags and image header.
+ scat = no
+ extract = no
+ disp = no
+ sky = no
+ if (scattered) {
+ if (redo && access (spec//"noscat"//imtype)) {
+ imdelete (spec, verify=no)
+ imrename (spec//"noscat", spec)
+ }
+ hselect (spec, "apscatte", yes) | scan (str1)
+ if (nscan() == 0)
+ scat = yes
+ }
+ if (reextract || !access (specms) || (redo && scat))
+ extract = yes
+ else {
+ hselect (specms, "dclog1", yes) | scan (str1)
+ if (nscan () == 1) {
+ extract = update && newdisp
+ if (update && !newdisp)
+ # We really should check if REFSPEC will assign
+ # different reference spectra.
+ ;
+ } else
+ disp = dispcor
+
+ hselect (specms, "skysub", yes) | scan (str1)
+ if (nscan() == 0)
+ sky = skysubtract
+ }
+
+ if (extract) {
+ disp = dispcor
+ sky = skysubtract
+ }
+
+ # If fully processed go to the next object.
+ if (!extract && !disp && !sky)
+ next
+
+	# If not interactive and the batch flag is set, submit the rest to batch.
+ if (batch && !skyedit1 && !skyedit2 && !splot1 && !splot2 &&
+ apscript.ansedit == "NO" && (!skyalign || access (align))) {
+ fd1 = ""; delete (objs, verify=no)
+ apscript.ansfitscatter = "NO"
+ apscript.ansfitsmooth = "NO"
+
+ flprcache
+ batch.objects = objects
+ batch.datamax = datamax
+ batch.response = response
+ batch.arcs1 = arcs1
+ batch.arcs2 = arcs2
+ batch.arcref1 = arcref1
+ batch.arcref2 = arcref2
+ batch.arcreplace = arcreplace
+ batch.apidtable = apidtable
+ batch.arcrefs = arcrefs
+ batch.extn = extn
+ batch.objaps = objaps
+ batch.skyaps = skyaps
+ batch.arcaps = arcaps
+ batch.objbeams = objbeams
+ batch.skybeams = skybeams
+ batch.arcbeams = arcbeams
+ batch.done = done
+ batch.logfile = log1
+ batch.redo = reextract
+ batch.update = update
+ batch.scattered = scattered
+ batch.arcap = arcap
+ batch.dispcor = dispcor
+ batch.savearcs = savearcs
+ batch.skyalign = skyalign
+ batch.skysubtract = skysubtract
+ batch.saveskys = saveskys
+ batch.newaps = newaps
+ batch.newresp = newresp
+ batch.newdisp = newdisp
+ batch.newarcs = newarcs
+ dobatch = yes
+ return
+ }
+
+ # Process the spectrum in foreground.
+ if (extract) {
+ if (access (specms))
+ imdelete (specms, verify=no)
+
+ if (scat) {
+ print ("Subtract scattered light from ", spec) | tee (log1)
+ imrename (spec, spec//"noscat")
+ apscript (spec//"noscat", output=spec, ansextract="NO",
+ ansscat="YES", anssmooth="YES")
+ }
+
+ print ("Extract object spectrum ", spec) | tee (log1)
+ hselect (spec, "date-obs,ut,exptime", yes, > temp1)
+ hselect (spec, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (spec, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (spec, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ apscript (spec, nsubaps=params.nsubaps, saturation=datamax)
+ sapertures (specms, apertures="", apidtable=apidtable,
+ wcsreset=no, verbose=no, beam=INDEF, dtype=INDEF, w1=INDEF,
+ dw=INDEF, z=INDEF, aplow=INDEF, aphigh=INDEF, title=INDEF)
+ if (response != "") {
+ if (params.nsubaps == 1)
+ sarith (specms, "/", response, specms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=no,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ else {
+ blkrep (response, temp, 1, params.nsubaps)
+ sarith (specms, "/", temp, specms, w1=INDEF,
+ w2=INDEF, apertures="", bands="", beams="",
+ apmodulus=0, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0,
+ clobber=yes, merge=no, errval=0, verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ }
+
+ disperr = no
+ if (disp) {
+ # Fix arc headers if necessary.
+ if (newarcs) {
+ getspec (arcs1, temp)
+ fd2 = temp
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory, date="date-obs",
+ time="ut", exposure="exptime", jd="jd", hjd="",
+ ljd="ljd", utdate=yes, uttime=yes, listonly=no,
+ >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no, update=yes,
+ override=yes, >> log1)
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "henear", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ getspec (arcs2, temp)
+ fd2 = temp
+ while (fscan (fd2, arc) != EOF) {
+ hselect (arc, "date-obs,ut,exptime", yes, > temp1)
+ hselect (arc, "ra,dec,epoch,st", yes, >> temp1)
+ fd3 = temp1
+ if (fscan (fd3, str1, str2, str3) == 3) {
+ setjd (arc, observatory=observatory,
+ date="date-obs", time="ut", exposure="exptime",
+ jd="jd", hjd="", ljd="ljd", utdate=yes,
+ uttime=yes, listonly=no, >> log1)
+ if (fscan (fd3, str1, str2, str3, str4) == 4)
+ setairmass (arc, intype="beginning",
+ outtype="effective", exposure="exptime",
+ observatory=observatory, show=no,
+ update=yes, override=yes, >> log1)
+
+ }
+ fd3 = ""; delete (temp1, verify=no)
+ hedit (arc, "refspec1", arc, add=yes, verify=no,
+ show=no, update=yes)
+ hedit (arc, "arctype", "shift", add=yes, verify=no,
+ show=no, update=yes)
+ }
+ fd2 = ""; delete (temp, verify=no)
+ newarcs = no
+ }
+
+ print ("Assign arc spectra for ", spec) | tee (log1)
+ refspectra (spec, references=arcrefs,
+ apertures="", refaps="", ignoreaps=no,
+ select=params.select, sort=params.sort,
+ group=params.group, time=params.time,
+ timewrap=params.timewrap, override=yes, confirm=no,
+ assign=yes, logfiles="STDOUT", verbose=no) |
+ tee (log1, > log2)
+
+ doarcs (spec, response, arcref1, arcref2, extn, arcreplace,
+ apidtable, arcaps, arcbeams, savearcs, reextract, arcap,
+ log1, no, done)
+
+ hselect (specms, "refspec1", yes, > temp)
+ fd2 = temp
+ i = fscan (fd2, arc)
+ fd2 = ""; delete (temp, verify=no)
+ if (i < 1) {
+ print ("No arc reference assigned for ", spec) | tee (log1)
+ disperr = yes
+ } else {
+ if (skyalign)
+ doalign (spec, specms, align, arcref1ms, log1, no)
+ print ("Dispersion correct ", spec) | tee (log1)
+ dispcor (specms, "", linearize=params.linearize,
+ database=database, table=arcref1ms, w1=INDEF,
+ w2=INDEF, dw=INDEF, nw=INDEF, log=params.log,
+ flux=params.flux, samedisp=no, global=no,
+ ignoreaps=no, confirm=no, listonly=no,
+ verbose=verbose, logfile=logfile)
+ if (params.nsubaps > 1) {
+ imrename (specms, temp, verbose=no)
+ scopy (temp, specms, w1=INDEF, w2=INDEF,
+ apertures="1-999", bands="", beams="", apmodulus=0,
+ offset=0, format="multispec", clobber=no, merge=no,
+ renumber=no, verbose=no)
+ blkavg (temp, temp, 1, params.nsubaps, option="sum")
+ imcopy (temp, specms//"[*,*]", verbose=no)
+ imdelete (temp, verify=no)
+ }
+ }
+ }
+
+ if (sky && !disperr) {
+ str1 = ""
+ if (skyaps != "")
+ str1 = "skyaps=" // skyaps
+ if (skybeams != "")
+ str1 = str1 // " skybeams=" // skybeams
+ print ("Sky subtract ", spec, ": ", str1) | tee (log1)
+ if (skyedit1) {
+ str1 = substr (ansskyedit, 1, 1)
+ if (str1 == "N" || str1 == "Y")
+ skyedit1 = no
+ if (str1 == "n" || str1 == "N")
+ skyedit2 = no
+ else
+ skyedit2 = yes
+ }
+ skysub.reject = params.reject
+ skysub (specms, output="", objaps=objaps, skyaps=skyaps,
+ objbeams=objbeams, skybeams=skybeams, skyedit=skyedit2,
+ combine=params.combine, scale=params.scale,
+ saveskys=saveskys, logfile=logfile)
+ params.reject = skysub.reject
+ hedit (specms, "skysub", yes, add=yes, show=no, verify=no,
+ update=yes)
+ }
+
+ if (!disperr && (extract || disp || sky)) {
+ if (splot1) {
+ print (specms, ":")
+ str1 = anssplot
+ if (str1 == "NO" || str1 == "YES")
+ splot1 = no
+ if (str1 == "no" || str1 == "NO")
+ splot2 = no
+ else
+ splot2 = yes
+ }
+ if (splot2)
+ splot (specms)
+ }
+
+ print (spec, >> done)
+ }
+ fd1 = ""; delete (objs, verify=no)
+
+ if (access (done))
+ delete (done, verify=no)
+end
diff --git a/noao/imred/src/fibers/proc.par b/noao/imred/src/fibers/proc.par
new file mode 100644
index 00000000..1b3961ea
--- /dev/null
+++ b/noao/imred/src/fibers/proc.par
@@ -0,0 +1,52 @@
+objects,s,a,,,,"List of object spectra"
+apref,f,a,"",,,"Aperture reference spectrum"
+flat,f,a,"",,,"Flat field spectrum"
+throughput,f,a,"",,,"Throughput file or image (optional)"
+arcs1,s,a,,,,"List of arc spectra"
+arcs2,s,a,,,,"List of shift arc spectra"
+arcreplace,f,a,"",,,"Special aperture replacements"
+arctable,f,a,"",,,"Arc assignment table (optional)
+"
+fibers,i,a,,,,"Number of fibers"
+apidtable,f,a,"",,,"Aperture identifications"
+crval,s,a,INDEF,,,"Approximate wavelength"
+cdelt,s,a,INDEF,,,"Approximate dispersion"
+objaps,s,a,,,,"Object apertures"
+skyaps,s,a,,,,"Sky apertures"
+arcaps,s,a,,,,"Arc apertures"
+objbeams,s,a,,,,"Object beam numbers"
+skybeams,s,a,,,,"Sky beam numbers"
+arcbeams,s,a,,,,"Arc beam numbers
+"
+scattered,b,a,,,,"Subtract scattered light?"
+fitflat,b,a,,,,"Fit and ratio flat field spectrum?"
+recenter,b,a,,,,"Recenter object apertures?"
+edit,b,a,,,,"Edit/review object apertures?"
+trace,b,a,,,,"Trace object spectra?"
+arcap,b,a,,,,"Use object apertures for arcs?"
+clean,b,a,,,,"Detect and replace bad pixels?"
+dispcor,b,a,,,,"Dispersion correct spectra?"
+savearcs,b,a,,,,"Save internal arcs?"
+skyalign,b,a,,,,"Align sky lines?"
+skysubtract,b,a,,,,"Subtract sky?"
+skyedit,b,a,,,,"Edit the sky spectra?"
+saveskys,b,a,,,,"Save sky spectra?"
+splot,b,a,,,,"Plot the final spectrum?"
+redo,b,a,,,,"Redo operations if previously done?"
+update,b,a,,,,"Update spectra if cal data changes?"
+batch,b,a,,,,"Extract objects in batch?"
+listonly,b,a,,,,"List steps but don\'t process?
+"
+datamax,r,h,INDEF,,,"Max data value / cosmic ray threshold"
+ansskyedit,s,q,"yes",,,"Edit the sky spectra?"
+newaps,b,h,,,,
+newresp,b,h,,,,
+newdisp,b,h,,,,
+newarcs,b,h,,,,
+dobatch,b,h,,,,
+anssplot,s,q,"yes",no|yes|NO|YES,,"Splot spectrum?"
+extn,s,h,".ms",,,"Extraction extension"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/skysub.cl b/noao/imred/src/fibers/skysub.cl
new file mode 100644
index 00000000..b7522591
--- /dev/null
+++ b/noao/imred/src/fibers/skysub.cl
@@ -0,0 +1,145 @@
+# SKYSUB -- Sky subtract fiber spectra.
+# Subtract the selected sky apertures from the selected object apertures.
+# The object apertures may include the sky apertures if desired.
+# The sky apertures are combined into a single master sky which is subtracted
+# from each selected object aperture.  The subtracted sky may be
+# saved in an image with the prefix "sky" and the same output name. Note
+# that existing output images are clobbered.
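+#
+# A hypothetical invocation from the cl (the image name and aperture list
+# below are illustrative only):
+#
+#	cl> skysub ("obj012.ms", skyaps="1,13,25", skyedit=no, saveskys=yes)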
+
+procedure skysub (input)
+
+string input = "" {prompt="Input spectra to sky subtract"}
+
+string output = "" {prompt="Output sky subtracted spectra"}
+string objaps = "" {prompt="Object apertures"}
+string skyaps = "" {prompt="Sky apertures"}
+string objbeams = "" {prompt="Object beam numbers"}
+string skybeams = "" {prompt="Sky beam numbers"}
+bool skyedit = yes {prompt="Edit the sky spectra?"}
+string combine = "average" {prompt="Combining option",
+ enum="average|median|sum"}
+string reject = "avsigclip" {prompt="Sky rejection option",
+ enum="none|minmax|avsigclip"}
+string scale = "none" {prompt="Sky scaling option",
+ enum="none|mode|median|mean"}
+bool saveskys = yes {prompt="Save sky spectra?"}
+file logfile = "" {prompt="Logfile"}
+
+struct *fd1
+struct *fd2
+struct *fd3
+
+begin
+ string imtype, mstype
+ string in, out, out1, sky, log, aps, str, str2
+ file temp1, temp2, temp3, temp4
+ int i, j, n
+
+ imtype = "." // envget ("imtype")
+ i = stridx (",", imtype)
+ if (i > 0)
+ imtype = substr (imtype, 1, i-1)
+ n = strlen (imtype)
+
+ temp1 = mktemp ("tmp$iraf")
+ temp2 = mktemp ("tmp$iraf")
+ temp3 = mktemp ("tmp$iraf")
+ temp4 = mktemp ("tmp$iraf")
+
+ if (logfile == "")
+ log = "dev$null"
+ else
+ log = logfile
+
+ sections (input, option="fullname", > temp1)
+ sections (output, option="fullname", > temp2)
+ fd1 = temp1
+ fd2 = temp2
+ while (fscan (fd1, in) != EOF) {
+ i = strlen (in)
+ if (i > n && substr (in, i-n+1, i) == imtype)
+ in = substr (in, 1, i-n)
+ if (fscan (fd2, out) < 1)
+ out = in
+ out1 = out
+ i = strlen (out1)
+ if (i > 3 && substr (out1, i-2, i) == ".ms")
+ out1 = substr (out1, 1, i-3)
+
+ aps = skyaps
+ sky = "sky" // out1
+ if (access (sky // imtype))
+ imdelete (sky, verify=no)
+ if (skyedit) {
+ scopy (in, sky, w1=INDEF, w2=INDEF, apertures=aps, bands="1",
+ beams=skybeams, apmodulus=0, offset=0, clobber=yes,
+ format="multispec", merge=no, renumber=no,
+ verbose=yes, >> "dev$null")
+ specplot (sky, apertures="", bands="1", autolayout=no,
+ autoscale=yes, fraction=1., scale=1., offset=0.,
+ step=0., ptype="1", labels="user", ulabels="",
+ sysid=yes, yscale=yes, xlpos=1.02, ylpos=0.,
+ title="Edit sky spectra from "//in, xlabel="",
+ ylabel="", xmin=INDEF, xmax=INDEF, ymin=INDEF,
+ ymax=INDEF, logfile=temp4, graphics="stdgraph")
+ imdelete (sky, verify=no)
+ system.match (sky, temp4, stop=no) |
+ fields (fields="2", lines="1-9999") |
+ system.sort (column=0, ignore=yes, numeric=no,
+ reverse_sort=no) |
+ lists.unique (> temp3)
+ delete (temp4, verify=no)
+ aps = "@" // temp4
+ fd3 = temp3
+ while (fscan (fd3, str) != EOF) {
+ i = stridx ("(", str)
+ j = stridx (")", str)
+ if (i > 0 && j > i)
+ str = substr(str,i+1,j-1)
+ else
+ str = ""
+ print (str, >> temp4)
+ }
+ fd3 = ""; delete (temp3, verify=no)
+
+ reject.p_mode="q"
+ str = reject
+ reject.p_mode="h"
+ }
+
+ if (skybeams == "") {
+ scombine (in, sky, noutput="", logfile=logfile,
+ apertures=aps, group="all", combine=combine,
+ reject=reject, first=yes, scale=scale, zero="none",
+ weight="none", sample="", lthreshold=INDEF,
+ hthreshold=INDEF, nlow=1, nhigh=1, nkeep=1, mclip=yes,
+ lsigma=3., hsigma=2., rdnoise="0.", gain="1.", snoise="0.",
+ sigscale=0., pclip=-0.5, grow=0, blank=0.)
+ } else {
+ temp3 = mktemp ("sky")
+ scopy (in, sky, w1=INDEF, w2=INDEF, apertures=aps, bands="",
+ beams=skybeams, apmodulus=0, offset=0, clobber=yes,
+ format="multispec", merge=no, renumber=no,
+ verbose=yes, >> log)
+ scombine (sky, temp3, noutput="", logfile=logfile,
+ apertures=aps, group="all", combine=combine,
+ reject=reject, first=yes, scale=scale, zero="none",
+ weight="none", sample="", lthreshold=INDEF,
+ hthreshold=INDEF, nlow=1, nhigh=1, nkeep=1, mclip=yes,
+ lsigma=3., hsigma=2., rdnoise="0.", gain="1.", snoise="0.",
+ sigscale=0., pclip=-0.5, grow=0, blank=0.)
+ flpr
+ imdelete (sky, verify=no)
+ imrename (temp3, sky, verbose=yes, >> log)
+ }
+ sarith (in, "-", sky, out, w1=INDEF, w2=INDEF, apertures=objaps,
+ bands="", beams=objbeams, reverse=no, ignoreaps=yes,
+ format="multispec", renumber=no, offset=0, clobber=yes,
+ merge=no, errval=0., verbose=yes, >> log)
+ if (!saveskys)
+ imdelete (sky, verify=no)
+ }
+ fd1 = ""; delete (temp1, verify=no)
+ fd2 = ""; delete (temp2, verify=no)
+ delete (temp4, verify=no, >>& "dev$null")
+end
diff --git a/noao/imred/src/fibers/skysub.par b/noao/imred/src/fibers/skysub.par
new file mode 100644
index 00000000..bf1f4c2c
--- /dev/null
+++ b/noao/imred/src/fibers/skysub.par
@@ -0,0 +1,16 @@
+input,s,a,"",,,"Input spectra to sky subtract"
+output,s,h,"",,,"Output sky subtracted spectra"
+objaps,s,h,"",,,"Object apertures"
+skyaps,s,h,"",,,"Sky apertures"
+objbeams,s,h,"",,,"Object beam numbers"
+skybeams,s,h,"",,,"Sky beam numbers"
+skyedit,b,h,yes,,,"Edit the sky spectra?"
+combine,s,h,"average",average|median|sum,,"Combining option"
+reject,s,h,"avsigclip",none|minmax|avsigclip,,"Sky rejection option"
+scale,s,h,"none",none|mode|median|mean,,"Sky scaling option"
+saveskys,b,h,yes,,,"Save sky spectra?"
+logfile,f,h,"",,,"Logfile"
+fd1,*struct,h,"",,,
+fd2,*struct,h,"",,,
+fd3,*struct,h,"",,,
+mode,s,h,"ql",,,
diff --git a/noao/imred/src/fibers/temp b/noao/imred/src/fibers/temp
new file mode 100644
index 00000000..2205597e
--- /dev/null
+++ b/noao/imred/src/fibers/temp
@@ -0,0 +1,16 @@
+Prototype Data Manager Keywords
+
+COMMAND
+STATUS
+EOF
+
+LABEL
+
+IMAGEID
+MJD-OBS
+
+FILTER
+XTALK
+BPM
+ZERO
+FLAT
diff --git a/noao/imred/src/temp b/noao/imred/src/temp
new file mode 100644
index 00000000..25086705
--- /dev/null
+++ b/noao/imred/src/temp
@@ -0,0 +1,10 @@
+doslit/sproc.cl
+fibers/batch.cl
+fibers/fibresponse.cl
+fibers/proc.cl
+doecslit/sbatch.cl
+dofoe/batch.cl
+doslit/sbatch.cl
+doecslit/sproc.cl
+dofoe/response.cl
+dofoe/proc.cl
diff --git a/noao/imred/tutor.cl b/noao/imred/tutor.cl
new file mode 100644
index 00000000..52cf6a98
--- /dev/null
+++ b/noao/imred/tutor.cl
@@ -0,0 +1,14 @@
+# TUTOR -- Tutorial Help
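+#
+# A hypothetical example (the topic and package names are illustrative only):
+#
+#	cl> tutor ("overview", package="imred")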
+
+procedure tutor (topic)
+
+string topic {prompt="Tutorial topic"}
+string package="" {prompt="Tutorial package"}
+string tutordb="helpdb" {prompt="Tutorial database"}
+
+begin
+ if ($nargs == 0)
+ help (package//".Tutorial", section="topics", helpdb=tutordb)
+ else
+ help (package//".Tutorial", section=topic, helpdb=tutordb)
+end
diff --git a/noao/imred/vtel/README b/noao/imred/vtel/README
new file mode 100644
index 00000000..8f0be8f3
--- /dev/null
+++ b/noao/imred/vtel/README
@@ -0,0 +1,81 @@
+This is the home directory for the Kitt Peak vacuum telescope
+reduction programs.
+
+README this file
+Revisions revisions file
+asciilook lookup table for ascii values into the pixelfont
+d1900.x calculate number of days since turn of century
+decodeheader.x decode/print vacuum telescope tape header
+destreak.par
+destreak.x destreak 10830 full disk helium grams
+destreak5.cl script for processing 10830 tape containing 5 grams
+destreak5.par
+dicoplot.h header file containing defines for DICOPLOT
+dicoplot.par
+dicoplot.x	program to make Carrington rotation maps on the Dicomed
+doc documentation directory
+ephem.x program to calculate solar ephemeris data
+fitslogr.cl script, make a log file of a fits tape (daily grams)
+fitslogr.par
+getsqib.par
+getsqib.x get the squibby brightness image from a full disk gram
+gryscl.dico greyscale lookup table for use with DICOPLOT
+imfglexr.x Get Line with EXtension Real for use with IMFilt
+imfilt.x convolve an image with gaussian kernel, used in destreak
+imratio.x find the ratio between two images, used in merge
+imtext.x subroutine to load text into an image by overwriting pixels
+lstsq.x least squares fitting subroutine
+makehelium.cl script to process a helium 10830 tape into daily grams (180)
+makehelium.par
+makeimages.cl script to process a magnetogram tape into daily grams (180)
+makeimages.par
+merge.par
+merge.x program to merge daily grams into Carrington rotation maps
+mkpkg make the package
+mrotlogr.cl script, make a log file of a fits tape (Carrington rotations)
+mrotlogr.par
+mscan.par
+mscan.x read vacuum telescope area scan tapes
+numeric.h header file for numeric subroutine
+numeric.x	subroutine to calculate derivatives of latitude and longitude
+ with respect to x and y respectively (used in rmap)
+pimtext.par
+pimtext.x program to put text into images by overwriting pixels
+pixbit.x subroutine that looks up text pixel format in pixelfont
+pixelfont	pixel font for use with pimtext (no lower case, no descenders)
+putsqib.par
+putsqib.x program to put the squibby brightness back in a full disk gram
+quickfit.par
+quickfit.x fit an ellipse to the limb of the sun
+readheader.x read a vacuum telescope header
+readss1.x subroutine to read a type 1 area scan
+readss2.x subroutine to read a type 2 area scan
+readss3.x subroutine to read a type 3 area scan
+readss4.x subroutine to read a type 4 area scan
+readsubswath.x subroutine to read a sub-swath
+readvt.par
+readvt.x read full disk grams from tape
+rmap.par
+rmap.x map full disk grams into daily grams (180x180)
+syndico.x Make dicomed print of daily grams 18 cm across.
+tcopy.par
+tcopy.x tape to tape copy program
+trim.par
+trim.x trim a full disk gram using squibby brightness info
+unwrap.par
+unwrap.x program to remove binary wrap-around from images
+vt.h
+vtblink.cl script to blink images on the IIS to check registration
+vtblink.par
+vtel.cl load the vacuum telescope package
+vtel.hd info about locations of various files
+vtel.men menu for package
+vtel.par
+vtexamine.par
+vtexamine.x program to examine a vacuum telescope tape (tell about record
+ lengths, header info, number of files, etc.)
+writetape.cl script to write five full disk grams to tape
+writetape.par
+writevt.par
+writevt.x program to write a full disk gram to tape in mountain format
+x_vtel.x package parent program
diff --git a/noao/imred/vtel/Revisions b/noao/imred/vtel/Revisions
new file mode 100644
index 00000000..054bb80e
--- /dev/null
+++ b/noao/imred/vtel/Revisions
@@ -0,0 +1,209 @@
+This is the vacuum telescope package revisions file.
+
+mkpkg
+ Added some missing file dependencies and removed unnecessary ones from
+ the mkpkg file. (9/30/99, Davis)
+
+doc/dicoplot.hlp
+doc/readvt.hlp
+doc/unwrap.hlp
+doc/pimtext.hlp
+ Fixed minor formating problems. (4/22/99, Valdes)
+
+=======
+V2.11.1
+=======
+
+May 16, 1989 by Dyer Lytle mods to 'readvt', 'syndico', and 'mscan'
+
+Fixed readvt to work with tape drives over the network [(if (mtfile(...].
+Modified syndico to take advantage of the disk-center info in the image
+header.
+
+Modified mscan to be much faster by taking out the geometrical correction.
+Also simplified it by removing the date/time pimtext call. Also made it
+create only the images needed. Also made it have a short file name option.
+Also made it work on tape drives over the net.
+
+
+June 5, 1988 by Dyer Lytle modification to PUTSQIB
+
+PUTSQIB had code in it for trimming the limb as well as merging the two
+images. I simplified the program to just make the merge. The task TRIM
+can be used to trim the limb, and do a better job of it at that.
+
+September 29, 1987 by Dyer Lytle add SYNDICO to package
+
+Added this new program for making dicomed prints of daily
+grams 18 cm across.
+
+July 17, 1987 by Dyer Lytle fix bug in numeric.x
+
+There was a bug in the way an error flag was being set that made
+the program fail with a 'divide by zero' error on some data sets.
+
+June 8, 1987 by Dyer Lytle Overhaul of the package
+
+Major modifications were made to the code to make it conform to IRAF
+standards. Dynamic memory allocation replaced fixed memory allocation
+in many places. Readvt was modified to accept templates for input
+and output file names. New structures were provided for the vacuum
+telescope header, the tapeio buffer, and to reduce the argument count
+for the subroutine 'numeric'. Vtfix was dropped from the package
+since 'readvt' was modified to check for long records by doing its
+own buffering. Unwrap was updated to a new, more general and powerful
+version. A major bug was found and fixed in 'rmap' which was causing
+the total mapped pixel count to be off by about 20%.
+
+June 10, 1986 by Dyer Lytle Modification of PIMTEXT
+
+Pimtext was modified to allow the user to magnify the text in x and/or y
+and to get the date and/or time from a reference image if desired.
+
+May 21, 1986 by Dyer Lytle Addition of PIMTEXT to package
+
+Pimtext was added to the vacuum telescope package. This program allows
+the user to insert text directly into images. The default action of the
+program is to look up the date and time in the image headers and insert
+this information in the lower left corner of each image. The user can
+modify the parameters to write any text string.
+
+May 15, 1986 by Dyer Lytle Modification to Mscan
+
+Mscan was modified to write the date and time into the images using
+a pixel font. A hidden argument controls this option. The characters
+are written into the image itself to speed up the moviemaking process.
+Various hidden parameters were added to allow the user to specify
+things about the text such as position, pixel value, background fill,
+and background value.
+
+May 7, 1986 by Dyer Lytle Modification to Makeimages and Destreak5
+
+Makeimages and Destreak5 were modified to accept as another argument
+the input scratch disk on which the input files are to be expected.
+
+February 19, 1986 by Dyer Lytle Modification to Fitslogr
+
+Rfits was changed to produce a short header by default instead of
+a long header. I changed fitslogr to force the long header it needs.
+
+February 6, 1986 by Dyer Lytle Modification to Dicoplot
+
+Dicoplot was plotting all of the dates in the input image header
+file. Sometimes, this list includes dates which should appear
+off the plot, before the zero or after the 360 degree marks.
+The modification involved teaching the program to dump these
+extra dates instead of putting them on the plots.
+
+January 30, 1986 by Dyer Lytle Modification to vtfix
+
+Vtfix was originally set up to correct extra long records on
+vacuum telescope tapes.  It looked for record lengths of 10242
+bytes and truncated them to 10240 bytes. Today I found a tape
+with lots of different record lengths all larger than 10240 so
+I modified vtfix to look for records with lengths longer than
+10240 bytes and truncate them to 10240.
+
+January 29, 1986 by Dyer Lytle Modification to makehelium.
+
+Makehelium was modified to automatically delete the absolute
+value image output from RMAP since this image is junk anyway.
+
+January 29, 1986 by Dyer Lytle Bug fix and mods to dicoplot.
+
+Dicoplot had a bug which caused the Ratio (POLARITY) images to
+come out zero. This was corrected. Also some of the constants
+in GREYMAP were changed to increase the contrast in the weights
+image and in the abs. flux image. The greyscale as drawn on the
+images was modified to not have white boxes around each grey level
+and to have the number associated with each grey level printed on the
+plot.
+
+January 28, 1986 by Dyer Lytle Modifications to mscan.
+
+Mscan was using too much memory when processing large images.
+This was causing a lot of page fault errors on VMS. A modification
+was made to mscan to use fixed size subrasters, decreasing the
+memory needs drastically.
+
+January 20, 1986 by Dyer Lytle Modifications to readss4.x.
+
+Readss4, which is a subroutine called by mscan to read type 4
+sector scans was set up to add the average field to each pixel
+of the output image. This was found to be useful only in the
+special case of type 4 intensity scans and was removed.
+"It wasn't a BUG, it was a FEATURE!"
+
+January 20, 1986 by Dyer Lytle Modifications to destreak.x.
+
+Destreak was set up to use a temporary image for data storage
+between the two destreaking passes. The temporary image was
+hardwired into the name "tempim".  This was found to be unacceptable
+since two or more destreaking jobs run at the same time would have
+a collision at "tempim". The temporary image was made into an input
+parameter.
+
+January 20, 1986 by Dyer Lytle Modifications to CL scripts.
+
+The CL scripts makeimages.cl, makehelium.cl, destreak5.cl, and
+writetape.cl were modified to check for the existence of each file
+before it tries to use it. An error message is output if an image
+cannot be accessed.
+
+January 20, 1986 by Dyer Lytle Modification to vtblink.cl
+
+Vtblink was modified so that the command "stat" can be entered to the
+"next image" prompt and the script will list which images are loaded
+into which IIS memory plane.
+
+January 20, 1986 by Dyer Lytle Modification to merge.x
+
+Merge was not set up to handle the differences between the magnetogram
+reduction and the 10830 reduction. Magnetogram data has three(3) images
+per day and 10830 data has two(2) images per day. The extra image for
+magnetogram data is the absolute value image.  Merge was designed to
+expect all three images and to produce four(4) output images. When
+10830 data is input merge should expect two input images per day and
+only produce two output images. This modification was made.
+Also the output images were set up such that the data and absolute
+value images were output without being divided by the weight image.
+This was changed since no information is lost by doing this division
+as long as the weight image is also saved.  Merge was also restructured
+quite a bit; it is still a mess and needs rewriting, but it works.
+
+January 20, 1986 by Dyer Lytle Modification to rmap.x
+
+Rmap was changed to calculate the average field, the average absolute
+field, and the total number of pixels for each gram reduced.
+These parameters are stored in the reduced data image header as
+MEAN_FLD, MEANAFLD, and NUM_PIX.
+
+January 10, 1986 by Dyer Lytle Bug fix in tcopy.
+
+Tcopy was reporting errors incorrectly. The record number identified
+with the error was one less than the actual error record.
+
+January 10, 1986 by Dyer Lytle Modification to decodeheader.x.
+
+Changed the format used by decodeheader to print out the date and time;
+the old format was of variable width depending on the size of the number printed.
+The new format has fixed length fields.
+
+January 9, 1986 by Dyer Lytle Modification to merge.
+
+Merge was modified to expect the images in the textfile 'mergelist' to be in the
+order (data, abs value, weights) instead of (data, weights, abs value).
+
+January 3, 1986 by Dyer Lytle Correction to dicoplot.
+
+Dicoplot had, for some integer expressions, TRUE/FALSE instead of YES/NO.
+This works fine on the UNIX system but was found to fail on VMS.
+
+January 3, 1986 by Dyer Lytle Correction to mscan.
+
+Mscan was not reading type one(1) area scans properly. The error occurred
+in readss1 where a temporary array was being salloced with the wrong length.
+The correction involved replacing "ny" by "2*ny".
+Also, readss1 and readss3 had a rather contrived error recovery mechanism built
+in; I removed this and will add a more general and reliable error procedure
+based on the fset(VALIDATE) call in a later revision.
diff --git a/noao/imred/vtel/asciilook.inc b/noao/imred/vtel/asciilook.inc
new file mode 100644
index 00000000..68974d34
--- /dev/null
+++ b/noao/imred/vtel/asciilook.inc
@@ -0,0 +1,19 @@
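+# Lookup table mapping ASCII character codes into offsets in the pixelfont
+# (see the package README).  Codes outside the supported set all map to
+# entry 449.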
+data (asciilook[i], i=1,7) / 449, 449, 449, 449, 449, 449, 449 /
+data (asciilook[i], i=8,14) / 449, 449, 449, 449, 449, 449, 449 /
+data (asciilook[i], i=15,21) / 449, 449, 449, 449, 449, 449, 449 /
+data (asciilook[i], i=22,28) / 449, 449, 449, 449, 449, 449, 449 /
+data (asciilook[i], i=29,35) / 449, 449, 449, 449, 001, 008, 015 /
+data (asciilook[i], i=36,42) / 022, 029, 036, 043, 050, 057, 064 /
+data (asciilook[i], i=43,49) / 071, 078, 085, 092, 099, 106, 113 /
+data (asciilook[i], i=50,56) / 120, 127, 134, 141, 148, 155, 162 /
+data (asciilook[i], i=57,63) / 169, 176, 183, 190, 197, 204, 211 /
+data (asciilook[i], i=64,70) / 218, 225, 232, 239, 246, 253, 260 /
+data (asciilook[i], i=71,77) / 267, 274, 281, 288, 295, 302, 309 /
+data (asciilook[i], i=78,84) / 316, 323, 330, 337, 344, 351, 358 /
+data (asciilook[i], i=85,91) / 365, 372, 379, 386, 393, 400, 407 /
+data (asciilook[i], i=92,98) / 414, 421, 428, 435, 442, 449, 232 /
+data (asciilook[i], i=99,105) / 239, 246, 253, 260, 267, 274, 281 /
+data (asciilook[i], i=106,112) / 288, 295, 302, 309, 316, 323, 330 /
+data (asciilook[i], i=113,119) / 337, 344, 351, 358, 365, 372, 379 /
+data (asciilook[i], i=120,126) / 386, 393, 400, 407, 449, 449, 449 /
+data (asciilook[i], i=127,128) / 449, 449/
diff --git a/noao/imred/vtel/d1900.x b/noao/imred/vtel/d1900.x
new file mode 100644
index 00000000..7af25a4b
--- /dev/null
+++ b/noao/imred/vtel/d1900.x
@@ -0,0 +1,15 @@
+# D1900 -- Function to return the number of days since the turn of the
+# century.
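+# For example, with this formula d1900 (1, 1, 86) = 365*86 + 85/4 + 0 + 1 =
+# 31412 (integer division), i.e. 1 Jan 1986 is day 31412 of the century.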
+
+int procedure d1900 (month, day, year)
+
+int month, day, year # m,d,y of date
+
+int mac[12]
+data mac/0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334/
+
+begin
+ d1900 = 365 * year + (year - 1) / 4 + mac[month] + day
+ if (month >= 3 && mod(year,4) == 0)
+ d1900 = d1900 + 1
+end
diff --git a/noao/imred/vtel/decodeheader.x b/noao/imred/vtel/decodeheader.x
new file mode 100644
index 00000000..5d54753d
--- /dev/null
+++ b/noao/imred/vtel/decodeheader.x
@@ -0,0 +1,67 @@
+include <mach.h>
+include "vt.h"
+
+# DECODEHEADER -- Unpack date and time, and, if 'verbose' flag is set,
+# display some information to the user.
+
+procedure decodeheader (hbuf, hs, verbose)
+
+pointer hbuf # header data input buffer pointer (short, SZ_VTHDR)
+pointer hs # header data structure
+bool verbose # verbose flag
+
+int hour, minute, second
+int bitupk()
+
+begin
+ # Unpack date, time. The constants below are explained in the
+ # description of the image header and how it is packed. If any
+ # changes are made the following code will have to be rewritten.
+
+ # Month. The month and day are stored in the first header word.
+ VT_HMONTH[hs] = (bitupk (int(Mems[hbuf]), 13, 4)) * 10 +
+ bitupk (int(Mems[hbuf]), 9, 4)
+
+ # Day.
+ VT_HDAY[hs] = (bitupk (int(Mems[hbuf]), 5, 4)) * 10 +
+ bitupk (int(Mems[hbuf]), 1, 4)
+
+ # Year. The year is stored in the second header word.
+ VT_HYEAR[hs] = (bitupk (int(Mems[hbuf+1]), 13, 4)) * 10 +
+ bitupk (int(Mems[hbuf+1]), 9, 4)
+
+	# Time (seconds since midnight).  Stored in the third and fourth words.
+ VT_HTIME[hs] = (bitupk (int(Mems[hbuf+2]), 1, 2)) * 2**15 +
+ bitupk (int(Mems[hbuf+3]), 1, 15)
+
+ # Store other header parameters. Stored one per word.
+ VT_HWVLNGTH[hs] = Mems[hbuf+4] # Wavelength (angstroms)
+ VT_HOBSTYPE[hs] = Mems[hbuf+5] # Observation type (0,1,2,3,or 4)
+ VT_HAVINTENS[hs] = Mems[hbuf+6] # Average intensity
+ VT_HNUMCOLS[hs] = Mems[hbuf+7] # Number of columns
+ VT_HINTGPIX[hs] = Mems[hbuf+8] # Integrations per pixel
+	VT_HREPTIME[hs] = Mems[hbuf+9]		# Repetition time
+
+ # Calculate the time in hours, minutes, and seconds instead of
+ # seconds since midnight.
+
+ hour = int(VT_HTIME[hs]/3600)
+ minute = int((VT_HTIME[hs] - hour * 3600)/60)
+ second = VT_HTIME[hs] - hour * 3600 - minute * 60
+
+	# If verbose, print out some header info on one line with no <CR>.
+ if (verbose) {
+ call printf ("%02d/%02d/%02d %02d:%02d:%02d")
+ call pargi (VT_HMONTH[hs])
+ call pargi (VT_HDAY[hs])
+ call pargi (VT_HYEAR[hs])
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (second)
+ call printf (" wvlngth %d obstype %d numcols %d")
+ call pargi (VT_HWVLNGTH[hs])
+ call pargi (VT_HOBSTYPE[hs])
+ call pargi (VT_HNUMCOLS[hs])
+ call flush (STDOUT)
+ }
+end
diff --git a/noao/imred/vtel/dephem.x b/noao/imred/vtel/dephem.x
new file mode 100644
index 00000000..6c8c315d
--- /dev/null
+++ b/noao/imred/vtel/dephem.x
@@ -0,0 +1,139 @@
+# EPHEM -- Calculate ephemeris data for the sun, return latitude and
+# longitude of sub-earth point.
+
+procedure ephem (month, day, year, hour, minute, second, image_r,
+ bn_degrees, cldc_degrees, verbose)
+
+int month # time of observation
+int day #
+int year #
+int hour #
+int minute #
+int second #
+real image_r # image radius
+real bn_degrees # solar latitude of sub-earth point (degrees)
+real cldc_degrees # Carrington longitude of disk center
+bool verbose # verbose flag
+
+double radians_per_degree, pi, two_pi, st, d, dd
+double ma, sin_ma, sin_two_ma, ml, e, e_squared, e_cubed
+double ep, ea, r, image_r_squared, tl
+double lan, bn, p, p_degrees
+double sl1, sl2, cldc, cos_bn, x, cl1
+double sin_three_ma, sec_bn, y
+double dd_squared, dd_cubed, c, s, cl2, sln
+int mac[12]
+
+data mac/0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334/
+
+begin
+ # This version ignores lunar and planetary perturbations.
+ radians_per_degree = .017453292519943d+0
+ pi = 3.1415926536d+0
+ two_pi = pi + pi
+
+ d = double(365 * year + (year - 1)/4 + mac[month] + day)
+ if (month >= 3 && mod(year, 4) == 0)
+ d = d + 1.d+0
+ st = double(second / 3600. + minute / 60. + hour)
+ d = d + st/24.d+0 -.5d+0
+ dd = d / 10000.d+0
+ dd_squared = dd * dd
+ dd_cubed = dd * dd * dd
+
+ # Mean anomaly.
+ ma = radians_per_degree * (358.475845d+0 + .985600267d+0 *
+ d - 1.12d-5 * dd_squared - 7.d-8 * dd_cubed)
+ ma = mod(ma, two_pi)
+ sin_ma = sin(ma)
+ sin_two_ma = sin(2.d+0 * ma)
+ sin_three_ma = sin(3.d+0 * ma)
+
+ # Mean longitude.
+ ml = radians_per_degree *
+ (279.696678d+0 + .9856473354d+0 * d + 2.267d-5 * dd_squared)
+ ml = mod(ml, two_pi)
+
+	# Eccentricity.
+ e = 0.01675104d+0 - 1.1444d-5 * dd - 9.4d-9 * dd_squared
+ e_squared = e * e
+ e_cubed = e_squared * e
+
+ # Obliquity.
+ ep = radians_per_degree * (23.452294d+0 -
+ 3.5626d-3 * dd - 1.23d-7 * dd_squared + 1.03d-8 * dd_cubed)
+
+ # Eccentric anomaly.
+ ea = ma + (e - e_cubed/8.d+0) * sin_ma + e_squared * sin_two_ma/2.d+0 +
+ 3.d+0 * e_cubed * sin_three_ma/8.d+0
+
+ # Radius vector.
+ r = 1.00000003d+0 * (1.d+0 - e * cos(ea))
+
+ # Image radius.
+ image_r = real(961.18d+0 / r)
+ image_r_squared = double(image_r * image_r)
+
+ # True longitude.
+ tl = ml + (2.d+0 * e - e_cubed/4.d+0) * sin_ma + 5.d+0 * e_squared *
+ sin_two_ma/4.d+0 + 13.d+0 * e_cubed * sin_three_ma/12.d+0
+ tl = mod(tl, two_pi)
+
+ # Longitude of ascending node of solar equator.
+ lan = radians_per_degree * (73.666667d+0 + 0.0139583d+0 *
+ (year + 50.d+0))
+
+ # Solar latitude of sub-earth point.
+ bn = asin(sin(tl - lan) * .12620d+0)
+ bn_degrees = real(bn / radians_per_degree)
+ if (verbose) {
+ call printf("B0 (degrees) = %10.5f\n")
+ call pargr(bn_degrees)
+ }
+
+ # Position angle of rotation axis.
+ p = atan(-cos(tl) * tan(ep)) + atan(-cos(tl - lan) * .12722d+0)
+ p_degrees = p/radians_per_degree
+ if (verbose) {
+ call printf("P-angle (degrees) = %10.5f\n")
+ call pargr(real(p_degrees))
+ }
+
+ # Carrington longitude of disk center.
+ sl1 = (d + 16800.d+0) * 360.d+0/25.38d+0
+ sl2 = mod(sl1, 360.d+0)
+ sln = 360.d+0 - sl2
+ sln = radians_per_degree * sln
+
+ cos_bn = cos(bn)
+ sec_bn = 1.d+0/cos_bn
+ c = +1.d+0
+ s = +1.d+0
+ x = -sec_bn * cos(tl - lan)
+ if (x < 0.)
+ c = -1.d+0
+ y = -sec_bn * sin(tl - lan) * .99200495d+0
+ if (y < 0.)
+ s = -1.d+0
+
+ cl1 = tan(tl - lan) * 0.99200495d+0
+ cl2 = atan(cl1)
+ if (s == 1.d+0 && c == 1.d+0)
+ cldc = sln + cl2
+ if (s == -1.d+0 && c == -1.d+0)
+ cldc = sln + cl2 + pi
+ if (s == 1.d+0 && c == -1.d+0)
+ cldc = sln + cl2 + pi
+ if (s == -1.d+0 && c == 1.d+0)
+ cldc = sln + cl2
+ if (cldc < 0.d+0)
+ cldc = cldc + two_pi
+ if (cldc > two_pi)
+ cldc = mod(cldc, two_pi)
+
+ cldc_degrees = real(cldc / radians_per_degree)
+ if (verbose) {
+ call printf ("L0 (degrees) = %10.5f\n")
+ call pargr (cldc_degrees)
+ }
+end
diff --git a/noao/imred/vtel/destreak.par b/noao/imred/vtel/destreak.par
new file mode 100644
index 00000000..4b03ee85
--- /dev/null
+++ b/noao/imred/vtel/destreak.par
@@ -0,0 +1,5 @@
+heimage,s,q,,,,Helium 10830 image to be destreaked
+heout,s,q,,,,Output image
+tempim,s,q,,,,Temporary image
+verbose,b,h,no,,,Print out header data and give progress reports
+threshold,i,h,3,,,Squibby brightness threshold defining the limb
diff --git a/noao/imred/vtel/destreak.x b/noao/imred/vtel/destreak.x
new file mode 100644
index 00000000..5002bab9
--- /dev/null
+++ b/noao/imred/vtel/destreak.x
@@ -0,0 +1,432 @@
+include <mach.h>
+include <imhdr.h>
+include <imset.h>
+include "vt.h"
+
+define WINDEXC 800. # constant for weight index calculation
+define WINDEX6TH 75. # constant for weight index calculation
+define LIMBR .97 # Limb closeness rejection coefficient.
+define SOWTHRESH 20. # Sum of weights threshold.
+define SZ_WT10830 1024 # size of weight table for destreak
+define	FCORRECT	.9375		# fractional term for latitude correction
+
+# Structure for least square fitting parameters.
+
+define VT_LENSQSTRUCT 8 # Length of VT sq structure
+
+# Pointers
+define VT_SQ1P Memi[$1] # pointers to arrays for least
+define VT_SQ1Q1P Memi[$1+1] # squares fit
+define VT_SQ1Q2P Memi[$1+2] #
+define VT_SQ1Q3P Memi[$1+3] #
+define VT_SQ2Q2P Memi[$1+4] #
+define VT_SQ2Q3P Memi[$1+5] #
+define VT_SQ3Q3P Memi[$1+6] #
+define VT_NUMDATAP Memi[$1+7] #
+
+# Macro definitions
+define VT_SQ1 Memr[VT_SQ1P($1)+$2-1]
+define VT_SQ1Q1 Memr[VT_SQ1Q1P($1)+$2-1]
+define VT_SQ1Q2 Memr[VT_SQ1Q2P($1)+$2-1]
+define VT_SQ1Q3 Memr[VT_SQ1Q3P($1)+$2-1]
+define VT_SQ2Q2 Memr[VT_SQ2Q2P($1)+$2-1]
+define VT_SQ2Q3 Memr[VT_SQ2Q3P($1)+$2-1]
+define VT_SQ3Q3 Memr[VT_SQ3Q3P($1)+$2-1]
+define VT_NUMDATA Memi[VT_NUMDATAP($1)+$2-1]
+
+
+# DESTREAK -- Destreak 10830 grams.  For each diode of a 10830 full disk
+# image, calculate coefficients for a best fit function based on the data
+# from that diode and subtract this function from the data.  Apply a
+# spatial filter to the resulting image.
+
+procedure t_destreak()
+
+char heimage[SZ_FNAME] # input image
+char heout[SZ_FNAME] # output image
+char tempim[SZ_FNAME] # temporary image
+bool verbose # verbose flag
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+int threshold # squibby brightness threshold
+
+int diode, npix, i, line
+int kxdim, kydim
+real kernel[3,9]
+pointer weights
+pointer lgp1, lpp
+pointer heim, heoutp
+pointer a, c
+pointer sqs, sp
+
+bool clgetb()
+int clgeti()
+real imgetr()
+pointer imgl2s(), impl2s(), immap()
+errchk immap, imgl2s, impl2s, imfilt
+
+begin
+ call smark (sp)
+ call salloc (sqs, VT_LENSQSTRUCT, TY_STRUCT)
+ call salloc (VT_SQ1P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ1Q1P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ1Q2P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ1Q3P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ2Q2P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ2Q3P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_SQ3Q3P(sqs), DIM_VTFD, TY_REAL)
+ call salloc (VT_NUMDATAP(sqs), DIM_VTFD, TY_INT)
+ call salloc (a, DIM_VTFD, TY_REAL)
+ call salloc (c, DIM_VTFD, TY_REAL)
+ call salloc (weights, SZ_WT10830, TY_REAL)
+
+ # Get parameters from the cl.
+
+ call clgstr ("heimage", heimage, SZ_FNAME)
+ call clgstr ("heout", heout, SZ_FNAME)
+ call clgstr ("tempim", tempim, SZ_FNAME)
+ verbose = clgetb ("verbose")
+ threshold = clgeti("threshold")
+
+ # Open the images
+ heim = immap (heimage, READ_WRITE, 0)
+ heoutp = immap (tempim, NEW_COPY, heim)
+
+ # Ellipse parameters.
+ E_XCENTER[el] = imgetr (heim, "E_XCEN")
+ E_YCENTER[el] = imgetr (heim, "E_YCEN")
+ E_XSEMIDIAMETER[el] = imgetr (heim, "E_XSMD")
+ E_YSEMIDIAMETER[el] = imgetr (heim, "E_XSMD")
+
+ # Generate the weight array.
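+	# (With WINDEXC = 800 and WINDEX6TH = 75 this builds an exponential
+	# table, exp((i - 800)/75), indexed by the weight index computed in
+	# qsumq.)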
+ do i = 1, SZ_WT10830
+ Memr[weights+i-1] = exp((real(i) - WINDEXC)/WINDEX6TH)
+
+ # Set the sq arrays and the a and c arrays to zero.
+ call aclrr (VT_SQ1(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ1Q1(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ1Q2(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ1Q3(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ2Q2(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ2Q3(sqs,1), DIM_VTFD)
+ call aclrr (VT_SQ3Q3(sqs,1), DIM_VTFD)
+ call aclri (VT_NUMDATA(sqs,1), DIM_VTFD)
+ call aclrr (Memr[a], DIM_VTFD)
+ call aclrr (Memr[c], DIM_VTFD)
+
+ # for all lines in the image {
+ # calculate which diode this line corresponds to
+ # get the line from the image
+ # sum the q's for this line
+ # }
+
+ npix = IM_LEN(heim,1)
+ do line = 1, DIM_VTFD {
+ diode = mod((line - 1), SWTH_HIGH) + 1
+ lgp1 = imgl2s (heim, line)
+ call qsumq (Mems[lgp1], npix, el, threshold, weights, LIMBR,
+ line, sqs)
+ }
+
+ # Fit the function to the data for each line.
+ do line = 1, DIM_VTFD {
+ call qfitdiode(sqs, line, npix, Memr[a+line-1], Memr[c+line-1],
+ threshold, verbose)
+ if (verbose) {
+ call printf ("line = %d\n")
+ call pargi (line)
+ call flush (STDOUT)
+ }
+ }
+
+ # For each image line subtract the function from the data.
+ do line = 1, DIM_VTFD {
+ diode = mod((line - 1), SWTH_HIGH) + 1
+ lgp1 = imgl2s (heim, line)
+ lpp = impl2s (heoutp, line)
+ call qrfunct(Mems[lgp1], Mems[lpp], npix, el, threshold,
+ Memr[a+line-1], Memr[c+line-1], LIMBR, line)
+ }
+
+ # Switch images
+ call imunmap (heim)
+ call imunmap (heoutp)
+ heim = immap (tempim, READ_WRITE, 0)
+ heoutp = immap (heout, NEW_COPY, heim)
+
+	# Call the spatial filter program.
+
+ # First we have to load up the filter kernel
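+	# (The 3x9 kernel below is normalized; its elements sum to ~1 so the
+	# smoothing preserves the mean signal level.)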
+ kxdim = 3
+ kydim = 9
+ kernel[1,1] = .017857
+ kernel[1,2] = .017857
+ kernel[1,3] = .035714
+ kernel[1,4] = .035714
+ kernel[1,5] = .035714
+ kernel[1,6] = .035714
+ kernel[1,7] = .035714
+ kernel[1,8] = .017857
+ kernel[1,9] = .017857
+ kernel[2,1] = .017857
+ kernel[2,2] = .053571
+ kernel[2,3] = .071428
+ kernel[2,4] = .071428
+ kernel[2,5] = .071428
+ kernel[2,6] = .071428
+ kernel[2,7] = .071428
+ kernel[2,8] = .053571
+ kernel[2,9] = .017857
+ kernel[3,1] = .017857
+ kernel[3,2] = .017857
+ kernel[3,3] = .035714
+ kernel[3,4] = .035714
+ kernel[3,5] = .035714
+ kernel[3,6] = .035714
+ kernel[3,7] = .035714
+ kernel[3,8] = .017857
+ kernel[3,9] = .017857
+
+ if (verbose) {
+ call printf ("filtering\n")
+ call flush(STDOUT)
+ }
+ call imfilt(heim, heoutp, kernel, kxdim, kydim, el)
+
+ # Unmap the images.
+ call imunmap(heim)
+ call imunmap(heoutp)
+
+ call sfree (sp)
+
+end
+
+
+# QFITDIODE -- Calculate the coefficients of the best fit functions.
+
+procedure qfitdiode (sqs, line, npix, a, c, threshold, verbose)
+
+pointer sqs # q's structure
+int line # line in image
+int npix # number of pixels
+real a, c # returned coeffs
+int threshold # sqib threshold
+bool verbose # verbose flag
+
+int i, j
+real zz[4,4], limbr
+
+begin
+ # If the number of points is insufficient, skip.
+ if (VT_NUMDATA(sqs,line) < 50) {
+ a = 0.0
+ c = 0.0
+ return
+ }
+
+	# Initialize limbr (limb closeness rejection coefficient).
+ limbr = LIMBR
+
+
+ # Clear the z array.
+ do i = 1,4
+ do j = 1,4
+ zz[i,j] = 0.0
+
+ # Fill the z array.
+ zz[1,2] = VT_SQ1Q1(sqs,line)
+ zz[1,3] = VT_SQ1Q2(sqs,line)
+ zz[1,4] = VT_SQ1Q3(sqs,line)
+ zz[2,3] = VT_SQ2Q2(sqs,line)
+ zz[2,4] = VT_SQ2Q3(sqs,line)
+ zz[3,4] = VT_SQ3Q3(sqs,line)
+
+ # Do the fit if the sum of weights is sufficient.
+ if (VT_SQ1(sqs,line) > SOWTHRESH)
+ call lstsq(zz,4,VT_SQ1(sqs,line))
+ else {
+ zz[3,1] = 0.0
+ zz[3,2] = 0.0
+ }
+
+ # Coefficients are:
+ if (verbose) {
+ call printf ("a = %g, c = %g ")
+ call pargr(zz[3,1])
+ call pargr(zz[3,2])
+ call flush(STDOUT)
+ }
+ c = zz[3,1]
+ a = zz[3,2]
+end
+
+
+# QSUMQ -- Sum up the values of the Qs for the least squares fit.
+
+procedure qsumq (in, npix, el, threshold, weights, limbr, y, sqs)
+
+short in[npix] # array to sum from
+pointer weights # weights
+real el[LEN_ELSTRUCT] # limb fit ellipse struct
+real limbr # limb closeness rejection coefficient
+int npix # numpix in im line
+int threshold # sqib threshold
+int y # line in image
+pointer sqs # pointer to q's structure
+
+real q1, q2, q3
+int i, windex, itemp
+real rsq, r4th, r6th, r8th
+real x, xfr, yfr, data
+short k
+
+int and()
+short shifts()
+
+begin
+ k = -4
+
+ # First, calculate the y fractional radius squared.
+ yfr = (abs(real(y) - E_YCENTER[el]))**2 / (E_YSEMIDIAMETER[el]**2)
+
+ # Do this for all the pixels in this row.
+ do i = 1, npix {
+ # Calculate the x fractional radius squared.
+ x = real(i)
+ xfr = (abs(x - E_XCENTER[el]))**2 / E_XSEMIDIAMETER[el]**2
+
+ # If off the disk, skip.
+ if (xfr > 1.0) {
+ next
+ }
+
+ # Check to see if the brightness of this data point is above the
+ # threshold, if not, skip.
+
+ itemp = in[i]
+ if (and(itemp,17B) < threshold)
+ next
+
+ # Strip off the squibby brightness, if data too big skip.
+ data = real(shifts(in[i], k))
+ if (data > 100.)
+ next
+
+ # Calculate the radius squared. (fractional)
+ rsq = xfr + yfr
+
+ # Check to see if the data point is on the disk.
+ if (rsq > limbr)
+ next
+
+ r4th = rsq * rsq
+ r6th = rsq * r4th
+ r8th = r4th * r4th
+
+ # Calculate the weight index.
+ windex = WINDEXC + data + WINDEX6TH * r6th
+ if (windex < 1)
+ windex = 1
+ if (windex > SZ_WT10830)
+ windex = SZ_WT10830
+
+ # Calculate the Qs.
+ q1 = Memr[weights+windex-1]
+ q2 = q1 * r6th
+ q3 = q1 * data
+ VT_SQ1(sqs,y) = VT_SQ1(sqs,y) + q1
+ VT_SQ1Q1(sqs,y) = VT_SQ1Q1(sqs,y) + q1 * q1
+ VT_SQ1Q2(sqs,y) = VT_SQ1Q2(sqs,y) + q1 * q2
+ VT_SQ1Q3(sqs,y) = VT_SQ1Q3(sqs,y) + q1 * q3
+ VT_SQ2Q2(sqs,y) = VT_SQ2Q2(sqs,y) + q2 * q2
+ VT_SQ2Q3(sqs,y) = VT_SQ2Q3(sqs,y) + q2 * q3
+ VT_SQ3Q3(sqs,y) = VT_SQ3Q3(sqs,y) + q3 * q3
+ VT_NUMDATA(sqs,y) = VT_NUMDATA(sqs,y) + 1
+ }
+end
+
+
+# QRFUNCT -- Remove FUNCTion. Remove the calculated function from the data
+# from a particular diode. Each data point is checked to see if it is on
+# disk. If it is not then the input pixel is copied to the output array.
+# If it is on the disk, the function defined by a and c is subtracted from
+# the data point before it is copied to the output array.
+
+procedure qrfunct (in, out, npix, el, threshold, a, c, limbr, y)
+
+short in[npix] # inline without fit removed
+short out[npix] # inline with fit removed
+real el[LEN_ELSTRUCT] # ellipse parameter struct
+real a, c # fit coefficients
+real limbr # limb closeness coefficient
+int y # line of image
+int npix # number of pixels in this line
+int threshold # sqib threshold
+
+int i
+short fvalue
+short data
+real x, xfr, yfr, rsq, y4th, y6th
+short correction
+short k, kk
+
+short shifts()
+
+begin
+ k = -4
+ kk = 4
+
+	# If a and c are both zero, leave the data unchanged and return.
+ if (abs(a) < EPSILONR && abs(c) < EPSILONR) {
+ do i = 1, npix {
+ out[i] = in[i] # leave original data.
+ }
+ return
+ }
+
+	# First, calculate the y fractional radius squared.
+ yfr = (abs(real(y) - E_YCENTER[el]))**2 / (E_YSEMIDIAMETER[el]**2)
+
+ # Calculate the correction.
+ y4th = yfr*yfr
+ y6th = y4th*yfr
+ correction = short(FCORRECT*(6.0*yfr + 8.0*y4th + 16.0*y6th))
+
+ # Do this for all the pixels in the row.
+ do i = 1, npix {
+	# Calculate the x fractional radius squared.
+ x = real(npix/2 - i + 1)
+ xfr = (abs(real(i) - E_XCENTER[el]))**2 / E_XSEMIDIAMETER[el]**2
+
+ # If off the disk, skip.
+ if (xfr > 1.0) {
+ out[i] = in[i] # leave original data
+ next
+ }
+
+ # Check to see if the brightness of this data point is above the
+ # threshold, if not, skip.
+
+ if (and(int(in[i]),17B) < threshold) {
+ out[i] = in[i] # leave original data
+ next
+ }
+
+ # Strip off the squibby brightness
+ data = shifts(in[i], k)
+
+ # Calculate the radius squared. (fractional)
+ rsq = xfr + yfr
+
+ # Check to see if the data point is on the disk.
+ if (rsq > 1.0) {
+ out[i] = in[i] # leave original data
+ next
+ }
+
+ # Calculate the function value. Subtract it from the data value.
+ fvalue = short(a * rsq**3 + c) # a * r**6 + c
+ data = data - fvalue + correction
+ # data + squib bright
+ out[i] = shifts(data, kk) + short(and(int(in[i]),17B))
+ }
+end
diff --git a/noao/imred/vtel/destreak5.cl b/noao/imred/vtel/destreak5.cl
new file mode 100644
index 00000000..40a3be55
--- /dev/null
+++ b/noao/imred/vtel/destreak5.cl
@@ -0,0 +1,91 @@
+#{ DESTREAK5 -- Destreak all five images from a vacuum telescope tape. The
+# script accepts the general input image filename and the general output
+# image filename from the user (and now the scratch disk). Destreak5
+# appends a digit [1-5] to the file name for each file read and each
+# corresponding file written.
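+#
+# A hypothetical example (the root names are illustrative only): with
+# getinput = "vt123" and getoutput = "d123" the script reads
+# vtelscr$vt123001 ... vtelscr$vt123005 and writes d1231 ... d1235.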
+
+# getinput,s,a,,,,General input filename for the 5 images
+# getoutput,s,a,,,,General output filename for the 5 images
+# inim,s,h
+# outim,s,h
+
+{
+
+ inim = getinput
+ outim = getoutput
+
+ if (access("vtelscr$"//inim//"001")) {
+ readvt ("vtelscr$"//inim//"001", inim//"tmp1")
+ quickfit (inim//"tmp1001",verbose=yes)
+ delete ("vtelscr$"//inim//"001")
+ getsqib (inim//"tmp1001", inim//"sqib1")
+ destreak (inim//"tmp1001", inim//"temp1", inim//"tmpr1")
+ imdelete (inim//"tmp1001")
+ imdelete (inim//"tmpr1")
+ putsqib (inim//"temp1", inim//"sqib1", outim//"1")
+ imdelete (inim//"temp1")
+ imdelete (inim//"sqib1")
+ } else {
+	print ("vtelscr$"//inim//"001 not accessible")
+ }
+
+ if (access("vtelscr$"//inim//"002")) {
+ readvt ("vtelscr$"//inim//"002", inim//"tmp2")
+ quickfit (inim//"tmp2001",verbose=yes)
+ delete ("vtelscr$"//inim//"002")
+ getsqib (inim//"tmp2001", inim//"sqib2")
+ destreak (inim//"tmp2001", inim//"temp2", inim//"tmpr2")
+ imdelete (inim//"tmp2001")
+ imdelete (inim//"tmpr2")
+ putsqib (inim//"temp2", inim//"sqib2", outim//"2")
+ imdelete (inim//"temp2")
+ imdelete (inim//"sqib2")
+ } else {
+	print ("vtelscr$"//inim//"002 not accessible")
+ }
+
+ if (access("vtelscr$"//inim//"003")) {
+ readvt ("vtelscr$"//inim//"003", inim//"tmp3")
+ quickfit (inim//"tmp3001",verbose=yes)
+ delete ("vtelscr$"//inim//"003")
+ getsqib (inim//"tmp3001", inim//"sqib3")
+ destreak (inim//"tmp3001", inim//"temp3", inim//"tmpr3")
+ imdelete (inim//"tmp3001")
+ imdelete (inim//"tmpr3")
+ putsqib (inim//"temp3", inim//"sqib3", outim//"3")
+ imdelete (inim//"temp3")
+ imdelete (inim//"sqib3")
+ } else {
+	print ("vtelscr$"//inim//"003 not accessible")
+ }
+
+ if (access("vtelscr$"//inim//"004")) {
+ readvt ("vtelscr$"//inim//"004", inim//"tmp4")
+ quickfit (inim//"tmp4001",verbose=yes)
+ delete ("vtelscr$"//inim//"004")
+ getsqib (inim//"tmp4001", inim//"sqib4")
+ destreak (inim//"tmp4001", inim//"temp4", inim//"tmpr4")
+ imdelete (inim//"tmp4001")
+ imdelete (inim//"tmpr4")
+ putsqib (inim//"temp4", inim//"sqib4", outim//"4")
+ imdelete (inim//"temp4")
+ imdelete (inim//"sqib4")
+ } else {
+        print ("vtelscr$"//inim//"004 not accessible")
+ }
+
+ if (access("vtelscr$"//inim//"005")) {
+ readvt ("vtelscr$"//inim//"005", inim//"tmp5")
+ quickfit (inim//"tmp5001",verbose=yes)
+ delete ("vtelscr$"//inim//"005")
+ getsqib (inim//"tmp5001", inim//"sqib5")
+ destreak (inim//"tmp5001", inim//"temp5", inim//"tmpr5")
+ imdelete (inim//"tmp5001")
+ imdelete (inim//"tmpr5")
+ putsqib (inim//"temp5", inim//"sqib5", outim//"5")
+ imdelete (inim//"temp5")
+ imdelete (inim//"sqib5")
+ } else {
+        print ("vtelscr$"//inim//"005 not accessible")
+ }
+}
diff --git a/noao/imred/vtel/destreak5.par b/noao/imred/vtel/destreak5.par
new file mode 100644
index 00000000..41accc84
--- /dev/null
+++ b/noao/imred/vtel/destreak5.par
@@ -0,0 +1,4 @@
+getinput,s,a,,,,Root input filename for the 5 images
+getoutput,s,a,,,,Root output filename for the 5 images
+inim,s,h
+outim,s,h
diff --git a/noao/imred/vtel/dicoplot.h b/noao/imred/vtel/dicoplot.h
new file mode 100644
index 00000000..592fc8c8
--- /dev/null
+++ b/noao/imred/vtel/dicoplot.h
@@ -0,0 +1,35 @@
+# for the following it is assumed the scale of the coordinate system is zero
+# to one in both x and y. (0.0,0.0) to (1.0,1.0)
+# coordinates of first image (bottom-left-x, bottom-left-y, top-right-x, t-r-y)
+define IM1BL_X .242
+define IM1BL_Y .142
+define IM1TR_X .452
+define IM1TR_Y .822
+
+# coordinates of second image
+define IM2BL_X .525
+define IM2BL_Y .142
+define IM2TR_X .735
+define IM2TR_Y .822
+
+# coordinates of greyscale box
+define IMGBL_X .229
+define IMGBL_Y .867
+define IMGTR_X .748
+define IMGTR_Y .902
+
+# coordinates of outside boundary of entire plot
+define IMDBL_X .210
+define IMDBL_Y .076
+define IMDTR_X .810
+define IMDTR_Y .950
+
+# length of tics when labeling axes
+define TICLENGTH .002
+
+#image types
+define T10830 1
+define TFLUX 4
+define TWEIGHT 3
+define TABSFLX 2
+define TPLRTY 5
diff --git a/noao/imred/vtel/dicoplot.par b/noao/imred/vtel/dicoplot.par
new file mode 100644
index 00000000..e8348a76
--- /dev/null
+++ b/noao/imred/vtel/dicoplot.par
@@ -0,0 +1,4 @@
+image1,s,q,,,,Image1
+image2,s,q,,,,Image2
+rotnum,i,q,,,,carrington rotation number
+device,s,h,dicomed,,,plot device
diff --git a/noao/imred/vtel/dicoplot.x b/noao/imred/vtel/dicoplot.x
new file mode 100644
index 00000000..3754bb06
--- /dev/null
+++ b/noao/imred/vtel/dicoplot.x
@@ -0,0 +1,522 @@
+include <mach.h>
+include <imhdr.h>
+include <imset.h>
+include <math/curfit.h>
+include <gset.h>
+include "dicoplot.h"
+include "vt.h"
+
+# DICOPLOT -- Make dicomed (or other graphics device) plots of Carrington
+# rotation maps. The output of this program is a metacode file called
+# "metacode" which can be plotted on whichever graphics device the user
+# chooses. Before the program is run, STDGRAPH should be set to the target
+# device.
+
+procedure t_dicoplot()
+
+char image1[SZ_FNAME] # first image to draw
+char image2[SZ_FNAME] # second image to draw
+int rotnum # carrington rotation number
+char device[SZ_FNAME] # plot device
+
+int type1, type2 # types of the two images
+pointer imout1
+pointer imout2
+int count, obsdate
+int i, longitude, latitude, month, day, year
+int xresolution, yresolution
+real delta_gray, delta_long, delta_gblock, x, y
+real offset, longituder
+real mapx1, mapx2, mapy1, mapy2
+char ltext[SZ_LINE]
+char system_id[SZ_LINE]
+
+bool up, pastm
+int dateyn
+
+short gray[16]
+pointer imgray1
+pointer imgray2
+pointer gp, p, sp
+pointer im1, im2
+pointer subras1, subras2
+
+pointer imgs2r()
+pointer immap()
+pointer gopen()
+int imaccf()
+int ggeti()
+real imgetr()
+int clgeti(), imgeti()
+errchk gopen, immap, imgs2r, sysid
+
+begin
+ call smark (sp)
+ call salloc (imout1, DIM_SQUAREIM*DIM_XCARMAP, TY_REAL)
+ call salloc (imout2, DIM_SQUAREIM*DIM_XCARMAP, TY_REAL)
+ call salloc (imgray1, DIM_SQUAREIM*DIM_XCARMAP, TY_SHORT)
+ call salloc (imgray2, DIM_SQUAREIM*DIM_XCARMAP, TY_SHORT)
+
+ # Get parameters from the cl.
+ call clgstr ("image1", image1, SZ_FNAME)
+ call clgstr ("image2", image2, SZ_FNAME)
+ rotnum = clgeti ("rotnum")
+ call clgstr ("device", device, SZ_FNAME)
+
+ # Open the output file.
+ gp = gopen (device, NEW_FILE, STDPLOT)
+
+ # Open the images
+ im1 = immap (image1, READ_ONLY, 0)
+ im2 = immap (image2, READ_ONLY, 0)
+
+ # Find out what kind of images we have.
+ call gimtype (im1, type1)
+ call gimtype (im2, type2)
+
+ # Draw boxes around the grayscale and the data images.
+ call box (gp, IM1BL_X, IM1BL_Y, IM1TR_X, IM1TR_Y)
+ call box (gp, IM2BL_X, IM2BL_Y, IM2TR_X, IM2TR_Y)
+
+ delta_gblock = (IMGTR_X - IMGBL_X)/16.
+ y = IMGBL_Y - .005
+ do i = 1, 16 {
+ x = IMGBL_X + real(i-1) * delta_gblock + delta_gblock/2.
+ call sprintf (ltext, SZ_LINE, "%d")
+ call pargi ((i-1)*int((254./15.)+0.5))
+ call gtext (gp, x, y, ltext, "v=t;h=c;s=.20")
+ }
+
+
+ # Draw tic marks and labels on the image boxes.
+ # First the longitudes.
+
+ delta_long = (IM1TR_Y-IM1BL_Y)/36.
+ longitude = 0
+ do i = 1,37 {
+ call sprintf (ltext, SZ_LINE, "%d")
+ call pargi (longitude)
+ y = IM1TR_Y - real(i-1)*delta_long
+ x = IM1TR_X
+ call gline (gp, x,y,x+TICLENGTH,y)
+ x = IM1BL_X
+ call gline (gp, x,y,x-TICLENGTH,y)
+ call gtext (gp, x-.005, y, ltext, "v=c;h=r;s=.25;u=0")
+ x = IM2TR_X
+ call gline (gp, x,y,x+TICLENGTH,y)
+ x = IM2BL_X
+ call gline (gp, x,y,x-TICLENGTH,y)
+ call gtext (gp, x-.005, y, ltext, "v=c;h=r;s=.25;u=0")
+ longitude = longitude + 10
+ }
+
+ # Now the latitudes.
+ # First draw the tics and labels at 0 degrees on both images
+
+ latitude = 0
+ call sprintf (ltext, SZ_LINE, "%d")
+ call pargi (latitude)
+ x = (IM1BL_X + IM1TR_X)/2.
+ y = IM1TR_Y
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y = IM1BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+ x = (IM2BL_X + IM2TR_X)/2.
+ y = IM2TR_Y
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y = IM2BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+
+ # Now the north latitudes.
+ do i = 1,4 {
+ switch (i) {
+ case 1:
+ latitude = 20
+ case 2:
+ latitude = 40
+ case 3:
+ latitude = 60
+ case 4:
+ latitude = 90
+ }
+ offset = ((IM1TR_X - IM1BL_X)/2.) * sin(real(latitude)*3.1415/180.)
+ x = IM1BL_X + ((IM1TR_X - IM1BL_X)/2.) + offset
+ y = IM1TR_Y
+ call sprintf (ltext, SZ_LINE, "%s%d")
+ call pargstr ("N")
+ call pargi (latitude)
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y = IM1BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+ x = x + IM2BL_X - IM1BL_X
+ y = IM2TR_Y
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y = IM2BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+ }
+
+ # Finally the south latitudes.
+ do i = 1,4 {
+ switch (i) {
+ case 1:
+ latitude = -20
+ case 2:
+ latitude = -40
+ case 3:
+ latitude = -60
+ case 4:
+ latitude = -90
+ }
+ offset = ((IM2TR_X - IM2BL_X)/2.) * sin(real(latitude)*3.1415/180.)
+ x = IM1BL_X + ((IM1TR_X - IM1BL_X)/2.) + offset
+ y = IM1TR_Y
+ call sprintf (ltext, SZ_LINE, "%s%d")
+ call pargstr ("S")
+ call pargi (-latitude)
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y=IM1BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+ x = x + IM2BL_X - IM1BL_X
+ y = IM2TR_Y
+ call gline (gp, x, y, x, y+TICLENGTH)
+ call gtext (gp, x, y+.005, ltext, "v=b;h=c;s=.25;u=0")
+ y=IM2BL_Y
+ call gline (gp, x, y, x, y-TICLENGTH)
+ }
+
+ # Put the titles on.
+ # We got the carrington rotation number from the cl.
+
+ call sprintf (ltext, SZ_LINE, "CARRINGTON ROTATION %d %s")
+ call pargi (rotnum)
+ switch (type1) {
+ case T10830:
+ call pargstr ("10830")
+ case TABSFLX:
+ call pargstr ("ABS. FLUX")
+ case TWEIGHT:
+ call pargstr ("WEIGHT")
+ case TFLUX:
+ call pargstr ("FLUX")
+ case TPLRTY:
+ call pargstr ("POLARITY")
+ }
+
+ x = IM1TR_X+.025
+ y = IM1BL_Y + (IM1TR_Y - IM1BL_Y) / 2.
+ call gtext (gp, x, y, ltext, "v=c;h=c;s=.5;u=0")
+ call sprintf (ltext, SZ_LINE, "CARRINGTON ROTATION %d %s")
+ call pargi (rotnum)
+ switch (type2) {
+ case T10830:
+ call pargstr ("10830")
+ case TABSFLX:
+ call pargstr ("ABS. FLUX")
+ case TWEIGHT:
+ call pargstr ("WEIGHT")
+ case TFLUX:
+ call pargstr ("FLUX")
+ case TPLRTY:
+ call pargstr ("POLARITY")
+ }
+
+ x = IM2TR_X+.025
+ y = IM2BL_Y + (IM2TR_Y - IM2BL_Y) / 2.
+ call gtext (gp, x, y, ltext, "v=c;h=c;s=.5;u=0")
+
+ # Put on the dates at the appropriate longitudes.
+ # Get the dates and longitudes from the image header.
+ # Read dates until we run out.
+ # This code alternates between long and short tics for the dates.
+ # For this to work it is assumed that the dates are in
+	# chronological order.
+
+ # Get the first date and longitude from the image header to check
+ # whether or not there are any dates.
+
+ count = 1
+ call sprintf (ltext, SZ_LINE, "DATE%04d")
+ call pargi (count)
+ dateyn = imaccf (im1, ltext)
+ if (dateyn == NO)
+ call error(0, "no dates in image header")
+ obsdate = imgeti (im1, ltext)
+ call sprintf (ltext, SZ_LINE, "LONG%04d")
+ call pargi (count)
+ longituder = imgetr (im1, ltext)
+ longitude = int(longituder + .5)
+
+ # If we find some dates near the beginning of the list which have
+ # longitudes smaller than 180, they probably are some "extra" grams
+ # merged in to fill out the plot, don't plot these dates because they
+ # are really off the image and will come out in the wrong place if we
+ # allow them to be plotted.
+
+ while (longitude < 180) {
+ count = count + 1
+ call sprintf (ltext, SZ_LINE, "DATE%04d")
+ call pargi (count)
+ dateyn = imaccf (im1, ltext)
+ if (dateyn == NO)
+ break
+ obsdate = imgeti (im1, ltext)
+ call sprintf (ltext, SZ_LINE, "LONG%04d")
+ call pargi (count)
+ longituder = imgetr (im1, ltext)
+ longitude = int(longituder + .5)
+ }
+
+ # Calculate the month/day/year.
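+	# (obsdate appears to be packed as MMDDYY; e.g. 32084 decodes to
+	# month 3, day 20, year 84.)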
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+
+ up = FALSE
+ pastm = FALSE
+
+ while (dateyn == YES) {
+
+ # We check to see whether or not we have gotten past 180 degrees
+ # so that if we find some images near the end of the list with
+ # longitudes greater than 180 degrees we will know not to plot
+ # them since they are off the image. Longitudes of images in the
+ # image merge list decrease as we go down the list.
+
+ # Past the middle yet?
+ if (longitude < 180)
+ pastm = true
+
+ # Figure out where this longitude is in y on the image.
+ y = real(IM1BL_Y) + ((360. - real(longitude))/360.) *
+ real(IM1TR_Y - IM1BL_Y)
+ x = real(IM1TR_X)
+
+ # Draw the tic and the label.
+ if (!up)
+ call gline (gp, x, y, x+.005, y)
+ else
+ call gline (gp, x, y, x+.011, y)
+ call sprintf(ltext, SZ_LINE, "%d/%d/%d")
+ call pargi(month)
+ call pargi(day)
+ call pargi(year)
+ if (!up)
+ call gtext (gp, x+.006, y, ltext, "v=c;h=l;s=.20;u=0")
+ else
+ call gtext (gp, x+.012, y, ltext, "v=c;h=l;s=.20;u=0")
+
+ # Do the other image.
+ x = real(IM2TR_X)
+ if (!up)
+ call gline (gp, x, y, x+.005, y)
+ else
+ call gline (gp, x, y, x+.011, y)
+ if (!up)
+ call gtext (gp, x+.006, y, ltext, "v=c;h=l;s=.20;u=0")
+ else
+ call gtext (gp, x+.012, y, ltext, "v=c;h=l;s=.20;u=0")
+
+ # Toggle up switch.
+ up = !up
+
+ count = count + 1
+ call sprintf (ltext, SZ_LINE, "DATE%04d")
+ call pargi (count)
+ dateyn = imaccf (im1, ltext)
+
+ if (dateyn == YES) {
+ # Calculate the month/day/year.
+ obsdate = imgeti (im1, ltext)
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+
+ # Read in the next longitude.
+ call sprintf (ltext, SZ_LINE, "LONG%04d")
+ call pargi (count)
+ longituder = imgeti (im1, ltext)
+ longitude = int(longituder + .5)
+
+ # If we are past the middle and find a longitude in the list
+ # which is greater than 180 degrees, do not plot this date
+ # since it is off the image and will be plotted in the wrong
+ # place.
+
+ if (pastm && longitude > 180)
+ dateyn = NO
+ }
+ } # End of while loop on dates/longitudes.
+
+ # Fill in the gray scale.
+ delta_gray = 254./15.
+ do i = 1, 16 {
+ gray[i] = 1.+real(i-1)*delta_gray+0.5
+ }
+ call gpcell (gp, gray, 16, 1, IMGBL_X, IMGBL_Y, IMGTR_X, IMGTR_Y)
+
+ # Now map the input images from 360x180 to 180x360 and put them
+ # out to the image. We also map the data values into the appropriate
+ # gray scale.
+
+ # Get subrasters of the images.
+ subras1 = imgs2r (im1, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+ subras2 = imgs2r (im2, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+
+	# Call the image mapping routine on both images.
+ call remap (Memr[subras1], DIM_XCARMAP, DIM_SQUAREIM, Memr[imout1])
+ call remap (Memr[subras2], DIM_XCARMAP, DIM_SQUAREIM, Memr[imout2])
+
+ # Call the gray scale mapper.
+ call graymap (Memr[imout1], DIM_SQUAREIM, DIM_XCARMAP, Mems[imgray1],
+ type1)
+ call graymap (Memr[imout2], DIM_SQUAREIM, DIM_XCARMAP, Mems[imgray2],
+ type2)
+
+ # Put the images out to the final image.
+ xresolution = ggeti (gp, "xr")
+ yresolution = ggeti (gp, "yr")
+ mapx1 = IM1BL_X
+ mapx2 = IM1TR_X
+ mapy1 = IM1BL_Y
+ mapy2 = IM1TR_Y
+ call gpcell (gp, Mems[imgray1], DIM_SQUAREIM, DIM_XCARMAP, mapx1, mapy1,
+ mapx2, mapy2)
+ mapx1 = IM2BL_X
+ mapx2 = IM2TR_X
+ mapy1 = IM2BL_Y
+ mapy2 = IM2TR_Y
+ call gpcell (gp, Mems[imgray2], DIM_SQUAREIM, DIM_XCARMAP, mapx1, mapy1,
+ mapx2, mapy2)
+
+ # Put the system identification on the plot.
+ call sysid (system_id, SZ_LINE)
+ call gtext (gp, .51, .076, system_id, "h=c;s=0.45")
+
+	# Close the graphics pointer.
+	call gclose(gp)
+
+ call sfree (sp)
+end
+
+
+# BOX -- Draw a box around the square described by x1, y1 (bottom left corner)
+# and x2, y2 (top right corner).
+
+procedure box(gp, x1, y1, x2, y2)
+
+real x1, y1 # bottom left corner position
+real x2, y2 # top right corner position
+pointer gp # graphics pointer
+
+begin
+ call gline (gp, x1, y1, x1, y2)
+ call gline (gp, x1, y2, x2, y2)
+ call gline (gp, x2, y2, x2, y1)
+ call gline (gp, x2, y1, x1, y1)
+end
+
+
+# REMAP -- Reformat a 360x180 image into a 180x360 image by rotating the image
+# by 90 degrees clockwise.
+
+procedure remap (inim, x, y, outim)
+
+real inim[x,y] # input image
+real outim[y,x] # output image
+int x, y # size of images
+
+int i, j
+
+begin
+ do i = 1, x
+ do j = 1, y
+ outim[j,x-i+1] = inim[i,j]
+end
+
+
+# GRAYMAP -- Map a real image into a short integer image using a specific
+# scaling algorithm to make the full scale 1 to 254.
+
+procedure graymap (inim, x, y, outim, type)
+
+real inim[x,y] # input image
+int x, y # size of images
+int type # type of image
+short outim[x,y] # output image
+
+real zpp[5], zcc[5], zp, zc # parameters for different image types
+int i, j, index
+short ztbl[512] # grayscale map array, (in gryscl.inc)
+
+data zpp /.25, .80, 0.2, 1.0, 100. /
+data zcc /384., 80., 0., 128., 128. /
+include "gryscl.inc"
+
+begin
+ # If the image is not a 10830 gram then just multiply each pixel
+ # by a constant and then add another constant. (different constants
+ # for flux, abs. flux, weight, and polarity)
+ # If it is a 10830 gram then multiply and add as above, then use
+ # the result as an index into a lookup table. The table is enumerated
+ # above.
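+	# (For example, a FLUX image, type 4, maps as out = in * 1.0 + 128
+	# and is then clipped to the range 1-254.)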
+
+ zp = zpp[type]
+ zc = zcc[type]
+ do i = 1, x {
+ do j = 1, y {
+ outim[i,j] = inim[i,j] * zp + zc
+ if (type == 1) { # if this is a 10830 gram:
+ if (outim[i,j] <= 0) # make it fit in the table
+ outim[i,j] = 1
+ if (outim[i,j] > 512)
+ outim[i,j] = 512
+ index = outim[i,j]
+ outim[i,j] = ztbl[index] + 10 # look it up in the table.
+ }
+ if (outim[i,j] <= 0) # check boundaries
+ outim[i,j] = 1
+ if (outim[i,j] >= 255)
+ outim[i,j] = 254
+ }
+ }
+end
+
+
+# GIMTYPE -- Get IMage TYPE. Using information in the image header determine
+# what type of image it is. 1 = 10830, 2 = ABS. FLUX, 3 = WEIGHTS,
+# 4 = FLUX, 5 = POLARITY.
+
+procedure gimtype (im, type)
+
+pointer im # image pointer
+int type # type
+
+int wavelength, imgeti()
+int weightyn, absyn, polarityyn
+int imaccf()
+
+begin
+ wavelength = imgeti (im, "WV_LNGTH")
+ weightyn = imaccf (im, "WEIGHTS")
+ absyn = imaccf (im, "ABS_VALU")
+ polarityyn = imaccf (im, "POLARITY")
+
+ if (weightyn == NO && absyn == NO && polarityyn == NO) {
+ if (wavelength == 10830)
+ type = T10830
+ if (wavelength == 8688)
+ type = TFLUX
+ }
+ if (weightyn == YES)
+ type = TWEIGHT
+ if (absyn == YES)
+ type = TABSFLX
+ if (polarityyn == YES)
+ type = TPLRTY
+end
diff --git a/noao/imred/vtel/doc/destreak.hlp b/noao/imred/vtel/doc/destreak.hlp
new file mode 100644
index 00000000..ef05d905
--- /dev/null
+++ b/noao/imred/vtel/doc/destreak.hlp
@@ -0,0 +1,50 @@
+.help destreak Dec84 noao.imred.vtel
+.ih
+NAME
+destreak -- Remove streaks from Helium 10830 grams
+.ih
+USAGE
+destreak input_image output_image
+.ih
+PARAMETERS
+.ls input_image
+Image to be destreaked.
+.le
+.ls output_image
+Name to give destreaked output image (must be a separate image).
+.le
+.ls tempim
+Temporary image used for pixel storage between destreak passes.
+.le
+.ls verbose=no
+Flag to signal program that it should produce verbose output.
+.le
+.ls threshold = 4
+Squibby brightness threshold to use in determining limb points.
+.le
+.ih
+DESCRIPTION
+The helium 10830 grams as taken by the vacuum telescope have horizontal
+streaks caused by the detecting apparatus. Destreak removes these streaks
+and the limb darkening
+using a two pass procedure. First, for each diode, a function of the form
+'a + b*r**4', where r is the radius from disk center and a, b are parameters,
+is fit to the intensity distribution and is then subtracted from the data.
+Then a spatial filter is applied to the result and the final image is
+written to disk. The full disk images are 2048 x 2048 and are taken using
+a 512 diode array which is scanned from west to east across the solar disk
+4 times. Thus, data from a particular diode consists of four lines of the
+image.
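+
+Schematically, the first pass can be pictured as follows (a restatement of
+the description above, not the task's exact code; r is the fractional
+radius from disk center, 0 at the center and 1 at the limb):
+
+.nf
+	fit(r) = a + b * r**4
+	out    = in - fit(r)		(spatially filtered on the second pass)
+.fi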
+.ih
+EXAMPLES
+1. To destreak "image1", put the output in "image2", put the temporary image in
+"temp2", and see verbose output, the command would be:
+
+.nf
+ vt> destreak image1 image2 temp2 v+
+.fi
+
+.ih
+SEE ALSO
+readvt, writevt, quickfit, getsqib, putsqib
+.endhelp
diff --git a/noao/imred/vtel/doc/destreak5.hlp b/noao/imred/vtel/doc/destreak5.hlp
new file mode 100644
index 00000000..8bf383fa
--- /dev/null
+++ b/noao/imred/vtel/doc/destreak5.hlp
@@ -0,0 +1,43 @@
+.help destreak5 Dec85 noao.imred.vtel
+.ih
+NAME
+destreak5 -- First pass of 10830 processing
+.ih
+USAGE
+destreak5 input_root output_root
+.ih
+PARAMETERS
+.ls input_root
+Root name for input files.
+.le
+.ls output_root
+Root name of output files.
+.le
+.ih
+DESCRIPTION
+Destreak5 takes as input the 5 files from a vacuum telescope 10830
+tape and produces 5 nearly identical files but with the streaks
+removed from the solar images and with the best fit ellipse parameters
+added to the image header. The input files are expected to be in the
+directory 'imdir' and to have the extensions '001' through '005'. These
+input files are expected to be mag tape images produced by T2D. The output
+files are stored in the current directory with the same extensions.
+Destreak5 calls 'readvt', 'quickfit', 'destreak', and various other utilities
+and is a cl script file.
+If an input image is not found, the processing for that image is skipped and
+a message is printed reporting the missing image.
+The next step in the 10830 reduction process is 'makehelium' which produces
+the projected daily grams.
+.ih
+EXAMPLES
+1. To destreak five files with root name m1585 and store the resulting images
+with root name M1585 the command would be:
+
+.nf
+ vt> destreak5 m1585 M1585
+.fi
+
+.ih
+SEE ALSO
+readvt, destreak, quickfit
+.endhelp
diff --git a/noao/imred/vtel/doc/dicoplot.hlp b/noao/imred/vtel/doc/dicoplot.hlp
new file mode 100644
index 00000000..5bb9f071
--- /dev/null
+++ b/noao/imred/vtel/doc/dicoplot.hlp
@@ -0,0 +1,36 @@
+.help dicoplot Dec84 noao.imred.vtel
+.ih
+NAME
+dicoplot -- Make plots of Carrington maps on the Dicomed
+.ih
+USAGE
+dicoplot input_image1 input_image2 rot_number
+.ih
+PARAMETERS
+.ls input_image1
+First image to plot on the output.
+.le
+.ls input_image2
+Second image to plot on the output.
+.le
+.ls rot_number
+Carrington rotation number.
+.le
+.ih
+DESCRIPTION
+Dicoplot plots two Carrington rotation maps side by side on the Dicomed (or
+the device given by the 'device' parameter), labeling the longitude and
+latitude axes, marking the observation dates taken from the image headers,
+and adding titles and a grayscale wedge.
+.ih
+EXAMPLES
+1. To make a plot containing a 10830 gram and the associated weight gram where
+the Carrington rotation number is 1841, the 10830 gram is "temp1",
+and the weight gram is "carweight" type:
+
+.nf
+ vt> dicoplot temp1 carweight 1841
+.fi
+
+The program gets information about the dates and longitudes from the image
+headers.
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/fitslogr.hlp b/noao/imred/vtel/doc/fitslogr.hlp
new file mode 100644
index 00000000..4a195e45
--- /dev/null
+++ b/noao/imred/vtel/doc/fitslogr.hlp
@@ -0,0 +1,58 @@
+.help fitslogr Dec85 noao.imred.vtel
+.ih
+NAME
+fitslogr -- Make a log of header information from a fits tape
+.ih
+USAGE
+fitslogr input_dev out_file startfnum endfnum
+.ih
+PARAMETERS
+.ls input_dev
+Tape drive, e.g. "mta1600" or just "mta"
+.le
+.ls out_file
+Name of output file to store information. Information is appended to this
+file to allow one to update a previously created file.
+.le
+.ls startfnum
+Tape file to start logging.
+.le
+.ls endfnum
+Tape file to stop logging.
+.le
+.ih
+DESCRIPTION
+Fitslogr reads FITS headers from successive tape files and compiles
+certain information into a single line of output for each file.
+Currently, the information output for each file includes:
+
+.nf
+	Tape file number, IRAF image name, date, time, and the
+	Carrington longitude for each image.
+.fi
+
+If not all of these header parameters are present, only the ones found
+will be printed correctly; the missing fields will contain garbage.
+The date is stored in a header parameter called OBS_DATE, the time is
+stored as 'seconds since midnight' in OBS_TIME and the Carrington
+longitude is stored in L_ZERO.
+To use this script, both the DATAIO package and the VTEL package must
+be loaded.
+.ih
+EXAMPLES
+1. To log all of the FITS images on a tape mounted on 'mta' and store the
+information in a file called 'CX052' the command would be:
+
+.nf
+ vt> fitslogr mta CX052 1 999
+.fi
+
+2. To log just the 40th through 60th files on mtb and see the output on
+your terminal, the command would be:
+
+.nf
+ vt> fitslogr mtb STDOUT 40 60
+.fi
+
+.ih
+SEE ALSO
+rfits
+.endhelp
diff --git a/noao/imred/vtel/doc/getsqib.hlp b/noao/imred/vtel/doc/getsqib.hlp
new file mode 100644
index 00000000..1bf24fb0
--- /dev/null
+++ b/noao/imred/vtel/doc/getsqib.hlp
@@ -0,0 +1,33 @@
+.help getsqib Jan85 noao.imred.vtel
+.ih
+NAME
+getsqib -- Extract a full disk squibby brightness image from a full disk image
+.ih
+USAGE
+getsqib inputimage outputimage
+.ih
+PARAMETERS
+.ls inputimage
+Name of image to get squibby brightness from.
+.le
+.ls outputimage
+Name of new output squibby brightness image.
+.le
+.ih
+DESCRIPTION
+Getsqib takes as input any full disk image and extracts the lower four bits
+from each pixel and stores this information in a new output image the same
+size as the input image.
+.ih
+EXAMPLES
+1. To extract the squibby brightness image from the image "test1" and store
+it in an image called "test1.sqib" the command would be:
+
+.nf
+ vt> getsqib test1 test1.sqib
+.fi
+
+.ih
+SEE ALSO
+putsqib
+.endhelp
diff --git a/noao/imred/vtel/doc/makehelium.hlp b/noao/imred/vtel/doc/makehelium.hlp
new file mode 100644
index 00000000..df27430c
--- /dev/null
+++ b/noao/imred/vtel/doc/makehelium.hlp
@@ -0,0 +1,38 @@
+.help makehelium Jan86 noao.imred.vtel
+.ih
+NAME
+makehelium -- Second pass of 10830 processing
+.ih
+USAGE
+makehelium input_root output_root
+.ih
+PARAMETERS
+.ls input_root
+Root name for input files.
+.le
+.ls output_root
+Root name of output files.
+.le
+.ih
+DESCRIPTION
+Makehelium takes the files output by 'destreak5' and projects them onto the
+small [180x180] maps. The input files are expected to be in the current
+directory and have the extensions '1' thru '5'. The output files are
+stored in the current directory with the extensions 'a1', 'a2', 'a3', 'b1', etc.
+This coding scheme is the same as that used in makeimages. Note that the
+absolute value images for 10830 grams should be thrown out since they are
+garbage.
+Makehelium calls 'rmap' and 'imdelete' and is a cl script file.
+.ih
+EXAMPLES
+1. To run makehelium on five files with root name m1585 and store the resulting
+images with root name M1585 the command would be:
+
+.nf
+ vt> makehelium m1585 M1585
+.fi
+
+.ih
+SEE ALSO
+rmap
+.endhelp
diff --git a/noao/imred/vtel/doc/makeimages.hlp b/noao/imred/vtel/doc/makeimages.hlp
new file mode 100644
index 00000000..d5f5fe31
--- /dev/null
+++ b/noao/imred/vtel/doc/makeimages.hlp
@@ -0,0 +1,64 @@
+.help makeimages Jan86 noao.imred.vtel
+.ih
+NAME
+makeimages -- Magnetogram batch processing script
+.ih
+USAGE
+makeimages input_root output_root
+.ih
+PARAMETERS
+.ls input_root
+Root name for input files.
+.le
+.ls output_root
+Root name of output files.
+.le
+.ih
+DESCRIPTION
+Makeimages processes 5 magnetograms from raw data tape images into projected
+small [180x180] maps. The input images are expected to be output from T2D,
+be in the current imdir, and have the extensions '001' through '005'.
+The output files are stored in the current directory with the extensions
+'a1', 'a2', 'a3', 'b1', etc. The output image coding scheme is the following:
+
+.nf
+ On the filename extensions the first character is a letter
+ corresponding to the tape file position.
+ a = first file on tape
+ b = second
+ .
+ .
+ e = fifth
+
+ The second character specifies which type of image this is.
+ 1 = data
+ 2 = absolute value
+ 3 = weights
+.fi
+
+Note: A logical directory called "scratch" must be set up before this
+program is run. This logical directory must point to the directory
+containing the input images. This can be set up as in the following
+example:
+
+.nf
+	vt> set scratch = "scr1:[recely]"
+.fi
+
+where this particular directory is a VAX/VMS type name. If the image
+files are in the user's home directory then "scratch" can be set to
+"home".
+
+Makeimages calls 'readvt', 'quickfit', 'rmap',
+'delete', and 'imdelete' and is a cl script.
+.ih
+EXAMPLES
+1. To process five magnetograms with root name m1585 and produce output images
+with the root name M1585, the command would be:
+
+.nf
+ vt> makeimages m1585 M1585
+.fi
+
+.ih
+SEE ALSO
+readvt, quickfit, rmap, delete, imdelete
+.endhelp
diff --git a/noao/imred/vtel/doc/merge.hlp b/noao/imred/vtel/doc/merge.hlp
new file mode 100644
index 00000000..24dbb778
--- /dev/null
+++ b/noao/imred/vtel/doc/merge.hlp
@@ -0,0 +1,90 @@
+.help merge Dec84 noao.imred.vtel
+.ih
+NAME
+merge -- Merge together daily synoptic grams into a complete Carrington map
+.ih
+USAGE
+merge outimage outweight outabs outratio month day year
+.ih
+PARAMETERS
+.ls outimage
+Name of output image.
+.le
+.ls outweight
+Output image containing weights, number of pixels per pixel.
+.le
+.ls outabs
+Output image containing the sums of the absolute values of the flux.
+Not used when merging 10830 maps.
+.le
+.ls outratio
+Output image containing the ratio of outimage/outabs.
+Not used when merging 10830 maps.
+.le
+.ls month, day, year
+Date of the center of this Carrington rotation.
+.le
+.ls longout = 180
+Longitude of the center of this Carrington rotation.
+.le
+.ls mergelist = "mergelist"
+File containing list of files to be merged.
+.le
+.ih
+DESCRIPTION
+Merge adds up daily synoptic grams to produce a Carrington rotation map.
+The input images are 180x180 and the output images are 360x180. The input
+images are read from the file mergelist. Merge then weights the input
+image as cos**4 in x where the center of the image corresponds to zero angle
+and the left and right edges of the image correspond to -90 and +90 degrees
+respectively. The input image consists of an unweighted "data" image,
+a weight image, and an absolute value image. The summing is done on the
+"data" image, on the weight image, and on the absolute value image
+separately to produce three output images. Finally the "data" image is
+divided by the absolute value image to produce a 4th output image.
+If 10830 data is being merged there are only two (2) images per day, the
+"data" image and the "weight" image. Also there are only two (2) output images,
+the "data" merged image and the "weights" merged image.
+A note about the mergelist file: the three grams for each day must be stored
+in the following sequence (data, absolute value, weight) for magnetograms
+and the two grams for each day must be stored as (data, weight) for 10830.
+The filenames must be one file name per line in the mergelist and files
+for different days must be grouped together, for example mergelist might look
+like:
+
+.nf
+	for magnetograms:	for 10830 grams:
+
+	MAG01			MAG01
+	MAG01a			MAG01w
+	MAG01w			MAG02
+	MAG02			MAG02w
+	MAG02a
+	MAG02w
+.fi
+
+for merging only two days of data where the first day is MAG01 and the second
+is MAG02. The 'a' extension stands for absolute value and the 'w' for weights.
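+
+As an illustration of the weighting (a sketch only; the exact column-to-angle
+mapping used by the task is an assumption here, taking 180 input columns with
+column 90.5 at disk center):
+
+.nf
+	angle(i)  = (i - 90.5) degrees		(i = 1...180, so -89.5 to +89.5)
+	weight(i) = cos (angle(i)) ** 4
+.fi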
+.ih
+EXAMPLES
+1. To merge a number of images on disk into output images called "im",
+"imweight", "imabs", and "imratio", where the date corresponding to the
+center of the Carrington map is 3/20/84 the command would be (magnetograms):
+
+.nf
+ vt> merge im imweight imabs imratio 3 20 84
+.fi
+
+The same command used for 10830 grams would be:
+
+.nf
+ vt> merge im imweight 3 20 84
+.fi
+
+2. If you have the list of files to be merged listed in a file called "mlist"
+instead of "mergelist" the command would be modified to read:
+
+.nf
+ vt> merge im imweight 3 20 84 mergelist="mlist"
+.fi
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/mrotlogr.hlp b/noao/imred/vtel/doc/mrotlogr.hlp
new file mode 100644
index 00000000..f86dbc0e
--- /dev/null
+++ b/noao/imred/vtel/doc/mrotlogr.hlp
@@ -0,0 +1,63 @@
+.help mrotlogr Jul86 "noao.imred.vtel"
+.ih
+NAME
+mrotlogr -- Make a log of header information from a fits tape (Carrington maps).
+.ih
+USAGE
+mrotlogr input_dev out_file startfnum endfnum append
+.ih
+PARAMETERS
+.ls input_dev
+Tape drive, e.g. "mta1600" or just "mta"
+.le
+.ls out_file
+Name of output file to store information. Information is appended to this
+file to allow one to update a previously created file.
+.le
+.ls startfnum
+Tape file to start logging.
+.le
+.ls endfnum
+Tape file to stop logging.
+.le
+.ls append
+Flag to signal that we are appending to an existing file.
+.le
+.ih
+DESCRIPTION
+Mrotlogr reads FITS headers from successive tape files and compiles
+certain information into a single line of output for each file.
+Currently, the information output for each file includes:
+
+.nf
+ Tape file number, IRAF image name, date, time, and the
+ Carrington longitude for each image.
+.fi
+
+If not all of these header parameters are present, only the ones found
+will be printed correctly; the missing fields will contain garbage.
+The date is stored in a header parameter called OBS_DATE, the time is
+stored as 'seconds since midnight' in OBS_TIME and the Carrington
+longitude is stored in L_ZERO.
+To use this script, both the DATAIO package and the VTEL package must
+be loaded.
+.ih
+EXAMPLES
+1. To log all of the FITS images on a tape mounted on 'mta' and store the
+information in a file called 'CX052' the command would be:
+
+.nf
+ vt> mrotlogr mta CX052 1 999 no
+.fi
+
+2. To log just the 40th through 60th files on mtb and see the output on
+your terminal, the command would be:
+
+.nf
+ vt> mrotlogr mtb STDOUT 40 60 no
+.fi
+
+.ih
+SEE ALSO
+rfits
+.endhelp
diff --git a/noao/imred/vtel/doc/mscan.hlp b/noao/imred/vtel/doc/mscan.hlp
new file mode 100644
index 00000000..d6b7f46b
--- /dev/null
+++ b/noao/imred/vtel/doc/mscan.hlp
@@ -0,0 +1,86 @@
+.help mscan May88 noao.imred.vtel
+.ih
+NAME
+mscan -- Read sector scans from tape into IRAF images
+.ih
+USAGE
+mscan input
+.ih
+PARAMETERS
+.ls input
+File template or device, e.g. "junk" or "s*" or "mta1600[1]" or "mtb800"
+.le
+.ls files
+List of tape file numbers or ranges delimited by commas, e.g. "1-3,5-8".
+`Files' is requested only if no file number is given in `input' and the
+input is tape.
+Files will be read in ascending order, regardless of the order of the list.
+Reading will terminate if EOT is reached, thus a list such as "1-999"
+may be used to read all the files on the tape.
+.le
+.ls verbose = yes
+Flag to signal program that it should produce verbose output. This means
+header information.
+.le
+.ls makeimage = yes
+Flag to signal the program that it should make images. If this parameter
+is set to no, the header will be read and decoded but no data will be read
+and no image will be produced on disk.
+.le
+.ls brief = yes
+Flag to make mscan produce brief filenames for the output images. These
+filenames have the form [svb]nnn e.g. s034 or b122. The b is for a brightness
+image, the v is for a velocity image, and the s is for a select image. The
+'nnn' is the tape sequence number or the file number in a template expansion.
+If this flag is set to false the long filenames described below in the
+"Description" section will be produced.
+.le
+.ls select = yes
+Flag to tell the program to make a select image.
+.le
+.ls bright = yes
+Flag to tell the program to make a brightness image.
+.le
+.ls velocity = yes
+Flag to tell the program to make a velocity image.
+.le
+.ih
+DESCRIPTION
+Mscan reads all or selected area scans from a vacuum telescope tape
+and formats the data into multiple IRAF images. Type 1, 2, and 3 area
+scans can produce 3 output images and type 4 produces one output image.
+The long image names are assembled in the following way:
+.nf
+
+ The first letter is one of [bsv] for brightness, select, or velocity.
+ The next two digits are the day of the month.
+ Underbar.
+ The next 4 digits are the hour and minute.
+ Underbar.
+ Finally there is a three digit tape sequence number.
+ ie.
+
+ b13_1709_002
+.fi
+
+.ih
+EXAMPLES
+1. To read files 5-7 from mta at 1600 bpi, the command would be:
+
+.nf
+ vt> mscan mta1600 5-7
+.fi
+
+2. To see the header information only for file 6, one could use the command:
+
+.nf
+ vt> mscan mta1600[6] make-
+.fi
+
+3. To read file 4 from mta and only produce a velocity image:
+
+.nf
+ vt> mscan mta[4] bri- sel-
+.fi
+
+.endhelp
diff --git a/noao/imred/vtel/doc/pimtext.hlp b/noao/imred/vtel/doc/pimtext.hlp
new file mode 100644
index 00000000..e78fdc8d
--- /dev/null
+++ b/noao/imred/vtel/doc/pimtext.hlp
@@ -0,0 +1,110 @@
+.help pimtext May86 noao.imred.vtel
+.ih
+NAME
+pimtext -- Put image text. Use pixel font to write text into image.
+.ih
+USAGE
+pimtext iraf_files
+.ih
+PARAMETERS
+.ls iraf_files
+Image or images to be written into. This entry may contain wild cards and
+will be expanded into however many files match the wild card.
+.le
+.ls refim
+Reference image to pull date and time parameters from in the event the "ref"
+flag is set.
+.le
+.ls ref
+Reference flag. When set, causes the program to take information (date/time)
+from the reference image and write it into the image or images expanded from
+the template "iraf_images".
+.le
+.ls x = 10
+X position (column) in image to write text.
+.le
+.ls y = 10
+Y position (line) in image to write text.
+.le
+.ls xmag = 2
+Factor by which to magnify the text in the x direction. This must be an
+integer. The pixelfont is expanded by pixel replication. The font width
+at xmag=1 is 6.
+.le
+.ls ymag = 2
+Factor by which to magnify the text in the y direction. This must be an
+integer. The pixelfont is expanded by pixel replication. The font width
+at ymag=1 is 7.
+.le
+.ls val = -10000
+Value to put in text pixels.
+.le
+.ls setbgnd = yes
+Boolean parameter to signal the program to fill in the area behind the
+characters with pixels set to bgndval.
+.le
+.ls bgndval = 10000
+Pixel value to use to fill in background in text block.
+.le
+.ls date = yes
+Flag that instructs the program to look for the date in the
+image header and write it into the image. If the date and time
+flags are both set, both will be written into the image as a single
+string.
+.le
+.ls time = yes
+Flag that instructs the program to look for the time in the
+image header and write it into the image.
+.le
+.ls text
+Text string to write into image.
+.le
+.ih
+DESCRIPTION
+Pimtext writes either the date and/or time or the indicated text string into
+the image or images specified.
+Pimtext, by default, writes the date and/or time into the image in the lower
+left corner. If it cannot find the date or time pimtext will give a warning
+and read a text string from the user's terminal. If the date and time flags are
+set to 'no', pimtext will take the text string to be written from the user.
+The position of the text may be adjusted by setting
+the parameters 'x' and 'y' which set the lower left pixel of
+the text block. The pixels in the text block behind the characters may
+be set to a particular value when the 'setbgnd' flag is set. The pixel
+values used to write the text and the background can be set by adjusting
+the parameters 'val' and 'bgndval'. If the text overlaps the image
+edge in the X direction it will be truncated. If it overlaps in Y it will
+not be written.
+The user may magnify the text by adjusting the "xmag" and "ymag" parameters.
+The default (2,2) is a nice size for display in a 512 by 512 image. Bigger
+images may need bigger text, smaller images may need smaller text.
+The "ref" flag is used to write information from one image into another
+image.
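+
+As a rough guide (assuming the 6 by 7 pixel font described above is simply
+replicated xmag times in x and ymag times in y), a string of n characters
+occupies a block of about:
+
+.nf
+	width  = 6 * xmag * n	pixels
+	height = 7 * ymag	pixels
+.fi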
+
+.ih
+EXAMPLES
+1. To write the date and time into the three images s13_1709_001, v13_1709_001,
+and b13_1709_001 (assuming the directory contains only these three images)
+the command would be:
+
+.nf
+ vt> pimtext ?13*
+.fi
+
+2. To write the text string "hello world" into the image 'testim' the command
+would be
+
+.nf
+ vt> pimtext testim 'hello world' date=no time=no
+.fi
+
+3. To write the date and time into the images s1, s2, s3, s4 and position
+the text at pixel 30,30, and turn off the text background fill, the command
+would be:
+
+.nf
+ vt> pimtext s* x=30 y=30 setbgnd=no
+.fi
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/putsqib.hlp b/noao/imred/vtel/doc/putsqib.hlp
new file mode 100644
index 00000000..f6400cfe
--- /dev/null
+++ b/noao/imred/vtel/doc/putsqib.hlp
@@ -0,0 +1,38 @@
+.help putsqib Jan85 noao.imred.vtel
+.ih
+NAME
+putsqib -- Merge a full disk image with a squibby brightness image
+.ih
+USAGE
+putsqib inputimage sqibimage outputimage
+.ih
+PARAMETERS
+.ls inputimage
+Name of data image to merge with squibby brightness image.
+.le
+.ls sqibimage
+Name of squibby brightness image to merge with data image.
+.le
+.ls outputimage
+Name of new, merged, output image.
+.le
+.ih
+DESCRIPTION
+Putsqib accepts as input a data image and a squibby brightness image. It
+multiplies each pixel in the input data image by 16 and adds the associated
+pixel from the squibby brightness input image. The pixel is then written
+to the new, output image.
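+
+In other words, for a data value d and a squibby brightness value s (0-15,
+the low four bits), each output pixel is:
+
+.nf
+	out = 16 * d + s
+.fi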
+.ih
+EXAMPLES
+1. To merge a data image called 'data' and a squibby brightness image called
+'sqib' and store the result in an image called 'complete', the command
+would be:
+
+.nf
+ vt> putsqib data sqib complete
+.fi
+
+.ih
+SEE ALSO
+getsqib
+.endhelp
diff --git a/noao/imred/vtel/doc/quickfit.hlp b/noao/imred/vtel/doc/quickfit.hlp
new file mode 100644
index 00000000..41621b6d
--- /dev/null
+++ b/noao/imred/vtel/doc/quickfit.hlp
@@ -0,0 +1,59 @@
+.help quickfit Dec84 noao.imred.vtel
+.ih
+NAME
+quickfit -- Fit an ellipse to the limb for a full disk scan
+.ih
+USAGE
+quickfit image
+.ih
+PARAMETERS
+.ls image
+Name of image to be fit.
+.le
+.ls threshold = 4
+Squibby brightness threshold to use in determining limb points.
+.le
+.ls xguess = 1024
+X coordinate of center of first guess circle.
+.le
+.ls yguess = 1024
+Y coordinate of center of first guess circle.
+.le
+.ls halfwidth = 50
+Halfwidth of window centered on previous limb point to search through
+for a limb point on the current line.
+.le
+.ls rowspace = 20
+Number of rows to skip between limbpoints near center in y.
+.le
+.ls rejectcoeff = .02
+Least squares rejection coefficient. If the radius of a limb point differs
+from the fitted limb by more than this amount (with the limb radius
+normalized to 1.0), the point is not used in the fit.
+.le
+.ih
+DESCRIPTION
+Quickfit finds the least squares best fit ellipse to the limb in a full
+disk scan. Quickfit returns the ellipse parameters (x,y coordinates of
+the ellipse center and the x and y semidiameters), the number of limbpoints
+found, the number of limbpoints rejected, and the fraction of limb
+points rejected by the least squares routine. This 'fraction rejected'
+gives the user some measure of the quality of the data and makes it
+possible to rerun Quickfit with different parameters to take that
+quality into account. Quickfit also returns the sub-earth latitude
+and longitude when in verbose mode. The ellipse and ephemeris parameters
+are stored in the image header for future reference.
+.ih
+EXAMPLES
+1. To find the best fit ellipse for the limb in an image called "image1" and to
+see verbose output, one would use the following command:
+
+.nf
+ vt> quickfit image1 v+
+.fi
+
+This will also use the default values of rowspace, halfwidth,
+and rejectcoeff.
+
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/readvt.hlp b/noao/imred/vtel/doc/readvt.hlp
new file mode 100644
index 00000000..b9d6abe7
--- /dev/null
+++ b/noao/imred/vtel/doc/readvt.hlp
@@ -0,0 +1,86 @@
+.help readvt May87 noao.imred.vtel
+.ih
+NAME
+readvt -- Read vacuum telescope full disk grams
+.ih
+USAGE
+readvt input_fd files output_image
+.ih
+PARAMETERS
+.ls input_fd
+File or device template, e.g. "mta1600[1]" or "mtb800" or "junk" or "s*"
+.le
+.ls files
+List of tape file numbers or ranges delimited by commas, e.g. "1-3,5-8".
+`Files' is requested only if no file number is given in `input'.
+Files will be read in ascending order, regardless of the order of the list.
+Reading will terminate if EOT is reached, thus a list such as "1-999"
+may be used to read all the files on the tape.
+.le
+.ls output_image
+Name to give output image. If the input file template is not a magtape
+specification then this can be an IRAF filename template to be
+expanded into a list of files. If the number of files in the input
+template and in the output template do not match and if the output
+template expands to one filename then that filename is used as a
+root name to which filenumbers are appended for each input file.
+i.e. "junk" becomes "junk001", "junk002", etc. If the input template
+is a magtape without a filenumber attached, i.e. "mta", the
+output name is used as a root name and the file number is appended
+for each file read.
+.le
+.ls verbose = no
+Flag to signal program that it should produce verbose output. This includes
+header information and progress reports.
+.le
+.ls headeronly = no
+Flag to signal the program that it should only print out header information
+and quit without reading the data. The 'verbose' flag must be set to yes
+to use this flag since otherwise the header information will not be printed.
+This flag is used to look at headers on the tape to check dates, times
+and observation types.
+.le
+.ls robust = no
+Flag to signal program that it should ignore a wrong observation type in the
+image header.
+.le
+.ih
+DESCRIPTION
+Readvt reads any one of the grams on a vacuum telescope tape and puts the
+data into an IRAF image. The IRAF image is 2048x2048 short integers.
+.ih
+EXAMPLES
+1. To read the second image from mta at 1600 bpi, store the image into "image1"
+and see verbose output the command would be:
+
+.nf
+ vt> readvt mta1600[2] image1 v+
+.fi
+
+2. To look at the header information of the 4th file on a tape which is on
+mtb and which was written at 1600 bpi, the command would be:
+
+.nf
+ vt> readvt mtb1600[4] v+ h+
+.fi
+
+3. To read the disk files "s001", "s002", "s003", "s004" and put the output
+images into the files "s001i", "s002i", "s003i", "s004i" without
+verbose output (assuming no other file in the directory starts with "s")
+the command would be:
+
+.nf
+ vt> readvt s* s*//i
+.fi
+
+4. To read the first five files on mta and put the output images into files
+images with root name HHH the command would be:
+
+.nf
+ vt> readvt mta 1-5 HHH
+.fi
+
+.ih
+SEE ALSO
+writevt
+.endhelp
diff --git a/noao/imred/vtel/doc/rmap.hlp b/noao/imred/vtel/doc/rmap.hlp
new file mode 100644
index 00000000..e4b4a645
--- /dev/null
+++ b/noao/imred/vtel/doc/rmap.hlp
@@ -0,0 +1,47 @@
+.help rmap Dec84 noao.imred.vtel
+.ih
+NAME
+rmap -- Project a full disk gram into a 180x180 flat image
+.ih
+USAGE
+rmap inputimage outputimage outweight outabs
+.ih
+PARAMETERS
+.ls inputimage
+Name of image to be projected.
+.le
+.ls outputimage
+Name to give output data image.
+.le
+.ls outweight
+Name to give output weight image.
+.le
+.ls outabs
+Name to give output absolute value image.
+.le
+.ih
+DESCRIPTION
+Rmap accepts as input a full disk Carrington gram in a 2048x2048 IRAF image
+and projects it into a 180x180 IRAF image such that the lines of longitude
+and latitude are straight lines. The output is the data image, the weight
+image (which is the count of the number of pixels of the input image which
+were summed to produce the single output pixel), and the absolute value image
+which is the same as the data image except that the absolute value of each
+input pixel is taken before being summed into the output pixel.
+Rmap calculates the mean field, the mean of the absolute value of the field,
+and the number of pixels in the original gram used to make the projection.
+These three parameters are stored in the output "data" image header as
+MEAN_FLD, MEANAFLD, and NUMPIX respectively.
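+
+Schematically, for each output pixel (i,j) (a restatement of the above, not
+the task's exact code):
+
+.nf
+	data(i,j)   = sum of the input pixels mapped to (i,j)
+	weight(i,j) = count of the input pixels mapped to (i,j)
+	abs(i,j)    = sum of |input pixels| mapped to (i,j)
+.fi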
+.ih
+EXAMPLES
+1. To project an image called "im10830" and produce output images "im10830.d",
+"im10830.w", and "im10830.a", one would use the following command:
+
+.nf
+ vt> rmap im10830 im10830.d im10830.w im10830.a
+.fi
+
+.ih
+SEE ALSO
+readvt, quickfit, and merge.
+.endhelp
diff --git a/noao/imred/vtel/doc/syndico.hlp b/noao/imred/vtel/doc/syndico.hlp
new file mode 100644
index 00000000..25b4b0ee
--- /dev/null
+++ b/noao/imred/vtel/doc/syndico.hlp
@@ -0,0 +1,77 @@
+.help syndico May89 noao.imred.vtel
+.ih
+NAME
+syndico -- Make dicomed plots of full disk images (18 centimeters in diameter)
+.ih
+USAGE
+syndico image
+.ih
+PARAMETERS
+.ls image
+Image to plot on the dicomed.
+.le
+.ls logofile = iraf$noao/imred/vtel/nsolcrypt.dat
+File containing the text encoded NSO logo image.
+.le
+.ls device = dicomed
+Device on which to plot the image.
+.le
+.ls sbthresh = 2
+Squibby brightness threshold used to determine the limb for trimming.
+.le
+.ls plotlogo = yes
+Flag indicating whether or not to plot the logo.
+.le
+.ls verbose = yes
+Flag indicating to the program that it should give progress reports.
+.le
+.ls forcetype = no
+Flag to override the wavelength designation from the image header.
+.le
+.ls magnetic = yes
+If 'forcetype' = 'yes' then this flag designates that we should force
+to magnetic (8688). If set to 'no' the type is forced to 10830.
+The effect of forcing the type is to choose which lookup table to
+use when scaling the image.
+.le
+.ls month
+Month the observation was taken (January = 1,,,December = 12).
+.le
+.ls day
+Day of the month the observation was taken.
+.le
+.ls year
+Year the observation was taken (two digits only, i.e. 89 for 1989).
+.le
+.ls hour
+Hour of the day the observation was taken (universal time, 1-24).
+.le
+.ls minute
+Minute the observation was taken (0-59).
+.le
+.ls second
+Second the observation was taken (0-59).
+.le
+.ih
+DESCRIPTION
+Syndico produces full disk plots on the Dicomed. The ephemeris data
+is used to estimate the radius of the image and the center of the
+disk is taken from the image header. Using this data, an image is
+made that is as close to 18 centimeters in diameter as possible.
+There are two greyscale lookup tables corresponding to the two types
+of image normally used, the magnetogram and the spectroheliogram.
+If the wavelength is something other than 8688 or 10830, a linear
+greyscale is used.
+
+The National Solar Observatory (tentative) logo is read from an encoded
+text file and put on the plot if requested (default).
+.ih
+EXAMPLES
+
+.nf
+ vt> syndico image1
+.fi
+
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/tcopy.hlp b/noao/imred/vtel/doc/tcopy.hlp
new file mode 100644
index 00000000..57a523cb
--- /dev/null
+++ b/noao/imred/vtel/doc/tcopy.hlp
@@ -0,0 +1,56 @@
+.help tcopy Oct85 noao.imred.vtel
+.ih
+NAME
+tcopy -- Tape to tape copy
+.ih
+USAGE
+tcopy input_fd output_fd
+.ih
+PARAMETERS
+.ls input_fd
+Tape file or device name for input, e.g. "mta1600[1]" or "mtb800"
+.le
+.ls files
+List of tape file numbers or ranges delimited by commas, e.g. "1-3,5-8".
+`Files' is requested only if no file number is given in `input_fd'.
+Files will be read in ascending order, regardless of the order of the list.
+Reading will terminate if EOT is reached, thus a list such as "1-999"
+may be used to read all the files on the tape.
+.le
+.ls output_fd
+File or device name, e.g. "mta1600[1]" or "mtb800". If a file number is not
+given the user will be asked whether or not this is a new tape. If it is
+a new tape the file number "1" will be used. If it is not a new tape, i.e.
+it already has data on it, then file number "EOT" will be used.
+.le
+.ls new_tape = no
+New tape flag. Usage is described above.
+.le
+.ls verbose = no
+Flag to signal program that it should print information about progress while
+running.
+.le
+.ih
+DESCRIPTION
+Tcopy copies files from one tape to another reporting read errors on the
+input tape as it goes. Tcopy, when it encounters a read error, does its
+best to get as much data as possible by validating the input buffer after
+the error, guessing its length, and writing it out to the output tape.
+.ih
+EXAMPLES
+1. To copy all the files on mta to a new tape on mtb:
+
+.nf
+ vt> tcopy mta 1-999 mtb yes
+.fi
+
+2. To copy file 5 from mta and append it to the tape on mtb:
+
+.nf
+ vt> tcopy mta1600[5] mtb no
+.fi
+
+.ih
+SEE ALSO
+t2d
+.endhelp
diff --git a/noao/imred/vtel/doc/trim.hlp b/noao/imred/vtel/doc/trim.hlp
new file mode 100644
index 00000000..9962db80
--- /dev/null
+++ b/noao/imred/vtel/doc/trim.hlp
@@ -0,0 +1,33 @@
+.help trim Jan85 noao.imred.vtel
+.ih
+NAME
+trim -- Trim the limb. Zero all pixels off the limb in a full disk image
+.ih
+USAGE
+trim inputimage threshold
+.ih
+PARAMETERS
+.ls inputimage
+Name of data image to trim.
+.le
+.ls threshold
+Squibby brightness value to use as a threshold in determining the limb.
+.le
+.ih
+DESCRIPTION
+Trim scans all the pixels in an image and sets those pixels to zero that
+contain a squibby brightness smaller than the threshold value. This is
+done in place, that is, the input image gets modified.
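+
+Schematically, each pixel is treated as follows (assuming the squibby
+brightness is the low four bits of the pixel, as described in getsqib):
+
+.nf
+	if (squibby brightness of pixel < threshold)
+	    pixel = 0
+.fi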
+.ih
+EXAMPLES
+1. To trim a data image called 'data' with a squibby brightness threshold
+of 4 (the standard value) the command would be:
+
+.nf
+ vt> trim data 4
+.fi
+
+.ih
+SEE ALSO
+getsqib, putsqib
+.endhelp
diff --git a/noao/imred/vtel/doc/unwrap.hlp b/noao/imred/vtel/doc/unwrap.hlp
new file mode 100644
index 00000000..67fad069
--- /dev/null
+++ b/noao/imred/vtel/doc/unwrap.hlp
@@ -0,0 +1,95 @@
+.help unwrap May87 noao.imred.vtel
+.ih
+NAME
+unwrap -- Filter an IRAF image; remove binary wrap-around.
+.ih
+USAGE
+unwrap listin listout
+.ih
+PARAMETERS
+.ls listin
+List of images to unwrap, this is an IRAF template.
+.le
+.ls listout
+List of output images, this is an IRAF template. If the output list
+is the same as the input list, the unwrapping is done in-place.
+.le
+.ls threshold1 = 128
+Data jump threshold for first unwrap pass.
+.le
+.ls wrapval1 = 256
+Factor to multiply wrap value by for first unwrap pass.
+.le
+.ls threshold2 = 128
+Data jump threshold for second unwrap pass.
+.le
+.ls wrapval2 = 256
+Factor to multiply wrap value by for second unwrap pass.
+.le
+.ls cstart = 2
+Column of image to start unwrapping. Columns are numbered from left to right.
+.le
+.ls step = 5
+Number of steps (1-5) to perform on image (unwrap1, difference, unwrap2,
+reconstruct, fixlines).
+.le
+.ls verbose = yes
+If set, program produces progress reports, etc.
+.le
+.ih
+DESCRIPTION
+Unwrap checks for binary wraparound in IRAF images.
+The algorithm consists of reading the image line by line, unwrapping
+each line, and writing the line out to another image. The procedure
+for unwrapping is a five-step process (a sketch of the first pass is given
+after the list below).
+.ls Step one: unwrap1
+Unwrapping is accomplished by scanning the data line and looking for
+large jumps in the data values. Large negative jumps are interpreted
+as data wrapping and large positive jumps are interpreted as data unwrapping.
+The program keeps track of the number of wraps, each data element in the
+array has wrapval1 * wrapnumber added. This effectively unwraps an image
+in which the point to point variation in the data values is small compared
+to the variation caused by a binary wrap.
+.le
+.ls Step two: difference
+A difference image is produced from the above step one image by calculating
+the pixel to pixel difference between all of the pixels in the line. The
+first column of the image is generally left intact so that the image can
+be reconstructed in a later step. Step one often produces streaks in the
+image due to data variation large enough to mimic wrapping. This step
+two difference image eliminates most of these streaks except for their
+point of origin, where the confusion occured.
+.le
+.ls Step three: unwrap2
+This is the second unwrapping step. The image is unwrapped as in step
+one using the second set of unwrap values (threshold2, wrapval2).
+.le
+.ls Step four: reconstruct
+The original image is reconstructed from the step three image by
+adding pixel values successively to line pixels.
+.le
+.ls Step five: fixlines
+If bad lines (streaks) can still be found in the image, they are
+eliminated by replacing each bad line with the average of the lines
+above and below it.
+.le
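+
+A minimal sketch of the step-one pass described above (an illustration only,
+not the actual implementation; the exact test used to detect "large jumps"
+is an assumption):
+
+.nf
+	nwrap = 0
+	for each column j from cstart to the end of the line:
+	    if (line[j] - line[j-1] < -threshold1)	# large negative jump
+		nwrap = nwrap + 1			# data wrapped
+	    else if (line[j] - line[j-1] > threshold1)	# large positive jump
+		nwrap = nwrap - 1			# data unwrapped
+	    line[j] = line[j] + wrapval1 * nwrap
+.fi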
+.ih
+EXAMPLES
+1. To unwrap an image called "continuum" and store the resulting image in
+"unwrapped", and use the default parameters, the command might be:
+
+.nf
+ vt> unwrap continuum unwrapped
+.fi
+
+2. To unwrap all the images in the directory starting with s1492 and store
+the unwrapped images in s1492*u, to start in column 31, to do four steps,
+and to see verbose output, the command might be:
+
+.nf
+	vt> unwrap s1492* s1492*//u cstart=31 step=4 v+
+.fi
+
+.ih
+SEE ALSO
+.endhelp
diff --git a/noao/imred/vtel/doc/vtblink.hlp b/noao/imred/vtel/doc/vtblink.hlp
new file mode 100644
index 00000000..0bb26779
--- /dev/null
+++ b/noao/imred/vtel/doc/vtblink.hlp
@@ -0,0 +1,53 @@
+.help vtblink Dec84 noao.imred.vtel
+.ih
+NAME
+vtblink -- Blink daily grams to check registration
+.ih
+USAGE
+vtblink
+.ih
+PARAMETERS
+.ls imname1
+First image to be mapped.
+.le
+.ls imname2
+Subsequent images to be mapped
+.le
+.ls z1 = -3000.0
+Minimum grayscale intensity to be mapped during 'display'.
+.le
+.ls z2 = 3000.0
+Maximum grayscale intensity to be mapped during 'display'.
+.le
+.ih
+DESCRIPTION
+Vtblink allows the user to blink successive frames of data on the IIS. The
+program calculates the offset between grams based on the
+longitudes for each image. Vtblink will ask for each successive image
+and will display it on the next (mod 4) IIS frame.
+After each image is displayed the user is returned to the cl so that any of
+the images$tv tasks can be used to analyze the data. The user returns to
+the blink program by typing 'bye' at the cl prompt. To exit the program the
+user enters "end" for the filename. Images are displayed with the grayscale
+limits set by default to -3000.0 and 3000.0. These values correspond to the
+parameters z1 and z2 which may be given on the command line. If the user
+forgets which IIS frame contains which image, he/she can enter "stat" to the
+"next image" prompt and will get a list of which images are in which frames.
+.ih
+EXAMPLES
+1. To run vtblink with the default gray scale parameters just type:
+
+.nf
+ vt> vtblink
+.fi
+
+2. To run vtblink with gray scale parameters z1=-4000.0, z2=4000.0, the
+command would be:
+
+.nf
+ vt> vtblink z1=-4000.0 z2=4000.0
+.fi
+.ih
+SEE ALSO
+display, blink, lumatch
+.endhelp
diff --git a/noao/imred/vtel/doc/vtexamine.hlp b/noao/imred/vtel/doc/vtexamine.hlp
new file mode 100644
index 00000000..20bf13eb
--- /dev/null
+++ b/noao/imred/vtel/doc/vtexamine.hlp
@@ -0,0 +1,50 @@
+.help vtexamine Jan86 noao.imred.vtel
+.ih
+NAME
+vtexamine -- examine the headers and record structure of vacuum telescope files
+.ih
+USAGE
+vtexamine tape_file
+.ih
+PARAMETERS
+.ls tape_file
+Tape file, e.g. "mta1600[2]" or "mta1600".
+.le
+.ls files
+List of tape file numbers or
+ranges delimited by commas, e.g. "1-3,5-8".
+The file list is requested only if no file number is given in tape_file.
+Files will be read in ascending order, regardless of the order of the list.
+Reading
+will terminate if EOT is reached, thus a list such as "1-999"
+may be used to read all the files on the tape.
+.le
+.ls headers=yes
+Decode and print header information from each file examined.
+.le
+.ih
+DESCRIPTION
+By default, vtexamine decodes and prints header and record
+structure information for each file examined. The header
+information can be turned off by setting headers=no.
+.ih
+EXAMPLES
+1. To see the header information and determine the record structure of all the
+files on a vacuum telescope tape and send the result to the file vtdump:
+
+.nf
+ vt> vtexamine mtb1600 1-999 > vtdump
+.fi
+
+2. To just get the record structure for the third file on a vacuum telescope
+tape the command would be:
+
+.nf
+ vt> vtexamine mtb1600[3] headers=no
+.fi
+.ih
+BUGS
+The IRAF magtape i/o routines do not permit data beyond a double EOF
+to be accessed. Therefore vtexamine cannot be used to examine tapes with
+embedded double EOFs.
+.endhelp
diff --git a/noao/imred/vtel/doc/writetape.hlp b/noao/imred/vtel/doc/writetape.hlp
new file mode 100644
index 00000000..6159c016
--- /dev/null
+++ b/noao/imred/vtel/doc/writetape.hlp
@@ -0,0 +1,35 @@
+.help writetape Jan86 noao.imred.vtel
+.ih
+NAME
+writetape -- Write 5 grams to tape in full disk format (used as an
+intermediate step in 10830 processing).
+.ih
+USAGE
+writetape input_root tape_name
+.ih
+PARAMETERS
+.ls getname
+Root name for input files.
+.le
+.ls getmtape
+Tape file descriptor.
+.le
+.ih
+DESCRIPTION
+Writetape takes as input five (5) full disk grams in IRAF image format
+and writes them to tape in a format identical to the original full disk
+grams produced on the vacuum telescope. The input image names are expected
+to be the "input_root" name concatenated with the numbers "1", "2", ... "5".
+Writetape calls 'writevt' and is a cl script file.
+.ih
+EXAMPLES
+1. To write five files with root name m1585 to tape mta, the command would be:
+
+.nf
+ vt> writetape m1585 mta
+.fi
+
+.ih
+SEE ALSO
+readvt, writevt
+.endhelp
diff --git a/noao/imred/vtel/doc/writevt.hlp b/noao/imred/vtel/doc/writevt.hlp
new file mode 100644
index 00000000..3475a5c4
--- /dev/null
+++ b/noao/imred/vtel/doc/writevt.hlp
@@ -0,0 +1,43 @@
+.help writevt Dec84 noao.imred.vtel
+.ih
+NAME
+writevt -- Write vacuum telescope full disk grams to tape
+.ih
+USAGE
+writevt input_image output_fd
+.ih
+PARAMETERS
+.ls input_image
+Name of input image.
+.le
+.ls output_fd
+File or device name, e.g. "mta1600[1]" or "mtb800". If a file number is not
+given the user will be asked whether or not this is a new tape. If it is
+a new tape the file number "1" will be used. If it is not a new tape, i.e.
+it already has data on it, then file number "EOT" will be used.
+.le
+.ls verbose = no
+Flag to signal program that it should produce verbose output. This includes
+header information and progress reports.
+.le
+.ls new_tape = no
+New tape flag. Usage is described above.
+.le
+.ih
+DESCRIPTION
+Writevt writes a full disk vacuum telescope gram in IRAF image format to tape.
+The IRAF image is 2048x2048 short integers. The tape format is the same as
+that used to write original data tapes on the mountain.
+.ih
+EXAMPLES
+1. To write the image "image1" to mta at 1600 bpi at file number 3 and
+see verbose output the command would be:
+
+.nf
+ vt> writevt image1 mta1600[3] v+
+.fi
+
+.ih
+SEE ALSO
+readvt
+.endhelp
diff --git a/noao/imred/vtel/fitslogr.cl b/noao/imred/vtel/fitslogr.cl
new file mode 100644
index 00000000..42681118
--- /dev/null
+++ b/noao/imred/vtel/fitslogr.cl
@@ -0,0 +1,104 @@
+#{ FITSLOGR -- Read all the headers on a FITS tape and print out some
+# of the header information for each file.
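+#
+# The keyword tests below assume the fixed-column header listing produced by
+# 'rfits' with long_header=yes; the substr() column ranges would need to be
+# adjusted if that listing format ever changes.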
+
+{
+ struct header, headline, tfile, irafname
+ struct obsdate, lzero, keyword
+ struct tape, outfile, zcm, meanafld, numpix, meanfld
+ struct *fp
+ int sfnum, efnum, filenum, ssm
+ int hours, minutes, seconds
+ bool append, mag
+
+ if (!deftask ("rfits")) {
+ print ("Task rfits not loaded. Load dataio and then try again.")
+ bye
+ }
+
+ # Get the tape name and the output file name.
+ tape = gettape
+ outfile = getout
+
+ # Get the starting and ending file numbers for the log.
+ sfnum = getsfnum
+ efnum = getefnum
+
+ # Get the append flag.
+ append = getapp
+
+ # Get the mag flag.
+ mag = getmag
+
+ if (!append) {
+ if (mag) {
+ print ("File fname date time L-zero zcm meanafld numpix", >> outfile)
+ } else {
+ print ("File fname date time L-zero meanfld numpix", >> outfile)
+ }
+ }
+
+ filenum = sfnum
+ while (YES) {
+
+ # Read the next fits header from the tape.
+ header = mktemp("temp")
+ fp = header
+ rfits (tape, filenum, make_image=no, long_header=yes, > header)
+
+ # Initialize the output variables.
+ tfile = " "
+ irafname = " "
+ obsdate = " "
+ lzero = " "
+ zcm = " "
+ meanafld = " "
+ numpix = " "
+ hours = 0
+ minutes = 0
+ seconds = 0
+
+ # Now match keywords against this header to obtain needed output.
+	tfile = filenum
+ while (fscan (fp, headline) != EOF) {
+ keyword = substr(headline, 1, 8)
+ if (keyword == "File: mt")
+ tfile = substr(headline, 7, 15)
+ else if (keyword == "IRAFNAME")
+ irafname = substr(headline, 12, 18)
+ else if (keyword == "OBS_DATE")
+ obsdate = substr(headline, 23, 30)
+ else if (keyword == "OBS_TIME") {
+ ssm = int(substr(headline, 23, 30)) # Seconds Since Midnight.
+ hours = ssm/3600
+ minutes = (ssm - (hours*3600))/60
+ seconds = ssm - hours*3600 - minutes*60
+ }
+ else if (keyword == "L_ZERO ")
+ lzero = substr(headline, 19, 26)
+ else if (keyword == "ZCM ")
+ zcm = substr(headline, 18, 26)
+ else if (keyword == "MEANAFLD")
+ meanafld = substr(headline, 18, 26)
+ else if (keyword == "MEAN_FLD")
+ meanfld = substr(headline, 18, 26)
+ else if (keyword == "NUMPIX ")
+ numpix = substr(headline, 19, 30)
+ else if (keyword == "End of d") {
+ print (headline, >> outfile)
+ delete (header, verify-)
+ bye
+ }
+ }
+ if (mag) {
+ print (tfile, irafname, obsdate, " ", hours, minutes, seconds,
+ lzero, zcm, meanafld, numpix, >> outfile)
+ } else {
+ print (tfile, irafname, obsdate, " ", hours, minutes, seconds,
+ lzero, meanfld, numpix, >> outfile)
+ }
+ filenum = filenum + 1
+ delete (header, verify-)
+ if (filenum > efnum)
+ bye
+ }
+}
diff --git a/noao/imred/vtel/fitslogr.par b/noao/imred/vtel/fitslogr.par
new file mode 100644
index 00000000..f6d8c141
--- /dev/null
+++ b/noao/imred/vtel/fitslogr.par
@@ -0,0 +1,6 @@
+gettape,s,a,,,,Tape to read fits headers from (i.e. "mta")
+getout,s,a,,,,File to put output information in
+getsfnum,i,a,,,,File number on tape from which to start logging
+getefnum,i,a,,,,File number on tape at which logging is to end
+getapp,b,a,,,,Append to existing file?
+getmag,b,a,,,,Is this data magnetic field? (yes = 8688, no = 10830)
diff --git a/noao/imred/vtel/gauss.x b/noao/imred/vtel/gauss.x
new file mode 100644
index 00000000..fc5f9211
--- /dev/null
+++ b/noao/imred/vtel/gauss.x
@@ -0,0 +1,16 @@
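+# GAUSS -- Evaluate a Gaussian model and its partial derivatives,
+#
+#	ymod = a[1] * exp (-((x - a[2]) / a[3]) ** 2)
+#
+# returning in dyda[1..3] the derivatives of ymod with respect to a[1..3].
+# The calling sequence matches the FUNCS routine expected by MRQMIN.
+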
+procedure gauss (x, a, ymod, dyda, ma)
+
+real x, a[ma], ymod, dyda[ma]
+int ma
+
+real arg, ex, fac
+
+begin
+ arg = (x - a(2))/a(3)
+ ex = exp(-arg**2)
+ fac = a(1)*ex*2.0*arg
+ ymod = a(1)*ex
+ dyda(1) = ex
+ dyda(2) = fac/a(3)
+ dyda(3) = fac*arg/a(3)
+end
diff --git a/noao/imred/vtel/getsqib.par b/noao/imred/vtel/getsqib.par
new file mode 100644
index 00000000..a148cafb
--- /dev/null
+++ b/noao/imred/vtel/getsqib.par
@@ -0,0 +1,2 @@
+image,s,q,,,,Image to get sqibimage from
+sqibimage,s,q,,,,New image to contain squibby brightness image
diff --git a/noao/imred/vtel/getsqib.x b/noao/imred/vtel/getsqib.x
new file mode 100644
index 00000000..76e7e44d
--- /dev/null
+++ b/noao/imred/vtel/getsqib.x
@@ -0,0 +1,55 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+# GETSQIB -- Make a new image from a solar synoptic image containing just
+# the squibby brightness.
+
+procedure t_getsqib()
+
+char image[SZ_FNAME] # input image
+char sqibimage[SZ_FNAME] # output squibby brightness image
+
+int i, numpix
+pointer im, lgp, lpp, sqibim
+
+pointer immap(), imgl2s(), impl2s()
+errchk immap, imgl2s, impl2s
+
+begin
+ # Get parameters from the CL.
+ call clgstr ("image", image, SZ_FNAME)
+ call clgstr ("sqibimage", sqibimage, SZ_FNAME)
+
+ # Open image.
+ im = immap (image, READ_ONLY, 0)
+ sqibim = immap (sqibimage, NEW_COPY, im)
+
+ numpix = IM_LEN(im,1)
+ do i = 1, IM_LEN(im,2) {
+ lgp = imgl2s (im, i)
+ lpp = impl2s (sqibim, i)
+ call sqibline (Mems[lgp], Mems[lpp], numpix)
+ }
+
+ # Unmap images.
+ call imunmap (im)
+ call imunmap (sqibim)
+end
+
+
+# SQIBLINE -- Unpack squibby brightness from line1 and put it into line2.
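+# The squibby brightness occupies the low four bits of each pixel, so it is
+# extracted with a bitwise AND against the octal mask 17B.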
+
+procedure sqibline (line1, line2, numpix)
+
+short line1[numpix] # input image line
+short line2[numpix] # output image line
+int numpix # number of pixels in line
+
+int i
+int and()
+
+begin
+ do i = 1, numpix
+ line2[i] = and(int(line1[i]),17B)
+end
diff --git a/noao/imred/vtel/gryscl.inc b/noao/imred/vtel/gryscl.inc
new file mode 100644
index 00000000..7198557a
--- /dev/null
+++ b/noao/imred/vtel/gryscl.inc
@@ -0,0 +1,52 @@
+data (ztbl[i], i=1,10) / 003, 003, 003, 003, 003, 003, 003, 003, 004, 005 /
+data (ztbl[i], i=11,20) / 005, 005, 005, 005, 005, 005, 005, 005, 006, 006 /
+data (ztbl[i], i=21,30) / 006, 006, 006, 006, 006, 006, 006, 006, 006, 007 /
+data (ztbl[i], i=31,40) / 007, 007, 007, 007, 007, 007, 007, 007, 007, 008 /
+data (ztbl[i], i=41,50) / 008, 008, 008, 008, 008, 008, 008, 008, 008, 009 /
+data (ztbl[i], i=51,60) / 009, 009, 009, 009, 009, 009, 009, 009, 009, 010 /
+data (ztbl[i], i=61,70) / 010, 010, 010, 010, 010, 010, 010, 010, 010, 010 /
+data (ztbl[i], i=71,80) / 010, 011, 011, 011, 011, 011, 011, 011, 011, 012 /
+data (ztbl[i], i=81,90) / 012, 012, 012, 012, 012, 012, 012, 013, 013, 013 /
+data (ztbl[i], i=91,100) / 013, 013, 013, 014, 014, 014, 014, 014, 014, 015/
+data (ztbl[i], i=101,110) /015, 015, 015, 015, 015, 015, 016, 016, 016, 016/
+data (ztbl[i], i=111,120) /016, 016, 017, 017, 017, 017, 017, 017, 017, 018/
+data (ztbl[i], i=121,130) /018, 018, 018, 018, 018, 018, 019, 019, 019, 019/
+data (ztbl[i], i=131,140) /019, 019, 020, 020, 020, 020, 020, 020, 021, 021/
+data (ztbl[i], i=141,150) /021, 021, 021, 021, 021, 022, 022, 022, 022, 022/
+data (ztbl[i], i=151,160) /022, 022, 023, 023, 023, 023, 023, 023, 024, 024/
+data (ztbl[i], i=161,170) /024, 024, 024, 024, 025, 025, 025, 025, 025, 026/
+data (ztbl[i], i=171,180) /026, 026, 026, 026, 026, 027, 027, 027, 027, 027/
+data (ztbl[i], i=181,190) /027, 028, 028, 028, 028, 028, 028, 029, 029, 029/
+data (ztbl[i], i=191,200) /029, 029, 029, 029, 029, 030, 030, 030, 030, 030/
+data (ztbl[i], i=201,210) /030, 030, 031, 031, 031, 031, 031, 031, 031, 031/
+data (ztbl[i], i=211,220) /032, 032, 032, 032, 032, 032, 032, 033, 033, 033/
+data (ztbl[i], i=221,230) /033, 033, 033, 034, 034, 034, 034, 034, 034, 035/
+data (ztbl[i], i=231,240) /035, 035, 035, 035, 035, 036, 036, 036, 036, 036/
+data (ztbl[i], i=241,250) /036, 037, 037, 037, 037, 037, 037, 038, 038, 038/
+data (ztbl[i], i=251,260) /038, 038, 038, 039, 039, 039, 039, 039, 039, 040/
+data (ztbl[i], i=261,270) /040, 040, 040, 040, 040, 041, 041, 041, 041, 041/
+data (ztbl[i], i=271,280) /041, 042, 042, 042, 042, 042, 042, 042, 042, 043/
+data (ztbl[i], i=281,290) /043, 043, 043, 044, 044, 044, 044, 045, 045, 045/
+data (ztbl[i], i=291,300) /045, 046, 046, 047, 047, 048, 048, 049, 049, 050/
+data (ztbl[i], i=301,310) /050, 051, 051, 052, 053, 054, 054, 055, 056, 057/
+data (ztbl[i], i=311,320) /057, 057, 057, 058, 058, 059, 059, 060, 060, 060/
+data (ztbl[i], i=321,330) /060, 061, 061, 063, 064, 065, 066, 067, 067, 068/
+data (ztbl[i], i=331,340) /068, 069, 069, 070, 071, 072, 072, 073, 074, 075/
+data (ztbl[i], i=341,350) /075, 076, 077, 078, 080, 082, 083, 084, 085, 086/
+data (ztbl[i], i=351,360) /086, 086, 087, 087, 088, 089, 089, 090, 091, 093/
+data (ztbl[i], i=361,370) /094, 095, 097, 099, 101, 102, 103, 105, 106, 108/
+data (ztbl[i], i=371,380) /109, 111, 113, 114, 115, 118, 121, 125, 128, 130/
+data (ztbl[i], i=381,390) /132, 135, 137, 140, 144, 148, 151, 154, 157, 162/
+data (ztbl[i], i=391,400) /166, 172, 177, 180, 184, 192, 200, 207, 213, 219/
+data (ztbl[i], i=401,410) /225, 231, 237, 240, 244, 246, 248, 250, 251, 252/
+data (ztbl[i], i=411,420) /253, 253, 254, 254, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=421,430) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=431,440) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=441,450) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=451,460) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=461,470) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=471,480) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=481,490) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=491,500) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=501,510) /255, 255, 255, 255, 255, 255, 255, 255, 255, 255/
+data (ztbl[i], i=511,512) /255, 255 /
diff --git a/noao/imred/vtel/imfglexr.x b/noao/imred/vtel/imfglexr.x
new file mode 100644
index 00000000..3c6d4649
--- /dev/null
+++ b/noao/imred/vtel/imfglexr.x
@@ -0,0 +1,76 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+# IMFGLEXR -- IMFilt Get Line with EXtension Real. Get a line from a
+# full disk solar image and extend the boundary appropriately for use
+# with acnvr. All pixels outside the limb are set equal to the value
+# of the last pixel inside the limb. The line is extended in size by
+# an amount given by 'extension' beyond the solar disk width.
+
+pointer procedure imfglexr (imptr, linenumber, el, extension)
+
+int linenumber # Line of input image to get
+int extension # Amount of boundary extension needed
+real el[LEN_ELSTRUCT] # limb ellipse structure
+pointer imptr # Input image pointer
+
+pointer rlptr, sp, tmpptr
+real p, n
+int lpix1, lpix2
+int linelength
+int lexb, rexb, i
+short k
+
+pointer imgl2r()
+short shifts()
+errchk imgl2r
+
+begin
+ k = -4
+
+ # Calculate the left and right bounds of the extended data.
+ lexb = E_XCENTER[el] - E_XSEMIDIAMETER[el] - extension
+ rexb = E_XCENTER[el] + E_XSEMIDIAMETER[el] + extension
+
+ # Extend 10 extra pixels beyond the minimum.
+ lexb = lexb - 10
+ rexb = rexb + 10
+ linelength = IM_LEN(imptr,1)
+
+ # Make a temporary short buffer for stripping.
+ call smark (sp)
+ call salloc (tmpptr, linelength, TY_SHORT)
+
+ # Get a line in the normal way. Point the real pointer to it.
+ rlptr = imgl2r (imptr, linenumber)
+
+ # Copy the line into the short array for stripping.
+ do i = 1, linelength
+ Mems[tmpptr+i-1] = short(Memr[rlptr+i-1])
+
+ # Strip off the squibby brightness. Put back into real array.
+ do i = 1, linelength
+ Memr[rlptr+i-1] = real(shifts(Mems[tmpptr+i-1], k))
+
+ # If the whole line is off the limb, return NULL.
+ if (abs(linenumber - E_YCENTER[el]) >= E_YSEMIDIAMETER[el])
+ return(NULL)
+
+ # Use ellipse parameters to determine where the limb intersections are.
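+	# At y = linenumber, the ellipse (x-xc)**2/a**2 + (y-yc)**2/b**2 = 1
+	# gives x = xc +/- a*sqrt(1-p); n below is the square of that half chord.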
+ p = (real(linenumber) - E_YCENTER[el])**2/E_YSEMIDIAMETER[el]**2
+ n = (1.0 - p) * E_XSEMIDIAMETER[el]**2
+
+ # The two limb points are:
+ lpix1 = int(-sqrt(abs(n)) + .5) + E_XCENTER[el]
+ lpix2 = int(sqrt(abs(n)) + .5) + E_XCENTER[el]
+
+ # Extend the boundary of the data beyond the limb
+ # by duplicating the last inside_the_limb pixel. This extension
+ # is done out to lexb on the left and rexb on the right.
+
+ call amovkr (Memr[rlptr+lpix1+1], Memr[rlptr+lexb], lpix1-1-lexb)
+ call amovkr (Memr[rlptr+lpix2-1], Memr[rlptr+lpix2+1], rexb-1-lpix2)
+ call sfree (sp)
+ return (rlptr)
+end
diff --git a/noao/imred/vtel/imfilt.x b/noao/imred/vtel/imfilt.x
new file mode 100644
index 00000000..1f25efcf
--- /dev/null
+++ b/noao/imred/vtel/imfilt.x
@@ -0,0 +1,170 @@
+include <mach.h>
+include <imhdr.h>
+include <imset.h>
+include "vt.h"
+
+# IMFILT -- Apply a spatial averaging filter to an image by convolving the
+# image with a filter kernel. Return the resulting image in a separate
+# image file.
+
+procedure imfilt (inim, outim, kernel, kxdim, kydim, el)
+
+pointer inim, outim # input and output images
+int kxdim, kydim # dimensions of convolution kernel
+real kernel[kxdim, kydim] # convolution kernel
+real el[LEN_ELSTRUCT] # limb ellipse structure
+
+int nlines, linelength, startline
+int linebuf, outline, i
+int k, offset, x2semi
+int extension, startpix, lastline
+real p, n, lpix1, lpix2
+pointer lines, tmpptr, outptr, inptr, sp
+
+pointer impl2r(), imgl2r(), imfglexr()
+errchk impl2r, imfglexr, imgl2r
+
+begin
+ # Set up the pointer array on the stack.
+ call smark (sp)
+ call salloc (lines, kydim, TY_POINTER)
+
+ # Calculate the extension.
+ extension = kxdim / 2
+ offset = E_XCENTER[el] - E_XSEMIDIAMETER[el]
+ x2semi = 2 * E_XSEMIDIAMETER[el]
+
+ # Startpix is the x-coordinate of the beginning of the 1-D array
+ # we pass to the convolution vector routine. If wrong, return.
+
+ startpix = offset - extension
+ if (startpix <= 0) {
+ call printf ("convolution kernel too wide for this image\n")
+ return
+ }
+
+ # Get the dimensions of the image.
+ linelength = IM_LEN(inim, 1)
+ nlines = IM_LEN(inim, 2)
+
+ # Pointers to the input and the output images are passed to this
+ # subroutine by the user.
+
+ # Use imseti to set up the appropriate number of input buffers.
+ call imseti (inim, IM_NBUFS, kydim+1)
+
+ # Read in the necessary number of input image lines to initially
+ # fill all but one of the input line buffers.
+ # First, skip over all lines that are off the limb.
+ # The size of the output image is defined prior to the call
+ # to this subroutine, the output image is the same size as the
+ # input image.
+
+ startline = 0
+ Memi[lines] = NULL
+
+ # Skip over empty lines.
+ while (Memi[lines] == NULL) {
+ startline = startline + 1
+ Memi[lines] = imfglexr (inim, startline, el, extension)
+ }
+
+ # Fill (almost) the line buffer.
+ do linebuf = 1, kydim-2
+ Memi[lines+linebuf] = imfglexr (inim, linebuf+startline,
+ el, extension)
+
+ # Copy the first startline lines from the input image into the
+ # output image.
+ do outline = 1, startline + (kydim/2) {
+
+ # Put next line to output image, get the corresponding line from
+ # the input image.
+ inptr = imgl2r (inim, outline)
+ outptr = impl2r (outim, outline)
+
+		# Copy the input line into the output line. Strip sqib.
+ do i = 1, DIM_VTFD {
+ Memr[outptr+i-1] = Memr[inptr+i-1]/16.
+ }
+ }
+
+ # Do the convolution, output line by output line.
+ do outline = (kydim/2) + startline, nlines {
+
+ # Use ellipse parameters to determine where the limb
+ # intersections are.
+ p = (real(outline) - E_YCENTER[el])**2/E_YSEMIDIAMETER[el]**2
+ n = (1.0 - p) * E_XSEMIDIAMETER[el]**2
+
+ # The two limb points are:
+ lpix1 = int(-sqrt(abs(n)) + .5) + E_XCENTER[el]
+ lpix2 = int(sqrt(abs(n)) + .5) + E_XCENTER[el]
+
+ # Keep a copy of this input line around for filling outside
+ # the limb.
+ inptr = imgl2r (inim, outline)
+
+ # Scroll the buffer pointer array.
+ if (outline > ((kydim/2) + startline))
+ do i = 0, kydim - 2
+ Memi[lines+i] = Memi[lines+i+1]
+
+ # Get next line from input image, if it is off the limb then we
+ # are done.
+
+ tmpptr = imfglexr (inim, outline+((kydim/2)+1), el, extension)
+ if (tmpptr == NULL) {
+ lastline = outline
+ break
+ }
+ Memi[lines+kydim-1] = tmpptr
+
+ # Put next line to output image.
+ outptr = impl2r (outim, outline)
+
+ # Zero the output line.
+ call aclrr (Memr[outptr], DIM_VTFD)
+
+		# Here is the actual convolution. This is a do loop over the lines
+		# of the kernel; each call to acnvr adds the convolution of a
+		# kernel line with an input line to the output line.
+
+ do k = 1, kydim
+ call acnvr (Memr[Memi[lines+k-1]+startpix], Memr[outptr+offset],
+ x2semi, kernel[1,k], kxdim)
+
+ # Fill outside the limb with orig data.
+ do i = 1, lpix1 {
+ Memr[outptr+i-1] = Memr[inptr+i-1]/16.
+ }
+ do i = lpix2, DIM_VTFD {
+ Memr[outptr+i-1] = Memr[inptr+i-1]/16.
+ }
+
+ # Roundoff adjustment.
+ do i = startpix, startpix+x2semi {
+ if (Memr[outptr+i-1] < 0.0)
+ Memr[outptr+i-1] = Memr[outptr+i-1] - .5
+ else
+ Memr[outptr+i-1] = Memr[outptr+i-1] + .5
+ }
+
+ } # End of do loop on outline.
+
+ # Clear the rest of the image.
+ do outline = lastline, DIM_VTFD {
+
+ # Put next line to output image, get the corresponding line from
+ # the input image.
+ inptr = imgl2r (inim, outline)
+ outptr = impl2r (outim, outline)
+
+		# Copy the input line into the output line. Strip sqib.
+ do i = 1, DIM_VTFD {
+ Memr[outptr+i-1] = Memr[inptr+i-1]/16.
+ }
+ }
+
+ call sfree (sp)
+end
diff --git a/noao/imred/vtel/imratio.x b/noao/imred/vtel/imratio.x
new file mode 100644
index 00000000..5586d204
--- /dev/null
+++ b/noao/imred/vtel/imratio.x
@@ -0,0 +1,29 @@
+# IMRATIO -- Divide two images and return the result in a third image.
+
+procedure imratio (numerator, denominator, ratio, xdim, ydim)
+
+real numerator[xdim, ydim] # input numerator
+real denominator[xdim, ydim] # input denominator
+real ratio[xdim, ydim] # output ratio image
+int xdim, ydim # dimensions of the image
+
+int i
+real ezero()
+extern ezero()
+
+begin
+ do i = 1, ydim {
+ call arltr (denominator[1,i], xdim, 1E-10, 0.0)
+ call advzr (numerator[1,i], denominator[1,i], ratio[1,i], xdim,
+ ezero)
+ }
+end
+
+
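+# EZERO -- Divide-by-zero action routine passed to advzr: the quotient is
+# set to zero wherever the denominator is zero.
+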
+real procedure ezero (input)
+
+real input
+
+begin
+ return (0.0)
+end
diff --git a/noao/imred/vtel/lstsq.x b/noao/imred/vtel/lstsq.x
new file mode 100644
index 00000000..9091fb48
--- /dev/null
+++ b/noao/imred/vtel/lstsq.x
@@ -0,0 +1,85 @@
+include <mach.h>
+
+# LSTSQ -- Do a least squares fit to the data contained in the zz array.
+# Algorithm is from Jack Harvey. (Yes, it's a black box...)
+
+procedure lstsq (zz, mz, fno)
+
+real zz[mz, mz]
+int mz
+real fno
+
+int n, m, m1, i, j, k, l, l1
+real fn, pp
+
+begin
+ n = mz - 2
+ m = n + 1
+ m1 = m + 1
+ fn = n
+
+ do i = 1, m {
+ l = i + 1
+ do k = 1, i-1 {
+ zz[i,l] = zz[i,l] - zz[k,l]**2
+ }
+
+ if (i == m)
+ break
+ if (zz[i,l] >= 0.0)
+ zz[i,l] = zz[i,l]**.5
+ else {
+			call eprintf ("square root of negative number in lstsq\n")
+ zz[i,l] = 0.0
+ }
+ l1 = l + 1
+
+ do j = l1, m1 {
+ do k = 1, i-1 {
+ zz[i,j] = zz[i,j] - zz[k,l] * zz[k,j]
+ }
+ if (zz[i,l] >= EPSILONR)
+ zz[i,j] = zz[i,j] / zz[i,l]
+ else
+ call eprintf ("divide by zero in lstsq\n")
+ }
+
+ if (zz[i,l] >= EPSILONR)
+ zz[i,i] = 1. / zz[i,l]
+ else
+ call eprintf ("divide by zero in lstsq\n")
+ do j = 1, i-1 {
+ pp = 0.
+ l1 = i - 1
+ do k = j, l1 {
+ pp = pp + zz[k,l] * zz[k,j]
+ }
+ zz[i,j] = -zz[i,i] * pp
+ }
+ }
+
+ if ((fno - fn) >= EPSILONR)
+ if ((zz[m,m1] / (fno - fn)) >= 0.0)
+ zz[m1,m1] = .6745 * (zz[m,m1] / (fno - fn))**.5
+ else {
+			call eprintf ("square root of negative number in lstsq\n")
+ zz[m1,m1] = 0.0
+ }
+ else
+ call eprintf ("divide by zero in lstsq\n")
+
+ do i = 1, n {
+ zz[m,i] = 0.
+ pp = 0.
+ do j = i, n {
+ zz[m,i] = zz[m,i] + zz[j,i] * zz[j,m1]
+ pp = pp + zz[j,i] * zz[j,i]
+ }
+ if (pp >= 0.0)
+ zz[m1,i] = zz[m1,m1] * pp**.5
+ else {
+			call eprintf ("square root of negative number in lstsq\n")
+ zz[m1,i] = 0.0
+ }
+ }
+end
diff --git a/noao/imred/vtel/makehelium.cl b/noao/imred/vtel/makehelium.cl
new file mode 100644
index 00000000..5cab8696
--- /dev/null
+++ b/noao/imred/vtel/makehelium.cl
@@ -0,0 +1,51 @@
+#{ MAKEHELIUM -- Run RMAP on the five full disk grams named inroot//"1"
+# through inroot//"5", writing output images with root name outroot, and
+# delete each input image after it has been processed.
+
+# getinroot,s,a,,,,Input root file name
+# getoutroot,s,a,,,,Root filename for output images
+# inroot,s,h
+# outroot,s,h
+
+{
+ inroot = getinroot
+ outroot = getoutroot
+
+ if (access(inroot//"1.imh")) {
+ rmap (inroot//"1", outroot//"a1", outroot//"a3", outroot//"a2",
+ "H"//outroot//"a")
+ imdelete (inroot//"1")
+ } else {
+		print (inroot//"1 not accessible")
+ }
+
+ if (access(inroot//"2.imh")) {
+ rmap (inroot//"2", outroot//"b1", outroot//"b3", outroot//"b2",
+ "H"//outroot//"b")
+ imdelete (inroot//"2")
+ } else {
+		print (inroot//"2 not accessible")
+ }
+
+ if (access(inroot//"3.imh")) {
+ rmap (inroot//"3", outroot//"c1", outroot//"c3", outroot//"c2",
+ "H"//outroot//"c")
+ imdelete (inroot//"3")
+ } else {
+		print (inroot//"3 not accessible")
+ }
+
+ if (access(inroot//"4.imh")) {
+ rmap (inroot//"4", outroot//"d1", outroot//"d3", outroot//"d2",
+ "H"//outroot//"d")
+ imdelete (inroot//"4")
+ } else {
+		print (inroot//"4 not accessible")
+ }
+
+ if (access(inroot//"5.imh")) {
+ rmap (inroot//"5", outroot//"e1", outroot//"e3", outroot//"e2",
+ "H"//outroot//"e")
+ imdelete (inroot//"5")
+ } else {
+		print (inroot//"5 not accessible")
+ }
+}
diff --git a/noao/imred/vtel/makehelium.par b/noao/imred/vtel/makehelium.par
new file mode 100644
index 00000000..426eda03
--- /dev/null
+++ b/noao/imred/vtel/makehelium.par
@@ -0,0 +1,4 @@
+getinroot,s,a,,,,Input root file name
+getoutroot,s,a,,,,Root filename for output images
+inroot,s,h
+outroot,s,h
diff --git a/noao/imred/vtel/makeimages.cl b/noao/imred/vtel/makeimages.cl
new file mode 100644
index 00000000..1da6b832
--- /dev/null
+++ b/noao/imred/vtel/makeimages.cl
@@ -0,0 +1,66 @@
+#{ MAKEIMAGES -- For each of the five tape files scratch$inroot001 through
+# 005, read it with READVT, run QUICKFIT and RMAP on the result, and delete
+# the tape file and the intermediate image.
+
+# getinroot,s,a,,,,Input root file name
+# getoutroot,s,a,,,,Root filename for output images
+# inroot,s,h
+# outroot,s,h
+
+{
+ inroot = getinroot
+ outroot = getoutroot
+
+ if (access("scratch$"//inroot//"001")) {
+ readvt ("scratch$"//inroot//"001", inroot//"tmp1")
+ quickfit (inroot//"tmp1001",verbose=yes)
+ rmap (inroot//"tmp1001",outroot//"a1",outroot//"a3",
+ outroot//"a2","H"//outroot//"a")
+ delete ("scratch$"//inroot//"001")
+ imdelete (inroot//"tmp1001")
+ } else {
+		print ("scratch$"//inroot//"001 not accessible")
+ }
+
+ if (access("scratch$"//inroot//"002")) {
+ readvt ("scratch$"//inroot//"002", inroot//"tmp2")
+ quickfit (inroot//"tmp2001",verbose=yes)
+ rmap (inroot//"tmp2001",outroot//"b1",outroot//"b3",
+ outroot//"b2","H"//outroot//"b")
+ delete ("scratch$"//inroot//"002")
+ imdelete (inroot//"tmp2001")
+ } else {
+		print ("scratch$"//inroot//"002 not accessible")
+ }
+
+ if (access("scratch$"//inroot//"003")) {
+ readvt ("scratch$"//inroot//"003", inroot//"tmp3")
+ quickfit (inroot//"tmp3001",verbose=yes)
+ rmap (inroot//"tmp3001",outroot//"c1",outroot//"c3",
+ outroot//"c2","H"//outroot//"c")
+ delete ("scratch$"//inroot//"003")
+ imdelete (inroot//"tmp3001")
+ } else {
+		print ("scratch$"//inroot//"003 not accessible")
+ }
+
+ if (access("scratch$"//inroot//"004")) {
+ readvt ("scratch$"//inroot//"004", inroot//"tmp4")
+ quickfit (inroot//"tmp4001",verbose=yes)
+ rmap (inroot//"tmp4001",outroot//"d1",outroot//"d3",
+ outroot//"d2","H"//outroot//"d")
+ delete ("scratch$"//inroot//"004")
+ imdelete (inroot//"tmp4001")
+ } else {
+		print ("scratch$"//inroot//"004 not accessible")
+ }
+
+ if (access("scratch$"//inroot//"005")) {
+ readvt ("scratch$"//inroot//"005", inroot//"tmp5")
+ quickfit (inroot//"tmp5001",verbose=yes)
+ rmap (inroot//"tmp5001",outroot//"e1",outroot//"e3",
+ outroot//"e2","H"//outroot//"e")
+ delete ("scratch$"//inroot//"005")
+ imdelete (inroot//"tmp5001")
+ } else {
+		print ("scratch$"//inroot//"005 not accessible")
+ }
+}
diff --git a/noao/imred/vtel/makeimages.par b/noao/imred/vtel/makeimages.par
new file mode 100644
index 00000000..426eda03
--- /dev/null
+++ b/noao/imred/vtel/makeimages.par
@@ -0,0 +1,4 @@
+getinroot,s,a,,,,Input root file name
+getoutroot,s,a,,,,Root filename for output images
+inroot,s,h
+outroot,s,h
diff --git a/noao/imred/vtel/merge.par b/noao/imred/vtel/merge.par
new file mode 100644
index 00000000..3a0b0768
--- /dev/null
+++ b/noao/imred/vtel/merge.par
@@ -0,0 +1,9 @@
+mergelist,s,h,"mergelist",,,List of files to merge
+outputimage,s,q,"carmap",,,Output image
+outweight,s,q,"carweight",,,Output image weights
+outabs,s,q,"carabs",,,Absolute value image
+outratio,s,q,"carratio",,,Ratio: outputimage over absolute value image
+longout,r,h,180.0,1.,360.,Longitude of center of this carrington rotation
+mapmonth,i,q,,1,12,Month of center of this carrington rotation
+mapday,i,q,,1,31,Day of center of this carrington rotation
+mapyear,i,q,,1,99,Year of center of this carrington rotation
diff --git a/noao/imred/vtel/merge.x b/noao/imred/vtel/merge.x
new file mode 100644
index 00000000..79aa23eb
--- /dev/null
+++ b/noao/imred/vtel/merge.x
@@ -0,0 +1,762 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+# MERGE -- Put together all appropriate daily grams to produce a full
+# carrington rotation map. This is done both for the average input images
+# and for the absolute value input images. The output of the program is
+# 4 images, average image, absolute value image, weight image, ratio of
+# first image to second image.
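+# The ratio image (the average divided by its absolute-value counterpart) is
+# a per-pixel measure of net polarity and is flagged with the 'POLARITY'
+# keyword in its header.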
+
+procedure t_merge()
+
+char mergelist[SZ_FNAME] # list of images to be merged
+
+int wavelength, listfd
+char inputimage[SZ_FNAME]
+pointer inputim
+
+pointer immap()
+int imgeti(), open(), fscan()
+errchk immap, open
+
+begin
+ # Get the image name file from the cl and open it.
+ call clgstr ("mergelist", mergelist, SZ_FNAME)
+ listfd = open (mergelist, READ_ONLY, TEXT_FILE)
+
+ # Get the wavelength from the first image in the mergelist.
+ if (fscan (listfd) != EOF) {
+ call gargwrd (inputimage, SZ_FNAME)
+ inputim = immap (inputimage, READ_ONLY, 0)
+ wavelength = imgeti (inputim, "WV_LNGTH")
+ call close (listfd)
+ } else {
+ call error (0, "No images in 'mergelist'")
+ call close (listfd)
+ return
+ }
+
+ if (wavelength == 8688)
+ call mergem (mergelist, wavelength)
+ else
+ call mergeh (mergelist, wavelength)
+end
+
+
+# MERGEM -- MERGE Magnetograms.
+
+procedure mergem (mergelist, wavelength)
+
+char mergelist[SZ_FNAME] # list of images to be merged
+int wavelength # wavelength of images
+
+pointer outputim, outw, outa, outr
+pointer outptr, outwptr, outaptr, outrptr
+char outputimage[SZ_FNAME], outweight[SZ_FNAME]
+char outabs[SZ_FNAME], outratio[SZ_FNAME]
+real longout, weight_tbl[SZ_WTBL], bzeroave
+int i, mapmonth, mapday, mapyear
+
+real clgetr()
+int clgeti()
+pointer immap(), imps2r()
+errchk immap, imps2r
+
+begin
+ # Get parameters from the cl.
+
+ # Output images.
+ call clgstr ("outputimage", outputimage, SZ_FNAME)
+ call clgstr ("outweight", outweight, SZ_FNAME)
+ call clgstr ("outabs", outabs, SZ_FNAME)
+ call clgstr ("outratio", outratio, SZ_FNAME)
+
+ # Longitude of center of output Carrington rotation map.
+ longout = clgetr ("longout")
+
+ # Month, day, and year of the center of the output map.
+ mapmonth = clgeti ("mapmonth")
+ mapday = clgeti ("mapday")
+ mapyear = clgeti ("mapyear")
+
+ # Open output image.
+ outputim = immap (outputimage, NEW_IMAGE, 0)
+
+ # Define some parameters for the output images.
+ IM_NDIM(outputim) = 2
+ IM_LEN(outputim, 1) = DIM_XCARMAP
+ IM_LEN(outputim, 2) = DIM_SQUAREIM
+ IM_PIXTYPE(outputim) = TY_REAL
+
+ # Open the rest of the output images.
+ outw = immap (outweight, NEW_COPY, outputim)
+ outa = immap (outabs, NEW_COPY, outputim)
+ outr = immap (outratio, NEW_COPY, outputim)
+
+ # Map the outputimages into memory.
+ outptr = imps2r (outputim, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+ outwptr = imps2r (outw, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+ outaptr = imps2r (outa, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+ outrptr = imps2r (outr, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+
+ # Create weight table.
+ do i = 1,SZ_WTBL
+ weight_tbl[i] = (cos((real(i-91)+.5)*3.1415926/180.))**4
+
+ call mmall (mergelist, Memr[outptr], Memr[outwptr], Memr[outaptr],
+ outputim, outw, outa, outr, wavelength, weight_tbl, longout,
+ mapmonth, mapday, mapyear, bzeroave)
+
+ # Fill the ratio image.
+ call imratio (Memr[outptr],Memr[outaptr],Memr[outrptr],DIM_XCARMAP,
+ DIM_SQUAREIM)
+
+ # Write some information out to the image headers.
+ call imaddr (outputim, "AV_BZERO", bzeroave)
+ call imaddi (outputim, "WV_LNGTH", wavelength)
+ call imaddr (outw, "AV_BZERO", bzeroave)
+ call imaddr (outw, "WV_LNGTH", wavelength)
+ call imaddb (outw, "WEIGHTS", TRUE)
+ call imaddr (outa, "AV_BZERO", bzeroave)
+ call imaddr (outr, "AV_BZERO", bzeroave)
+ call imaddr (outa, "WV_LNGTH", wavelength)
+ call imaddr (outr, "WV_LNGTH", wavelength)
+ call imaddb (outa, "ABS_VALU", TRUE)
+ call imaddb (outr, "POLARITY", TRUE)
+
+ # Weight the data image and the abs image.
+ call imratio (Memr[outptr],Memr[outwptr],Memr[outptr],DIM_XCARMAP,
+ DIM_SQUAREIM)
+ call imratio (Memr[outaptr],Memr[outwptr],Memr[outaptr],DIM_XCARMAP,
+ DIM_SQUAREIM)
+
+ # Close images
+ call imunmap (outputim)
+ call imunmap (outw)
+ call imunmap (outa)
+ call imunmap (outr)
+end
+
+
+# MERGEH -- MERGE Helium 10830 grams.
+
+procedure mergeh (mergelist, wavelength)
+
+char mergelist[SZ_FNAME] # list of images to merge
+int wavelength # wavelength of observation
+
+pointer outputim, outw
+pointer outptr, outwptr
+char outputimage[SZ_FNAME], outweight[SZ_FNAME]
+real longout, weight_tbl[SZ_WTBL], bzeroave
+int i, mapmonth, mapday, mapyear
+
+real clgetr()
+int clgeti()
+pointer immap(), imps2r()
+errchk immap, imps2r
+
+begin
+ # Get parameters from the cl.
+
+ # Output images.
+ call clgstr ("outputimage", outputimage, SZ_FNAME)
+ call clgstr ("outweight", outweight, SZ_FNAME)
+
+ # Longitude of center of output Carrington rotation map.
+ longout = clgetr ("longout")
+
+ # Month, day, and year of the center of the output map.
+ mapmonth = clgeti ("mapmonth")
+ mapday = clgeti ("mapday")
+ mapyear = clgeti ("mapyear")
+
+ # Open output image.
+ outputim = immap (outputimage, NEW_IMAGE, 0)
+
+ # Define some parameters for the output images.
+ IM_NDIM(outputim) = 2
+ IM_LEN(outputim, 1) = DIM_XCARMAP
+ IM_LEN(outputim, 2) = DIM_SQUAREIM
+ IM_PIXTYPE(outputim) = TY_REAL
+
+ # Open the other output image.
+ outw = immap (outweight, NEW_COPY, outputim)
+
+ # Map the outputimages into memory.
+ outptr = imps2r (outputim, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+ outwptr = imps2r (outw, 1, DIM_XCARMAP, 1, DIM_SQUAREIM)
+
+ # Create weight table.
+ do i = 1,SZ_WTBL
+ weight_tbl[i] = (cos((real(i-91)+.5)*3.1415926/180.))**4
+
+ call mhall (mergelist, Memr[outptr], Memr[outwptr], outputim,
+ outw, wavelength, weight_tbl, longout, mapmonth,
+ mapday, mapyear, bzeroave)
+
+ # Write some information out to the image headers.
+ call imaddr (outputim, "AV_BZERO", bzeroave)
+ call imaddi (outputim, "WV_LNGTH", wavelength)
+ call imaddr (outw, "AV_BZERO", bzeroave)
+ call imaddr (outw, "WV_LNGTH", wavelength)
+ call imaddb (outw, "WEIGHTS", TRUE)
+
+ # Weight the data image.
+ call imratio (Memr[outptr],Memr[outwptr],Memr[outptr],DIM_XCARMAP,
+ DIM_SQUAREIM)
+
+ # Close images.
+ call imunmap (outputim)
+ call imunmap (outw)
+end
+
+
+# MMALL -- Merge Magnetograms ALL.
+# Map in each input image, weight it, figure out where it goes
+# and add it to the output image.
+
+procedure mmall (mergelist, outarray, outarrayw, outarraya, outputim,
+ outw, outa, outr, wavelength, weight_tbl, longout, mapmonth, mapday,
+ mapyear, bzeroave)
+
+char mergelist[SZ_FNAME] # list of images to be merged
+int wavelength # wavelength of observations
+real outarray[DIM_XCARMAP, DIM_SQUAREIM] # output data array
+real outarrayw[DIM_XCARMAP, DIM_SQUAREIM] # output weights array
+real outarraya[DIM_XCARMAP, DIM_SQUAREIM] # output absolute value array
+pointer inputim # pointer to input image
+pointer outputim # pointer to output image
+pointer outw # pointer to weight image
+pointer outa # pointer to abs value image
+pointer outr # pointer to ratio image
+int mapmonth, mapday, mapyear # date of output map
+real weight_tbl[SZ_WTBL] # weight table
+real longout # longitude of map center
+real bzeroave # average b-zero for map
+
+char inputimage[SZ_FNAME], inweight[SZ_FNAME], inabs[SZ_FNAME]
+pointer inw, ina, inptr, inwptr, inaptr
+int listfd, month, day, year, count
+real longin, bzero, bzerosum
+int obsdate, temp, i, j
+char ltext[SZ_LINE]
+
+int open(), fscan(), imgeti()
+real imgetr()
+pointer immap(), imgs2i(), imgs2s()
+errchk open, immap, imgs2i, imgs2s
+
+begin
+ count = 0
+ bzerosum = 0.0
+ listfd = open (mergelist, READ_ONLY, TEXT_FILE)
+
+ # Zero the output images.
+ do i = 1, DIM_XCARMAP {
+ do j = 1, DIM_SQUAREIM {
+ outarray[i,j] = 0.0
+ outarrayw[i,j] = 0.0
+ outarraya[i,j] = 0.0
+ }
+ }
+
+ # Get inputimages from the mergelist until they are all used up.
+ while (fscan (listfd) != EOF) {
+ call gargwrd (inputimage, SZ_FNAME)
+
+ # Get absolute value image.
+ if(fscan (listfd) != EOF)
+ call gargwrd (inabs, SZ_FNAME)
+ else
+ call error (0, "wrong number of file names in mergelist")
+
+ # Get weight image.
+ if(fscan (listfd) != EOF)
+ call gargwrd (inweight, SZ_FNAME)
+ else
+ call error (0, "wrong number of file names in mergelist")
+
+ # Open input image, its corresponding weight map, and its
+ # corresponding absolute value map.
+
+ inputim = immap (inputimage, READ_ONLY, 0)
+ inw = immap (inweight, READ_ONLY, 0)
+ ina = immap (inabs, READ_ONLY, 0)
+
+ bzero = imgetr (inputim, "B_ZERO")
+ bzerosum = bzerosum + bzero
+ longin = imgetr (inputim, "L_ZERO")
+ obsdate = imgeti (inputim, "OBS_DATE")
+
+		# Check to see that the date is the same on the three input images.
+ temp = imgeti (inw, "OBS_DATE")
+ if (temp != obsdate) {
+ call eprintf ("ERROR: date on weight image differs from that ")
+ call eprintf ("on data image!\n")
+ break
+ }
+
+ temp = imgeti (ina, "OBS_DATE")
+ if (temp != obsdate) {
+ call eprintf ("ERROR: date on abs image differs from that ")
+ call eprintf ("on data image!\n")
+ break
+ }
+
+ # Decode month, day, year.
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+
+ # Pack a name for this date and longitude and then put them out
+ # into the outputimages' headers.
+
+ count = count + 1
+ call sprintf (ltext, SZ_LINE, "DATE%04d")
+ call pargi (count)
+ call imaddi (outputim, ltext, obsdate)
+ call imaddi (outw, ltext, obsdate)
+ call imaddi (outa, ltext, obsdate)
+ call imaddi (outr, ltext, obsdate)
+
+ call sprintf (ltext, SZ_LINE, "LONG%04d")
+ call pargi (count)
+ call imaddr (outputim, ltext, longin)
+ call imaddr (outw, ltext, longin)
+ call imaddr (outa, ltext, longin)
+ call imaddr (outr, ltext, longin)
+
+ # Map the inputimage, the weight map, and abs_image into memory.
+ inptr = imgs2i (inputim, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ inwptr = imgs2s (inw, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ inaptr = imgs2i (ina, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+
+ # Weight this image and add it to the output image.
+ call addmweight (Memi[inptr],Mems[inwptr],Memi[inaptr],outarray,
+ outarrayw, outarraya, weight_tbl, longin, longout,
+ month, day, year, mapmonth, mapday, mapyear)
+
+ # Close this input image.
+ call imunmap (inputim)
+ call imunmap (inw)
+ call imunmap (ina)
+
+ } # end of do loop on input images
+
+ bzeroave = bzerosum/real(count)
+ call close (listfd)
+end
+
+
+# MHALL -- Merge Heliumgrams ALL.
+# Map in each input image, weight it, figure out where it goes
+# and add it to the output image.
+
+procedure mhall (mergelist, outarray, outarrayw, outputim,
+ outw, wavelength, weight_tbl, longout, mapmonth, mapday,
+ mapyear, bzeroave)
+
+char mergelist[SZ_FNAME] # list of images to be merged
+int wavelength # wavelength of observations
+real outarray[DIM_XCARMAP, DIM_SQUAREIM] # output data array
+real outarrayw[DIM_XCARMAP, DIM_SQUAREIM] # output weights array
+pointer inputim # pointer to input image
+pointer outputim # pointer to output image
+pointer outw # pointer to weight image
+int mapmonth, mapday, mapyear # date of output map
+real weight_tbl[SZ_WTBL] # weight table
+real longout # longitude of map center
+real bzeroave # average b-zero for map
+
+char inputimage[SZ_FNAME], inweight[SZ_FNAME]
+pointer inw, inptr, inwptr
+int listfd, month, day, year, count
+real longin, bzero, bzerosum
+int obsdate, temp, i, j
+char ltext[SZ_LINE]
+
+real imgetr()
+int open(), fscan(), imgeti()
+pointer immap(), imgs2i(), imgs2s()
+errchk open, immap, imgs2i, imgs2s
+
+begin
+ count = 0
+ bzerosum = 0.0
+ listfd = open (mergelist, READ_ONLY, TEXT_FILE)
+
+ # Zero the output images.
+ do i = 1, DIM_XCARMAP {
+ do j = 1, DIM_SQUAREIM {
+ outarray[i,j] = 0.0
+ outarrayw[i,j] = 0.0
+ }
+ }
+
+ # Get inputimages from the mergelist until they are all used up.
+ while (fscan (listfd) != EOF) {
+ call gargwrd (inputimage, SZ_FNAME)
+
+ # Get weight image.
+ if (fscan (listfd) != EOF)
+ call gargwrd (inweight, SZ_FNAME)
+ else
+ call error (0, "wrong number of file names in mergelist")
+
+		# Open the input image and its corresponding weight map.
+
+ inputim = immap (inputimage, READ_ONLY, 0)
+ inw = immap (inweight, READ_ONLY, 0)
+
+ bzero = imgetr (inputim, "B_ZERO")
+ bzerosum = bzerosum + bzero
+ longin = imgetr (inputim, "L_ZERO")
+ obsdate = imgeti (inputim, "OBS_DATE")
+
+		# Check to see that the date is the same on the two input images.
+ temp = imgeti (inw, "OBS_DATE")
+ if (temp != obsdate) {
+ call eprintf ("ERROR: date on weight image differs from that ")
+ call eprintf ("on data image!\n")
+ break
+ }
+
+ # Decode month, day, year.
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+
+ # Pack a name for this date and longitude and then put them out
+ # into the outputimages' headers.
+
+ count = count + 1
+ call sprintf (ltext, SZ_LINE, "DATE%04d")
+ call pargi (count)
+ call imaddi (outputim, ltext, obsdate)
+ call imaddi (outw, ltext, obsdate)
+
+ call sprintf (ltext, SZ_LINE, "LONG%04d")
+ call pargi (count)
+ call imaddr (outputim, ltext, longin)
+ call imaddr (outw, ltext, longin)
+
+		# Map the input image and the weight map into memory.
+ inptr = imgs2i (inputim, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ inwptr = imgs2s (inw, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+
+ # Weight this image and add it to the output image.
+ call addhweight (Memi[inptr], Mems[inwptr], outarray, outarrayw,
+ weight_tbl, longin, longout, month, day, year, mapmonth,
+ mapday, mapyear)
+
+ # Close this input image.
+ call imunmap (inputim)
+ call imunmap (inw)
+
+ } # end of do loop on input images
+
+ bzeroave = bzerosum/real(count)
+ call close (listfd)
+end
+
+
+# ADDMWEIGHT -- Weight input image by cos(longitude - (L-L0))**4, and add
+# it to the output image in the proper place.
+
+procedure addmweight (inim, inwim, inaim, outim, outwim, outaim,
+ weight_tbl, longin, longout, month, day, year, mapmonth, mapday,
+ mapyear)
+
+int inim[DIM_SQUAREIM, DIM_SQUAREIM] # input image
+short inwim[DIM_SQUAREIM, DIM_SQUAREIM] # input image weights
+int inaim[DIM_SQUAREIM, DIM_SQUAREIM] # input absolute image
+real outim[DIM_XCARMAP, DIM_SQUAREIM] # outputimage
+real outwim[DIM_XCARMAP, DIM_SQUAREIM] # output image weights
+real outaim[DIM_XCARMAP, DIM_SQUAREIM] # output absolute image
+int month, day, year # date of input image
+int mapmonth, mapday, mapyear # date of output image
+real weight_tbl[DIM_SQUAREIM] # weight table
+real longin, longout # longitudes of images
+
+int p1offset, p2offset, firstpix, lastpix, column, row
+int offset, datein, dateout, temp, temp2
+int d1900()
+
+begin
+ # Translate the two dates into julian day numbers to make comparisons
+ # simpler.
+
+ datein = d1900 (month, day, year)
+ dateout = d1900 (mapmonth, mapday, mapyear)
+
+ # Figure out the pixel offset between the first pixel of the input
+	# image and the first pixel of the output image.
+ # Actually, there may be two pixel offsets for a particular image
+ # corresponding to the correct position of the image and the 360
+ # degree offset position.
+
+ p1offset = mod(abs(int(longin - longout + .5)), 360) # This is one.
+ p2offset = 360 - p1offset # This is the other.
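+
+	# For example (hypothetical values): longin = 200 and longout = 180
+	# give p1offset = 20 and p2offset = 340; the date tests below select
+	# which of the two is the true separation and fix its sign.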
+
+ # Determine which side of the output image center is each of these
+ # offsets.
+
+ if (datein > dateout) {
+ if (longout > 180) {
+ if (((longin >= longout) && (longin <= 360)) ||
+ (longin <= mod((longout + 180.),360.))) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ } else {
+ if ((longin >= longout) && (longin <= (longout + 180))) {
+ if (p1offset <= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ }
+ } else {
+ if (longout < 180) {
+ if (((longin >= (180 + longout)) && (longin <= 360)) ||
+ (longin <= longout)) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ } else {
+ if ((longin < longout) && (longin > (longout - 180))) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ }
+ }
+
+ # Make sure the sign is right
+ if (datein > dateout)
+ offset = -offset
+
+ # Check for the case that the two longitudes are equal.
+ if (longin == longout) {
+ if (abs(datein - dateout) <= 1) {
+ offset = 0
+ } else {
+ call eprintf ("input day too far from center of output map\n")
+ return
+ }
+ }
+
+ # Check for the case that the two dates are equal.
+ if (datein == dateout)
+ offset = longin - longout
+
+ # If the offset is too large then do not use this image.
+ if (abs(offset) > 240) {
+ call eprintf ("input day too far from center of output map\n")
+ return
+ }
+
+ # Determine what part, if not all, of the input image will lie on the
+ # output image.
+
+ firstpix = 1
+ if (offset < -90)
+ firstpix = abs(offset+90)
+ lastpix = DIM_SQUAREIM
+ if (offset > 90)
+ lastpix = 180 - (offset - 90)
+
+
+ # Do all 180 columns in the image.
+ if (offset <= 0)
+ temp = 91
+ else
+ temp = 90
+
+ do column = firstpix,lastpix {
+ do row = 1, DIM_SQUAREIM {
+ temp2 = column + temp + offset
+ outim[temp2,row] = outim[temp2, row] +
+ inim[column, row] * weight_tbl[column]
+ outwim[temp2,row] = outwim[temp2, row] +
+ inwim[column, row] * weight_tbl[column]
+ outaim[temp2,row] = outaim[temp2, row] +
+ inaim[column, row] * weight_tbl[column]
+ }
+ }
+end
+
+
+# ADDHWEIGHT -- Weight input image by cos(longitude - (L-L0))**4, and add
+# it to the output image in the proper place. (For 10830 grams)
+
+procedure addhweight (inim, inwim, outim, outwim, weight_tbl, longin, longout,
+ month, day, year, mapmonth, mapday, mapyear)
+
+int inim[DIM_SQUAREIM, DIM_SQUAREIM] # input image
+short inwim[DIM_SQUAREIM, DIM_SQUAREIM] # input image weights
+real outim[DIM_XCARMAP, DIM_SQUAREIM] # outputimage
+real outwim[DIM_XCARMAP, DIM_SQUAREIM] # output image weights
+int month, day, year # date of input image
+int mapmonth, mapday, mapyear # date of output image
+real weight_tbl[DIM_SQUAREIM] # weight table
+real longin, longout # longitudes of images
+
+int p1offset, p2offset, firstpix, lastpix, column, row
+int offset, datein, dateout, temp, temp2
+int d1900()
+
+begin
+ # Translate the two dates into julian day numbers to make comparisons
+ # simpler.
+
+ datein = d1900 (month, day, year)
+ dateout = d1900 (mapmonth, mapday, mapyear)
+
+ # Figure out the pixel offset between the first pixel of the input
+	# image and the first pixel of the output image.
+ # Actually, there may be two pixel offsets for a particular image
+ # corresponding to the correct position of the image and the 360
+ # degree offset position.
+
+ p1offset = mod(abs(int(longin - longout + .5)), 360) # this is one.
+ p2offset = 360 - p1offset # this is the other.
+
+ # Determine which side of the output image center is each of these
+ # offsets.
+
+ if (datein > dateout) {
+ if (longout > 180) {
+ if (((longin >= longout) && (longin <= 360)) ||
+ (longin <= mod((longout + 180.),360.))) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ } else {
+ if ((longin >= longout) && (longin <= (longout + 180))) {
+ if (p1offset <= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ }
+ } else {
+ if (longout < 180) {
+ if (((longin >= (180 + longout)) && (longin <= 360)) ||
+ (longin <= longout)) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ } else {
+ if ((longin < longout) && (longin > (longout - 180))) {
+ if (p1offset < 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ } else {
+ if (p1offset >= 180)
+ offset = p2offset
+ else
+ offset = p1offset
+ }
+ }
+ }
+
+ # Make sure the sign is right.
+ if (datein > dateout)
+ offset = -offset
+
+ # Check for the case that the two longitudes are equal.
+ if (longin == longout) {
+ if (abs(datein - dateout) <= 1) {
+ offset = 0
+ } else {
+ call eprintf ("Input day too far from center of output map.\n")
+ return
+ }
+ }
+
+ # Check for the case that the two dates are equal.
+ if (datein == dateout)
+ offset = longin - longout
+
+ # If the offset is too large then do not use this image.
+ if (abs(offset) > 240) {
+ call eprintf ("input day too far from center of output map\n")
+ return
+ }
+
+ # Determine what part, if not all, of the input image will lie on the
+ # output image.
+
+ firstpix = 1
+ if (offset < -90)
+ firstpix = abs(offset+90)
+ lastpix = DIM_SQUAREIM
+ if (offset > 90)
+ lastpix = 180 - (offset - 90)
+
+
+ # Do all 180 columns in the image.
+ if (offset <= 0)
+ temp = 91
+ else
+ temp = 90
+
+ do column = firstpix, lastpix {
+ do row = 1, DIM_SQUAREIM {
+ temp2 = column + temp + offset
+ outim[temp2,row] = outim[temp2, row] +
+ inim[column, row] * weight_tbl[column]
+ outwim[temp2,row] = outwim[temp2, row] +
+ inwim[column, row] * weight_tbl[column]
+ }
+ }
+end
diff --git a/noao/imred/vtel/mkpkg b/noao/imred/vtel/mkpkg
new file mode 100644
index 00000000..3da8ea5a
--- /dev/null
+++ b/noao/imred/vtel/mkpkg
@@ -0,0 +1,59 @@
+# Make the VTEL Package
+
+$call relink
+$exit
+
+update:
+ $call relink
+ $call install
+ ;
+
+relink:
+ $set LIBS = "-lxtools"
+ $update libpkg.a
+ $omake x_vtel.x
+ $link x_vtel.o libpkg.a $(LIBS)
+ ;
+
+install:
+ $move x_vtel.e noaobin$
+ ;
+
+libpkg.a:
+ d1900.x
+ decodeheader.x "vt.h" <mach.h>
+ destreak.x "vt.h" <imhdr.h> <imset.h> <mach.h>
+ dicoplot.x "gryscl.inc" "dicoplot.h" "vt.h" <gset.h> <imhdr.h>\
+ <imset.h> <mach.h> <math/curfit.h>
+ dephem.x
+ gauss.x
+ getsqib.x "vt.h" <imhdr.h> <mach.h>
+ imfglexr.x "vt.h" <imhdr.h> <mach.h>
+ imfilt.x "vt.h" <imhdr.h> <imset.h> <mach.h>
+ imratio.x
+ textim.x <imhdr.h> <mach.h>
+ lstsq.x <mach.h>
+ merge.x "vt.h" <imhdr.h> <mach.h>
+ mrqmin.x
+ mscan.x "vt.h" <error.h> <mach.h>
+ numeric.x "vt.h" "numeric.h" <mach.h>
+ pimtext.x "vt.h"
+ pixbit.x "asciilook.inc" "pixelfont.inc"
+ putsqib.x "vt.h" <imhdr.h> <mach.h>
+ quickfit.x "vt.h" <imhdr.h> <mach.h>
+ readheader.x "vt.h" <mach.h> <fset.h>
+ readss1.x "vt.h" <imhdr.h> <mach.h> <fset.h>
+ readss2.x "vt.h" <imhdr.h> <mach.h> <fset.h>
+ readss3.x "vt.h" <imhdr.h> <mach.h> <fset.h>
+ readss4.x "vt.h" <imhdr.h> <mach.h> <fset.h>
+ readsubswath.x "vt.h" <mach.h> <fset.h>
+ readvt.x "vt.h" <imhdr.h> <mach.h> <fset.h>
+ rmap.x "vt.h" "numeric.h" <imhdr.h> <mach.h>
+ syndico.x "vt.h" "trnsfrm.inc" "syndico.h" <mach.h> <imhdr.h>\
+ <imset.h> <gset.h>
+ tcopy.x "vt.h" <error.h> <fset.h> <mach.h> <printf.h>
+ trim.x "vt.h" <imhdr.h> <mach.h>
+ unwrap.x <imhdr.h> <mach.h>
+ vtexamine.x "vt.h" <error.h> <fset.h> <mach.h> <printf.h>
+ writevt.x "vt.h" <error.h> <fset.h> <mach.h>
+ ;
diff --git a/noao/imred/vtel/mrotlogr.cl b/noao/imred/vtel/mrotlogr.cl
new file mode 100644
index 00000000..1612d030
--- /dev/null
+++ b/noao/imred/vtel/mrotlogr.cl
@@ -0,0 +1,68 @@
+#{ MROTLOGR -- Read all the headers on a FITS tape and print out some
+# of the header information for each file. (for Carrington rotation maps)
+
+{
+ struct header, headline, tfile, irafname
+	struct avbzero, lzero, keyword
+ struct tape, outfile
+ struct *fp
+ int sfnum, efnum, filenum
+ bool append
+
+ if (!deftask ("rfits")) {
+ print ("Task rfits not loaded. Load dataio and then try again.")
+ bye
+ }
+
+ # Get the tape name and the output file name.
+ tape = gettape
+ outfile = getout
+
+ # Get the starting and ending file numbers for the log.
+ sfnum = getsfnum
+ efnum = getefnum
+
+ # Get the append flag.
+ append = getapp
+
+ if (!append) {
+ print ("File fname avbzero", >> outfile)
+ }
+
+ filenum = sfnum
+ while (YES) {
+
+ # Read the next fits header from the tape.
+ header = mktemp("temp")
+ fp = header
+ rfits (tape, filenum, make_image=no, long_header=yes, > header)
+
+ # Initialize the output variables.
+ tfile = " "
+ irafname = " "
+ avbzero = " "
+
+ # Now match keywords against this header to obtain needed output.
+ while (fscan (fp, headline) != EOF) {
+ keyword = substr(headline, 1, 8)
+ if (keyword == "File: mt")
+ tfile = substr(headline, 7, 15)
+ else if (keyword == "IRAFNAME")
+ irafname = substr(headline, 12, 20)
+ else if (keyword == "AV_BZERO")
+ avbzero = substr(headline, 19, 27)
+ else if (keyword == "L_ZERO ")
+ lzero = substr(headline, 19, 26)
+ else if (keyword == "End of d") {
+ print (headline, >> outfile)
+ delete (header, verify-)
+ bye
+ }
+ }
+ print (tfile, irafname, avbzero, >> outfile)
+ filenum = filenum + 1
+ delete (header, verify-)
+ if (filenum > efnum)
+ bye
+ }
+}
diff --git a/noao/imred/vtel/mrotlogr.par b/noao/imred/vtel/mrotlogr.par
new file mode 100644
index 00000000..a18b0f4b
--- /dev/null
+++ b/noao/imred/vtel/mrotlogr.par
@@ -0,0 +1,5 @@
+gettape,s,a,,,,Tape to read fits headers from (i.e. "mta")
+getout,s,a,,,,File to put output information in
+getsfnum,i,a,,,,File number on tape from which to start logging
+getefnum,i,a,,,,File number on tape at which logging is to end
+getapp,b,a,,,,Append to existing file?
diff --git a/noao/imred/vtel/mrqmin.x b/noao/imred/vtel/mrqmin.x
new file mode 100644
index 00000000..197e7931
--- /dev/null
+++ b/noao/imred/vtel/mrqmin.x
@@ -0,0 +1,348 @@
+# MRQMIN -- Levenberg-Marquardt nonlinear chi square minimization.
+# From NUMERICAL RECIPES by Press, Flannery, Teukolsky, and Vetterling, p526.
+#
+# Levenberg-Marquardt method, attempting to reduce the value of chi
+# square of a fit between a set of NDATA points X,Y with individual
+# standard deviations SIG, and a nonlinear function dependent on MA
+# coefficients A. The array LISTA numbers the parameters A such that the
+# first MFIT elements correspond to values actually being adjusted; the
+# remaining MA-MFIT parameters are held fixed at their input value. The
+# program returns the current best-fit values for the MA fit parameters
+# A, and chi square, CHISQ. The arrays COVAR and ALPHA with physical
+# dimension NCA (>= MFIT) are used as working space during most
+# iterations. Supply a subroutine FUNCS(X,A,YFIT,DYDA,MA) that evaluates
+# the fitting function YFIT, and its derivatives DYDA with respect to the
+# fitting parameters A at X. On the first call provide an initial guess
+# for the parameters A, and set ALAMDA<0 for initialization (which then
+# sets ALAMDA=0.001). If a step succeeds CHISQ becomes smaller and
+# ALAMDA decreases by a factor of 10. If a step fails ALAMDA grows by a
+# factor of 10. You must call this routine repeatedly until convergence
+# is achieved. Then make one final call with ALAMDA = 0, so that COVAR
+# returns the covariance matrix, and ALPHA the curvature matrix.
+#
+# This routine is cast in the IRAF SPP language but the variable names have
+# been maintained for reference to the original source. Also the working
+# arrays ATRY, BETA, and DA are allocated dynamically to eliminate
+# limitations on the number of parameters fit.
+
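+# A minimal calling sketch (illustrative only; the data arrays and the
+# convergence test are placeholders, with GAUSS from this package standing
+# in for the user-supplied FUNCS routine):
+#
+#	alamda = -1.			# negative value requests initialization
+#	repeat {
+#	    call mrqmin (x, y, sig, ndata, a, ma, lista, mfit, covar,
+#		alpha, nca, chisq, gauss, alamda)
+#	} until (converged)		# e.g. change in chisq below a tolerance
+#
+#	alamda = 0.			# final call returns the covariance matrix
+#	call mrqmin (x, y, sig, ndata, a, ma, lista, mfit, covar,
+#	    alpha, nca, chisq, gauss, alamda)
+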
+procedure mrqmin (x, y, sig, ndata, a, ma, lista, mfit, covar, alpha, nca,
+ chisq, funcs, alamda)
+
+real x[ndata] # X data array
+real y[ndata] # Y data array
+real sig[ndata] # Sigma array
+int ndata # Number of data points
+real a[ma] # Parameter array
+int ma # Number of parameters
+int lista[ma] # List array indexing parameters to fit
+int mfit # Number of parameters to fit
+real covar[nca,nca] # Covariance matrix
+real alpha[nca,nca] # Curvature matrix
+int nca # Matrix dimension (>= mfit)
+real chisq # Chi square of fit
+extern funcs # Function to compute derivatives
+real alamda # Initialization and convergence parameter
+
+int j, k, kk, ihit
+real ochisq
+pointer atry, beta, da
+
+errchk gaussj
+
+begin
+ # Initialize and check that LISTA contains a proper permutation.
+ if (alamda < 0.) {
+ call mfree (atry, TY_REAL)
+ call mfree (beta, TY_REAL)
+ call mfree (da, TY_REAL)
+ call malloc (atry, ma, TY_REAL)
+ call malloc (beta, mfit, TY_REAL)
+ call malloc (da, mfit, TY_REAL)
+
+ kk = mfit + 1
+ do j = 1, ma {
+ ihit = 0
+ do k = 1, mfit
+ if (lista(k) == j)
+ ihit = ihit + 1
+ if (ihit == 0) {
+ lista (kk) = j
+ kk = kk + 1
+ } else if (ihit > 1)
+ call error (0, "Improper permutation in LISTA")
+ }
+ if (kk != (ma + 1))
+ call error (0, "Improper permutation in LISTA")
+ alamda = 0.001
+ call mrqcof (x, y, sig, ndata, a, ma, lista, mfit, alpha,
+ Memr[beta], nca, chisq, funcs)
+ ochisq = chisq
+ do j = 1, ma
+ Memr[atry+j-1] = a[j]
+ }
+
+ # Alter linearized fitting matrix by augmenting diagonal elements.
+ do j = 1, mfit {
+ do k = 1, mfit
+ covar[j,k] = alpha[j,k]
+ covar[j,j] = alpha[j,j] * (1. + alamda)
+ Memr[da+j-1] = Memr[beta+j-1]
+ }
+
+ # Matrix solution.
+ call gaussj (covar, mfit, nca, Memr[da], 1, 1)
+
+ # Once converged evaluate covariance matrix with ALAMDA = 0.
+ if (alamda == 0.) {
+ call covsrt (covar, nca, ma, lista, mfit)
+ call mfree (atry, TY_REAL)
+ call mfree (beta, TY_REAL)
+ call mfree (da, TY_REAL)
+ return
+ }
+
+ # Did the trial succeed?
+ do j = 1, mfit
+ Memr[atry+lista[j]-1] = a[lista[j]] + Memr[da+j-1]
+ call mrqcof (x, y, sig, ndata, Memr[atry], ma, lista, mfit, covar,
+ Memr[da], nca, chisq, funcs)
+
+ # Success: accept the new solution; failure: increase ALAMDA.
+ if (chisq < ochisq) {
+ alamda = 0.1 * alamda
+ ochisq = chisq
+ do j = 1, mfit {
+ do k = 1, mfit
+ alpha[j,k] = covar[j,k]
+ Memr[beta+j-1] = Memr[da+j-1]
+ a[lista[j]] = Memr[atry+lista[j]-1]
+ }
+ } else {
+ alamda = 10. * alamda
+ chisq = ochisq
+ }
+end
+
+
+# MRQCOF -- Evaluate linearized matrix coefficients.
+# From NUMERICAL RECIPES by Press, Flannery, Teukolsky, and Vetterling, p527.
+#
+# Used by MRQMIN to evaluate the linearized fitting matrix ALPHA and vector
+# BETA.
+#
+# This procedure has been recast in the IRAF/SPP language but the variable
+# names have been maintained. Dynamic memory is used.
+
+procedure mrqcof (x, y, sig, ndata, a, ma, lista, mfit, alpha, beta, nalp,
+ chisq, funcs)
+
+real x[ndata] # X data array
+real y[ndata] # Y data array
+real sig[ndata] # Sigma array
+int ndata # Number of data points
+real a[ma] # Parameter array
+int ma # Number of parameters
+int lista[ma] # List array indexing parameters to fit
+int mfit # Number of parameters to fit
+real alpha[nalp,nalp] # Work matrix
+real beta[ma] # Work array
+int nalp # Matrix dimension (>= mfit)
+real chisq # Chi square of fit
+extern funcs # Function to compute derivatives
+
+int i, j, k
+real sig2i, ymod, dy, wt
+pointer sp, dyda
+
+begin
+ call smark (sp)
+ call salloc (dyda, ma, TY_REAL)
+
+ do j = 1, mfit {
+ do k = 1, j
+ alpha[j,k] = 0.
+ beta[j] = 0.
+ }
+
+ chisq = 0.
+ do i = 1, ndata {
+ call funcs (x[i], a, ymod, Memr[dyda], ma)
+ sig2i = 1. / (sig[i] * sig[i])
+ dy = y[i] - ymod
+ do j = 1, mfit {
+ wt = Memr[dyda+lista[j]-1] * sig2i
+ do k = 1, j
+ alpha[j,k] = alpha[j,k] + wt * Memr[dyda+lista[k]-1]
+ beta[j] = beta[j] + dy * wt
+ }
+ chisq = chisq + dy * dy * sig2i
+ }
+
+ do j = 2, mfit
+ do k = 1, j-1
+ alpha[k,j] = alpha[j,k]
+
+ call sfree (sp)
+end
+
+
+# GAUSSJ -- Linear equation solution by Gauss-Jordan elimination.
+# From NUMERICAL RECIPES by Press, Flannery, Teukolsky, and Vetterling, p28.
+#
+# Linear equation solution by Gauss-Jordan elimination. A is an input matrix
+# of N by N elements, stored in an array of physical dimensions NP by
+# NP. B is an input matrix of N by M containing the M right-hand side
+# vectors, stored in an array of physical dimensions NP by MP. On
+# output, A is replaced by its matrix inverse, and B is replaced by the
+# corresponding set of solution vectors.
+#
+# This procedure has been recast in the IRAF/SPP language using dynamic
+# memory allocation and error return. The variable names have been maintained.
+
+procedure gaussj (a, n, np, b, m, mp)
+
+real a[np,np] # Input matrix and returned inverse
+int n # Dimension of input matrix
+int np # Storage dimension of input matrix
+real b[np,mp] # Input RHS matrix and returned solution
+int m # Number of right-hand-side vectors
+int mp # Storage dimension of RHS matrix
+
+int i, j, k, l, ll, irow, icol, indxrl, indxcl
+real big, pivinv, dum
+pointer sp, ipiv, indxr, indxc
+
+begin
+ call smark (sp)
+ call salloc (ipiv, n, TY_INT)
+ call salloc (indxr, n, TY_INT)
+ call salloc (indxc, n, TY_INT)
+
+ do j = 1, n
+ Memi[ipiv+j-1] = 0
+
+ do i = 1, n {
+ big = 0.
+ do j = 1, n {
+ if (Memi[ipiv+j-1] != 1) {
+ do k = 1, n {
+ if (Memi[ipiv+k-1] == 0) {
+ if (abs (a[j,k]) >= big) {
+ big = abs (a[j,k])
+ irow = j
+ icol = k
+ }
+ } else if (Memi[ipiv+k-1] > 1) {
+ call sfree (sp)
+ call error (0, "Singular matrix")
+ }
+ }
+ }
+ }
+
+ Memi[ipiv+icol-1] = Memi[ipiv+icol-1] + 1
+
+ if (irow != icol) {
+ do l = 1, n {
+ dum = a[irow,l]
+ a[irow,l] = a[icol,l]
+ a[icol,l] = dum
+ }
+ do l = 1, m {
+ dum = b[irow,l]
+ b[irow,l] = b[icol,l]
+ b[icol,l] = dum
+ }
+ }
+ Memi[indxr+i-1] = irow
+ Memi[indxc+i-1] = icol
+ if (a[icol,icol] == 0.) {
+ call sfree (sp)
+ call error (0, "Singular matrix")
+ }
+ pivinv = 1. / a[icol,icol]
+ a[icol,icol] = 1
+ do l = 1, n
+ a[icol,l] = a[icol,l] * pivinv
+ do l = 1, m
+ b[icol,l] = b[icol,l] * pivinv
+ do ll = 1, n {
+ if (ll != icol) {
+ dum = a[ll,icol]
+ do l = 1, n
+ a[ll,l] = a[ll,l] - a[icol,l] * dum
+ do l = 1, m
+ b[ll,l] = b[ll,l] - b[icol,l] * dum
+ }
+ }
+ }
+
+ do l = n, 1, -1 {
+ indxrl = Memi[indxr+l-1]
+ indxcl = Memi[indxc+l-1]
+ if (indxrl != indxcl) {
+ do k = 1, n {
+ dum = a[k,indxrl]
+ a[k,indxrl] = a[k,indxcl]
+ a[k,indxcl] = dum
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# COVSRT -- Sort covariance matrix.
+# From NUMERICAL RECIPES by Press, Flannery, Teukolsky, and Vetterling, p515.
+#
+# Given the covariance matrix COVAR of a fit for MFIT of MA total parameters,
+# and their ordering LISTA, repack the covariance matrix to the true order of
+# the parameters. Elements associated with fixed parameters will be zero.
+# NCVM is the physical dimension of COVAR.
+#
+# This procedure has been recast into the IRAF/SPP language but the
+# original variable names are used.
+
+procedure covsrt (covar, ncvm, ma, lista, mfit)
+
+real covar[ncvm,ncvm] # Input and output array
+int ncvm # Physical dimension of array
+int ma # Number of parameters
+int lista[mfit] # Index of fitted parameters
+int mfit # Number of fitted parameters
+
+int i, j
+real swap
+
+begin
+ # Zero all elements below diagonal.
+ do j = 1, ma-1
+ do i = j+1, ma
+ covar[i,j] = 0.
+
+ # Repack off-diag elements of fit into correct locations below diag.
+ do i = 1, mfit-1
+ do j = i+1, mfit
+ if (lista[j] > lista[i])
+ covar [lista[j],lista[i]] = covar[i,j]
+ else
+ covar [lista[i],lista[j]] = covar[i,j]
+
+ # Temporarily store original diag elements in top row and zero diag.
+ swap = covar[1,1]
+ do j = 1, ma {
+ covar[1,j] = covar[j,j]
+ covar[j,j] = 0.
+ }
+ covar[lista[1],lista[1]] = swap
+
+ # Now sort elements into proper order on diagonal.
+ do j = 2, mfit
+ covar[lista[j],lista[j]] = covar[1,j]
+
+ # Finally, fill in above diagonal by symmetry.
+ do j = 2, ma
+ do i = 1, j-1
+ covar[i,j] = covar[j,i]
+end
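
The Levenberg-Marquardt iteration above has a simple core: build the curvature matrix ALPHA and gradient vector BETA, augment the diagonal of ALPHA by a factor (1 + ALAMDA), solve for the parameter step, and keep the trial only when chi square drops (relaxing ALAMDA by 10x), otherwise back off (growing ALAMDA by 10x). The NumPy sketch below shows that accept/reject logic under an assumed interface funcs(x, a) -> (model, jacobian); it illustrates the method and is not the SPP routine above.

    import numpy as np

    def lm_step(x, y, sig, a, funcs, lamda):
        """One Levenberg-Marquardt trial step (sketch of the MRQMIN logic).

        funcs(x, a) is assumed to return (ymod, dyda) with dyda of shape
        (len(x), len(a)); that interface is chosen here for illustration.
        """
        ymod, dyda = funcs(x, a)
        w = 1.0 / sig**2
        dy = y - ymod
        # MRQCOF: curvature matrix alpha, gradient vector beta, chi square.
        alpha = (dyda * w[:, None]).T @ dyda
        beta = dyda.T @ (w * dy)
        chisq = np.sum(w * dy**2)
        # Augment the diagonal by (1 + lamda) and solve for the step.
        covar = alpha.copy()
        covar[np.diag_indices_from(covar)] *= 1.0 + lamda
        da = np.linalg.solve(covar, beta)
        # Evaluate the trial parameters.
        atry = a + da
        ytry, _ = funcs(x, atry)
        chitry = np.sum(((y - ytry) / sig)**2)
        if chitry < chisq:                    # success: accept, relax lamda
            return atry, chitry, lamda * 0.1
        return a, chisq, lamda * 10.0         # failure: reject, grow lamda
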
diff --git a/noao/imred/vtel/mscan.par b/noao/imred/vtel/mscan.par
new file mode 100644
index 00000000..8c903220
--- /dev/null
+++ b/noao/imred/vtel/mscan.par
@@ -0,0 +1,8 @@
+input,s,a,,,,Input file descriptor
+verbose,b,h,yes,,,Print out header data
+files,s,a,,,,List of files to be examined
+makeimage,b,h,yes,,,Make images?
+brief,b,h,y,,,Use short output image names?
+select,b,h,y,,,Make select image?
+bright,b,h,y,,,Make brightness image?
+velocity,b,h,y,,,Make velocity image?
diff --git a/noao/imred/vtel/mscan.x b/noao/imred/vtel/mscan.x
new file mode 100644
index 00000000..9044b943
--- /dev/null
+++ b/noao/imred/vtel/mscan.x
@@ -0,0 +1,188 @@
+include <error.h>
+include <mach.h>
+include "vt.h"
+
+define MAX_RANGES 100
+
+# MSCAN -- Read vacuum telescope area scans.
+
+procedure t_mscan()
+
+char input[SZ_FNAME] # input file template
+char files[SZ_LINE] # file list to process
+bool verbose # verbose flag
+bool makeimage # flag to make an image
+bool bright # flag to make a brightness image
+bool velocity # flag to make a velocity image
+bool select # flag to make a select image
+bool brief # flag to make brief file names
+
+char tapename[SZ_FNAME]
+char diskfile[SZ_LINE]
+int filerange[2 * MAX_RANGES + 1]
+int nfiles, filenumber, recsize, listin
+
+bool clgetb()
+int decode_ranges(), get_next_number(), mscan()
+int fntopnb(), clgfil(), mtneedfileno()
+int mtfile()
+errchk mscan
+
+begin
+ # CLIO for parameters.
+ verbose = clgetb ("verbose")
+ makeimage = clgetb ("makeimage")
+ bright = clgetb ("bright")
+ velocity = clgetb ("velocity")
+ select = clgetb ("select")
+ brief = clgetb ("brief")
+
+ # If the user hasn't asked for ANY of the images, just return.
+ if (!bright && !velocity && !select)
+ return
+
+ # Get input file(s).
+ call clgstr ("input", input, SZ_FNAME)
+ if (mtfile (input) == NO) {
+
+ # This is not a tape file, expand as a list template.
+ listin = fntopnb (input, 0)
+ filenumber = 1
+
+ while (clgfil (listin, diskfile, SZ_FNAME) != EOF) {
+ iferr (recsize = mscan (diskfile, filenumber, brief,
+ verbose, makeimage, select, bright, velocity)) {
+ call eprintf ("Error reading file %s\n")
+ call pargstr (diskfile)
+ }
+ if (recsize == EOF) {
+ call printf ("Tape at EOT\n")
+ break
+ }
+ filenumber = filenumber + 1
+ }
+ call clpcls (listin)
+
+ } else if (mtneedfileno(input) == NO) {
+
+ # This is a tape file and the user specified which file.
+ iferr (recsize = mscan (input, 0, brief, verbose,
+ makeimage, select, bright, velocity)) {
+ call eprintf ("Error reading file %s\n")
+ call pargstr (input)
+ }
+ } else {
+
+ # This is a tape file or files and the user did not specify
+ # which file.
+ call clgstr ("files", files, SZ_LINE)
+
+ if (decode_ranges (files, filerange, MAX_RANGES, nfiles) == ERR)
+ call error (0, "Illegal file number list.")
+
+ if (verbose)
+ call printf ("\n")
+
+ # Loop over files.
+ filenumber = 0
+ while (get_next_number (filerange, filenumber) != EOF) {
+
+ # Assemble the appropriate tape file name.
+ call mtfname (input, filenumber, tapename, SZ_FNAME)
+
+ # Read this file.
+ iferr {
+ recsize = mscan (tapename, filenumber, brief,
+ verbose, makeimage, select, bright, velocity)
+ } then {
+ call eprintf ("Error reading file: %s\n")
+ call pargstr (tapename)
+ call erract (EA_WARN)
+ next
+ }
+ if (recsize == EOF) {
+ call printf ("Tape at EOT\n")
+ break
+ }
+
+ } # End while.
+ }
+end
+
+
+# MSCAN -- Read in the next sector scan file from tape. First read the file
+# header to determine what type of scan it is and then call the appropriate
+# subroutine for that type of scan.
+
+int procedure mscan (input, filenumber, brief, verbose, makeimage, select,
+ bright, velocity)
+
+char input[SZ_FNAME] # input file name
+int filenumber # file number
+bool brief # brief disk file names?
+bool verbose # print header info?
+bool makeimage # make images?
+bool select # make select image?
+bool bright # make bright image?
+bool velocity # make velocity image?
+
+int in
+int lastrecsize
+int recsize
+bool selfbuf
+pointer sp, hbuf, hs
+
+int mtopen()
+int readheader()
+define nexit_ 10
+errchk mtopen, close, readheader
+
+begin
+ call smark (sp)
+ call salloc (hbuf, SZ_VTHDR, TY_SHORT)
+ call salloc (hs, VT_LENHSTRUCT, TY_STRUCT)
+
+ in = mtopen (input, READ_ONLY, 0)
+
+ call printf ("File %s: ")
+ call pargstr (input)
+
+ lastrecsize = 0
+
+ # First, read the header file
+ selfbuf = FALSE
+ recsize = readheader (in, hbuf, selfbuf)
+ if (recsize == EOF)
+ return (recsize)
+
+ # Decode the header and jump if '!makeimage'.
+ lastrecsize = recsize
+ call decodeheader (hbuf, hs, verbose)
+ if (verbose) {
+ call printf ("\n")
+ call flush (STDOUT)
+ }
+ if (!makeimage)
+ goto nexit_
+
+ # Call the appropriate area scan reader.
+ switch (VT_HOBSTYPE(hs)) {
+ case 1:
+ call readss1 (in, filenumber, brief, select, bright, velocity, hs)
+ case 2:
+ call readss2 (in, filenumber, brief, select, bright, velocity, hs)
+ case 3:
+ call readss3 (in, filenumber, brief, select, bright, velocity, hs)
+ case 4:
+ call readss4 (in, filenumber, brief, select, bright, velocity, hs)
+ case 0:
+ call printf ("Observation type zero encountered, image skipped.\n")
+ default:
+ call error (0, "unknown observation type, image skipped")
+ } # End of switch case.
+
+nexit_
+ call sfree (sp)
+ call close (in)
+ return (recsize)
+end
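
When the input is a tape without an explicit file number, t_mscan expands the "files" string with the IRAF decode_ranges/get_next_number helpers and visits each file number in turn. The Python sketch below only approximates that expansion for simple specifications such as "1-3,7"; the real IRAF routines handle more general forms, so treat this as illustrative only.

    def expand_ranges(spec):
        """Expand a file-number list such as "1-3,7" into [1, 2, 3, 7].

        Rough stand-in for the decode_ranges/get_next_number pairing used
        above; the exact grammar the IRAF helpers accept is not reproduced.
        """
        files = []
        for field in spec.split(","):
            field = field.strip()
            if "-" in field:
                lo, hi = (int(v) for v in field.split("-", 1))
                files.extend(range(lo, hi + 1))
            elif field:
                files.append(int(field))
        return files

    # expand_ranges("1-3,7") -> [1, 2, 3, 7]
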
diff --git a/noao/imred/vtel/nsolcrypt.dat b/noao/imred/vtel/nsolcrypt.dat
new file mode 100644
index 00000000..65c3b067
--- /dev/null
+++ b/noao/imred/vtel/nsolcrypt.dat
@@ -0,0 +1,555 @@
+
+
+ '
+
+ #
+ #
+'
+ #
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 6L X}r '[T E_:
+
+ '
+ +}}# y}}A #v}: v}2 TA
+
+ P}E
+ y}= :}}}k T}kEEEX}r +}y
+
+ +EXv+ I}_
+ _}T [}[y}6 .}}}}}}}P 6}k
+
+ #Lr}}}k. .}v
+ T}T y}2T}g g}_2I}}' E}[ #EckcI#
+
+ A}}_E+ v}
+EE_}}. =}r .}}. A}n L}_ X}L +n}}}}}r2
+
+ #2. '}}# _}
+}}}}}v6 _}P [}[ #y}2r}A g}E# #n}r= '[}v'
+
+ 6c}}}n+ k}= 26 E}
+yL.#[}c #y}. 2}y' T}n}v v}2 A}y. _}P
+
+ E}}r[v}n P}kXv}y '}
+} I}n :}r k}T .}}}T +}} _}T :}r L['
+
+ 'y}I +}}2 .}}}v_= r
+}=6Ly}[ _}P :}v g}}+ 'IAI}k n}A 2}} 'y}.
+
+ :}n :}}2 r}P T
+}}}}}c+ =E' .XI :}}}}y[A' k}A :}v P}g .:
+
+ Ak}k6 2}X #T}}g [}X :PE 6
+}v_E+ 6IXgy}}A X}[ c}_ #v}A n}[
+
+ # #In}}}}y+ + 6r}}c'.' =}v[v}}c
+ +:# .y}E X}}6 E}}# :}}.
+
+ =v}y[2.v}E 2y}vA T}T #y}}nL2
+ E}}k[r}}T n}}vL.k}_
+
+ ' A}}2 [}A L}v+ [}P =I2
+ 2g}}}n: 6}y_}}}}y. +=
+
+ v}I :yr# I}n# Ey}2
+ #2' c}T '_}c+ k}=
+
+ +AE6 I}r:g}}}n+ +y}yv}}E
+ +}}: .}v E}v#
+
+ # 2k}}}}n2 #y}}}gEr}[ 6c}rX2
+ 2y}yT6X}r +v}I
+
+ # +y}cEEn}y. P}}: E}n
+ 'Pv}}}}P #Ly}v#
+
+ g}X #g}k +}}6 .r}_ # '
+ #AckL =r}}}y
+
+ .}v# +y}6 [}k[}}k+
+ .c}}n6}v
+
+ E}k c}E .}}}nA
+ 'y}yL }}
+
+ E}n c}P [g6
+ +L+ }}
+
+ 2}}. v}A
+ }}
+
+ g}g# I}v#
+ }}
+
+ # 2y}k=6X}}A
+ Ac
+
+ 6n}}}}r:
+
+
+ :II6 #
+ :2 ''
+
+
+ }k kc
+
+
+ }k rk
+
+
+ }n nk
+
+ 'c2
+ rr ky
+
+ .A.}g
+ }} k}
+
+ T}6k}'
+ }} k}
+
+ :}_E}T
+ y} g} 2T#
+
+ r}+vv#
+ n} [} +vy'
+
+ I}IT}E
+ k} X} g}TE}A
+
+ 'vr.}c
+ k} X}# P}r6y}:
+
+ # X}:c}.
+ ky X}+ 2}}2n}[
+
+ 2}[=}[
+ '}} X}= k}PP}r#
+
+ c}.rv#
+ _}I Ey}_ A}}E L}k2}}=
+
+ A}LE}I
+ +}}}X +g}yE Ly}X# +__+ 2}}:g}X
+
+ '}v'}v
+ L}gy}n}}c+ .k}r2 'g}}g #n}PL}k#
+
+ _}=_}_EEXEE
+ELv}'6v}vE P}}[r}cv}: '_}n:}y2
+
+ E}X6y}}}}}}
+}}}_ +E' 6v}}X#:}v#.:Tcv}}}6r}P
+
+ [}L +222226
+:A=# +[T# g}}}}}}}rkA_}k#
+
+ # +yv+
+ :ELXgkkkknkkgXLE2 '[kXXI=' }y2
+
+ X}L
+ +I_v}}}}}}}}}}}}}}}}}kX=' vy+
+
+ '}v# 'E
+g}}}}}kXLA2222226AT[ky}}}y[A P}T
+
+ T}I .[y}
+}}kP:# ':Tk}}}vL' +yy.
+
+ :}v# 2c}}}k
+I' 2Tv}}yP# X}c
+
+ 'cv}}}}I 'X}}}_6
+ #An}}vA '}}.
+
+ A}}}}}_ =r}}[+
+ =r}}k. [}X
+
+ EkE 2}c' #_}}k2
+ #Ey}}I 'y}PEE#
+
+ A}}k. 2}[ 2r}yL
+ +c}}_# I}}}}[
+
+ =:2k}}T# #X}g =v}n2
+ L}}g' =EP}_ +
+
+ # #y}c'Iy}vE Av}v: E}}g#
+ 6+ :y}r. .}X Iry
+
+ :r}yI'[}}k2 6n}yT' E}}X#
+ P}y. +k}v2 E}g# Ar}yL
+
+ #Ly}r:6n}}T=c}}[' E}}T
+ +y}}c #k}y2 +y}v= An}vI.Pg.
+
+ +[}}[+Ly}}}g2 6}}P
+ #k}}}}A #k}r+ 6k}}[' An}vI#Iy}y.
+
+ =v}yI.PX= +v}X
+ T}}}}}v' 'r}n# Ev}yL#Ir}yT'Pv}yT'
+
+ #P}}r. #k}c# .2+ #==+
+ =}}}}}}}X cnkA :n}r +r}_ 'T}}}}}T'Iv}yT+
+
+ +g}c P}r+ A}}}: T}}y
+ 'y}}}}}}}}A }}}k T}}} 6y}E 6gyX'=n}yT'
+
+ nn :}}6 I}}}E X}}}
+ _}}}}}}}}}v' }}}k X}}} E}y+ 6k}}X'
+
+ ## [} #r}P E}}}E X}}}
+ A}}}}}}}}}}}[ }}}k X}}}+ c}k y}[.
+
+ X}. P}k# E}}}E X}}y
+ +y}}}}}}}}}}}}: }}}k X}}} 'y}E }n
+
+ E}2 'y}. E}}}E X}}v
+ c}}}}}}}}}}}}}v# }}}k X}}} L}v# #}[
+
+ E}A [}[ E}}}E [}}r
+ .}}}}}}}}}}}}}}}2 r}}k X}}} n}L +}X
+
+ :}I 'yy. E}}}E X}}y
+ _kggkgccXg_ckc[# }}}k X}}} :}v' 2}T
+
+ :}X T}X E}}}E [}}k
+ }}}k X}}} k}I 2}P
+
+ :r}E#y}. E}}}A X}}k
+ y}}k X}}y A}v :}E
+
+ #X}yE A}_ E}}}E [}}k
+ y}}_ X}}} #v}: E}E
+ '
+ n}n. c}A E}}}E c}}k
+ y}}[ X}}} X}c E}X
+
+ y}. +}y# E}}}E k}}k
+ }}}X X}}} .}y'6}}g=
+
+ I}[ =}_ =}}}= [}}P
+ r}}X T}}} k}= =n}y#
+
+ k}6 _}A AEA 2.
+ 2EE' #EA2 P}[ +}y
+
+ T}= v}+
+ 2}v _}A
+
+ +yv#.}y
+ .# #}}.=}n
+
+ X}E A}c '2:E
+IXXP' _}}ykXXLE:2. r}EA}n
+
+ :}k P}X +2AEIX[ky}}}}
+}}}}X k}}}}}}}}}}}}nkkXXTEEE22+ [}X _}E
+
+ #ny+ X}T :X_kk}}}}}}}}}}}}}}}
+}}}}[ _}}}}}}}}}}}}}}}}}}}}}}}}A I}[ .yv#
+
+ L}L k}A g}}}}}}}}}}}}}}}}}}}
+}}}}c X}}}}}}}}}}}}}}}}}}}}}}}}X =}k T}I
+
+ :E:22::=:AEP}n# k}6 n}}}}}}}}}}}}}}}}}}}
+}}}}k [}}}}}}}}}}}}}}}}}}}}}}}}X 2}k #yv+
+
+ .}}}}}}}}}}}}}6 k}2 }}}}}}}}}}}}}}}}}}}}
+}}}}g g}}}}}}}}}}}}}}}}}}}}}}}}X 2}} I}nXXXXXXXXTXXP#
+
+ #PPITXXIILPIT6 }}# [}}}}}}}}}}}}}}}}}}}
+}}}}k k}}}}}}}}}}}}}}}}}}}}}}}}E }n #n}}}}}}}}}}}}}2
+
+ +XXXc__ccgkkkc# [T .266AEIXTXXXgkkrv}}
+}}}}X P}}rkkc[XXXIEEA:=22222'' =2 #+ 222222:E6
+
+ A}}}}}}}}}}}}}A #
++22=+ 'kkkknkrrkk}kX
+
+ .22+#. 'vr
+ X}}}}y}}ny}}}g
+
+ T}E
+ .}g#
+
+ .}n +2222222222222222.2222222
+222222222222222222:6:EEEEEEEEEEEEEEEEEEEEEEEELLLTXX2 n}6
+
+ [}6 y}}}}}}}}}}}}}}}}}}}}}}}}
+}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}r I}_
+
+ .}c A}}}}}}}}}}}}}}}}}}}}}}}}}
+}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}' +yr+
+
+ rv E}}}}}}}}}}}}}}}}}}}}}}}}}
+}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}2 _}I
+
+ +}r 6}}}}}}}}}}}}}}}}}}}}}}}}}
+}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}y 6}k
+
+ # P}I v}}}}}}}}}}}}}}}}}}}}}}}}
+}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}I T}A
+
+ v}+ +2222222222222222+22222..
+222.'.++2A}}}}}}r+'+'##.2+'#. 2'.'+#''' ++'2222222' A}r#
+
+ .}}P2
+ }}}}}}k _}T
+
+ #g}}}:
+ }}}}}}k +g}g
+
+ 'T}T
+# }}}}}}k .22222222222222222222226AA+ #Py}r2
+
+ 2}E 6nnv}yr}nn}v}}}}}}}}}}}
+}}}k }}}}}}k 6}}}}}}}}}}}}}}}}}}}}}}}}}}y+P}yP#
+
+ 6}E k}}}}}}}}}}}}}}}}}}}}}}
+}}}}' #}}}}}}k E}}}}}}}}}}}}}}}}}}}}}}}}}}y#T}I
+
+ E}E L}}}}}}}}}}}}}}}}}}}}}}
+}}}}. '}}}}}}k P}}}}}}}}}}}}}}}}}}}}}}}}}}I E}A
+
+ # E}E #v}}}}}}}}}}}}}}}}}}}}}
+}}}}2 }}}}}}k E}}}}}}}}}}}}}}}}}}}}}}}}}v# E}A
+
+ E}6 I}}}}}}}}}}}}}}}}}}}}}
+}}}}. }}}}}}g E}}}}}}}}}}}}}}}}}}}}}}}}}L =}E
+
+ T}6 #n}}}}}}}}}}}}}}}}}}}}
+}}}}2 .}}}}}}g E}}}}}}}}}}}}}}}}}}}}}}}}v# 2}L
+
+ 2r}2 A}}}}}}}}}}}}}}}}}}}}
+}}}}2 .}}}}}}g E}}}}}}}}}}}}}}}}}}}}}}}}= 2}X
+
+ =g}}n# g}}}}}}}}}}}}}}}}}}}
+}}}}2 '}}}}}}c E}}}}}}}}}}}}}}}}}}}}}}}g #}[
+ '
+ #In}}gA#. +y}}}}}}}}}}}}}}}}}}
+}}}}' #}}}}}}X E}}}}}}}}}}}}}}}}}}}}}}y+ }y=
+
+ 'Pv}}_:.X}}c2 A}}}}}}}}}}}}}}}}}}
+}}}}+ #}}}}}}X E}}}}}}}}}}}}}}}}}}}}}}I P}}_.
+
+ 2[}}yX.=g}}}}}}X+ [}}}}}}}}}}}}}}}}}
+}}}}+ '}}}}}}X L}}}}}}}}}}}}}}}}}}}}}g +Tc62g}yT#
+
+ P}}vI+Lr}}vP''T}}yT+ #n}}}}}}}}}}}}}}}}
+}}}}. 2}}}}}}g I}}}}}}}}}}}}}}}}}}}}n# Ev}}}c'6v}vP
+
+ # =kL6Xy}}c=# 'Pv}yL #r}}}}}}}}}}}}}}}
+}}}}. .}}}}}}X E}}}}}}}}}}}}}}}}}}}r+ +_}}XPy}yE'[}}g2
+
+ +g}}vT' #Tyy# +r}}}}}}}}}}}}}}
+}}}}2 .}}}}}}[ E}}}}}}}}}}}}}}}}}}y2 Ey}k.# +c}}nI2k}yX#
+
+ T}rI# _}2 .r}}}}}}}}}}}}}
+}}}}+ 2}}}}}}X E}}}}}}}}}}}}}}}}}v. _}yL :k}}_#Ay}n
+
+ #2 X}2 .r}}}}}}}}}}}}
+}}}}' 2}}}}}}X E}}}}}}}}}}}}}}}}r. .}r+ Ly}vE'PT
+
+ X}IAA# 'g}}}}}}}}}}}
+}}}} 2}}}}}}X E}}}}}}}}}}}}}}}g+ 2}X '_}}k'
+
+ X}}}}g# T}}}}}}}}}}
+}}}} 2}}}}}}X E}}}}}}}}}}}}}}X +}X 2g}2
+
+ E}kLn}[ :v}}}}}}}}
+}}}} 2}}}}}}X E}}}}}}}}}}}}yI +_kkk}c .
+
+ #n}= 'X}}}}}}}
+}}}} 2}}}}}}X L}}}}}}}}}}}c. #v}}}}}X
+
+ 2yy' 2g}}}}}
+}}}}# 2}}}}}}X X}}}}}}}}}n= # P}P #
+
+ I}_ 6g}}}
+}}}} 2}}}}}}X X}}}}}}}rA 'yr#
+
+ g}E 6_y
+}}}} 2}}}}}}X X}}}}}nI [}:
+
+ .}y. '
+Pr}k 2}}}}}}T E}}y_= 6}k
+
+ X}E
+ .' .v}}}}g+ A:# n}6
+
+ # # #k}:2EEEEEEPXI
+# =EA. =cXXXXXLXT+=}g
+
+ # [}[2}}}}}}}}}}
+[ EXI .}}}}}}}}}}[6}n
+
+ A}r'k}kEEEEA=Av
+}+X}}}rI 2gvc.g}I222222=}y'v}:
+
+ +yy.X}r' I
+}}}n:Ty}k. ._}}v}}}k c}IP}X
+
+ #n}I=}}: #
+y}g# .[}}c' #Py}v= I}}A =}g2}v#
+
+ IT T}_+v}I
+.A# +g}_ :v}}X' :T# r}+k}:
+
+ #X}}6 E}r#g}_
+ +}k k}v= P}PA}_
+
+ #g}}E ' .y}2P}r'
+ # }k k}6 .}n'}y'
+
+ +r}y: n}P:}}6
+ .}k k}2 k}6g}E
+
+ 6y}r. y_#r}P
+ +}k g}2 E}X=}c #_#
+
+ Ey}n+ +226EE# X}c
+ }k k}. '}y#r}. _}X
+
+ A}}}}}}}}}}}}[ vy.
+ }k c}+ _}AP}P A}}P
+
+ .y}}}ykkk[y}}P +'
+ }k _}+ A}g+}k L}}+
+
+ +2' #g}yE E+
+ }k k} # #v} IE 6T c}6#
+
+ +n}y6 'yr #
+ }n c}. 6: v}X .y}}}L
+
+ 6r}n+ =}n
+ }y c}# :}}T6y}gc}}6
+
+ Ey}g# [}T #
+ }} k}2 := E}}}}T r}E
+
+ g}X# #y}v6
+ }} c}. 6}c E}}_ +v}:
+
+ += =}yyyA .P+
+ ng 6T '}v L}}E+r}c
+
+ '+ [}P6r}g}}n
+ }}. X}}y}X#
+
+ y}.#_}}}[.
+ +# +k}}E X}}X
+
+ =}vIv}}c. :L
+ Ayn:Py}n}X TL
+
+ [}}}}g6 .y}=
+ 6r}}}r:2}n
+
+ k}}n= #r}g
+ .T# #X}}r2 }}'
+
+ 2gA _}r'
+ '[}}= =r}}Ig}6
+
+ L}y2 .kA #
+ 'Ty}nA '_}}}}L
+
+ #P' 6}}E g}n
+ +y}vA =v}}X
+
+ E}r=y}_ A}}6
+ n}T '[n2
+
+ T}}}k# #r}[
+ 2A6' 6}y'
+
+ Ay}_# P}y' Ig}}gI
+ =r}}}yP# k}T
+
+ #6y}c# #y}T _}}}}}}c
+ A}}kEk}}_ :}}+
+
+ #_y+ T}n# P}yE .k}T +.
+ .}}6 =}}E g}[
+ #
+ +2# .v}E 'v}L# 6}v# [k.
+ :PXT6 L}c X}n 2}y.
+
+ [}_ =}y# k}: #}}' 6vk+
+ 6n}}}}}I k}E 2}}' [}_
+
+ =X' E}k k}E 6}n 6y}}26X
+A .2 =PXPEPP #v}k=2L}y' k}L }}2 'yy#
+
+ E}k #v}= I}[ :}}}y A}
+r #y}. }}}}}}} :}y# y}6 T}k 2}y# :'
+
+ +}}. L}y# X}L E}}g}k v
+}A2' T}g }}A222. A}X '[}}2 6}}I _}c
+
+ X}n. I}}P k}:L}}EL}T T
+}}}}}}}6 }}+ .[}}}T _}}T26_}y.
+
+ #g}}nv}}c# }}g}}E X}= :
+}}TXr}n }}. E}}}n= ' #_}}}}}r6
+
+ Agy}kI 2}}}y= n}.
+v}2 n}A }}' 'y}c2 .yy 6TXX=
+
+ A}}y6 }}
+X}P6}v k}' 2}}# A}n
+
+ # E}y6 2}g
+:}rg}I r}2 'y}TAPy}I
+
+ #' E}X
+ v}}y' n}. A}}}}}P
+
+ ' 'A. #
+ P}}T k}. .LTE+
+
+ '
+ .kr' [k#
+
+
+ #
+
+
+
+
+
+ #
+
+
+ #
+ #
+ #
+
+
+
+
+
+
+
+
+
+ $
+
diff --git a/noao/imred/vtel/numeric.h b/noao/imred/vtel/numeric.h
new file mode 100644
index 00000000..765fac03
--- /dev/null
+++ b/noao/imred/vtel/numeric.h
@@ -0,0 +1,12 @@
+# Structure for argument list to subroutine 'numeric'.
+
+define VT_LENNUMSTRUCT 8 # Length of VT num structure
+
+define VT_DLODX Memr[P2R($1)] # deriv longitude wrt x
+define VT_DLATDY Memr[P2R($1+1)] # deriv latitude wrt y
+define VT_LATTOP Memr[P2R($1+2)] # latitude of top of output pixel
+define VT_LATBOT Memr[P2R($1+3)] # latitude of bottom of output pixel
+define VT_LOLEFT Memr[P2R($1+4)] # longitude of left side of out pixel
+define VT_LORITE Memr[P2R($1+5)] # longitude of right side of out pixel
+define VT_LATMID Memr[P2R($1+6)] # latitude of middle of output pixel
+define VT_LOMID Memr[P2R($1+7)] # longitude of middle of output pixel
diff --git a/noao/imred/vtel/numeric.x b/noao/imred/vtel/numeric.x
new file mode 100644
index 00000000..640778c8
--- /dev/null
+++ b/noao/imred/vtel/numeric.x
@@ -0,0 +1,177 @@
+include <mach.h>
+include "vt.h"
+include "numeric.h"
+
+# NUMERIC -- Calculate some of the information needed to do the projection,
+# including the partial derivatives of latitude and longitude with respect
+# to x and y.
+
+procedure numeric (bzero, el, outputrow, pixel, xpixcenter, ypixcenter, num)
+
+real bzero # latitude of subearth point
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+int outputrow # which output row are we working on
+int pixel # which pixel in that output row
+real xpixcenter, ypixcenter # coordinates of center of pixel
+pointer num # numeric structure pointer
+
+real dlatdy, dlongdx # partial derivatives
+real lat_top, lat_bot # latitude of top and bottom of pix
+real long_left, long_rite # longitude of left and right of pix
+real lat_mid, long_mid # latitude and longitude of middle
+real lat1, long1, lat3, long3, lat4, long4, lat5, long5
+real x1, y1, x3, y3, x4, y4, x5, y5
+bool skip
+
+begin
+ skip = false
+
+ # First calculate lats, longs for this pixel.
+ lat_top = 180./3.1415926*asin(real(outputrow - 90)/90.)
+ lat_bot = 180./3.1415926*asin(real(outputrow - 91)/90.)
+ long_left = real(pixel - 1) - 90.
+ long_rite = real(pixel) - 90.
+ lat_mid = .5 * (lat_top + lat_bot)
+ long_mid = .5 * (long_left + long_rite)
+
+ # Check the proximity of this pixel to the image boundary; if it is
+ # too close, set that output pixel to zero.
+
+ if (abs(abs(lat_mid) - 90.0) < abs(bzero)) {
+ if (abs(abs(lat_top) - 90.0) >= abs(bzero)) {
+ lat_bot = -90.0 + bzero
+ lat_mid = .5 * (lat_top + lat_bot)
+ } else {
+ if (abs(abs(lat_bot) - 90.0) >= abs(bzero)) {
+ lat_top = 90.0 + bzero
+ lat_mid = .5 * (lat_top + lat_bot)
+ } else {
+ # Nothing to map!
+ # Flag to pixelmap marking zero pixel.
+ VT_LATTOP(num) = 10000.
+ return
+ }
+ }
+ } else {
+ if (abs(abs(lat_top) - 90.0) < abs(bzero))
+ lat_top = 90.0 + bzero
+ else
+ if (abs(abs(lat_bot) - 90.0) < abs(bzero))
+ lat_bot = -90.0 + bzero
+ }
+
+ # Now that we have the pixel we want defined, calculate the partial
+ # derivatives we need numerically. First calculate the latitude and
+ # longitude of the centers of the 4 adjacent pixels.
+
+ lat1 = lat_mid
+ if (pixel == 1)
+ long1 = long_mid
+ else
+ long1 = long_mid - 1.0
+
+ lat3 = lat_mid
+ if (pixel == 180)
+ long3 = long_mid
+ else
+ long3 = long_mid + 1.0
+
+ long5 = long_mid
+ if (outputrow == 1)
+ lat5 = lat_mid
+ else
+ lat5 = 180./3.1415926*((asin(real(outputrow - 92)/90.) +
+ asin(real(outputrow - 91)/90.))/2.)
+
+ long4 = long_mid
+ if (outputrow == 180)
+ lat4 = lat_mid
+ else
+ lat4 = 180./3.1415926*((asin(real(outputrow - 89)/90.) +
+ asin(real(outputrow - 90)/90.))/2.)
+
+ # Given these latitudes and longitudes, find out where in xy coords
+ # they are. Get xpixcenter and ypixcenter then the x#s and y#s.
+
+ call getxy (lat_mid, long_mid, bzero, el, xpixcenter, ypixcenter, skip)
+ if (skip) {
+
+ # Off the limb or behind the sun.
+ # Flag to pixelmap marking zero pixel.
+ VT_LATTOP(num) = 10000.
+ return
+ }
+
+ call getxy (lat1, long1, bzero, el, x1, y1, skip)
+ call getxy (lat3, long3, bzero, el, x3, y3, skip)
+ call getxy (lat4, long4, bzero, el, x4, y4, skip)
+ call getxy (lat5, long5, bzero, el, x5, y5, skip)
+
+ # Calculate the partials.
+ if (x3 == x1)
+ dlongdx = 9999.
+ else
+ dlongdx = (long3 - long1) / (x3 - x1)
+
+ if (y4 == y5)
+ dlatdy = 9999.
+ else
+ dlatdy = (lat4 - lat5) / (y4 - y5)
+
+ VT_DLODX(num) = dlongdx
+ VT_DLATDY(num) = dlatdy
+ VT_LATTOP(num) = lat_top
+ VT_LATBOT(num) = lat_bot
+ VT_LOLEFT(num) = long_left
+ VT_LORITE(num) = long_rite
+ VT_LATMID(num) = lat_mid
+ VT_LOMID(num) = long_mid
+end
+
+
+# GETXY -- Given the latitude and longitude of a point and the image
+# parameters, return the x and y position of that point.
+
+procedure getxy (lat, lml0, b0, el, x, y, skip)
+
+real lat # latitude of point on image
+real lml0 # distance in longitude from disk center
+real b0 # latitude of sub earth point
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+real x, y # returned position
+bool skip # skip flag
+
+real sinlat, coslat, sinbzero, cosbzero, sinlminusl0, coslminusl0
+real cosrho, sinrho, sinpminustheta, cospminustheta
+real latitude, lminusl0, bzero
+
+begin
+ skip = false
+ lminusl0 = lml0*3.1415926/180.
+ bzero = b0*3.1415926/180.
+ latitude = lat*3.1415926/180.
+ sinlat = sin(latitude)
+ coslat = cos(latitude)
+ sinbzero = sin(bzero)
+ cosbzero = cos(bzero)
+ sinlminusl0 = sin(lminusl0)
+ coslminusl0 = cos(lminusl0)
+ cosrho = sinbzero * sinlat + cosbzero * coslat * coslminusl0
+
+ # If we are behind the limb, return skip = true.
+ if (cosrho <= 0.00) skip = true
+ sinrho = (1. - cosrho**2)**.5
+ if (sinrho >= EPSILONR) {
+ sinpminustheta = (coslat/sinrho) * sinlminusl0
+ cospminustheta = (coslat/sinrho) * (cosbzero * tan(latitude) -
+ sinbzero * coslminusl0)
+ } else {
+ sinpminustheta = 0.000001
+ cospminustheta = 0.000001
+ }
+
+ x = E_XSEMIDIAMETER(el) * sinrho * sinpminustheta
+ y = E_YSEMIDIAMETER(el) * sinrho * cospminustheta
+ x = x + real(E_XCENTER(el))
+ y = y + real(E_YCENTER(el))
+end
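
getxy is an orthographic projection of a heliographic point onto the visible disk: x scales with cos(lat) sin(l - l0), y with cos(B0) sin(lat) - sin(B0) cos(lat) cos(l - l0), both multiplied by the ellipse semidiameters and offset by the disk center, and the point is hidden when cos(rho) <= 0. A Python sketch of the same geometry, written without the intermediate sin(rho) division used above:

    import math

    def heliographic_to_xy(lat_deg, dlong_deg, b0_deg, xc, yc, xsemi, ysemi):
        """Project a heliographic point onto the disk (sketch of getxy).

        dlong_deg is the longitude measured from disk center (l - l0) and
        b0_deg the latitude of the sub-earth point.  Returns (x, y, visible),
        with visible False when the point is behind the limb.
        """
        lat = math.radians(lat_deg)
        dl = math.radians(dlong_deg)
        b0 = math.radians(b0_deg)
        cosrho = (math.sin(b0) * math.sin(lat)
                  + math.cos(b0) * math.cos(lat) * math.cos(dl))
        # sin(rho)sin(p-theta) and sin(rho)cos(p-theta) expand to:
        sx = math.cos(lat) * math.sin(dl)
        sy = (math.cos(b0) * math.sin(lat)
              - math.sin(b0) * math.cos(lat) * math.cos(dl))
        return xc + xsemi * sx, yc + ysemi * sy, cosrho > 0.0
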
diff --git a/noao/imred/vtel/pimtext.par b/noao/imred/vtel/pimtext.par
new file mode 100644
index 00000000..fa0494d9
--- /dev/null
+++ b/noao/imred/vtel/pimtext.par
@@ -0,0 +1,13 @@
+iraf_files,s,q,,,,Images to be written
+refim,s,q,,,,Reference image to get date and time from
+ref,b,h,no,,,Find the date and time in a reference image
+x,i,h,10,,,X position of text in image
+y,i,h,10,,,Y position of text in image
+xmag,i,h,2,,,Text magnification factor in x direction
+ymag,i,h,2,,,Text magnification factor in y direction
+val,i,h,-10000,,,Value to use to write text in images
+setbgnd,b,h,yes,,,Set the pixels in the image behind the text
+bgndval,i,h,10000,,,Value to use in background of text
+date,b,h,yes,,,Write the date into the images
+time,b,h,yes,,,Write the time into the images
+text,s,q,,,,Text string to write into image
diff --git a/noao/imred/vtel/pimtext.x b/noao/imred/vtel/pimtext.x
new file mode 100644
index 00000000..b39c12be
--- /dev/null
+++ b/noao/imred/vtel/pimtext.x
@@ -0,0 +1,131 @@
+include "vt.h"
+
+# PIMTEXT -- Put a text string directly into an image using a pixel font
+# and writing over the image pixels.
+
+procedure t_pimtext()
+
+char im[SZ_FNAME] # image to put text in
+char refim[SZ_FNAME] # reference image (get date/time)
+int x, y # position to put text
+int xmag, ymag # text magnification parameters
+int val # value to use for text pixels
+int bgndval # value to use for background pixels
+bool setbgnd # flag, should we set the background?
+bool ref # flag, are we using a ref image
+
+int obstime, obsdate, hour, minute, second
+int list, nfiles
+int month, day, year
+char dt[DTSTRING]
+bool istime, isdate, date, time
+pointer imp, rimp
+
+bool clgetb(), imaccf()
+int clgeti(), imgeti()
+int clpopni(), clplen(), clgfil()
+pointer immap()
+errchk immap
+
+begin
+ # Get file name template from the CL.
+ list = clpopni ("iraf_files")
+ nfiles = clplen (list)
+
+ # Get some other parameters.
+ ref = clgetb ("ref")
+ if (ref)
+ call clgstr ("refim", refim, SZ_FNAME)
+ x = clgeti ("x")
+ y = clgeti ("y")
+ xmag = clgeti ("xmag")
+ ymag = clgeti ("ymag")
+ val = clgeti ("val")
+ setbgnd = clgetb ("setbgnd")
+ bgndval = clgeti ("bgndval")
+ date = clgetb ("date")
+ time = clgetb ("time")
+
+ while (clgfil (list, im, SZ_FNAME) != EOF) {
+ # Open the image(s).
+ imp = immap (im, READ_WRITE, 0)
+ if (ref)
+ rimp = immap (refim, READ_ONLY, 0)
+
+ if (date || time) {
+ # Find out if the date and time exist in the image header.
+ if (ref) {
+ istime = imaccf (rimp, "obs_time")
+ isdate = imaccf (rimp, "obs_date")
+ } else {
+ istime = imaccf (imp, "obs_time")
+ isdate = imaccf (imp, "obs_date")
+ }
+
+ # Get the date and/or time.
+ if (date && isdate && !time) {
+ if (ref)
+ obsdate = imgeti (rimp, "obs_date")
+ else
+ obsdate = imgeti (imp, "obs_date")
+
+ month = obsdate / 10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+ call sprintf (dt, DTSTRING, "%02d/%02d/%02d")
+ call pargi (month)
+ call pargi (day)
+ call pargi (year)
+
+ } else if (time && istime && !date) {
+ if (ref)
+ obstime = imgeti (rimp, "obs_time")
+ else
+ obstime = imgeti (imp, "obs_time")
+
+ hour = int(obstime/3600)
+ minute = int((obstime - hour * 3600)/60)
+ second = obstime - hour * 3600 - minute * 60
+ call sprintf (dt, DTSTRING, "%02d:%02d:%02d")
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (second)
+
+ } else if (istime && isdate && time && date) {
+ if (ref) {
+ obstime = imgeti (rimp, "obs_time")
+ obsdate = imgeti (rimp, "obs_date")
+ } else {
+ obstime = imgeti (imp, "obs_time")
+ obsdate = imgeti (imp, "obs_date")
+ }
+
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+ hour = int(obstime/3600)
+ minute = int((obstime - hour * 3600)/60)
+ second = obstime - hour * 3600 - minute * 60
+ call sprintf (dt, DTSTRING, "%02d:%02d:%02d %02d/%02d/%02d")
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (second)
+ call pargi (month)
+ call pargi (day)
+ call pargi (year)
+ } else {
+ call printf ("Warning: cannot get date and/or time.\n")
+ call printf ("Getting text string from the CL.\n")
+ call clgstr ("text", dt, DTSTRING)
+ }
+ } else
+ call clgstr ("text", dt, DTSTRING)
+
+ call textim (imp, dt, x, y, xmag, ymag, val, setbgnd, bgndval)
+ call imunmap (imp)
+ if (ref)
+ call imunmap (rimp)
+ } # end while
+
+ call clpcls (list)
+end
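
The strings written into the image come from two packed header keywords: obs_date stores MMDDYY as one integer and obs_time stores seconds since midnight; the arithmetic above simply peels those fields apart. A Python sketch of the same decoding (the sample values in the comment are invented for illustration):

    def decode_obs(obs_date, obs_time):
        """Decode the packed obs_date (MMDDYY) and obs_time (seconds) keywords."""
        month, rest = divmod(obs_date, 10000)
        day, year = divmod(rest, 100)
        hour, rest = divmod(obs_time, 3600)
        minute, second = divmod(rest, 60)
        return (month, day, year), (hour, minute, second)

    # decode_obs(61586, 45296) -> ((6, 15, 86), (12, 34, 56))
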
diff --git a/noao/imred/vtel/pixbit.x b/noao/imred/vtel/pixbit.x
new file mode 100644
index 00000000..a6db321a
--- /dev/null
+++ b/noao/imred/vtel/pixbit.x
@@ -0,0 +1,23 @@
+# PIXBIT -- Look up which bits should be set for this character on this line.
+
+procedure pixbit (code, line, bitarray)
+
+int code # character we are writing
+int line # line of the character we are writing
+int bitarray[5] # bit-array to receive data
+
+int pix, i
+short asciilook[128]
+short font[455]
+int bitupk()
+include "pixelfont.inc"
+include "asciilook.inc"
+
+begin
+ pix = font[asciilook[code+1]+line-1]
+ bitarray[5] = bitupk (pix, 1, 1)
+ bitarray[4] = bitupk (pix, 4, 1)
+ bitarray[3] = bitupk (pix, 7, 1)
+ bitarray[2] = bitupk (pix, 10, 1)
+ bitarray[1] = bitupk (pix, 13, 1)
+end
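
The offsets 1, 4, 7, 10 and 13 passed to bitupk follow from how the font is stored: each 5-pixel glyph row in pixelfont.inc is an octal literal whose digits are all 0 or 1, so one pixel lands in every third bit of the word. A Python sketch of the same unpacking:

    def unpack_font_row(word):
        """Unpack one 5-pixel row of the pixel font (cf. pixbit/bitupk).

        The leftmost pixel is the highest octal digit, i.e. bit offset 13
        in the 1-indexed convention used by bitupk.
        """
        return [(word >> shift) & 1 for shift in (12, 9, 6, 3, 0)]

    # unpack_font_row(0o01110) -> [0, 1, 1, 1, 0]
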
diff --git a/noao/imred/vtel/pixelfont.inc b/noao/imred/vtel/pixelfont.inc
new file mode 100644
index 00000000..92216e6d
--- /dev/null
+++ b/noao/imred/vtel/pixelfont.inc
@@ -0,0 +1,519 @@
+data (font[i], i=1,7) / 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B / # (space)
+
+data (font[i], i=8,14) / 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00000B,
+ 00100B / # !
+
+data (font[i], i=15,21) / 01010B,
+ 01010B,
+ 01010B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B / # "
+
+data (font[i], i=22,28) / 01010B,
+ 01010B,
+ 11111B,
+ 01010B,
+ 11111B,
+ 01010B,
+ 01010B / # #
+
+data (font[i], i=29,35) / 00100B,
+ 01111B,
+ 10100B,
+ 01110B,
+ 00101B,
+ 11110B,
+ 00100B / # $
+
+data (font[i], i=36,42) / 11000B,
+ 11001B,
+ 00010B,
+ 00100B,
+ 01000B,
+ 10011B,
+ 00011B / # %
+
+data (font[i], i=43,49) / 01000B,
+ 10100B,
+ 10100B,
+ 01000B,
+ 10101B,
+ 10010B,
+ 01101B / # &
+
+data (font[i], i=50,56) / 00100B,
+ 00100B,
+ 00100B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B / # '
+
+data (font[i], i=57,63) / 00100B,
+ 01000B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 01000B,
+ 00100B / # (
+
+data (font[i], i=64,70) / 00100B,
+ 00010B,
+ 00001B,
+ 00001B,
+ 00001B,
+ 00010B,
+ 00100B / # )
+
+data (font[i], i=71,77) / 00100B,
+ 10101B,
+ 01110B,
+ 00100B,
+ 01110B,
+ 10101B,
+ 00100B / # *
+
+data (font[i], i=78,84) / 00000B,
+ 00100B,
+ 00100B,
+ 11111B,
+ 00100B,
+ 00100B,
+ 00000B / # +
+
+data (font[i], i=85,91) / 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00100B,
+ 00100B,
+ 01000B / # ,
+
+data (font[i], i=92,98) / 00000B,
+ 00000B,
+ 00000B,
+ 11111B,
+ 00000B,
+ 00000B,
+ 00000B / # -
+
+data (font[i], i=99,105) / 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00100B / # .
+
+data (font[i], i=106,112) / 00000B,
+ 00001B,
+ 00010B,
+ 00100B,
+ 01000B,
+ 10000B,
+ 00000B / # /
+
+data (font[i], i=113,119) / 01110B,
+ 10001B,
+ 10011B,
+ 10101B,
+ 11001B,
+ 10001B,
+ 01110B / # 0
+
+data (font[i], i=120,126) / 00100B,
+ 01100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 01110B / # 1
+
+data (font[i], i=127,133) / 01110B,
+ 10001B,
+ 00001B,
+ 00110B,
+ 01000B,
+ 10000B,
+ 11111B / # 2
+
+data (font[i], i=134,140) / 11111B,
+ 00001B,
+ 00010B,
+ 00110B,
+ 00001B,
+ 10001B,
+ 11111B / # 3
+
+data (font[i], i=141,147) / 00010B,
+ 00110B,
+ 01010B,
+ 11111B,
+ 00010B,
+ 00010B,
+ 00010B / # 4
+
+data (font[i], i=148,154) / 11111B,
+ 10000B,
+ 11110B,
+ 00001B,
+ 00001B,
+ 10001B,
+ 01110B / # 5
+
+data (font[i], i=155,161) / 00111B,
+ 01000B,
+ 10000B,
+ 11110B,
+ 10001B,
+ 10001B,
+ 01110B / # 6
+
+data (font[i], i=162,168) / 11111B,
+ 00001B,
+ 00010B,
+ 00100B,
+ 01000B,
+ 01000B,
+ 01000B / # 7
+
+data (font[i], i=169,175) / 01110B,
+ 10001B,
+ 10001B,
+ 01110B,
+ 10001B,
+ 10001B,
+ 01110B / # 8
+
+data (font[i], i=176,182) / 01110B,
+ 10001B,
+ 10001B,
+ 01111B,
+ 00001B,
+ 00010B,
+ 11100B / # 9
+
+data (font[i], i=183,189) / 00000B,
+ 00000B,
+ 00100B,
+ 00000B,
+ 00100B,
+ 00000B,
+ 00000B / # :
+
+data (font[i], i=190,196) / 00000B,
+ 00000B,
+ 00100B,
+ 00000B,
+ 00100B,
+ 00100B,
+ 01000B / # ;
+
+data (font[i], i=197,203) / 00010B,
+ 00100B,
+ 01000B,
+ 10000B,
+ 01000B,
+ 00100B,
+ 00010B / # <
+
+data (font[i], i=204,210) / 00000B,
+ 00000B,
+ 11111B,
+ 00000B,
+ 11111B,
+ 00000B,
+ 00000B / # =
+
+data (font[i], i=211,217) / 01000B,
+ 00100B,
+ 00010B,
+ 00001B,
+ 00010B,
+ 00100B,
+ 01000B / # >
+
+data (font[i], i=218,224) / 01110B,
+ 10001B,
+ 00010B,
+ 00100B,
+ 00100B,
+ 00000B,
+ 00100B / # ?
+
+data (font[i], i=225,231) / 01110B,
+ 10001B,
+ 10101B,
+ 10111B,
+ 10110B,
+ 10000B,
+ 01111B / # @
+
+data (font[i], i=232,238) / 00100B,
+ 01010B,
+ 10001B,
+ 10001B,
+ 11111B,
+ 10001B,
+ 10001B / # A
+
+data (font[i], i=239,245) / 11110B,
+ 10001B,
+ 10001B,
+ 11110B,
+ 10001B,
+ 10001B,
+ 11110B / # B
+
+data (font[i], i=246,252) / 01110B,
+ 10001B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 10001B,
+ 01110B / # C
+
+data (font[i], i=253,259) / 11110B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 11110B / # D
+
+data (font[i], i=260,266) / 11111B,
+ 10000B,
+ 10000B,
+ 11110B,
+ 10000B,
+ 10000B,
+ 11111B / # E
+
+data (font[i], i=267,273) / 11111B,
+ 10000B,
+ 10000B,
+ 11110B,
+ 10000B,
+ 10000B,
+ 10000B / # F
+
+data (font[i], i=274,280) / 01111B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 10011B,
+ 10001B,
+ 01111B / # G
+
+data (font[i], i=281,287) / 10001B,
+ 10001B,
+ 10001B,
+ 11111B,
+ 10001B,
+ 10001B,
+ 10001B / # H
+
+data (font[i], i=288,294) / 01110B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 01110B / # I
+
+data (font[i], i=295,301) / 00001B,
+ 00001B,
+ 00001B,
+ 00001B,
+ 00001B,
+ 10001B,
+ 01110B / # J
+
+data (font[i], i=302,308) / 10001B,
+ 10010B,
+ 10100B,
+ 11000B,
+ 10100B,
+ 10010B,
+ 10001B / # K
+
+data (font[i], i=309,315) / 10000B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 10000B,
+ 11111B / # L
+
+data (font[i], i=316,322) / 10001B,
+ 11011B,
+ 10101B,
+ 10101B,
+ 10001B,
+ 10001B,
+ 10001B / # M
+
+data (font[i], i=323,329) / 10001B,
+ 10001B,
+ 11001B,
+ 10101B,
+ 10011B,
+ 10001B,
+ 10001B / # N
+
+data (font[i], i=330,336) / 01110B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 01110B / # O
+
+data (font[i], i=337,343) / 11110B,
+ 10001B,
+ 10001B,
+ 11110B,
+ 10000B,
+ 10000B,
+ 10000B / # P
+
+data (font[i], i=344,350) / 01110B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10101B,
+ 10010B,
+ 01101B / # Q
+
+data (font[i], i=351,357) / 11110B,
+ 10001B,
+ 10001B,
+ 11110B,
+ 10100B,
+ 10010B,
+ 10001B / # R
+
+data (font[i], i=358,364) / 01110B,
+ 10001B,
+ 10000B,
+ 01110B,
+ 00001B,
+ 10001B,
+ 01110B / # S
+
+data (font[i], i=365,371) / 11111B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B / # T
+
+data (font[i], i=372,378) / 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 01110B / # U
+
+data (font[i], i=379,385) / 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 10001B,
+ 01010B,
+ 00100B / # V
+
+data (font[i], i=386,392) / 10001B,
+ 10001B,
+ 10001B,
+ 10101B,
+ 10101B,
+ 11011B,
+ 10001B / # W
+
+data (font[i], i=393,399) / 10001B,
+ 10001B,
+ 01010B,
+ 00100B,
+ 01010B,
+ 10001B,
+ 10001B / # X
+
+data (font[i], i=400,406) / 10001B,
+ 10001B,
+ 01010B,
+ 00100B,
+ 00100B,
+ 00100B,
+ 00100B / # Y
+
+data (font[i], i=407,413) / 11111B,
+ 00001B,
+ 00010B,
+ 00100B,
+ 01000B,
+ 10000B,
+ 11111B / # Z
+
+data (font[i], i=414,420) / 11111B,
+ 11000B,
+ 11000B,
+ 11000B,
+ 11000B,
+ 11000B,
+ 11111B / # [
+
+data (font[i], i=421,427) / 00000B,
+ 10000B,
+ 01000B,
+ 00100B,
+ 00010B,
+ 00001B,
+ 00000B / # \
+
+data (font[i], i=428,434) / 11111B,
+ 00011B,
+ 00011B,
+ 00011B,
+ 00011B,
+ 00011B,
+ 11111B / # ]
+
+data (font[i], i=435,441) / 00000B,
+ 00000B,
+ 00100B,
+ 01010B,
+ 10001B,
+ 00000B,
+ 00000B / # ^
+
+data (font[i], i=442,448) / 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 00000B,
+ 11111B / # _
+
+data (font[i], i=449,455) / 11111B,
+ 10001B,
+ 11011B,
+ 10101B,
+ 11011B,
+ 10001B,
+ 11111B / # (unknown)
diff --git a/noao/imred/vtel/putsqib.par b/noao/imred/vtel/putsqib.par
new file mode 100644
index 00000000..635f540f
--- /dev/null
+++ b/noao/imred/vtel/putsqib.par
@@ -0,0 +1,3 @@
+image,s,q,,,,Data image to merge with squibby brightness image
+sqibimage,s,q,,,,Squibby brightness image
+merged,s,q,,,,New image to contain the merged image
diff --git a/noao/imred/vtel/putsqib.x b/noao/imred/vtel/putsqib.x
new file mode 100644
index 00000000..9299c4d4
--- /dev/null
+++ b/noao/imred/vtel/putsqib.x
@@ -0,0 +1,69 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+# PUTSQIB -- Merge a solar synoptic 'data only' image with a
+# squibby brightness image.  The output is a separate merged image.
+
+procedure t_putsqib()
+
+char image[SZ_FNAME] # input image
+char sqibimage[SZ_FNAME] # squibby brightness image
+char merged[SZ_FNAME] # output merged image
+
+int i, numpix
+pointer im, ldatagp, lsqibgp, lpp, sqibim, mim
+pointer immap(), imgl2s(), impl2s()
+errchk immap, imgl2s, impl2s
+
+begin
+ # Get parameters from the CL.
+ call clgstr ("image", image, SZ_FNAME)
+ call clgstr ("sqibimage", sqibimage, SZ_FNAME)
+ call clgstr ("merged", merged, SZ_FNAME)
+
+ # Open the two input images, see that they are the same size.
+ im = immap (image, READ_ONLY, 0)
+ sqibim = immap (sqibimage, READ_ONLY, 0)
+
+ # If not, error.
+ if (IM_LEN(im,2) != IM_LEN(sqibim,2))
+ call error(0,"sizes of data image and sqib image must match")
+
+ if (IM_LEN(im,1) != IM_LEN(sqibim,1))
+ call error(0,"sizes of data image and sqib image must match")
+
+ # Open the new image.
+ mim = immap (merged, NEW_COPY, im)
+
+ do i = 1, IM_LEN(im,2) {
+ ldatagp = imgl2s (im, i)
+ lsqibgp = imgl2s (sqibim, i)
+ lpp = impl2s (mim, i)
+ numpix = IM_LEN(im,1)
+ call sqibput (Mems[ldatagp], Mems[lsqibgp], Mems[lpp], numpix)
+ }
+
+ # Unmap images.
+ call imunmap (im)
+ call imunmap (sqibim)
+ call imunmap (mim)
+end
+
+
+# SQIBPUT -- pack squibby brightness from line2 into line1 and put the
+# result into line3.
+
+procedure sqibput (line1, line2, line3, numpix)
+
+short line1[numpix] # data line
+short line2[numpix] # sqib line
+short line3[numpix] # out line
+int numpix # number of pixels
+
+int i
+
+begin
+ do i = 1, numpix
+ line3[i] = line1[i]*16 + line2[i]
+end
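
The merge itself is fixed-point packing: the data value is shifted up by four bits and the 4-bit squibby brightness placed in the low bits (line3 = line1*16 + line2). A Python sketch of the pack and an inverse; the inverse is shown only for illustration and assumes non-negative data values:

    def merge_sqib(data, sqib):
        """Pack a data value and its 4-bit squibby brightness into one pixel."""
        return data * 16 + sqib

    def split_sqib(pixel):
        """Recover (data, squibby brightness) from a merged pixel."""
        return pixel // 16, pixel & 0o17

    # merge_sqib(123, 5) -> 1973; split_sqib(1973) -> (123, 5)
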
diff --git a/noao/imred/vtel/quickfit.par b/noao/imred/vtel/quickfit.par
new file mode 100644
index 00000000..6ce8e742
--- /dev/null
+++ b/noao/imred/vtel/quickfit.par
@@ -0,0 +1,8 @@
+image,s,q,,,,Image file descriptor
+threshold,i,h,4,,,Squibby brightness threshold
+verbose,b,h,no,,,Print out in verbose mode?
+xguess,i,h,1024,,,X coordinate of center of guess circle
+yguess,i,h,1024,,,Y coordinate of center of guess circle
+halfwidth,i,h,50,,,Halfwidth of limbfinding window
+rowspace,i,h,20,,,# of rows to skip near center in limbfind
+rejectcoeff,r,h,.02,,,Least squares rejection coefficient
diff --git a/noao/imred/vtel/quickfit.x b/noao/imred/vtel/quickfit.x
new file mode 100644
index 00000000..40efb257
--- /dev/null
+++ b/noao/imred/vtel/quickfit.x
@@ -0,0 +1,499 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+define SZ_VTPBUF 4096 # Size of limb point buffer.
+
+# QUICKFIT -- Given a fulldisk solar image, find the parameters of an ellipse
+# that best fits the limb. First the points on the limb are determined using
+# the squibby brightness, then an initial guess for the limb parameters is
+# made, and finally a least squares fit is made by an iterative method.
+
+procedure t_quickfit()
+
+char image[SZ_FNAME] # image to find the limb on
+int threshold # squibby limb threshold
+bool verbose # verbose flag
+
+pointer pb # buffer for saving limb points
+int npoints, rejects # number of limb pts, rejects
+real x, y, a, b # x, y, a, b (a = z0)
+real rguess, rpercent # initial guess at r, % rejects
+errchk limbfind, efit
+pointer im, sp
+
+pointer immap()
+int clgeti()
+bool clgetb()
+errchk immap, limbfind
+
+begin
+ call smark (sp)
+ call salloc (pb, 2*SZ_VTPBUF, TY_INT)
+
+ # Get parameters from the cl.
+ call clgstr ("image", image, SZ_FNAME)
+ threshold = clgeti ("threshold")
+ verbose = clgetb ("verbose")
+
+ # Open image.
+ im = immap (image, READ_WRITE, 0)
+
+ # Get the point buffer and npoints.
+ iferr (call limbfind (im, Memi[pb], npoints, threshold, rguess,
+ verbose))
+ call eprintf("Error getting limbpoints.\n")
+ if (verbose) {
+ call printf ("\nrguess = %g\n")
+ call pargr (rguess)
+ call flush (STDOUT)
+ }
+
+ # Fit the ellipse.
+ b = rguess
+ a = rguess
+ x = real(DIM_VTFD)/2.
+ y = real(DIM_VTFD)/2.
+ iferr (call efit (Memi[pb], npoints, x, y, a, b, rejects, verbose))
+ call eprintf ("Error fitting ellipse.\n")
+
+ rpercent = real(rejects)/real(npoints)
+ if (verbose) {
+ call printf ("\nTotal number of limbpoints found was %d\n")
+ call pargi (npoints)
+ call printf ("Number of limbpoints rejected was %d\n")
+ call pargi (rejects)
+ call printf ("Fraction of limb points rejected = %g\n")
+ call pargr (rpercent)
+ call flush (STDOUT)
+ }
+
+ # Put ellipse parameters in image header.
+ call imaddr (im, "E_XCEN", x)
+ call imaddr (im, "E_YCEN", y)
+ call imaddr (im, "E_XSMD", a)
+ call imaddr (im, "E_YSMD", b)
+
+ # Close the image.
+ call imunmap (im)
+
+ call sfree (sp)
+end
+
+
+# LIMBFIND - Find all of the points on the image that determine the
+# limb. This is done line by line.
+
+procedure limbfind (imageptr, pointbuf, npoints, threshold, rguess, verbose)
+
+pointer imageptr # pointer to image
+int pointbuf[SZ_VTPBUF,2] # buffer in which to store limb points
+int npoints # number of points
+int threshold # squibby threshold
+real rguess # first guess at radius
+bool verbose # verbose flag
+
+int rowspace, halfwidth, leftsave, rightsave, y
+int numpix, numrow, leftx, rightx, yesno
+int month, day, year, hour, minute, second, obsdate, obstime
+real b0, l0
+pointer lpg
+
+pointer imgl2s()
+int clgeti(), imgeti()
+errchk ephem, flocr, florr, imgl2s
+
+begin
+ # Get date and time from the header.
+ obsdate = imgeti (imageptr, "OBS_DATE")
+ obstime = imgeti (imageptr, "OBS_TIME")
+
+ # Calculate the month/day/year.
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+
+ # Calculate the hour:minute:second.
+ hour = int(obstime/3600)
+ minute = int((obstime - hour * 3600)/60)
+ second = obstime - hour * 3600 - minute * 60
+ if (verbose) {
+ call printf("date and time of this image = %d/%d/%d, %d:%d:%d\n")
+ call pargi(month)
+ call pargi(day)
+ call pargi(year)
+ call pargi(hour)
+ call pargi(minute)
+ call pargi(second)
+ call flush (STDOUT)
+ }
+
+ # Get rowspace and halfwidth from the cl.
+ halfwidth = clgeti("halfwidth")
+ rowspace = clgeti("rowspace")
+
+ numpix = IM_LEN(imageptr, 1)
+ numrow = IM_LEN(imageptr, 2)
+ npoints = 0
+
+ # Get rguess from ephem.
+ iferr (call ephem (month, day, year, hour, minute, second, rguess,
+ b0, l0, verbose))
+ call eprintf ("Error getting ephemeris data.\n")
+
+ # Put b0 and l0 in the image header.
+ call imaddr (imageptr, "B_ZERO", b0)
+ call imaddr (imageptr, "L_ZERO", l0)
+
+ # Get central row to start with and find its limb points.
+ lpg = imgl2s (imageptr, numrow/2)
+ yesno = YES
+ iferr (call flocr (Mems[lpg], numpix, pointbuf, numrow, npoints, leftx,
+ rightx, threshold, yesno))
+ call eprintf ("Error in 'find limb on center row(flocr)'\n")
+ if (yesno == NO)
+ call error (0,"Failure to find initial limb points, quickfit dies")
+
+ leftsave = leftx
+ rightsave = rightx
+
+ # Find the limb points for the lower half of the image.
+ yesno = YES
+ y = numrow/2-rowspace
+ while (y >= 1) {
+
+ # Read this line in from the image.
+ lpg = imgl2s (imageptr, y)
+
+ # Find its limb points.
+ iferr (call florr (Mems[lpg], numpix, pointbuf, npoints, numrow,
+ y, leftx, rightx, threshold, yesno, rguess, halfwidth))
+ call eprintf ("Error in florr.\n")
+ if (yesno == NO)
+ break
+ if (abs(y-numrow/2) > rguess)
+ break
+ if ((int(rowspace * (rguess**2 -
+ real(y-numrow/2)**2)**.5/rguess)) >= 1)
+ y = y - int(rowspace * (rguess**2 -
+ real(y-numrow/2)**2)**.5/rguess)
+ else
+ y = y - 1
+ }
+
+ # Find the limb points for the upper half of the image.
+
+ # Restore the pointers to the limb at disk center.
+ leftx = leftsave
+ rightx = rightsave
+ yesno = NO
+ y = numrow/2+rowspace
+
+ while (y <= numrow) {
+ # Read this line in from the image.
+ lpg = imgl2s (imageptr, y)
+
+ # Find its limb points.
+ iferr (call florr (Mems[lpg], numpix, pointbuf, npoints, numrow,
+ y, leftx, rightx, threshold, yesno, rguess, halfwidth))
+ call eprintf ("Error in florr.\n")
+
+ # If we couldn't find any limb points then it's time to go.
+ if (yesno == NO)
+ break
+
+ # If we are beyond the limb vertically then it's time to go.
+ if (abs(y-numrow/2) > rguess)
+ break
+
+ # If the calculated rowspacing gets less than 1, just set it to 1.
+ if ((int(rowspace * (rguess**2 -
+ real(y-numrow/2)**2)**.5/rguess)) >= 1) {
+ y = y + int(rowspace * (rguess**2 -
+ real(y-numrow/2)**2)**.5/rguess)
+ } else
+ y = y + 1
+ }
+end
+
+
+# FLOCR -- Find Limbpoints On Center Row. Since this is the first row
+# to be searched, we have no idea of approximately where the limb points
+# will be found in the row as we have in florr. We search from the endpoints
+# of the row inward until the squibby brightness crosses the threshold.
+
+procedure flocr (array, numpix, pointbuf, npoints, numrow, leftx, rightx,
+ threshold, yesno)
+
+short array[numpix] # line of image
+int pointbuf[SZ_VTPBUF,2] # limb point storage array
+int numpix # number of pixels in line
+int npoints # number of limb points
+int numrow # which row this is in image
+int leftx # return left boundary position here
+int rightx # return right boundary position here
+int threshold # squibby brightness limb threshold
+int yesno # return yes if we found the limb
+
+int i, j, foundi, foundj
+
+begin
+ # Start at beginning and end of array and work in.
+ i = 1
+ j = numpix
+
+ # Flags that indicate when a limbpoint has been found.
+ foundi = 0
+ foundj = 0
+
+ while (i <= j) {
+ if (foundi == 0) {
+ if (and(int(array[i]), 17B) >= threshold) {
+ foundi = 1
+ npoints = npoints + 1
+ pointbuf[npoints,1] = i
+ pointbuf[npoints,2] = numrow/2
+ leftx = i
+ }
+ if (i == j) {
+ yesno = NO
+ return
+ }
+ }
+
+ if (foundj == 0) {
+ if (and(int(array[j]), 17B) >= threshold) {
+ foundj = 1
+ npoints = npoints + 1
+ pointbuf[npoints,1] = j
+ pointbuf[npoints,2] = numrow/2
+ rightx = j
+ }
+ }
+ if ((foundi == 1) && (foundj == 1))
+ break
+ i = i + 1
+ j = j - 1
+ }
+end
+
+
+# FLORR -- Find Limbpoints On Random Row. Since we know the approximate
+# positions of the limbpoints based on their positions on the adjacent
+# row, we can restrict the range of x positions to be searched to those
+# within a certain distance of those positions. These ranges we will
+# call windows. Each window is checked for validity before it is
+# searched for the limbpoints, if invalid a correct window is found.
+
+procedure florr (array, numpix, pointbuf, npoints, numrow, y, leftx, rightx,
+ threshold, yesno, rguess, halfwidth)
+
+short array[numpix] # line of image
+int pointbuf[SZ_VTPBUF,2] # limb point storage array
+int numpix # number of pixels in line
+int npoints # number of limb points
+int numrow # which row this is in image
+int leftx # return left boundary position here
+int rightx # return right boundary position here
+int threshold # squibby brightness limb threshold
+int yesno # return yes if we found the limb
+int halfwidth # halfwidth of limb search window
+real rguess # radius for sun guess
+
+int i, j, y
+
+begin
+ # Windows are leftx plus or minus halfwidth and rightx plus or
+ # minus halfwidth. Before searching windows, check them for
+ # validity and call newwindow if necessary.
+
+ # Checking for validity means the endpoint we expect to be outside
+ # the limb should have a squibby brightness below the threshold,
+ # and the endpoint inside the limb should have a squibby brightness
+ # at or above the threshold.
+
+ # if invalid...
+ if ((and(int(array[max(1,(leftx-halfwidth))]),17B) >= threshold) ||
+ (and(int(array[leftx+halfwidth]),17B) < threshold)) {
+
+ # if we are getting too far from the center (outside limb)
+ # then return flag for no limbpoints.
+
+ if (abs(y-numrow/2) > int(rguess)) {
+ yesno = NO
+ return
+ }
+
+ # Otherwise calculate a new leftx for this row.
+ leftx = -((int(rguess**2) - (y-numrow/2)**2)**.5) + numrow/2
+ }
+
+ # If we now have a valid window...
+ if ((and(int(array[max(1,(leftx-halfwidth))]),17B) < threshold) &&
+ (and(int(array[leftx+halfwidth]),17B) >= threshold)) {
+
+ # Search window for limb point.
+ do i = max(1,(leftx-halfwidth)), leftx+halfwidth {
+
+ # When we find it add it to the limbpoints array and
+ # break out of the do loop
+
+ if (and(int(array[i]), 17B) >= threshold) {
+
+ # Set the 'we found it' flag.
+ yesno = YES
+
+ npoints = npoints + 1
+ pointbuf[npoints,1] = i
+ pointbuf[npoints,2] = y
+ leftx = i
+ break
+ }
+ }
+ }
+
+ # Same stuff for the right hand window.
+ if ((and(int(array[min(numpix,(rightx+halfwidth))]),17B) >=
+ threshold) || (and(int(array[rightx-halfwidth]),17B) < threshold)) {
+ if (abs(y-numrow/2) > int(rguess)) {
+ yesno = NO
+ return
+ }
+ rightx = (int(rguess**2) - (y-numrow/2)**2)**.5 + numrow/2
+ }
+
+ if ((and(int(array[min(numpix,(rightx+halfwidth))]),17B) < threshold) &&
+ (and(int(array[rightx-halfwidth]),17B) >= threshold)) {
+ do j = min(numpix,(rightx+halfwidth)), rightx-halfwidth, -1 {
+ if (and(int(array[j]), 17B) >= threshold) {
+ yesno = YES
+ npoints = npoints + 1
+ pointbuf[npoints,1] = j
+ pointbuf[npoints,2] = y
+ rightx = j
+ break
+ }
+ }
+ }
+end
+
+
+# EFIT - Find the best fitting ellipse to the limb points. We iterate
+# 10 times; this seems to converge very well.
+# Algorithm due to Jack Harvey.
+
+procedure efit (pointbuf, npoints, xzero, yzero, azero, bzero, rejects,
+ verbose)
+
+int pointbuf[SZ_VTPBUF,2] # buffer containing limb points
+int npoints # number of limb points
+real xzero, yzero, azero, bzero # returned ellipse parameters
+int rejects # number of points rejected
+bool verbose # verbose flag
+
+int i, j, ij, n
+real xcenter, ycenter, a, b, a2, b2, a3, b3
+real z[6,6]
+real x1, y1, x2, y2, q[5], fn, sq
+real rejectcoeff
+
+real clgetr()
+
+begin
+ # Get the least squares rejection coefficient.
+ rejectcoeff = clgetr("rejectcoeff")
+ xcenter = xzero
+ ycenter = yzero
+ a = azero
+ b = azero
+
+ do ij = 1, 10 {
+ a2 = a**2
+ a3 = a2 * a
+ b2 = b**2
+ b3 = b2 * b
+ sq = 0.
+
+ do i = 1, 6
+ do j = 1, 6
+ z[i,j] = 0
+
+ fn = 0.
+ rejects = 0
+
+ do n = 1, npoints {
+ x1 = real(pointbuf[n,1]) - xcenter
+ y1 = real(pointbuf[n,2]) - ycenter
+ x2 = x1**2
+ y2 = y1**2
+ q[1] = x1/a2
+ q[2] = y1/b2
+ q[3] = -x2/a3
+ q[4] = -y2/b3
+ q[5] = .5 * (1. - x2/a2 - y2/b2)
+
+ # Reject a point if it is too far from the approximate ellipse.
+ if (abs(q[5]) >= rejectcoeff) {
+ rejects = rejects + 1
+ next
+ }
+
+ sq = sq + q[5]
+
+ do i = 1, 5
+ do j = i, 5
+ z[i,j+1] = z[i,j+1] + q[i] * q[j]
+
+ fn = fn + 1.
+ }
+
+ sq = sq/fn
+ call flush(STDOUT)
+ call lstsq (z, 6, fn)
+ if (z[5,3] > 3.)
+ z[5,3] = 3.
+ if (z[5,3] < -3.)
+ z[5,3] = -3.
+ if (z[5,4] > 3.)
+ z[5,4] = 3.
+ if (z[5,4] < -3.)
+ z[5,4] = -3.
+ if (z[5,1] > 10.)
+ z[5,1] = 10.
+ if (z[5,1] < -10.)
+ z[5,1] = -10.
+ if (z[5,2] > 10.)
+ z[5,2] = 10.
+ if (z[5,2] < -10.)
+ z[5,2] = -10.
+ a = a + z[5,3]
+ b = b + z[5,4]
+ xcenter = xcenter - z[5,1]
+ ycenter = ycenter - z[5,2]
+
+ if (verbose) {
+ call printf ("x = %f, y = %f, a = %f, b = %f, sq = %13.10f\n")
+ call pargr (xcenter)
+ call pargr (ycenter)
+ call pargr (a)
+ call pargr (b)
+ call pargr (sq)
+ call flush (STDOUT)
+ }
+ }
+
+ if (verbose) {
+ call printf ("\nCoordinates of center are x = %f, y = %f\n")
+ call pargr(xcenter)
+ call pargr(ycenter)
+ call printf ("xsemidiameter = %f, ysemidiameter = %f\n")
+ call pargr(a)
+ call pargr(b)
+ call flush (STDOUT)
+ }
+
+ xzero = xcenter
+ yzero = ycenter
+ azero = a
+ bzero = b
+end
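
Both limb finders above test whether a pixel is on the disk by comparing its squibby brightness, the low four bits of the pixel value (and(..., 17B)), against the threshold, then walk inward from the row ends (or within a window around the previous row's crossings). A simplified Python sketch of the per-row test, scanning a whole row rather than windows:

    def limb_points_on_row(row, threshold=4):
        """Return (left, right) limb crossings on one row, or None.

        A pixel is on the disk when its squibby brightness (pixel & 0o17)
        reaches the threshold -- the same test flocr/florr apply.
        """
        on_disk = [(p & 0o17) >= threshold for p in row]
        if not any(on_disk):
            return None
        left = on_disk.index(True)
        right = len(on_disk) - 1 - on_disk[::-1].index(True)
        return left, right

    # limb_points_on_row([0, 1, 6, 7, 7, 5, 1, 0]) -> (2, 5)
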
diff --git a/noao/imred/vtel/readheader.x b/noao/imred/vtel/readheader.x
new file mode 100644
index 00000000..85fb6f66
--- /dev/null
+++ b/noao/imred/vtel/readheader.x
@@ -0,0 +1,59 @@
+include <mach.h>
+include <fset.h>
+include "vt.h"
+
+# READHEADER -- Read header info from the input.
+
+int procedure readheader(inputfd, hbuf, selfbuf)
+
+int inputfd # input file descriptor
+pointer hbuf # header data input buffer pointer (short, SZ_VTHDR)
+bool selfbuf # flag to tell if we should do our own buffering
+
+int numchars
+pointer sp, tempbuf
+int read()
+errchk read
+
+begin
+ call smark (sp)
+ call salloc (tempbuf, 100, TY_SHORT)
+
+ # If we are reading from tape and buffering for ourselves then
+ # do a large read and see how many chars we get. If too few or
+ # too many give an error. Otherwise just read the correct number
+ # of chars.
+
+ if (selfbuf) {
+ iferr (numchars = read (inputfd, Mems[tempbuf],
+ 10000*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ call printf ("Error reading header.\n")
+ numchars = read (inputfd, Mems[tempbuf],
+ SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ }
+ if (numchars < 10 || numchars >= 100) {
+ call error (0, "error reading header")
+ return (numchars)
+ }
+ call amovs (Mems[tempbuf], Mems[hbuf], SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ } else {
+ iferr (numchars = read (inputfd, Mems[hbuf],
+ SZ_VTHDR*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ call printf ("Error reading header.\n")
+ numchars = read (inputfd, Mems[tempbuf],
+ SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ }
+ if (numchars < SZ_VTHDR*SZB_SHORT/SZB_CHAR) {
+ call error (0, "eof encountered when reading header")
+ return (0)
+ }
+ }
+
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (Mems[hbuf], 1, Mems[hbuf], 1, SZ_VTHDR*SZB_SHORT)
+ call sfree (sp)
+
+ return (SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+end
diff --git a/noao/imred/vtel/readss1.x b/noao/imred/vtel/readss1.x
new file mode 100644
index 00000000..2aea6d51
--- /dev/null
+++ b/noao/imred/vtel/readss1.x
@@ -0,0 +1,163 @@
+include <mach.h>
+include <imhdr.h>
+include <fset.h>
+include "vt.h"
+
+define WDSBRSTR 50
+
+# READSS1 -- Read a type 1 sector scan from tape and format it into 3 IRAF
+# images. Type one sector scans consist of three images packed into 32 bits
+# per pixel. The three images are (1) velocity (12 bits), (2) select (12
+# bits), and (3) continuum intensity (8 bits). The images are only 256
+# pixels high, as opposed to 512 pixels high for the other scans.
+
+procedure readss1 (inputfd, filenumber, brief, select, bright, velocity, hs)
+
+int inputfd # file descriptor for input (usually tape)
+int filenumber # file number on tape
+bool brief # short output file names
+bool select # flag to make select image
+bool bright # flag to make bright image
+bool velocity # flag to make velocity image
+int hs # header data structure pointer
+
+char velimage[SZ_FNAME] # Velocity image
+char selimage[SZ_FNAME] # Select image
+char britimage[SZ_FNAME] # Brightness image
+short u[SWTH_HIGH], dat
+int date, hour, minute, seconds, i, j, num, lrs
+pointer velim, selim, britim, velsrp, selsrp, britsrp
+
+int read()
+pointer immap(), impl2s()
+errchk immap, impl2s
+
+begin
+ # Calculate the time. Assemble the output image names.
+ hour = int(VT_HTIME(hs)/3600)
+ minute = int((VT_HTIME(hs) - hour * 3600)/60)
+ seconds = int(VT_HTIME(hs) - hour * 3600 - minute * 60)
+ if (brief) {
+ call sprintf (velimage[1], SZ_FNAME, "v%03d")
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%03d")
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%03d")
+ call pargi (filenumber)
+ } else {
+ call sprintf (velimage[1], SZ_FNAME, "v%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ }
+ if (select) {
+ selim = immap (selimage, NEW_IMAGE, 0)
+ IM_NDIM(selim) = 2
+ IM_LEN(selim,1) = SWTH_HIGH/2
+ IM_LEN(selim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(selim) = TY_SHORT
+ call imaddi (selim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (selim, "obs_date", date )
+ call imaddi (selim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (selim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (selim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (selim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (selim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (selim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (bright) {
+ britim = immap (britimage, NEW_IMAGE, 0)
+ IM_NDIM(britim) = 2
+ IM_LEN(britim,1) = SWTH_HIGH/2
+ IM_LEN(britim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(britim) = TY_SHORT
+ call imaddi (britim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (britim, "obs_date", date )
+ call imaddi (britim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (britim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (britim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (britim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (britim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (britim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (velocity) {
+ velim = immap (velimage, NEW_IMAGE, 0)
+ IM_NDIM(velim) = 2
+ IM_LEN(velim,1) = SWTH_HIGH/2
+ IM_LEN(velim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(velim) = TY_SHORT
+ call imaddi (velim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (velim, "obs_date", date )
+ call imaddi (velim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (velim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (velim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (velim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (velim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (velim, "rep_time", VT_HREPTIME(hs))
+ }
+
+ do j = 1, VT_HNUMCOLS(hs) {
+ if (select)
+ selsrp = impl2s (selim, j)
+ if (bright)
+ britsrp = impl2s (britim, j)
+ if (velocity)
+ velsrp = impl2s (velim, j)
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+
+ # Unpack the data into the three arrays.
+ do i = 257, 512 {
+ if (select) {
+ dat = u[i]/16
+ if (u[i] < 0)
+ dat = dat - 1
+ Mems[selsrp+i-257] = dat
+ }
+ if (bright)
+ Mems[britsrp+i-257] = and(int(u[i]),17B)*16
+ }
+
+ do i = 1, 256 {
+ if (velocity) {
+ dat = u[i]/16
+ if (u[i] < 0)
+ dat = dat - 1
+ Mems[velsrp+i-1] = dat
+ }
+ if (bright)
+ Mems[britsrp+i-1] = Mems[britsrp+i-1]+and(int(u[i]),17B)
+ }
+ }
+
+ # Unmap images.
+ if (select)
+ call imunmap (selim)
+ if (velocity)
+ call imunmap (velim)
+ if (bright)
+ call imunmap (britim)
+end
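Each output pixel of a type 1 scan is carried by a pair of 16-bit words: the word from the first half of the record holds the signed velocity in its upper 12 bits, the word from the second half holds the signed select value in its upper 12 bits, and the two low nibbles together form the 8-bit brightness (select word high, velocity word low). A small Python sketch of that unpacking for one word pair, mirroring the divide-by-16 arithmetic above (purely illustrative; octal 17B in the SPP code is 0xF here):

    def signed_upper12(w):
        # Mirror the SPP code: truncating divide by 16, then one extra
        # step down for negative words.
        d = -((-w) // 16) if w < 0 else w // 16
        if w < 0:
            d -= 1
        return d

    def unpack_type1(vel_word, sel_word):
        velocity = signed_upper12(vel_word)
        select = signed_upper12(sel_word)
        # High nibble from the select word, low nibble from the velocity word.
        bright = (sel_word & 0xF) * 16 + (vel_word & 0xF)
        return velocity, select, bright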
diff --git a/noao/imred/vtel/readss2.x b/noao/imred/vtel/readss2.x
new file mode 100644
index 00000000..71ae8758
--- /dev/null
+++ b/noao/imred/vtel/readss2.x
@@ -0,0 +1,174 @@
+include <mach.h>
+include <imhdr.h>
+include <fset.h>
+include "vt.h"
+
+define WDSBRSTR 50
+
+# READSS2 -- Read a type 2 sector scan from tape and format it into 3 IRAF
+# images. Type two sector scans consist of three images with 16 bits per
+# pixel. The three images are (1) velocity (16 bits), (2) select (16 bits),
+# and (3) brightness (16 bits). The images are 512 pixels high.
+
+procedure readss2 (inputfd, filenumber, brief, select, bright, velocity, hs)
+
+int inputfd # file descriptor for input (usually tape)
+int filenumber # file number on tape
+bool brief # short output file names
+bool select # flag to make select image
+bool bright # flag to make bright image
+bool velocity # flag to make velocity image
+int hs # header data structure pointer
+
+char velimage[SZ_FNAME] # velocity image
+char selimage[SZ_FNAME] # select image
+char britimage[SZ_FNAME] # brightness image
+short u[SWTH_HIGH]
+int date, hour, minute, seconds, i, j, num, lrs
+pointer velim, selim, britim, velsrp, selsrp, britsrp
+
+int read()
+pointer immap(), impl2s()
+errchk immap, impl2s
+
+begin
+ # Calculate the time. Assemble the output image names.
+ hour = int(VT_HTIME(hs)/3600)
+ minute = int((VT_HTIME(hs) - hour * 3600)/60)
+ seconds = int(VT_HTIME(hs) - hour * 3600 - minute * 60)
+ if (brief) {
+ call sprintf (velimage[1], SZ_FNAME, "v%03d")
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%03d")
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%03d")
+ call pargi (filenumber)
+ } else {
+ call sprintf (velimage[1], SZ_FNAME, "v%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ }
+
+ if (select) {
+ selim = immap (selimage, NEW_IMAGE, 0)
+ IM_NDIM(selim) = 2
+ IM_LEN(selim,1) = SWTH_HIGH
+ IM_LEN(selim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(selim) = TY_SHORT
+ call imaddi (selim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (selim, "obs_date", date )
+ call imaddi (selim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (selim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (selim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (selim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (selim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (selim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (bright) {
+ britim = immap (britimage, NEW_IMAGE, 0)
+ IM_NDIM(britim) = 2
+ IM_LEN(britim,1) = SWTH_HIGH
+ IM_LEN(britim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(britim) = TY_SHORT
+ call imaddi (britim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (britim, "obs_date", date )
+ call imaddi (britim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (britim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (britim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (britim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (britim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (britim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (velocity) {
+ velim = immap (velimage, NEW_IMAGE, 0)
+ IM_NDIM(velim) = 2
+ IM_LEN(velim,1) = SWTH_HIGH
+ IM_LEN(velim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(velim) = TY_SHORT
+ call imaddi (velim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (velim, "obs_date", date )
+ call imaddi (velim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (velim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (velim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (velim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (velim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (velim, "rep_time", VT_HREPTIME(hs))
+ }
+
+ do j = 1, VT_HNUMCOLS(hs) {
+ if (select)
+ selsrp = impl2s (selim, j)
+ if (bright)
+ britsrp = impl2s (britim, j)
+ if (velocity)
+ velsrp = impl2s (velim, j)
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+
+ if (velocity)
+ do i = 1, 512
+ Mems[velsrp+i-1] = u[i]
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+
+ if (select)
+ do i = 1, 512
+ Mems[selsrp+i-1] = u[i]
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+
+ if (bright)
+ do i = 1, 512
+ Mems[britsrp+i-1] = u[i]
+ }
+
+ # Unmap images.
+ if (select)
+ call imunmap (selim)
+ if (velocity)
+ call imunmap (velim)
+ if (bright)
+ call imunmap (britim)
+end
diff --git a/noao/imred/vtel/readss3.x b/noao/imred/vtel/readss3.x
new file mode 100644
index 00000000..f8721ae0
--- /dev/null
+++ b/noao/imred/vtel/readss3.x
@@ -0,0 +1,171 @@
+include <mach.h>
+include <imhdr.h>
+include <fset.h>
+include "vt.h"
+
+define WDSBRSTR 50
+
+# READSS3 -- Read a type 3 sector scan from tape and format it into 3 IRAF
+# images. Type three sector scans consist of three images packed into 32
+# bits per pixel. The three images are (1) velocity (12 bits), (2) select
+# (12 bits), and (3) continuum intensity (8 bits).
+
+procedure readss3 (inputfd, filenumber, brief, select, bright, velocity, hs)
+
+int inputfd # file descriptor for input (usually tape)
+int filenumber # file number on tape
+bool brief # short output file names
+bool select # flag to make select image
+bool bright # flag to make bright image
+bool velocity # flag to make velocity image
+int hs # header data structure pointer
+
+char velimage[SZ_FNAME] # Velocity image
+char selimage[SZ_FNAME] # Select image
+char britimage[SZ_FNAME] # Brightness image
+bool zero
+short t[SWTH_HIGH], u[SWTH_HIGH], k
+int date, hour, minute, seconds, i, j, num, lrs
+pointer velim, selim, britim, velsrp, selsrp, britsrp
+
+define redo_ 10
+
+int read()
+short shifts()
+pointer immap(), impl2s()
+errchk immap, impl2s
+
+begin
+ k = -4
+
+ # Calculate the time. Assemble the output image names.
+ hour = int(VT_HTIME(hs)/3600)
+ minute = int((VT_HTIME(hs) - hour * 3600)/60)
+ seconds = int(VT_HTIME(hs) - hour * 3600 - minute * 60)
+ if (brief) {
+ call sprintf (velimage[1], SZ_FNAME, "v%03d")
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%03d")
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%03d")
+ call pargi (filenumber)
+ } else {
+ call sprintf (velimage[1], SZ_FNAME, "v%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (selimage[1], SZ_FNAME, "s%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ call sprintf (britimage[1], SZ_FNAME, "b%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ }
+ if (select) {
+ selim = immap (selimage, NEW_IMAGE, 0)
+ IM_NDIM(selim) = 2
+ IM_LEN(selim,1) = SWTH_HIGH
+ IM_LEN(selim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(selim) = TY_SHORT
+ call imaddi (selim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (selim, "obs_date", date )
+ call imaddi (selim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (selim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (selim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (selim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (selim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (selim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (bright) {
+ britim = immap (britimage, NEW_IMAGE, 0)
+ IM_NDIM(britim) = 2
+ IM_LEN(britim,1) = SWTH_HIGH
+ IM_LEN(britim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(britim) = TY_SHORT
+ call imaddi (britim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (britim, "obs_date", date )
+ call imaddi (britim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (britim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (britim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (britim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (britim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (britim, "rep_time", VT_HREPTIME(hs))
+ }
+ if (velocity) {
+ velim = immap (velimage, NEW_IMAGE, 0)
+ IM_NDIM(velim) = 2
+ IM_LEN(velim,1) = SWTH_HIGH
+ IM_LEN(velim,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(velim) = TY_SHORT
+ call imaddi (velim, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (velim, "obs_date", date )
+ call imaddi (velim, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (velim, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (velim, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (velim, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (velim, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (velim, "rep_time", VT_HREPTIME(hs))
+ }
+
+ do j = 1, VT_HNUMCOLS(hs) {
+redo_ if (select)
+ selsrp = impl2s (selim, j)
+ if (bright)
+ britsrp = impl2s (britim, j)
+ if (velocity)
+ velsrp = impl2s (velim, j)
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+ iferr (num = read (inputfd, t, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, t, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (t, 1, t, 1, SWTH_HIGH * SZB_SHORT)
+
+ zero = true
+ do i = 1, SWTH_HIGH {
+ if (select)
+ Mems[selsrp+i-1] = shifts(t[i], k)
+ if (velocity)
+ Mems[velsrp+i-1] = shifts(u[i], k)
+ if (bright)
+ Mems[britsrp+i-1] = and(int(t[i]),17B)*16+and(int(u[i]),17B)
+ if (t[i] != 0)
+ zero = false
+ }
+ if (zero) {
+ call eprintf ("READSS3: found a zero line in image, skip.\n")
+ goto redo_
+ }
+ }
+
+ # Unmap images.
+ if (select)
+ call imunmap (selim)
+ if (velocity)
+ call imunmap (velim)
+ if (bright)
+ call imunmap (britim)
+end
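Type 3 unpacking follows the same nibble layout but reads velocity and select as two parallel full-height records per image line (each shifted down four bits via shifts()), and a line whose select record is all zeros is treated as a tape glitch and re-read via the redo_ label. A short Python sketch of the brightness recombination and the zero-line test, assuming plain lists of 16-bit words (illustrative only):

    def combine_brightness(sel_words, vel_words):
        # High nibble from the select word, low nibble from the velocity word.
        return [(s & 0xF) * 16 + (v & 0xF) for s, v in zip(sel_words, vel_words)]

    def is_zero_line(sel_words):
        # An all-zero select record marks a bad line that should be re-read.
        return all(w == 0 for w in sel_words)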
diff --git a/noao/imred/vtel/readss4.x b/noao/imred/vtel/readss4.x
new file mode 100644
index 00000000..2ab3199d
--- /dev/null
+++ b/noao/imred/vtel/readss4.x
@@ -0,0 +1,85 @@
+include <mach.h>
+include <imhdr.h>
+include <fset.h>
+include "vt.h"
+
+define WDSBRSTR 50
+
+# READSS4 -- Read data file from tape or disk and format the data into
+# an IRAF image. This is for type 4 sector scans.
+
+procedure readss4 (inputfd, filenumber, brief, select, bright, velocity, hs)
+
+int inputfd # file descriptor for input (usually tape)
+int filenumber # file number on tape
+bool brief # short output file names
+bool select # flag to make select image
+bool bright # flag to make bright image
+bool velocity # flag to make velocity image
+int hs # header data structure pointer
+
+pointer im, srp
+char imagefile[SZ_FNAME]
+int date, hour, minute, seconds, i, j, num, lrs
+short u[SWTH_HIGH]
+
+int read()
+pointer immap(), impl2s()
+errchk immap, impl2s
+
+begin
+ # Calculate the time. Assemble the output image name.
+ hour = int(VT_HTIME(hs)/3600)
+ minute = int((VT_HTIME(hs) - hour * 3600)/60)
+ seconds = int(VT_HTIME(hs) - hour * 3600 - minute * 60)
+ if (brief) {
+ call sprintf (imagefile[1], SZ_FNAME, "s%03d")
+ call pargi (filenumber)
+ } else {
+ call sprintf (imagefile[1], SZ_FNAME, "s%02d_%02d%02d_%03d")
+ call pargi (VT_HDAY(hs)) # day of month
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (filenumber)
+ }
+
+ if (select) {
+ im = immap (imagefile, NEW_IMAGE, 0)
+ IM_NDIM(im) = 2
+ IM_LEN(im,1) = SWTH_HIGH
+ IM_LEN(im,2) = VT_HNUMCOLS(hs)
+ IM_PIXTYPE(im) = TY_SHORT
+ call imaddi (im, "obs_time", VT_HTIME(hs))
+ date = VT_HMONTH(hs) * 10000 + VT_HDAY(hs) * 100 + VT_HYEAR(hs)
+ call imaddi (im, "obs_date", date )
+ call imaddi (im, "wv_lngth", VT_HWVLNGTH(hs))
+ call imaddi (im, "obs_type", VT_HOBSTYPE(hs))
+ call imaddi (im, "av_intns", VT_HAVINTENS(hs))
+ call imaddi (im, "num_cols", VT_HNUMCOLS(hs))
+ call imaddi (im, "intg/pix", VT_HINTGPIX(hs))
+ call imaddi (im, "rep_time", VT_HREPTIME(hs))
+ }
+
+ do j = 1, VT_HNUMCOLS(hs) {
+ if (select)
+ srp = impl2s (im, j)
+
+ iferr (num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE, lrs*SZB_SHORT/SZB_CHAR)
+ call eprintf ("Error on tape read.\n")
+ num = read (inputfd, u, SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ }
+ lrs = num
+ if (num < SWTH_HIGH*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading file")
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (u, 1, u, 1, SWTH_HIGH * SZB_SHORT)
+
+ if (select)
+ do i = 1, 512
+ Mems[srp+i-1] = u[i]
+ }
+
+ if (select)
+ call imunmap (im)
+end
diff --git a/noao/imred/vtel/readsubswath.x b/noao/imred/vtel/readsubswath.x
new file mode 100644
index 00000000..9c15bb44
--- /dev/null
+++ b/noao/imred/vtel/readsubswath.x
@@ -0,0 +1,91 @@
+include <mach.h>
+include <fset.h>
+include "vt.h"
+
+define SZ_VTRECFD 5120 # length, in chars, of full disk recs
+
+# READSUBSWATH -- Read data from file whose logical unit is inputfd.
+# Swap the bytes in each data word.
+
+procedure readsubswath (inputfd, selfbuf, databuf, buflength, bp)
+
+int inputfd # input file descriptor
+int buflength # length of data buffer
+bool selfbuf # self buffering flag
+short databuf[buflength] # data buffer
+pointer bp # buffer pointer structure pointer
+
+int num, bleft, last_recsize
+int read()
+errchk read
+
+begin
+ # If we are doing our own buffering, keep track of the number
+ # of records in each file, else let mtio do it.
+
+ last_recsize = 0
+ if (selfbuf) { # do our own buffering
+
+ # If there is enough data still in the buffer, just copy data
+ # to the output buffer and move the pointer; otherwise, read
+ # the next tape record.
+
+ if ((VT_BUFBOT(bp) - VT_BP(bp)) >= buflength) {
+ # Copy the data into the data buffer, move the pointer.
+ call amovs (Mems[VT_BP(bp)], databuf, buflength)
+ VT_BP(bp) = VT_BP(bp) + buflength
+
+ } else {
+ # Copy leftover data from the bottom of the input buffer
+ # into the top of the input buffer, reset the flags.
+
+ bleft = VT_BUFBOT(bp) - VT_BP(bp)
+ call amovs (Mems[VT_BP(bp)], Mems[VT_BUFP(bp)], bleft)
+ VT_BP(bp) = VT_BUFP(bp) + bleft
+
+ # Read in another tape record.
+ # Check the number of chars read. If this number is EOF or
+ # too short, error. If it is too long, truncate to correct
+ # length. This is done because some data tapes are written
+ # in a weird way and have some noise chars tacked on the end
+ # of each tape record.
+
+ iferr (num = read (inputfd, Mems[VT_BP(bp)],
+ 10000*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE,
+ SZ_VTRECFD*SZB_SHORT/SZB_CHAR)
+ call printf ("Error reading subswath.\n")
+ num = read (inputfd, Mems[VT_BP(bp)],
+ SZ_VTRECFD*SZB_SHORT/SZB_CHAR)
+ }
+ if (num == EOF)
+ call error (0, "EOF encountered on tape read")
+ else if (num < SZ_VTRECFD*SZB_SHORT/SZB_CHAR)
+ call error (0, "error on tape read, record too short")
+ else if (num >= SZ_VTRECFD*SZB_SHORT/SZB_CHAR &&
+ num < (SZ_VTRECFD+300)*SZB_SHORT/SZB_CHAR)
+ num = SZ_VTRECFD*SZB_SHORT/SZB_CHAR
+ else
+ call error (0, "error on tape read, record too long")
+
+ # Update the pointers, move data into the data buffer.
+ VT_BUFBOT(bp) = VT_BP(bp) + num
+ call amovs (Mems[VT_BP(bp)], databuf, buflength)
+ VT_BP(bp) = VT_BP(bp) + buflength
+ }
+ } else { # Let the mtio do the buffering.
+ iferr (num = read (inputfd, databuf,
+ buflength*SZB_SHORT/SZB_CHAR)) {
+ call fseti (inputfd, F_VALIDATE,
+ last_recsize*SZB_SHORT/SZB_CHAR)
+ call printf ("Error on tape read.\n")
+ num = read (inputfd, databuf, buflength*SZB_SHORT/SZB_CHAR)
+ }
+ last_recsize = num
+ if (num < buflength*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading subswath")
+ }
+
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (databuf, 1, databuf, 1, buflength * SZB_SHORT)
+end
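The length check above tolerates tapes whose physical records carry a few hundred junk characters beyond the nominal full-disk record: anything in [nominal, nominal + 300) is silently truncated to the nominal size, while EOF, short records, and longer records are errors. A compact Python sketch of that validation rule (the constants follow the SZ_VTRECFD define above; the function itself is an illustration, not part of this code):

    NOMINAL = 5120   # SZ_VTRECFD
    SLACK = 300      # noise characters some tapes append to each record

    def validated_length(nchars):
        if nchars is None:                  # stand-in for EOF
            raise EOFError("EOF encountered on tape read")
        if nchars < NOMINAL:
            raise IOError("error on tape read, record too short")
        if nchars < NOMINAL + SLACK:
            return NOMINAL                  # truncate a slightly long record
        raise IOError("error on tape read, record too long")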
diff --git a/noao/imred/vtel/readvt.par b/noao/imred/vtel/readvt.par
new file mode 100644
index 00000000..5986a8c0
--- /dev/null
+++ b/noao/imred/vtel/readvt.par
@@ -0,0 +1,6 @@
+infile,s,q,,,,Input file descriptor
+outfile,s,q,,,,Output image file descriptor
+files,s,q,,,,Tape files to read
+verbose,b,h,no,,,Print out header data and give progress reports
+headeronly,b,h,no,,,Print out the header data and quit
+robust,b,h,no,,,Ignore wrong observation type in header
diff --git a/noao/imred/vtel/readvt.x b/noao/imred/vtel/readvt.x
new file mode 100644
index 00000000..27e34be3
--- /dev/null
+++ b/noao/imred/vtel/readvt.x
@@ -0,0 +1,347 @@
+include <mach.h>
+include <imhdr.h>
+include <fset.h>
+include "vt.h"
+
+define MAX_RANGES 100
+define VT_TBUF 15000
+
+# READVT -- Read data from tape or disk and format the data into an IRAF image.
+# Display header information to the user as a check if the 'verbose' flag is
+# set.
+
+procedure t_readvt()
+
+pointer infile # pointer to input filename(s)
+pointer outfile # pointer to output filename(s)
+bool verbose # verbose flag
+bool headeronly # if set, just print the header
+bool robust # if set, ignore wrong observation type
+pointer files # file list for multiple tape files
+
+int listin # list of input images
+int listout # list of output images
+bool selfbuf, rootflag
+int nfiles, filenumber, stat
+pointer bp, sp, tapename, dfilename, diskfile, root
+int filerange[2 * MAX_RANGES + 1]
+
+bool clgetb()
+int get_next_number(), mtneedfileno()
+int strlen(), decode_ranges()
+int fntopnb(), imtopenp(), clgfil(), imtgetim(), clplen(), imtlen()
+int mtfile()
+errchk vt_rfd
+
+begin
+ call smark (sp)
+ call salloc (infile, SZ_LINE, TY_CHAR)
+ call salloc (outfile, SZ_LINE, TY_CHAR)
+ call salloc (tapename, 2*SZ_LINE, TY_CHAR)
+ call salloc (dfilename, 2*SZ_LINE, TY_CHAR)
+ call salloc (diskfile, SZ_LINE, TY_CHAR)
+ call salloc (root, SZ_LINE, TY_CHAR)
+ call salloc (files, SZ_LINE, TY_CHAR)
+
+ call fseti (STDOUT, F_FLUSHNL, YES)
+
+ # Get parameters from the CL.
+ verbose = clgetb ("verbose")
+ headeronly = clgetb ("headeronly")
+ robust = clgetb ("robust")
+
+ call clgstr ("infile", Memc[infile], SZ_FNAME)
+
+ # Set up the buffer structure, we may need it.
+ call salloc (bp, VT_LENBSTRUCT, TY_STRUCT)
+ call salloc (VT_BUFP(bp), VT_TBUF, TY_SHORT)
+ VT_BP(bp) = VT_BUFP(bp)
+ VT_BUFBOT(bp) = VT_BUFP(bp)
+
+ if (mtfile (Memc[infile]) == NO) {
+ # This is not a tape file, expand as a list template.
+ listin = fntopnb (Memc[infile], 0)
+ rootflag = FALSE
+ filenumber = 1
+ if (!headeronly) {
+ listout = imtopenp ("outfile")
+
+ # Compare the lengths of the two lists. If equal, proceed,
+ # otherwise if the outlist is of length one, use it as a root
+ # name, otherwise error.
+
+ if (imtlen (listout) == 1) {
+ rootflag = TRUE
+ stat = imtgetim (listout, Memc[root], SZ_FNAME)
+ } else if (clplen (listin) != imtlen (listout)) {
+ call clpcls (listin)
+ call imtclose (listout)
+ call error (1, "Wrong number of elements in operand lists")
+ }
+ }
+
+ while (clgfil (listin, Memc[diskfile], SZ_FNAME) != EOF) {
+ if (!headeronly) {
+ if (!rootflag)
+ stat = imtgetim (listout, Memc[dfilename], SZ_FNAME)
+ else {
+ # Assemble an output filename from the root name.
+ call sprintf (Memc[dfilename], SZ_FNAME, "%s")
+ call pargstr (Memc[root])
+ call sprintf (Memc[dfilename+strlen(Memc[root])],
+ SZ_FNAME, "%03d")
+ call pargi (filenumber)
+ filenumber = filenumber + 1
+ }
+ }
+
+ # Of course, if the user is reading from disk, we can't
+ # check record sizes.
+
+ selfbuf = false
+ iferr (call vt_rfd (diskfile, dfilename,
+ selfbuf, verbose, headeronly, robust, bp)) {
+ call eprintf ("Error reading file %s\n")
+ call pargstr (Memc[infile])
+ }
+ }
+ call clpcls (listin)
+ if (!headeronly)
+ call imtclose (listout)
+
+ } else if (mtneedfileno(Memc[infile]) == NO) {
+
+ # This is a tape file and the user specified which file.
+ if (!headeronly)
+ call clgstr ("outfile", Memc[outfile], SZ_FNAME)
+ selfbuf = true
+ iferr (call vt_rfd (infile, outfile, selfbuf, verbose,
+ headeronly, robust, bp)) {
+ call eprintf ("Error reading file %s\n")
+ call pargstr (Memc[infile])
+ }
+
+ } else {
+
+ # This is a tape file and the user did not specify which file.
+ call clgstr ("files", Memc[files], SZ_LINE)
+ if (!headeronly)
+ call clgstr ("outfile", Memc[outfile], SZ_FNAME)
+
+ # Set up the file names, then do the read.
+ if (decode_ranges (Memc[files], filerange, MAX_RANGES,
+ nfiles) == ERR)
+ call error (0, "Illegal file number list.")
+
+ while (get_next_number (filerange, filenumber) != EOF) {
+ # Assemble the appropriate tape file name.
+ call mtfname (Memc[infile], filenumber, Memc[tapename],
+ SZ_FNAME)
+
+ # Assemble the appropriate disk file name.
+ if (!headeronly) {
+ call strcpy (Memc[outfile], Memc[dfilename], SZ_FNAME)
+ call sprintf (Memc[dfilename+strlen(Memc[outfile])],
+ SZ_FNAME, "%03d")
+ call pargi (filenumber)
+ }
+
+ selfbuf = TRUE
+ iferr (call vt_rfd (tapename, dfilename, selfbuf,
+ verbose, headeronly, robust, bp)) {
+ call eprintf ("Error reading file %s\n")
+ call pargstr (Memc[infile])
+ }
+ }
+ }
+
+ call sfree (sp)
+end
+
+
+# VT_RFD -- Do the actual read of a full disk gram.
+
+procedure vt_rfd (in, out, selfbuf, verbose, headeronly, robust, bp)
+
+pointer in # input file
+pointer out # output file
+bool selfbuf # do input buffering and correct for bad record lengths
+bool verbose # verbose flag
+bool headeronly # if set, just print the header
+bool robust # if set, ignore wrong observation type
+
+short one
+int date, numchars
+int subraster, x1, y1, inputfd
+pointer table, bp, im, srp, hs, sp, hbuf
+pointer immap(), imps2s()
+int mtopen(), readheader()
+errchk readheader, loadsubswath, immap, imps2s
+define exit_ 10
+
+begin
+ call smark (sp)
+ call salloc (hbuf, SZ_VTHDR, TY_SHORT)
+ call salloc (table, SZ_TABLE, TY_SHORT)
+ call salloc (hs, VT_LENHSTRUCT, TY_STRUCT)
+
+ if (verbose) {
+ call printf ("\nfile %s ")
+ call pargstr (Memc[in])
+ }
+
+ # Open input file.
+ inputfd = mtopen (Memc[in], READ_ONLY, 0)
+
+ # Read header.
+ iferr (numchars = readheader (inputfd, hbuf, selfbuf))
+ call error (0, "Error reading header information.")
+ call decodeheader (hbuf, hs, verbose)
+ if (verbose)
+ call printf ("\n")
+
+ # Check the observation type in the header. If this value is not
+ # zero (full disk) then write an error message, if the robust flag
+ # is set go ahead and read the file.
+
+ if (!robust) {
+ if (VT_HOBSTYPE[hs] != 0) {
+ call printf ("file %s is not a type zero scan (full disk)\n")
+ call pargstr (Memc[in])
+ call printf ("Use 'mscan' to read this type %d area scan\n")
+ call pargi (VT_HOBSTYPE[hs])
+ goto exit_ # close input file and exit
+ }
+ } else {
+ if (VT_HOBSTYPE[hs] != 0) {
+ call printf ("The header for file %s contains 'observation ")
+ call pargstr (Memc[in])
+ call printf ("type = %d'\n")
+ call pargi (VT_HOBSTYPE[hs])
+ call printf ("READVT expects the observation type ")
+ call printf ("to be zero.\n")
+ call printf ("This error will be ignored since the 'robust'")
+ call printf (" flag is set\n")
+ }
+ }
+
+ if (headeronly)
+ goto exit_ # close input file and exit
+
+ if (verbose) {
+ call printf ("\nwriting %s\n")
+ call pargstr (Memc[out])
+ }
+
+ # Open the output image. Set it up.
+ im = immap (Memc[out], NEW_IMAGE, 0)
+ IM_NDIM(im) = 2
+ IM_LEN(im,1) = DIM_VTFD
+ IM_LEN(im,2) = DIM_VTFD
+ IM_PIXTYPE(im) = TY_SHORT
+
+ # Set up the 8 header fields we need and store the information we
+ # obtained from the raw data image header.
+
+ call imaddi (im, "obs_time", VT_HTIME[hs])
+ date = VT_HMONTH[hs] * 10000 + VT_HDAY[hs] * 100 + VT_HYEAR[hs]
+
+ call imaddi (im, "obs_date", date )
+ call imaddi (im, "wv_lngth", VT_HWVLNGTH[hs])
+ call imaddi (im, "obs_type", VT_HOBSTYPE[hs])
+ call imaddi (im, "av_intns", VT_HAVINTENS[hs])
+ call imaddi (im, "num_cols", VT_HNUMCOLS[hs])
+ call imaddi (im, "intg/pix", VT_HINTGPIX[hs])
+ call imaddi (im, "rep_time", VT_HREPTIME[hs])
+
+ # Set up lookuptable.
+ one = 1
+ call amovks (one, Mems[table], SZ_TABLE)
+ call aclrs (Mems[table], HALF_DIF)
+ call aclrs (Mems[table + SWTHWID_14 + HALF_DIF], HALF_DIF)
+ call aclrs (Mems[table + SWTHWID_23 * 3], HALF_DIF)
+ call aclrs (Mems[table + SZ_TABLE - HALF_DIF], HALF_DIF)
+
+ # Now, fill the image with data.
+ do subraster = 1, NUM_SRSTR {
+
+ # Calculate position of bottom left corner of this subraster
+ x1 = ((NUM_SRSTR_X - 1) - mod((subraster - 1), NUM_SRSTR_X)) *
+ SRSTR_WID + 1
+ y1 = ((NUM_SRSTR_Y - 1) - ((subraster - mod((subraster - 1),
+ NUM_SRSTR_Y)) / NUM_SRSTR_Y)) * SWTH_HIGH + 1
+
+ # Get subraster.
+ srp = imps2s (im, x1, x1+(SRSTR_WID - 1), y1, y1+(SWTH_HIGH - 1))
+
+ # Load the subraster with data.
+ iferr (call loadsubraster (inputfd, Mems[srp], SRSTR_WID, SWTH_HIGH,
+ Mems[table], subraster, selfbuf, bp)) {
+ call eprintf ("Error in loadsubraster, subraster = %d\n")
+ call pargi (subraster)
+ break
+ }
+
+ if (verbose) {
+ call printf("%d%% ")
+ call pargi ((subraster*100)/NUM_SRSTR)
+ call flush (STDOUT)
+ }
+ }
+
+ if (verbose)
+ call printf ("\n")
+
+ # Unmap image and close input file.
+ call imunmap (im)
+exit_
+ call sfree (sp)
+ call close (inputfd)
+end
+
+
+# LOADSUBRASTER -- Get data from the input and load it into this
+# subraster; look in the table to see whether each subswath should be
+# filled with data or zeros.
+
+procedure loadsubraster (inputfd, array, nx, ny, table, subraster, selfbuf, bp)
+
+int inputfd # input file we are reading from
+short array[nx, ny] # array to put the data in
+int nx # x length of the array
+int ny # y length of the array
+short table[SZ_TABLE] # lookup table for data
+int subraster # subraster number we are loading
+bool selfbuf # buffering and record length checking?
+pointer bp # pointer to buffer pointer structure
+
+pointer sp, bufpointer
+int i, subswath, tableindex
+errchk readsubswath
+
+begin
+ call smark (sp)
+ call salloc (bufpointer, ny, TY_SHORT)
+
+ for (subswath = nx; subswath >= 1; subswath = subswath - 1) {
+ tableindex = (subraster - 1) * nx + ((nx + 1) - subswath)
+
+ if (table[tableindex] == IS_DATA) {
+ iferr (call readsubswath (inputfd, selfbuf, Mems[bufpointer],
+ ny, bp)) {
+
+ call eprintf ("Error in readsubswath, subswath = %d\n")
+ call pargi (subswath)
+ }
+
+ do i = ny, 1, -1
+ array[subswath,i] = Mems[bufpointer + ny - i]
+
+ } else {
+ do i = 1, ny
+ array[subswath,i] = 0
+ }
+ }
+
+ call sfree (sp)
+end
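The subraster loop in vt_rfd places each subraster by converting its 1-based number into grid column and row indices (both counted downward from the maximum), then scaling by the subraster width and swath height. A Python restatement of that index arithmetic, using hypothetical grid constants since NUM_SRSTR_X, NUM_SRSTR_Y, SRSTR_WID, and SWTH_HIGH are defined in vt.h and not shown in this diff:

    # Hypothetical stand-ins for the vt.h defines.
    NUM_SRSTR_X, NUM_SRSTR_Y = 4, 4
    SRSTR_WID, SWTH_HIGH = 512, 512

    def subraster_corner(subraster):
        # Mirrors the modular arithmetic used for x1 and y1 in vt_rfd.
        col = (NUM_SRSTR_X - 1) - (subraster - 1) % NUM_SRSTR_X
        row = (NUM_SRSTR_Y - 1) - (subraster - 1) // NUM_SRSTR_Y
        x1 = col * SRSTR_WID + 1
        y1 = row * SWTH_HIGH + 1
        return x1, y1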
diff --git a/noao/imred/vtel/rmap.par b/noao/imred/vtel/rmap.par
new file mode 100644
index 00000000..b8c6efd0
--- /dev/null
+++ b/noao/imred/vtel/rmap.par
@@ -0,0 +1,5 @@
+inputimage,s,q,,,,Input image
+outputimage,s,q,,,,Output data image
+outweight,s,q,,,,Weights image
+outabs,s,q,,,,Absolute value image
+histoname,s,q,,,,Histogram name
diff --git a/noao/imred/vtel/rmap.x b/noao/imred/vtel/rmap.x
new file mode 100644
index 00000000..313b03db
--- /dev/null
+++ b/noao/imred/vtel/rmap.x
@@ -0,0 +1,563 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+include "numeric.h"
+
+define LEN_HISTO 1025
+define SPACING 1
+
+# RMAP -- Project a full disk solar image [2048x2048] into a square
+# image [180x180] such that lines of latitude and longitude are
+# perpendicular straight lines.
+
+procedure t_rmap()
+
+char inputimage[SZ_FNAME] # input image
+char outputimage[SZ_FNAME] # output data image
+char outweight[SZ_FNAME] # output weight image
+char outabs[SZ_FNAME] # output absolute value image
+char histoname[SZ_FNAME] # output histogram name
+
+real bzero # latitude of sub-earth point
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+pointer inputim, outputim, outw, outa, sp
+pointer inim_subras_ptr, outim_subras_ptr
+pointer outwei_subras_ptr, outav_subras_ptr
+int outputrow, wvlngth
+int inim_subras_bottom
+double meanf, meanaf, zcm, muzero
+int numpix
+bool skip, helium
+real tempr
+
+real imgetr()
+int imgeti()
+pointer immap()
+pointer imgs2s(), imps2s(), imps2i()
+double rmap_mode()
+errchk immap, imgs2s, imps2s, imps2i, checkimmem, rowmap
+
+begin
+ # Get parameters from the cl.
+
+ # Image names.
+ call clgstr ("inputimage", inputimage, SZ_FNAME)
+ call clgstr ("outputimage", outputimage, SZ_FNAME)
+ call clgstr ("outweight", outweight, SZ_FNAME)
+ call clgstr ("outabs", outabs, SZ_FNAME)
+ call clgstr ("histoname", histoname, SZ_FNAME)
+
+ # Open images.
+ inputim = immap (inputimage, READ_ONLY, 0)
+ wvlngth = imgeti (inputim, "wv_lngth")
+ helium = false
+ if (wvlngth == 10830)
+ helium = true
+ outputim = immap (outputimage, NEW_COPY, inputim)
+ outw = immap (outweight, NEW_COPY, inputim)
+ if (!helium)
+ outa = immap (outabs, NEW_COPY, inputim)
+
+ # Compute mode estimate from the input image.
+ muzero = rmap_mode (inputim, histoname, helium)
+
+ # Define some parameters for output images.
+ IM_LEN(outputim, 1) = DIM_SQUAREIM
+ IM_LEN(outputim, 2) = DIM_SQUAREIM
+ IM_PIXTYPE(outputim) = TY_INT
+
+ IM_LEN(outw, 1) = DIM_SQUAREIM
+ IM_LEN(outw, 2) = DIM_SQUAREIM
+
+ if (!helium) {
+ IM_LEN(outa, 1) = DIM_SQUAREIM
+ IM_LEN(outa, 2) = DIM_SQUAREIM
+ IM_PIXTYPE(outa) = TY_INT
+ }
+
+ # Get latitude of sub-earth point from input image header.
+ bzero = imgetr (inputim, "B_ZERO")
+
+ # Ellipse parameters.
+ E_XCENTER(el) = imgetr (inputim, "E_XCEN")
+ E_YCENTER(el) = imgetr (inputim, "E_YCEN")
+ E_XSEMIDIAMETER(el) = imgetr (inputim, "E_XSMD")
+ E_YSEMIDIAMETER(el) = imgetr (inputim, "E_YSMD")
+
+ # Remove the ellipse parameters from the header records of the
+ # output images.
+
+ call imdelf (outputim, "E_XCEN")
+ call imdelf (outputim, "E_YCEN")
+ call imdelf (outputim, "E_XSMD")
+ call imdelf (outputim, "E_YSMD")
+
+ call imdelf (outw, "E_XCEN")
+ call imdelf (outw, "E_YCEN")
+ call imdelf (outw, "E_XSMD")
+ call imdelf (outw, "E_YSMD")
+ call imaddb (outw, "WEIGHTS", YES)
+
+ if (!helium) {
+ call imdelf (outa, "E_XCEN")
+ call imdelf (outa, "E_YCEN")
+ call imdelf (outa, "E_XSMD")
+ call imdelf (outa, "E_YSMD")
+ call imaddb (outa, "ABS_VALU", YES)
+ }
+
+ # Set the variable that keeps track of where in the input image the
+ # bottom of the subraster is, and map in the initial subraster.
+
+ inim_subras_bottom = 1
+ inim_subras_ptr = imgs2s (inputim, 1, DIM_VTFD, inim_subras_bottom,
+ inim_subras_bottom+DIM_IN_RAS-1)
+
+ # Map the output images into memory.
+ outim_subras_ptr = imps2i (outputim, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ outwei_subras_ptr = imps2s (outw, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ if (!helium)
+ outav_subras_ptr = imps2i (outa, 1, DIM_SQUAREIM, 1, DIM_SQUAREIM)
+ else {
+ call smark (sp)
+ call salloc (outav_subras_ptr, DIM_SQUAREIM*DIM_SQUAREIM, TY_INT)
+ }
+
+ # Initialize meanf, meanaf, numpix.
+ meanf = 0.0
+ meanaf = 0.0
+ numpix = 0
+
+ # Map the input image into the output image by output image rows.
+ do outputrow = 1, DIM_SQUAREIM {
+
+ # Check the current input subraster to see if it covers
+ # the next output row to be mapped and map in a new subraster
+ # if necessary.
+
+ call checkimmem (inim_subras_bottom, bzero, inputim, outputrow,
+ inim_subras_ptr, el, skip)
+
+ # If checkimmem returns skip = true then this row is not contained
+ # in the input image so fill it with zeros and skip it.
+
+ if (skip) {
+ # Fill the empty row with zeros.
+ call emptyrow (outputrow, Memi[outim_subras_ptr],
+ Mems[outwei_subras_ptr], Memi[outav_subras_ptr])
+ next
+ }
+
+ # Map this pixel row.
+ call rowmap (inim_subras_bottom, Mems[inim_subras_ptr], bzero,
+ outputrow, Memi[outim_subras_ptr], Mems[outwei_subras_ptr],
+ Memi[outav_subras_ptr], el, muzero, meanf, meanaf, numpix,
+ helium)
+ }
+
+ # Put the mean field, the number of pixels, the zero corrected mean
+ # absolute field, the mode estimate, the zero corrected mean field,
+ # and the standard deviation in the output image header.
+
+ meanaf = meanaf/double(numpix)
+ meanf = meanf/double(numpix)
+ zcm = meanf - muzero
+ tempr = real(meanf)
+ call imaddr (outputim, "MEAN_FLD", tempr)
+ call imaddi (outputim, "NUMPIX", numpix)
+ if (!helium) {
+ tempr = real(meanaf)
+ call imaddr (outputim, "MEANAFLD", tempr)
+ tempr = real(muzero)
+ call imaddr (outputim, "MUZERO", tempr)
+ tempr = real(zcm)
+ call imaddr (outputim, "ZCM", tempr)
+ }
+
+ # Close images.
+ call imunmap (inputim)
+ call imunmap (outputim)
+ call imunmap (outw)
+ if (!helium)
+ call imunmap (outa)
+ if (helium)
+ call sfree (sp)
+end
+
+
+# CHECKIMMEM -- Check this row to see if the input subraster in memory
+# covers it and if it doesn't, map in a new subraster.
+
+procedure checkimmem (inim_subras_bottom, bzero, inputim, outputrow,
+ inim_subras_ptr, el, skip)
+
+int inim_subras_bottom # current bottom of the loaded subraster
+real bzero # latitude of sub-earth point for this image
+pointer inputim # pointer to input image
+int outputrow # which output row to map
+pointer inim_subras_ptr # input image subraster pointer
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+bool skip # returned flag saying to skip this line
+
+real x, y
+int ymax, ymin
+real uplat, downlat, lminusl0, latitude
+pointer imgs2s()
+errchk imgs2s
+
+begin
+ skip = false
+
+ # Find values for the latitudes of the upper and lower edges of this
+ # pixel row.
+
+ uplat = 180./3.1415926*asin(float(outputrow - 90)/90.)
+ downlat = 180./3.1415926*asin(float(outputrow - 91)/90.)
+
+ # Check to see if this row is either completely off the image or
+ # partially off the image. If it is off the image then return
+ # skip = true. If it is partially off the image then truncate
+ # the appropriate boundary latitude at the image boundary.
+
+ if (bzero > 0) {
+ if ( downlat < (-90 + bzero) && uplat < (-90 + bzero)) {
+
+ # This row is not on the image.
+ skip = true
+ return
+ }
+ if (downlat < (-90 + bzero))
+ downlat = -90 + bzero
+ } else {
+ if ( downlat > (90 - bzero) && uplat > (90 - bzero)) {
+
+ # This row is not on the image.
+ skip = true
+ return
+ }
+ if (uplat > (90 - bzero))
+ uplat = 90 - bzero
+ }
+
+ # Calculate the minimum and maximum values of y in the input image that
+ # we will need to map this output row of pixels and check these
+ # values against the value of the current bottom of the subraster.
+
+ if (bzero > 0) {
+
+ # Calculate y position in image.
+ lminusl0 = 90.
+ latitude = uplat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ ymax = int(y + .5)
+ lminusl0 = -90.
+ latitude = uplat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ if (int(y + .5) > ymax)
+ ymax = int(y + .5)
+
+ # Calculate min y position.
+ lminusl0 = 0.
+ latitude = downlat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ ymin = int(y + .5)
+
+ } else {
+
+ # Calculate y position in image.
+ lminusl0 = 90.
+ latitude = downlat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ ymin = int(y + .5)
+ lminusl0 = -90.
+ latitude = downlat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ if (int(y + .5) < ymin) ymin = int(y + .5)
+
+ # Calculate max y position.
+ lminusl0 = 0.
+ latitude = uplat
+ call getxy (latitude, lminusl0, bzero, el, x, y, skip)
+ ymax = int(y + .5)
+ }
+
+ # If ymin or ymax is outside the current subraster, then map in
+ # an appropriate subraster.
+
+ if ((ymin < (inim_subras_bottom + 5)) ||
+ (ymax > (inim_subras_bottom + 140))) {
+ if ((ymax - ymin) > 150) {
+ call printf ("Subraster too small(ymax-ymin > 150), bye")
+ }
+ if ((ymin + 144) > 2048) {
+ ymin = 2048 - 144
+ }
+ if ((ymin - 5) < 1) {
+ skip = true
+ return
+ }
+ inim_subras_ptr = imgs2s (inputim, 1, DIM_VTFD, (ymin - 5),
+ (ymin + 144))
+ inim_subras_bottom = ymin - 5
+ }
+end
+
+
+# ROWMAP -- Map this output row pixel by pixel.
+
+procedure rowmap (inim_subras_bottom, in_subraster, bzero, outputrow,
+ out_subraster, outw_subraster, outa_subraster, el, muzero, meanf,
+ meanaf, numpix, helium)
+
+real bzero # lat of sub-earth
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+int inim_subras_bottom # bottom of current
+int outputrow # output row
+short in_subraster[DIM_VTFD, DIM_IN_RAS] # subraster
+int out_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output image
+short outw_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output weights
+int outa_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output abs. value
+double muzero # mode estimate
+double meanf # mean field
+double meanaf # mean absolute field
+int numpix # number of pixels
+bool helium # 10830 flag
+
+int pixel
+
+errchk pixelmap
+
+begin
+ # Do all 180 pixels in this output row.
+ do pixel = 1,180 {
+ call pixelmap (pixel, in_subraster, inim_subras_bottom,
+ bzero, outputrow, out_subraster, outw_subraster, outa_subraster,
+ el, muzero, meanf, meanaf, numpix, helium)
+ }
+end
+
+
+# PIXELMAP -- Sum up and count the input pixels contained inside the
+# given output pixel. The sum is carried out in the following way:
+#
+# Calculate, on the input image, the position of the center of the
+# output pixel to be mapped.
+# Calculate the values of the partial derivatives of latitude and
+# longitude with respect to x and y.
+# Calculate the boundaries of the pixel in the input image, sum and
+# count all the pixels inside, and assign the output pixel the value
+# sum/count.
+
+procedure pixelmap (pixel, in_subraster, inim_subras_bottom,
+ bzero, outputrow, out_subraster, outw_subraster, outa_subraster,
+ el, muzero, meanf, meanaf, numpix, helium)
+
+int pixel # which pixel
+short in_subraster[DIM_VTFD, DIM_IN_RAS] # subraster
+int inim_subras_bottom # bottom of current
+real bzero # lat of sub-earth
+int outputrow # output row
+int out_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output image
+short outw_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output weights
+int outa_subraster[DIM_SQUAREIM, DIM_SQUAREIM] # output abs. value
+real el[LEN_ELSTRUCT] # ellipse parameters data structure
+double muzero # mode estimate
+double meanf # first moment accum.
+double meanaf # mean absolute field
+int numpix # number of pixels
+bool helium # helium flag
+
+real lat_mid, long_mid, lat_bot, lat_top
+real long_rite, long_left
+double sum, sumabs
+int count
+real xpixcenter, ypixcenter
+real dlongdx, dlatdy
+int xleft,xright,ybottom,ytop,x,y
+int num_pix_vert, num_pix_horz
+pointer sp
+pointer num # numeric structure pointer
+real dat
+
+begin
+ call smark (sp)
+ call salloc (num, VT_LENNUMSTRUCT, TY_STRUCT)
+
+ # First obtain the parameters necessary from numeric.
+ call numeric (bzero, el, outputrow, pixel, xpixcenter, ypixcenter, num)
+
+ dlongdx = VT_DLODX(num)
+ dlatdy = VT_DLATDY(num)
+ lat_top = VT_LATTOP(num)
+ lat_bot = VT_LATBOT(num)
+ long_left = VT_LOLEFT(num)
+ long_rite = VT_LORITE(num)
+ lat_mid = VT_LATMID(num)
+ long_mid = VT_LOMID(num)
+
+ if (lat_top == 10000.) {
+ out_subraster[pixel,outputrow] = 0
+ outw_subraster[pixel,outputrow] = 0
+ outa_subraster[pixel,outputrow] = 0
+ call sfree (sp)
+ return
+ }
+
+ # Calculate the box of pixels we want.
+ num_pix_horz = int((1.0 / dlongdx) + .5)
+ xleft = xpixcenter - int((.5 / dlongdx) + .5)
+ xright = xleft + num_pix_horz - 1
+ num_pix_vert = int((abs(abs(lat_top) - abs(lat_bot)) / dlatdy) + .5)
+ ybottom = ypixcenter - int(((abs(abs(lat_mid) - abs(lat_bot))) /
+ dlatdy) + .5) - (inim_subras_bottom - 1)
+ ytop = ybottom + num_pix_vert - 1
+
+ # Sum up the pixels inside this box.
+ count = 0
+ sum = 0.0
+ sumabs = 0.0
+
+ do x = xleft, xright {
+ do y = ybottom, ytop {
+ if (and(int(in_subraster[x,y]),17B) >= THRESHOLD+1) {
+ count = count + 1
+
+ # Divide by 16 to remove squibby brightness
+ # Accumulate the various moment data.
+ dat = real(in_subraster[x,y]/16)
+ sum = sum + double(dat)
+ sumabs = sumabs + double(abs(dat - muzero))
+ }
+ }
+ }
+
+ outw_subraster[pixel,outputrow] = short(count)
+ out_subraster[pixel,outputrow] = int(sum - double(count*muzero) + .5)
+ if (!helium)
+ outa_subraster[pixel,outputrow] = int(sumabs + .5)
+ meanf = meanf + sum
+ meanaf = meanaf + sumabs
+ numpix = numpix + count
+
+ call sfree (sp)
+end
+
+
+# EMPTYROW -- Set this row in the output image to zero.
+
+procedure emptyrow (outputrow, out_subraster, outw_subraster, outa_subraster)
+
+int outputrow
+int out_subraster[DIM_SQUAREIM, DIM_SQUAREIM]
+short outw_subraster[DIM_SQUAREIM, DIM_SQUAREIM]
+int outa_subraster[DIM_SQUAREIM, DIM_SQUAREIM]
+
+int pixel
+
+begin
+ # Do all 180 pixels in this output row.
+ do pixel = 1,180 {
+ out_subraster[pixel, outputrow] = 0
+ outw_subraster[pixel, outputrow] = 0
+ outa_subraster[pixel, outputrow] = 0
+ }
+end
+
+
+double procedure rmap_mode (inputim, histoname, helium)
+
+pointer inputim # Input image
+char histoname[SZ_FNAME] # Histogram name
+bool helium
+
+int count, i, j
+int dati, hist_middle
+pointer imline, histim, hiptr
+int histo[LEN_HISTO]
+
+# Stuff for mrqmin.
+real a[3], x[LEN_HISTO], y[LEN_HISTO], sig[LEN_HISTO]
+int lista[3]
+real alambda, chisq, covar[3,3], alpha[3,3]
+short k
+
+pointer imgl2s(), impl1i(), immap()
+short shifts()
+
+extern gauss
+
+begin
+ # Initialize.
+ count = 0
+ k = -4
+ do i = 1, LEN_HISTO
+ histo[i] = 0
+
+ do i = 1, DIM_VTFD, SPACING {
+ imline = imgl2s (inputim, i)
+ do j = 1, DIM_VTFD, SPACING {
+ if (and(int(Mems[imline+j-1]),17B) >= THRESHOLD+1) {
+ count = count + 1
+ dati = shifts(Mems[imline+j-1], k)
+
+ # Put the data into a histogram.
+ hist_middle = (LEN_HISTO-1)/2 + 1
+ if (abs(dati) <= hist_middle-1)
+ histo[dati+hist_middle] = histo[dati+hist_middle] + 1
+ }
+ }
+ }
+
+ # Write this histogram out to an image.
+ histim = immap (histoname, NEW_COPY, inputim)
+ IM_NDIM(histim) = 1
+ IM_LEN(histim, 1) = LEN_HISTO
+ IM_PIXTYPE(histim) = TY_INT
+ hiptr = impl1i (histim)
+
+ # Put the histogram into this image.
+ do i = 1, LEN_HISTO
+ Memi[hiptr+i-1] = histo[i]
+
+ if (!helium) {
+ # Set up arrays, etc. for gaussian fit.
+ a[2] = 1.0
+ a[1] = real(histo[1])
+ do i = 1, LEN_HISTO {
+ x[i] = real(i)
+ y[i] = real(histo[i])
+ sig[i] = 1.0
+ if (histo[i] > a[1]) {
+ a[1] = real(histo[i])
+ a[2] = real(i)
+ }
+ }
+ a[3] = 15.0
+
+ do i = 1, 3
+ lista[i] = i
+
+ # Fit the gaussian.
+ alambda = -1.0
+ call mrqmin (x, y, sig, LEN_HISTO, a, 3, lista, 3, covar, alpha, 3,
+ chisq, gauss, alambda)
+ do i = 1, 10 {
+ call mrqmin (x, y, sig, LEN_HISTO, a, 3, lista, 3, covar,
+ alpha, 3, chisq, gauss, alambda)
+ }
+
+ call imaddr (histim, "GSS_AMPL", a[1])
+ call imaddr (histim, "GSS_CNTR", a[2])
+ call imaddr (histim, "GSS_WDTH", a[3])
+
+ # Put the mode estimate in the header.
+ call imaddr (histim, "MUZERO", (a[2] - real(hist_middle)))
+ }
+
+ call imunmap (histim)
+
+ if (helium)
+ return (0.0)
+ else
+ return (double(a[2] - real(hist_middle)))
+end
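RMAP's row loop is driven by a sine-of-latitude mapping: output row r of the 180-row square spans the latitude band whose sines are (r-91)/90 and (r-90)/90, which is exactly the asin arithmetic used in checkimmem. A short Python restatement of that band computation (illustrative only):

    import math

    def latitude_band(outputrow):
        # Lower and upper latitude, in degrees, of output row 1..180.
        downlat = math.degrees(math.asin((outputrow - 91) / 90.0))
        uplat = math.degrees(math.asin((outputrow - 90) / 90.0))
        return downlat, uplat

    # Example: the band just above the equator.
    # latitude_band(91) -> (0.0, about 0.64 degrees)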
diff --git a/noao/imred/vtel/syndico.h b/noao/imred/vtel/syndico.h
new file mode 100644
index 00000000..d7693057
--- /dev/null
+++ b/noao/imred/vtel/syndico.h
@@ -0,0 +1,13 @@
+# Coordinates of the center of the picture.
+define DICO_XCENTER .505
+define DICO_YCENTER .500
+
+# The number of dicomed pixels it takes to make 18 centimeters on a
+# standard dicomed plot.
+define DICO_18CM 2436.0
+
+# Coordinates of the greyscale box.
+define IMGBL_X .245
+define IMGBL_Y .867
+define IMGTR_X .765
+define IMGTR_Y .902
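DICO_18CM above is the number of Dicomed device pixels that corresponds to 18 cm under the photo lab's standard enlargement; syndico.x divides it by twice the measured solar radius to get the expansion factor that makes the plotted disk come out at 18 cm. A one-line illustration in Python (the example radius is made up):

    DICO_18CM = 2436.0   # device pixels spanning 18 cm on a standard print

    def dicomed_scale(radius_pixels):
        # Expansion factor used by syndico.x: an 18 cm disk regardless of
        # the measured solar radius.
        return DICO_18CM / (2.0 * radius_pixels)

    # dicomed_scale(900.0) -> about 1.35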
diff --git a/noao/imred/vtel/syndico.par b/noao/imred/vtel/syndico.par
new file mode 100644
index 00000000..b36d0624
--- /dev/null
+++ b/noao/imred/vtel/syndico.par
@@ -0,0 +1,14 @@
+image,s,a,,,,input image
+logofile,s,h,iraf$noao/imred/vtel/nsolcrypt.dat,,,logo file
+device,s,h,dicomed,,,plot device
+sbthresh,i,h,2,,,squibby brightness threshold
+plotlogo,b,h,yes,,,plot the logo on the image?
+verbose,b,h,no,,,give progress reports?
+forcetype,b,h,no,,,force the data type?
+magnetic,b,h,yes,,,if forcing datatype is it magnetic else 10830
+month,i,q,,,,month the observation was made
+day,i,q,,,,day the observation was made
+year,i,q,,,,year the observation was made
+hour,i,q,,,,hour the observation was made
+minute,i,q,,,,minute the observation was made
+second,i,q,,,,second the observation was made
diff --git a/noao/imred/vtel/syndico.x b/noao/imred/vtel/syndico.x
new file mode 100644
index 00000000..64910679
--- /dev/null
+++ b/noao/imred/vtel/syndico.x
@@ -0,0 +1,416 @@
+include <mach.h>
+include <imhdr.h>
+include <imset.h>
+include <gset.h>
+include "syndico.h"
+include "vt.h"
+
+# SYNDICO -- Make Dicomed prints of synoptic images. This program is tuned
+# to make the images 18 centimeters in diameter.
+
+procedure t_syndico()
+
+char image[SZ_FNAME] # image to plot
+char logofile[SZ_FNAME] # file containing logo
+char device[SZ_FNAME] # plot device
+int sbthresh # squibby brightness threshold
+bool verbose # verbose flag
+bool plotlogo # plotlogo flag
+bool forcetype # force image type flag
+bool magnetic # image type = magnetic flag
+
+int obsdate, wavelength, obstime
+int i, j, month, day, year, hour, minute, second, stat, bufptr
+real delta_gblock, x, y
+real excen, eycen, exsmd, eysmd, rguess
+real b0, l0
+real mapy1, mapy2, radius, scale, diskfrac
+char ltext[SZ_LINE]
+char system_id[SZ_LINE]
+
+short grey[16]
+pointer gp, sp, im, lf
+pointer subrasp, subras1, buff
+int trnsfrm[513]
+int lkup10830[1091]
+int gs10830[16]
+real xstart, xend, ystart, yend, yinc
+real xcenerr, ycenerr, ndc_xcerr, ndc_ycerr
+real temp_xcenter, temp_ycenter
+
+pointer immap(), gopen(), imgl2s()
+int imgeti(), clgeti(), open(), read()
+real imgetr()
+bool clgetb(), imaccf()
+include "trnsfrm.inc"
+errchk gopen, immap, sysid, imgs2s, imgl2s
+
+# Grey scale points for 10830.
+data (gs10830[i], i = 1, 6) /-1000,-700,-500,-400,-300,-250/
+data (gs10830[i], i = 7, 10) /-200,-150,-100,-50/
+data (gs10830[i], i = 11, 16) /0,10,20,40,60,90/
+
+begin
+ call smark (sp)
+ call salloc (subrasp, DIM_VTFD, TY_SHORT)
+ call salloc (subras1, 185*185, TY_SHORT)
+ call salloc (buff, 185, TY_CHAR)
+
+ # Get parameters from the cl.
+ call clgstr ("image", image, SZ_FNAME)
+ call clgstr ("logofile", logofile, SZ_FNAME)
+ call clgstr ("device", device, SZ_FNAME)
+ sbthresh = clgeti ("sbthresh")
+ plotlogo = clgetb ("plotlogo")
+ verbose = clgetb ("verbose")
+ forcetype = clgetb ("forcetype")
+ magnetic = clgetb ("magnetic")
+
+ # Open the input image, open the logo image if requested.
+ im = immap (image, READ_ONLY, 0)
+ if (plotlogo)
+ iferr {
+ lf = open (logofile, READ_ONLY, TEXT_FILE)
+ } then {
+ call eprintf ("Error opening the logo file, logo not made.\n")
+ plotlogo = false
+ }
+
+ # Get/calculate some of the housekeeping data.
+ if (imaccf (im, "obs_date")) {
+ obsdate = imgeti (im, "obs_date")
+ obstime = imgeti (im, "obs_time")
+ month = obsdate/10000
+ day = obsdate/100 - 100 * (obsdate/10000)
+ year = obsdate - 100 * (obsdate/100)
+ hour = int(obstime/3600)
+ minute = int((obstime - hour * 3600)/60)
+ second = obstime - hour * 3600 - minute * 60
+ } else {
+ # Use cl query parameters to get these values.
+ call eprintf ("Date and Time not found in image header.\n")
+ call eprintf ("Please enter them below.\n")
+ month = clgeti ("month")
+ day = clgeti ("day")
+ year = clgeti ("year")
+ hour = clgeti ("hour")
+ minute = clgeti ("minute")
+ second = clgeti ("second")
+ }
+
+ # Get the solar image center and radius from the image header, and
+ # get the solar image radius from the ephemeris routine. If
+ # the two radii are similar, use the former; if they differ by
+ # 10 percent or more, use the ephemeris radius and
+ # assume the center is at (1024,1024).
+
+ # Get ellipse parameters from image header.
+ # If they are not there, warn the user that we are using ephemeris
+ # values.
+ if (imaccf (im, "E_XCEN")) {
+ excen = imgetr (im, "E_XCEN")
+ eycen = imgetr (im, "E_YCEN")
+ exsmd = imgetr (im, "E_XSMD")
+ eysmd = imgetr (im, "E_YSMD")
+
+ # Get rguess from ephem.
+ iferr (call ephem (month, day, year, hour, minute, second, rguess,
+ b0, l0, false))
+ call eprintf ("Error getting ephemeris data.\n")
+
+ radius = (exsmd + eysmd) / 2.0
+ if (abs((radius - rguess) / rguess) > 0.1) {
+ radius = rguess
+ excen = 1024.0
+ eycen = 1024.0
+ }
+
+ } else {
+ call eprintf ("No ellipse parameters in image header.\n Using")
+ call eprintf (" ephemeris value for radius and setting center to")
+ call eprintf (" 1024, 1024\n")
+
+ # Get rguess from ephem.
+ iferr (call ephem (month, day, year, hour, minute, second, rguess,
+ b0, l0, false))
+ call eprintf ("Error getting ephemeris data.\n")
+
+ radius = rguess
+ excen = 1024.0
+ eycen = 1024.0
+ }
+
+ # Error in center. (units of pixels)
+ xcenerr = excen - 1024.0
+ ycenerr = eycen - 1024.0
+
+ # Transform error to NDC.
+ ndc_xcerr = xcenerr * (1.0/4096.0)
+ ndc_ycerr = ycenerr * (1.0/4096.0)
+
+ # Next, knowing that the image diameter must be 18 centimeters,
+ # calculate the scaling factor we must use to expand the image.
+ # DICO_18CM is a MAGIC number = 18 centimeters on dicomed prints
+ # given the way the NOAO photo lab currently enlarges the images.
+ scale = DICO_18CM / real(radius*2)
+
+ # Open the output file.
+ gp = gopen (device, NEW_FILE, STDGRAPH)
+
+ # Put fiducial marks on the plot.
+ diskfrac = radius/1024.0
+ temp_xcenter = DICO_XCENTER-ndc_xcerr
+ temp_ycenter = DICO_YCENTER-ndc_ycerr
+ call gline (gp, temp_xcenter, temp_ycenter+diskfrac*.25*scale+.01,
+ temp_xcenter, temp_ycenter+diskfrac*.25*scale+.025)
+ call gline (gp, temp_xcenter, temp_ycenter-diskfrac*.25*scale-.01,
+ temp_xcenter, temp_ycenter-diskfrac*.25*scale-.025)
+
+ # Draw a little compass on the plot.
+ call gline (gp, .25, DICO_YCENTER+.25+.01,
+ .25, DICO_YCENTER+.25+.035)
+ call gtext (gp, .25, DICO_YCENTER+.25+.037,
+ "N", "v=b;h=c;s=.50")
+ call gmark (gp, .2565, DICO_YCENTER+.25+.037,
+ GM_CIRCLE, .006, .006)
+ call gmark (gp, .2565, DICO_YCENTER+.25+.037,
+ GM_CIRCLE, .001, .001)
+ call gline (gp, .25, DICO_YCENTER+.25+.01,
+ .28, DICO_YCENTER+.25+.01)
+ call gtext (gp, .282, DICO_YCENTER+.25+.01,
+ "W", "v=c;h=l;s=.50")
+ call gmark (gp, .290, DICO_YCENTER+.25+.01-.006,
+ GM_CIRCLE, .006, .006)
+ call gmark (gp, .290, DICO_YCENTER+.25+.01-.006,
+ GM_CIRCLE, .001, .001)
+
+ # Get the wavelength from the image header. If the user wants
+ # to force the wavelength, do so. (This is used if the header
+ # information about the wavelength is wrong.)
+ wavelength = imgeti (im, "wv_lngth")
+ if (forcetype)
+ if (magnetic)
+ wavelength = 8688
+ else
+ wavelength = 10830
+
+ # Write the grey scale labels onto the plot.
+ delta_gblock = (IMGTR_X - IMGBL_X)/16.
+ y = IMGBL_Y - .005
+ do i = 1, 16 {
+ x = IMGBL_X + real(i-1) * delta_gblock + delta_gblock/2.
+ call sprintf (ltext, SZ_LINE, "%d")
+ if (wavelength == 8688)
+ call pargi ((i-1)*(int((512./15.)+0.5))-256)
+ else if (wavelength == 10830)
+ call pargi (gs10830(i))
+ call gtext (gp, x, y, ltext, "v=t;h=c;s=.20")
+ }
+
+ # Label on grey scale.
+ call sprintf (ltext, SZ_LINE, "%s")
+ if (wavelength == 8688)
+ call pargstr ("gauss")
+ else if (wavelength == 10830)
+ call pargstr ("relative line strength")
+ call gtext (gp, DICO_XCENTER, (IMGBL_Y-.024), ltext, "v=c;h=c;s=.5")
+
+ # Put the title on.
+ call sprintf (ltext, SZ_LINE, "%s")
+ if (wavelength == 8688)
+ call pargstr ("8688 MAGNETOGRAM")
+ else if (wavelength == 10830)
+ call pargstr ("10830 SPECTROHELIOGRAM")
+ else
+ call pargstr (" ")
+ call gtext (gp, DICO_XCENTER, .135, ltext, "v=c;h=c;s=.7")
+
+ # If we don't have a logo to plot, write the data origin on the plot.
+ if (!plotlogo) {
+
+ call sprintf (ltext, SZ_LINE, "%s")
+ call pargstr ("National")
+ call gtext (gp, .24, .155, ltext, "v=c;h=c;s=.7")
+ call sprintf (ltext, SZ_LINE, "%s")
+ call pargstr ("Solar")
+ call gtext (gp, .24, .135, ltext, "v=c;h=c;s=.7")
+ call sprintf (ltext, SZ_LINE, "%s")
+ call pargstr ("Observatory")
+ call gtext (gp, .24, .115, ltext, "v=c;h=c;s=.7")
+ }
+
+ # Put month/day/year on plot.
+ call sprintf (ltext, SZ_LINE, "%02d/%02d/%02d")
+ call pargi (month)
+ call pargi (day)
+ call pargi (year)
+ call gtext (gp, .70, .175, ltext, "v=c;h=l;s=.5")
+
+ # Put the hour:minute:second on plot.
+ call sprintf (ltext, SZ_LINE, "%02d:%02d:%02d UT")
+ call pargi (hour)
+ call pargi (minute)
+ call pargi (second)
+ call gtext (gp, .70, .155, ltext, "v=c;h=l;s=.5")
+
+ # Fill in the grey scale.
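+ # The 16-step wedge samples the transfer tables directly: every
+ # int(512./15.+0.5) = 34th entry of trnsfrm for 8688 data, and the
+ # entries selected by gs10830 for 10830 data.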
+ if (wavelength == 8688) {
+ do i = 1, 16
+ grey[i] = (trnsfrm[(i-1)*(int((512./15.)+0.5))+1])
+ call gpcell (gp, grey, 16, 1, IMGBL_X, IMGBL_Y, IMGTR_X, IMGTR_Y)
+ } else if (wavelength == 10830) {
+ do i = 1, 16
+ grey[i] = (lkup10830[gs10830(i)+1001])
+ call gpcell (gp, grey, 16, 1, IMGBL_X, IMGBL_Y, IMGTR_X, IMGTR_Y)
+ }
+
+ # Prepare some constants for plotting.
+ xstart = temp_xcenter - .25 * scale
+ xend = temp_xcenter + .25 * scale
+ ystart = temp_ycenter - .25 * scale
+ yend = temp_ycenter + .5 * scale
+ mapy1 = ystart
+ mapy2 = ystart
+ yinc = (.5*scale)/real(DIM_VTFD)
+
+ # Put the data on the plot. Line by line.
+ do i = 1, DIM_VTFD {
+
+ if (verbose) {
+ call printf ("line = %d\n")
+ call pargi (i)
+ call flush (STDOUT)
+ }
+
+ subrasp = imgl2s (im, i)
+
+ # Call the limb trimmer and data divider.
+ call fixline (Mems[subrasp], DIM_VTFD, wavelength, sbthresh)
+
+ # Update the top and bottom edges of this line.
+ mapy1 = mapy2
+ mapy2 = mapy2 + yinc
+
+ # Put the line on the output plot.
+ call gpcell (gp, Mems[subrasp], DIM_VTFD, 1, xstart,
+ mapy1, xend, mapy2)
+
+ } # End of do loop on image lines.
+
+ # Put the system identification on the plot.
+ call sysid (system_id, SZ_LINE)
+ call gtext (gp, DICO_XCENTER, .076, system_id, "h=c;s=0.45")
+
+ # Put the NSO logo on the plot.
+ if (plotlogo) {
+
+ # Read in the image. (the image is encoded in a text file)
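+ # Each printable character encodes one pixel: the ASCII value minus 32
+ # is scaled by 2.7027027 so that the printable range (32-126) maps to
+ # roughly 0-254.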
+ do i = 1, 185 {
+ bufptr = 0
+ while (bufptr < 185-79) {
+ stat = read (lf, Memc[buff+bufptr], 80)
+ bufptr = bufptr + 79
+ }
+ stat = read (lf, Memc[buff+bufptr], 80)
+ do j = 1, 185 {
+ Mems[subras1+(i-1)*185+j-1] =
+ short((Memc[buff+j-1]-32.)*2.7027027)
+ }
+ }
+
+ # Put it on the plot.
+ call gpcell (gp, Mems[subras1], 185, 185, .24, .13, .32, .21)
+ }
+
+ # Close the graphics pointer, unmap images, free stack.
+ call gclose (gp)
+ call imunmap (im)
+ if (plotlogo)
+ call close (lf)
+ call sfree (sp)
+end
+
+
+# FIXLINE -- Clean up the line. Set the value of pixels off the limb to
+# zero, remove the squibby brightness from each pixel, and apply a
+# nonlinear lookup table to the greyscale mapping.
+
+procedure fixline (ln, xlength, wavelength, sbthresh)
+
+int xlength # length of line buffer
+short ln[xlength] # line buffer
+int wavelength # wavelength of the observation
+int sbthresh # squibby brightness threshold
+
+int trnsfrm[513]
+int lkup10830[1091]
+bool found
+int i, left, right
+include "trnsfrm.inc"
+
+begin
+ # Look in from the left end till squibby brightness goes above the
+ # threshold, remember where this limbpoint is.
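+ # The low four bits of each pixel (mask 17B = 15 decimal) hold the
+ # squibby brightness; the data proper sits in the bits above them.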
+ found = false
+ do i = 1, xlength { # Find left limbpoint.
+ if (and(int(ln[i]),17B) > sbthresh) {
+ found = true
+ left = i
+ break
+ }
+ }
+
+ if (found) {
+ # Find the right limbpoint.
+ do i = xlength, 1, -1 {
+ if (and(int(ln[i]),17B) > sbthresh) {
+ right = i
+ break
+ }
+ }
+
+ # Divide the image by 16, map the greyscale, and trim the limb.
+ do i = left+1, right-1 {
+
+ # Remove squibby brightness.
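+ # (An integer divide by 16 shifts the 4-bit squibby nibble away,
+ # leaving only the data portion of the pixel.)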
+ ln[i] = ln[i]/16
+
+ if (wavelength == 8688) {
+ # Magnetogram, nonlinear greyscale.
+ # Make data fit in the table.
+ if (ln[i] < -256)
+ ln[i] = -256
+ if (ln[i] > 256)
+ ln[i] = 256
+
+ # Look it up in the table.
+ ln[i] = trnsfrm[ln[i]+257]
+ } else if (wavelength == 10830) {
+ # 10830 spectroheliogram, nonlinear greyscale.
+ # Make data fit in the table.
+ if (ln[i] < -1000)
+ ln[i] = -1000
+ if (ln[i] > 90)
+ ln[i] = 90
+ # Look it up in the table.
+ ln[i] = lkup10830[ln[i]+1001]
+ } else {
+ # Unknown type, linear greyscale.
+ if (ln[i] < 1)
+ ln[i] = 1
+ if (ln[i] > 255)
+ ln[i] = 255
+ }
+ }
+
+ # Set stuff outside the limb to zero.
+ do i = 1, left
+ ln[i] = 0
+ do i = right, xlength
+ ln[i] = 0
+ } else {
+ # This line is off the limb, set it to zero.
+ do i = 1, xlength
+ ln[i] = 0
+ }
+end
diff --git a/noao/imred/vtel/tcopy.par b/noao/imred/vtel/tcopy.par
new file mode 100644
index 00000000..4facf827
--- /dev/null
+++ b/noao/imred/vtel/tcopy.par
@@ -0,0 +1,5 @@
+inputfile,s,q,,,,Input file descriptor
+files,s,q,,,,List of files to be examined
+outputfile,s,q,,,,Output file descriptor
+new_tape,b,q,,,,Are you using a new tape?
+verbose,b,h,no,,,Print out header data and give progress reports
diff --git a/noao/imred/vtel/tcopy.x b/noao/imred/vtel/tcopy.x
new file mode 100644
index 00000000..42709563
--- /dev/null
+++ b/noao/imred/vtel/tcopy.x
@@ -0,0 +1,190 @@
+include <error.h>
+include <fset.h>
+include <printf.h>
+include <mach.h>
+include "vt.h"
+
+define SZ_VTRECFD 5120 # length, in chars, of full disk recs
+define YABUF 20000 # Yet Another BUFfer
+define SWAP {temp=$1;$1=$2;$2=temp}
+define MAX_RANGES 100
+
+# TCOPY -- This is an asynchronous tape to tape copy routine. It considers
+# the input and output to be streaming devices.
+# The user specifies which files on the input tape to copy and the output
+# tape to write them to.
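+# Two buffers are used so that a read and a write can be in progress at
+# the same time: while the record in one buffer is written out, the next
+# record is read asynchronously into the other, and the SWAP macro
+# exchanges the two buffer pointers after each pass.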
+
+procedure t_tcopy()
+
+char inputfile[SZ_FNAME]
+char files[SZ_LINE]
+char outputfile[SZ_FNAME]
+
+char tapename[SZ_FNAME]
+int filerange[2 * MAX_RANGES + 1]
+int nfiles, filenumber, numrecords, whichfile
+bool verbose
+
+int decode_ranges(), mtfile()
+int get_next_number(), tapecopy(), mtneedfileno()
+bool clgetb()
+errchk tapecopy
+
+begin
+ call fseti (STDOUT, F_FLUSHNL, YES)
+
+ # Get input file(s).
+ call clgstr ("inputfile", inputfile, SZ_FNAME)
+ if (mtfile (inputfile) == NO || mtneedfileno (inputfile) == NO) {
+ call strcpy ("1", files, SZ_LINE)
+ } else {
+ call clgstr ("files", files, SZ_LINE)
+ }
+
+ if (decode_ranges (files, filerange, MAX_RANGES, nfiles) == ERR)
+ call error (0, "Illegal file number list.")
+
+ # Get the output file from the cl.
+ call clgstr ("outputfile", outputfile, SZ_FNAME)
+
+ # See if the output is mag tape, if not, error.
+ if (mtfile (outputfile) == NO)
+ call error (1, "Outputfile should be magnetic tape.")
+
+ # If no tape file number is given, then ask whether the tape
+ # is blank or contains data. If blank then start at [1], else
+ # start at [EOT].
+
+ if (mtneedfileno(outputfile) == YES)
+ if (!clgetb ("new_tape"))
+ call mtfname (outputfile, EOT, outputfile, SZ_FNAME)
+ else
+ call mtfname (outputfile, 1, outputfile, SZ_FNAME)
+
+ # Get verbose flag.
+ verbose = clgetb ("verbose")
+
+ # Loop over files
+ whichfile = 1
+ filenumber = 0
+ while (get_next_number (filerange, filenumber) != EOF) {
+
+ # Assemble the appropriate tape file name.
+ if (mtneedfileno (inputfile) == NO)
+ call strcpy (inputfile, tapename, SZ_FNAME)
+ else
+ call mtfname (inputfile, filenumber, tapename, SZ_FNAME)
+
+ if (whichfile > 1) {
+ # Assemble the appropriate output file name.
+ call mtfname (outputfile, EOT, outputfile, SZ_FNAME)
+ }
+
+ if (verbose) {
+ call printf ("reading %s, writing %s\n")
+ call pargstr(tapename)
+ call pargstr(outputfile)
+ }
+
+ iferr {
+ numrecords = tapecopy (tapename, outputfile)
+ } then {
+ call eprintf ("Error copying file: %s\n")
+ call pargstr (tapename)
+ call erract (EA_WARN)
+ next
+ } else if (numrecords == 0) {
+ call printf ("Tape at EOT\n")
+ break
+ }
+ whichfile = whichfile + 1
+
+ } # End while.
+end
+
+
+# TAPECOPY -- This is the actual tape to tape copy routine.
+
+int procedure tapecopy (infile, outfile)
+
+char infile[SZ_FNAME]
+char outfile[SZ_FNAME]
+
+pointer bufa, bufb, temp
+int bufsz, numrecords
+int nbytes, lastnbytes, in, out
+int fstati(), mtopen(), awaitb()
+errchk mtopen, areadb, awriteb, awaitb
+
+begin
+ # Open input file, see if it has anything in it. If not, return.
+ in = mtopen (infile, READ_ONLY, 0)
+
+ bufsz = fstati (in, F_MAXBUFSIZE) # Maximum output buffer size.
+ if (bufsz == 0) # If no max, set a max.
+ bufsz = YABUF
+
+ call malloc (bufa, bufsz, TY_CHAR) # Allocate output buffer.
+ call malloc (bufb, bufsz, TY_CHAR) # Other output buffer
+
+ call areadb (in, Memc[bufa], bufsz, 0)
+ nbytes = awaitb (in)
+ if (nbytes == EOF) {
+ call close (in)
+ call mfree (bufa, TY_CHAR)
+ call mfree (bufb, TY_CHAR)
+ return (EOF)
+ }
+
+ # Open the output file.
+ out = mtopen (outfile, WRITE_ONLY, 0)
+
+ lastnbytes = 0 # Last record size memory.
+ numrecords = 0 # Number of records read.
+
+ if (nbytes > 0) {
+ if (nbytes > SZ_VTRECFD*SZB_SHORT &&
+ nbytes < SZ_VTRECFD*SZB_SHORT+600)
+ nbytes = SZ_VTRECFD*SZB_SHORT
+ call awriteb (out, Memc[bufa], nbytes, 0)
+ call areadb (in, Memc[bufb], bufsz, 0)
+ numrecords = numrecords + 1
+ }
+
+ SWAP (bufa, bufb)
+
+ # Main Loop.
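+ # Each pass waits for the previous write and the previous read to
+ # finish, then issues the next write from one buffer and the next
+ # read into the other before swapping the buffers.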
+ repeat {
+ if (awaitb (out) != nbytes) {
+ call printf ("Write error, record = %d.\n")
+ call pargi (numrecords+1)
+ }
+
+ nbytes = awaitb (in)
+ if (nbytes == ERR) {
+ call printf ("Read error, record = %d.\n")
+ call pargi (numrecords+1)
+ nbytes = lastnbytes
+ }
+ lastnbytes = nbytes
+
+ if (nbytes > 0) {
+ if (nbytes > SZ_VTRECFD*SZB_SHORT &&
+ nbytes < SZ_VTRECFD*SZB_SHORT+600)
+ nbytes = SZ_VTRECFD*SZB_SHORT
+ call awriteb (out, Memc[bufa], nbytes, 0)
+ call areadb (in, Memc[bufb], bufsz, 0)
+ numrecords = numrecords + 1
+ }
+
+ SWAP (bufa, bufb)
+
+ } until (nbytes == 0) # all done
+
+ call mfree (bufa, TY_CHAR)
+ call mfree (bufb, TY_CHAR)
+ call close (in)
+ call close (out)
+
+ return (numrecords)
+end
diff --git a/noao/imred/vtel/textim.x b/noao/imred/vtel/textim.x
new file mode 100644
index 00000000..4ca5a8c1
--- /dev/null
+++ b/noao/imred/vtel/textim.x
@@ -0,0 +1,114 @@
+include <mach.h>
+include <imhdr.h>
+
+define FONTWIDE 6
+define FONTHIGH 7
+define MAXSTRING 100
+
+# TEXTIM -- Write a text string into an image using a pixel font for speed.
+# Characters are magnified from the pixel font by xmag and ymag in each axis.
+
+procedure textim (im, s, x, y, xmag, ymag, value, zerobgnd, bgndvalu)
+
+pointer im # Image to put the text in.
+char s[MAXSTRING] # Text to put in the image.
+int x, y # x, y position in the image.
+int xmag, ymag # x, y magnification values.
+int value # Value to use in image for text.
+int zerobgnd # Flag to tell if we should zero bgnd.
+int bgndvalu # Background value to use.
+
+int numrow, numcol, numchars
+int fonthigh, fontwide
+int i, l, ch
+int nchar, line
+int pixary[5]
+pointer lineget, lineput
+
+short tshort
+int strlen()
+pointer imgl2s(), impl2s()
+errchk imgl2s, impl2s
+
+begin
+ # Find the length of the string (if there aren't any chars, return).
+ numchars = strlen (s)
+ if (numchars <= 0)
+ return
+
+ # Calculate height and width of magnified font.
+ fonthigh = FONTHIGH * ymag
+ fontwide = FONTWIDE * xmag
+
+ # Check for row/col out of bounds.
+ numcol= IM_LEN(im,1)
+ numrow = IM_LEN(im,2)
+
+ if (x <= 0) {
+ call printf ("Warning: Image text deleted, column <= 0.\n")
+ return
+ }
+
+ if (x > numcol - fontwide*numchars) {
+ call printf ("Warning: Image text truncated or deleted\n")
+ numchars = int((numcol - x)/fontwide)
+ if (numchars <= 0)
+ return
+ }
+
+ if ((y <= 0) || (y > numrow - fonthigh)) {
+ call printf ("Warning: Image text deleted, wrong row number.\n")
+ return
+ }
+
+ # For each line of the text (backward).
+ for (i=7; i>=1; i=i-1) {
+ line = y+(8-i)*ymag-1
+
+ do l = 1, ymag {
+
+ # Get and put the line of the image.
+ lineget = imgl2s (im, line+(l-1))
+ lineput = impl2s (im, line+(l-1))
+
+ # Copy input array or the background value to output array.
+ if (zerobgnd == 1) {
+ tshort = bgndvalu
+ call amovks (tshort, Mems[lineput+x-1],
+ fontwide*numchars)
+ } else
+ call amovs (Mems[lineget], Mems[lineput], numcol)
+
+ # Put the font.
+ do ch = 1, numchars {
+ nchar = int(s[ch])
+ call pixbit (nchar, i, pixary)
+ call putpix (pixary, Mems[lineput], numcol,
+ x+(ch-1)*fontwide, value, xmag)
+ }
+ } # End of do on l.
+ }
+end
+
+
+# PUTPIX -- Put one line of one character into the data array.
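+# Each of the five entries in pixary flags whether the corresponding font
+# column is lit; a lit column is replicated xmag times in x.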
+
+procedure putpix (pixary, array, size, position, value, xmag)
+
+int pixary[5] # array of pixels in character
+int size, position # size of data array
+short array[size] # data array in which to put character line
+int value # value to use for character pixels
+int xmag # x-magnification of text
+
+int i, k, x
+
+begin
+ do i = 1, 5 {
+ if (pixary[i] == 1) {
+ x = position + (i-1) * xmag
+ do k = 1, xmag
+ array[x+(k-1)] = value
+ }
+ }
+end
diff --git a/noao/imred/vtel/trim.par b/noao/imred/vtel/trim.par
new file mode 100644
index 00000000..49e6184e
--- /dev/null
+++ b/noao/imred/vtel/trim.par
@@ -0,0 +1,2 @@
+image,s,q,,,,Image name
+threshold,i,q,,0,15,Squibby brightness threshold for limb
diff --git a/noao/imred/vtel/trim.x b/noao/imred/vtel/trim.x
new file mode 100644
index 00000000..8e76489b
--- /dev/null
+++ b/noao/imred/vtel/trim.x
@@ -0,0 +1,75 @@
+include <mach.h>
+include <imhdr.h>
+include "vt.h"
+
+# TRIM -- Trim a full disk image using the squibby brightness template.
+# Leave all the squibby brightness information intact, set data outside the
+# limb to zero.
+
+procedure t_trim()
+
+char image[SZ_FNAME] # image to trim
+int threshold # squibby brightness threshold defining limb
+
+int i, numpix
+pointer im, lgp, lpp
+pointer immap(), imgl2s(), impl2s()
+int clgeti()
+errchk immap, imgl2s, impl2s
+
+begin
+ # Get parameters from the CL.
+ call clgstr ("image", image, SZ_FNAME)
+ threshold = clgeti("threshold")
+
+ # Open image.
+ im = immap (image, READ_WRITE, 0)
+
+ do i = 1, IM_LEN(im,2) {
+ lgp = imgl2s (im, i)
+ lpp = impl2s (im, i)
+ numpix = IM_LEN(im,1)
+ call trimline (Mems[lgp], Mems[lpp], numpix, threshold)
+ }
+
+ # Unmap image.
+ call imunmap (im)
+end
+
+
+# TRIMLINE -- trim line1 and put it into line2.
+
+procedure trimline (line1, line2, numpix, threshold)
+
+short line1[numpix] # input line
+short line2[numpix] # output line
+int numpix # number of pixels in this line
+int threshold # squibby brightness threshold
+
+int i, left, right
+
+begin
+ left = 0
+ right = 0
+
+ do i = 1, numpix {
+ if (and(int(line1[i]),17B) >= threshold) {
+ left = i
+ break
+ } else
+ line2[i] = and(int(line1[i]),17B)
+ }
+
+ if (left != 0)
+ do i = numpix, 1, -1 {
+ if(and(int(line1[i]),17B) >= threshold) {
+ right = i
+ break
+ } else
+ line2[i] = and(int(line1[i]),17B)
+ }
+
+ if (left != 0 && right != 0 && left < right)
+ do i = left, right
+ line2[i] = line1[i]
+end
diff --git a/noao/imred/vtel/trnsfrm.inc b/noao/imred/vtel/trnsfrm.inc
new file mode 100644
index 00000000..b916b126
--- /dev/null
+++ b/noao/imred/vtel/trnsfrm.inc
@@ -0,0 +1,163 @@
+data (trnsfrm[i], i = 1, 10) /56,56,56,56,57,57,57,57,58,58/
+data (trnsfrm[i], i = 11, 20) /58,58,59,59,59,59,60,60,60,60/
+data (trnsfrm[i], i = 21, 30) /61,61,61,61,62,62,62,63,63,63/
+data (trnsfrm[i], i = 31, 40) /63,64,64,64,64,65,65,65,65,66/
+data (trnsfrm[i], i = 41, 50) /66,66,67,67,67,67,68,68,68,68/
+data (trnsfrm[i], i = 51, 60) /69,69,69,70,70,70,70,71,71,71/
+data (trnsfrm[i], i = 61, 70) /71,72,72,72,73,73,73,73,74,74/
+data (trnsfrm[i], i = 71, 80) /74,75,75,75,75,76,76,76,77,77/
+data (trnsfrm[i], i = 81, 90) /77,77,78,78,78,79,79,79,79,80/
+data (trnsfrm[i], i = 91, 100) /80,80,81,81,81,82,82,82,82,83/
+data (trnsfrm[i], i = 101, 110) /83,83,84,84,84,85,85,85,85,86/
+data (trnsfrm[i], i = 111, 120) /86,86,87,87,87,88,88,88,89,89/
+data (trnsfrm[i], i = 121, 130) /89,90,90,90,90,91,91,91,92,92/
+data (trnsfrm[i], i = 131, 140) /92,93,93,93,94,94,94,95,95,95/
+data (trnsfrm[i], i = 141, 150) /96,96,96,97,97,97,98,98,98,99/
+data (trnsfrm[i], i = 151, 160) /99,99,100,100,101,101,101,102,102,102/
+data (trnsfrm[i], i = 161, 170) /103,103,103,104,104,104,105,105,106,106/
+data (trnsfrm[i], i = 171, 180) /106,107,107,107,108,108,109,109,109,110/
+data (trnsfrm[i], i = 181, 190) /110,110,111,111,112,112,112,113,113,114/
+data (trnsfrm[i], i = 191, 200) /114,114,115,115,116,116,117,117,117,118/
+data (trnsfrm[i], i = 201, 210) /118,119,119,120,120,120,121,121,122,122/
+data (trnsfrm[i], i = 211, 220) /123,123,124,124,125,125,126,126,127,127/
+data (trnsfrm[i], i = 221, 230) /128,128,129,129,130,130,131,131,132,132/
+data (trnsfrm[i], i = 231, 240) /133,133,134,135,135,136,136,137,138,138/
+data (trnsfrm[i], i = 241, 250) /139,140,140,141,142,143,143,144,145,146/
+data (trnsfrm[i], i = 251, 260) /147,148,149,150,151,153,156,158,160,161/
+data (trnsfrm[i], i = 261, 270) /162,163,164,165,166,167,168,168,169,170/
+data (trnsfrm[i], i = 271, 280) /171,171,172,173,173,174,175,175,176,176/
+data (trnsfrm[i], i = 281, 290) /177,178,178,179,179,180,180,181,181,182/
+data (trnsfrm[i], i = 291, 300) /182,183,183,184,184,185,185,186,186,187/
+data (trnsfrm[i], i = 301, 310) /187,188,188,189,189,190,190,191,191,191/
+data (trnsfrm[i], i = 311, 320) /192,192,193,193,194,194,194,195,195,196/
+data (trnsfrm[i], i = 321, 330) /196,197,197,197,198,198,199,199,199,200/
+data (trnsfrm[i], i = 331, 340) /200,201,201,201,202,202,202,203,203,204/
+data (trnsfrm[i], i = 341, 350) /204,204,205,205,205,206,206,207,207,207/
+data (trnsfrm[i], i = 351, 360) /208,208,208,209,209,209,210,210,210,211/
+data (trnsfrm[i], i = 361, 370) /211,212,212,212,213,213,213,214,214,214/
+data (trnsfrm[i], i = 371, 380) /215,215,215,216,216,216,217,217,217,218/
+data (trnsfrm[i], i = 381, 390) /218,218,219,219,219,220,220,220,221,221/
+data (trnsfrm[i], i = 391, 400) /221,221,222,222,222,223,223,223,224,224/
+data (trnsfrm[i], i = 401, 410) /224,225,225,225,226,226,226,226,227,227/
+data (trnsfrm[i], i = 411, 420) /227,228,228,228,229,229,229,229,230,230/
+data (trnsfrm[i], i = 421, 430) /230,231,231,231,232,232,232,232,233,233/
+data (trnsfrm[i], i = 431, 440) /233,234,234,234,234,235,235,235,236,236/
+data (trnsfrm[i], i = 441, 450) /236,236,237,237,237,238,238,238,238,239/
+data (trnsfrm[i], i = 451, 460) /239,239,240,240,240,240,241,241,241,241/
+data (trnsfrm[i], i = 461, 470) /242,242,242,243,243,243,243,244,244,244/
+data (trnsfrm[i], i = 471, 480) /244,245,245,245,246,246,246,246,247,247/
+data (trnsfrm[i], i = 481, 490) /247,247,248,248,248,248,249,249,249,250/
+data (trnsfrm[i], i = 491, 500) /250,250,250,251,251,251,251,252,252,252/
+data (trnsfrm[i], i = 501, 510) /252,253,253,253,253,254,254,254,254,255/
+data (trnsfrm[i], i = 511, 513) /255,255,255/
+
+data (lkup10830[i], i = 1, 10) /50,50,50,50,50,50,50,50,50,50/
+data (lkup10830[i], i = 11, 20) /50,50,50,50,50,50,50,50,50,50/
+data (lkup10830[i], i = 21, 30) /50,50,50,50,50,50,50,50,50,50/
+data (lkup10830[i], i = 31, 40) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 41, 50) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 51, 60) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 61, 70) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 71, 80) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 81, 90) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 91, 100) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 101, 110) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 111, 120) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 121, 130) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 131, 140) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 141, 150) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 151, 160) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 161, 170) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 171, 180) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 181, 190) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 191, 200) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 201, 210) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 211, 220) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 221, 230) /51,51,51,51,51,51,51,51,51,51/
+data (lkup10830[i], i = 231, 240) /51,51,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 241, 250) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 251, 260) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 261, 270) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 271, 280) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 281, 290) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 291, 300) /52,52,52,52,52,52,52,52,52,52/
+data (lkup10830[i], i = 301, 310) /52,52,52,53,53,53,53,53,53,53/
+data (lkup10830[i], i = 311, 320) /53,53,53,53,53,53,53,53,53,53/
+data (lkup10830[i], i = 321, 330) /53,53,53,53,53,53,53,53,53,53/
+data (lkup10830[i], i = 331, 340) /53,53,53,53,53,53,53,53,53,54/
+data (lkup10830[i], i = 341, 350) /54,54,54,54,54,54,54,54,54,54/
+data (lkup10830[i], i = 351, 360) /54,54,54,54,54,54,54,54,54,54/
+data (lkup10830[i], i = 361, 370) /54,54,54,54,54,55,55,55,55,55/
+data (lkup10830[i], i = 371, 380) /55,55,55,55,55,55,55,55,55,55/
+data (lkup10830[i], i = 381, 390) /55,55,55,55,55,55,55,56,56,56/
+data (lkup10830[i], i = 391, 400) /56,56,56,56,56,56,56,56,56,56/
+data (lkup10830[i], i = 401, 410) /56,56,56,56,56,57,57,57,57,57/
+data (lkup10830[i], i = 411, 420) /57,57,57,57,57,57,57,57,57,57/
+data (lkup10830[i], i = 421, 430) /57,57,58,58,58,58,58,58,58,58/
+data (lkup10830[i], i = 431, 440) /58,58,58,58,58,58,58,59,59,59/
+data (lkup10830[i], i = 441, 450) /59,59,59,59,59,59,59,59,59,59/
+data (lkup10830[i], i = 451, 460) /59,60,60,60,60,60,60,60,60,60/
+data (lkup10830[i], i = 461, 470) /60,60,60,60,60,61,61,61,61,61/
+data (lkup10830[i], i = 471, 480) /61,61,61,61,61,61,61,62,62,62/
+data (lkup10830[i], i = 481, 490) /62,62,62,62,62,62,62,62,62,63/
+data (lkup10830[i], i = 491, 500) /63,63,63,63,63,63,63,63,63,63/
+data (lkup10830[i], i = 501, 510) /63,64,64,64,64,64,64,64,64,64/
+data (lkup10830[i], i = 511, 520) /64,64,65,65,65,65,65,65,65,65/
+data (lkup10830[i], i = 521, 530) /65,65,65,66,66,66,66,66,66,66/
+data (lkup10830[i], i = 531, 540) /66,66,66,67,67,67,67,67,67,67/
+data (lkup10830[i], i = 541, 550) /67,67,67,67,68,68,68,68,68,68/
+data (lkup10830[i], i = 551, 560) /68,68,68,68,69,69,69,69,69,69/
+data (lkup10830[i], i = 561, 570) /69,69,69,70,70,70,70,70,70,70/
+data (lkup10830[i], i = 571, 580) /70,70,70,71,71,71,71,71,71,71/
+data (lkup10830[i], i = 581, 590) /71,71,72,72,72,72,72,72,72,72/
+data (lkup10830[i], i = 591, 600) /72,73,73,73,73,73,73,73,73,73/
+data (lkup10830[i], i = 601, 610) /74,74,74,74,74,74,74,74,74,75/
+data (lkup10830[i], i = 611, 620) /75,75,75,75,75,75,75,75,76,76/
+data (lkup10830[i], i = 621, 630) /76,76,76,76,76,76,77,77,77,77/
+data (lkup10830[i], i = 631, 640) /77,77,77,77,78,78,78,78,78,78/
+data (lkup10830[i], i = 641, 650) /78,78,78,79,79,79,79,79,79,79/
+data (lkup10830[i], i = 651, 660) /79,80,80,80,80,80,80,80,80,81/
+data (lkup10830[i], i = 661, 670) /81,81,81,81,81,81,81,82,82,82/
+data (lkup10830[i], i = 671, 680) /82,82,82,82,82,83,83,83,83,83/
+data (lkup10830[i], i = 681, 690) /83,83,83,84,84,84,84,84,84,84/
+data (lkup10830[i], i = 691, 700) /85,85,85,85,85,85,85,85,86,86/
+data (lkup10830[i], i = 701, 710) /86,86,86,86,86,86,87,87,87,87/
+data (lkup10830[i], i = 711, 720) /87,87,87,88,88,88,88,88,88,88/
+data (lkup10830[i], i = 721, 730) /88,89,89,89,89,89,89,89,90,90/
+data (lkup10830[i], i = 731, 740) /90,90,90,90,90,91,91,91,91,91/
+data (lkup10830[i], i = 741, 750) /91,91,91,92,92,92,92,92,92,92/
+data (lkup10830[i], i = 751, 760) /93,93,93,93,93,93,93,94,94,94/
+data (lkup10830[i], i = 761, 770) /94,94,94,95,95,95,95,95,95,95/
+data (lkup10830[i], i = 771, 780) /96,96,96,96,96,96,97,97,97,97/
+data (lkup10830[i], i = 781, 790) /97,97,98,98,98,98,98,98,99,99/
+data (lkup10830[i], i = 791, 800) /99,99,99,99,100,100,100,100,100,101/
+data (lkup10830[i], i = 801, 810) /101,101,101,101,101,102,102,102,102,102/
+data (lkup10830[i], i = 811, 820) /103,103,103,103,103,104,104,104,104,104/
+data (lkup10830[i], i = 821, 830) /105,105,105,105,106,106,106,106,106,107/
+data (lkup10830[i], i = 831, 840) /107,107,107,108,108,108,108,108,109,109/
+data (lkup10830[i], i = 841, 850) /109,109,110,110,110,110,111,111,111,111/
+data (lkup10830[i], i = 851, 860) /112,112,112,113,113,113,113,114,114,114/
+data (lkup10830[i], i = 861, 870) /114,115,115,115,116,116,116,116,117,117/
+data (lkup10830[i], i = 871, 880) /117,118,118,118,118,119,119,119,120,120/
+data (lkup10830[i], i = 881, 890) /120,121,121,121,122,122,122,123,123,123/
+data (lkup10830[i], i = 891, 900) /124,124,124,125,125,125,126,126,126,127/
+data (lkup10830[i], i = 901, 910) /127,128,128,128,129,129,129,130,130,131/
+data (lkup10830[i], i = 911, 920) /131,131,132,132,132,133,133,134,134,135/
+data (lkup10830[i], i = 921, 930) /135,135,136,136,137,137,137,138,138,139/
+data (lkup10830[i], i = 931, 940) /139,140,140,141,141,141,142,142,143,143/
+data (lkup10830[i], i = 941, 950) /144,144,145,145,146,146,147,147,148,148/
+data (lkup10830[i], i = 951, 960) /149,149,150,150,151,151,152,152,153,153/
+data (lkup10830[i], i = 961, 970) /154,154,155,155,156,156,157,158,158,159/
+data (lkup10830[i], i = 971, 980) /159,160,160,161,162,162,163,163,164,164/
+data (lkup10830[i], i = 981, 990) /165,166,166,167,167,168,169,169,170,171/
+data (lkup10830[i], i = 991, 1000) /171,172,173,173,174,174,175,176,176,177/
+data (lkup10830[i], i = 1001, 1010) /178,178,179,180,180,181,182,183,183,184/
+data (lkup10830[i], i = 1011, 1020) /185,185,186,187,187,188,189,190,190,191/
+data (lkup10830[i], i = 1021, 1030) /192,193,193,194,195,196,196,197,198,199/
+data (lkup10830[i], i = 1031, 1040) /200,200,201,202,203,203,204,205,206,207/
+data (lkup10830[i], i = 1041, 1050) /208,208,209,210,211,212,213,213,214,215/
+data (lkup10830[i], i = 1051, 1060) /216,217,218,219,220,220,221,222,223,224/
+data (lkup10830[i], i = 1061, 1070) /225,226,227,228,229,230,230,231,232,233/
+data (lkup10830[i], i = 1071, 1080) /234,235,236,237,238,239,240,241,242,243/
+data (lkup10830[i], i = 1081, 1090) /244,245,246,247,248,249,250,251,252,253/
+data (lkup10830[i], i = 1091, 1091) /254/
diff --git a/noao/imred/vtel/unwrap.par b/noao/imred/vtel/unwrap.par
new file mode 100644
index 00000000..1a1d3504
--- /dev/null
+++ b/noao/imred/vtel/unwrap.par
@@ -0,0 +1,9 @@
+image,s,a,,,,image
+outimage,s,a,,,,outimage
+threshold1,i,h,128,,,threshold for first unwrap
+wrapval1,i,h,256,,,wrap displacement for first unwrap
+threshold2,i,h,128,,,threshold for second unwrap
+wrapval2,i,h,256,,,wrap displacement for second unwrap
+cstart,i,h,2,,,column start
+step,i,h,5,,,number of steps to take
+verbose,b,h,yes,,,verbose flag
diff --git a/noao/imred/vtel/unwrap.x b/noao/imred/vtel/unwrap.x
new file mode 100644
index 00000000..a753ddf4
--- /dev/null
+++ b/noao/imred/vtel/unwrap.x
@@ -0,0 +1,293 @@
+include <mach.h>
+include <imhdr.h>
+
+define MAXBADLINES 20 # maximum number of bad lines
+define BADTHRESH 1000 # threshold for bad lines
+define FIXWIDTH 20 # Width of average for fixline
+
+# UNWRAP -- Filter an IRAF image. This filter checks for binary wraparound
+# in IRAF images. The algorithm is described in detail in the help page.
+# The program accepts templates for both input and output image lists.
+
+procedure t_unwrap()
+
+char image[SZ_FNAME] # input image template
+char outimage[SZ_FNAME] # output image template
+int threshold1 # threshold value for first unwrap
+int threshold2 # threshold value for second unwrap
+int wrapval1 # wrapvalue for first unwrap
+int wrapval2 # wrapvalue for second unwrap
+int cstart # column to start on
+int step # number of steps to perform
+bool verbose # verbose flag
+
+int i, j
+int listin, listout
+int length, nlines
+int badlines[MAXBADLINES]
+int diff, nbad
+char tempimage[SZ_FNAME]
+pointer im, imout, lgp, lgp2, lpp, cck, sp
+
+bool clgetb()
+int imtopenp(), imtlen(), imtgetim(), clgeti()
+pointer immap(), imgl2s(), impl2s()
+errchk immap, imgl2s, impl2s
+
+begin
+ # Get parameters from the CL.
+ listin = imtopenp ("image")
+ listout = imtopenp ("outimage")
+ threshold1 = clgeti("threshold1")
+ wrapval1 = clgeti("wrapval1")
+ threshold2 = clgeti("threshold2")
+ wrapval2 = clgeti("wrapval2")
+ cstart = clgeti("cstart")
+ step = clgeti("step")
+ verbose = clgetb("verbose")
+
+ if (verbose) {
+ call printf ("\n\nUNWRAP: ")
+ call printf ("threshold1 = %d\n")
+ call pargi (threshold1)
+ call printf ("\twrapval1 = %d\n")
+ call pargi (wrapval1)
+ call printf ("\tthreshold2 = %d\n")
+ call pargi (threshold2)
+ call printf ("\twrapval2 = %d\n")
+ call pargi (wrapval2)
+ call printf ("\tcstart = %d\n")
+ call pargi (cstart)
+ call printf ("\tstep = %d\n\n")
+ call pargi (step)
+ call flush (STDOUT)
+ }
+
+ # Check the number of elements.
+ if (imtlen (listin) != imtlen (listout)) {
+ call imtclose (listin)
+ call imtclose (listout)
+ call error (1, "Wrong number of elements in the operand lists")
+ }
+
+ # Get the next images from the lists.
+ while (imtgetim (listin, image, SZ_FNAME) != EOF) {
+ if (imtgetim (listout, outimage, SZ_FNAME) != EOF) {
+
+ if (verbose) {
+ # Report the input and output file names.
+ call printf ("\tUnwrapping %s into %s. ")
+ call pargstr (image)
+ call pargstr (outimage)
+ call flush (STDOUT)
+ }
+
+ # Open images.
+ iferr {
+ im = immap (image, READ_WRITE, 0)
+ } then {
+ call eprintf ("Cannot open image %s.\n")
+ call pargstr (image)
+ next
+ }
+
+ call xt_mkimtemp (image, outimage, tempimage, SZ_FNAME)
+
+ iferr {
+ imout = immap (outimage, NEW_COPY, im)
+ } then {
+ call eprintf ("Cannot open image %s, (already exists?).\n")
+ call pargstr (outimage)
+ next
+ }
+
+ length = IM_LEN(im,1)
+ nlines = IM_LEN(im,2)
+
+ # Set up the column check array, then unwrap line by line.
+ call smark (sp)
+ call salloc (cck, nlines, TY_INT)
+ call amovks (0, Memi[cck], nlines)
+ do i = 1, nlines {
+ lgp = imgl2s (im, i)
+ lpp = impl2s (imout, i)
+ call unwrapline (Mems[lgp], Mems[lpp], cck, length,
+ threshold1, wrapval1, threshold2, wrapval2, cstart,
+ step, i)
+ }
+
+ # Step 5 is the final step. (fixline)
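+ # Step 5 uses the column check array filled in by unwrapline: each
+ # entry holds the mean of the last FIXWIDTH pixels of a line, and a
+ # jump of more than BADTHRESH between successive entries marks that
+ # line as bad so it can be repaired below from its neighbours.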
+ nbad = 0
+ if (step == 5) {
+ # Analyze the column check array, look for wraps.
+ do i = 2, nlines {
+ diff = Memi[cck+i-1] - Memi[cck+i-2]
+ if (abs(diff) > BADTHRESH) {
+ # Mark this line bad.
+ nbad = nbad + 1
+ if (nbad > MAXBADLINES)
+ break
+ badlines[nbad] = i
+ }
+ }
+ }
+
+ # If the number of bad lines is <= MAXBADLINES, fix them, else leave them alone.
+ if (nbad <= MAXBADLINES && nbad > 0) {
+ do i = 1, nbad {
+
+ # Get the image lines above and below the bad line and open the
+ # bad line for writing, then average the two neighbouring lines
+ # and store the result in the bad line.
+
+ if (badlines[i] != 1 && badlines[i] != nlines) {
+ if ((badlines[i+1] - badlines[i]) == 1) {
+ lgp = imgl2s (imout, badlines[i]-1)
+ lgp2 = imgl2s (imout, badlines[i]+1)
+ lpp = impl2s (imout, badlines[i])
+ do j = 1, length {
+ Mems[lpp+j-1] = int((real(Mems[lgp+j-1]) +
+ real(Mems[lgp2+j-1]))/2. + .5)
+ }
+ }
+ }
+ }
+ }
+
+ if (verbose) {
+ call printf ("number of bad lines = %d\n")
+ call pargi (nbad)
+ do i = 1, nbad {
+ call printf ("\tbadlines[%d] = %d\n")
+ call pargi (i)
+ call pargi (badlines[i])
+ }
+ call printf ("\n")
+ call flush (STDOUT)
+ }
+
+ # Unmap images.
+ call imunmap (im)
+ call imunmap (imout)
+ call xt_delimtemp (outimage, tempimage)
+ call sfree (sp)
+
+ } # End of if (not EOF)
+ } # End of while loop on input images
+
+ call imtclose (listin)
+ call imtclose (listout)
+end
+
+
+# UNWRAPLINE -- Unwrap a line of the image.
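+# The work proceeds in up to five steps, stopping after the step selected
+# by the 'step' parameter: (1) unwrap the raw line with threshold1 and
+# wrapval1, (2) take first differences, (3) unwrap the differences with
+# threshold2 and wrapval2, (4) reconstruct by cumulative summation, and
+# (5) record the mean of the last FIXWIDTH pixels in the column check
+# array for the bad-line detection done by the caller.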
+
+procedure unwrapline (line1, line2, cck, numpix, threshold1, wrapval1,
+ threshold2, wrapval2, cstart, step, whichline)
+
+short line1[numpix] # input line
+short line2[numpix] # output line
+pointer cck # pointer to array for column check
+int numpix # number of pixels per line
+int threshold1 # unwrap threshold for first unwrap
+int wrapval1 # unwrap value for first unwrap
+int threshold2 # unwrap threshold for second unwrap
+int wrapval2 # unwrap value for second unwrap
+int cstart # column to start on
+int step # steps to complete
+int whichline # which line this is we are unwrapping
+
+pointer tl1, tl2, tl3 # pointers of temporary arrays
+pointer sp # stack pointer
+int i, diff, sum
+short wrap # wrap number
+
+begin
+ # Mark the stack and allocate the temporary arrays.
+ call smark (sp)
+ call salloc (tl1, numpix, TY_SHORT)
+ call salloc (tl2, numpix, TY_SHORT)
+ call salloc (tl3, numpix, TY_SHORT)
+
+ # Initialize wrap.
+ wrap = 0
+
+ # Copy the input line into the output line and the temporary arrays.
+ call amovs (line1, line2, numpix)
+ call amovs (line1, Mems[tl1], numpix)
+ call amovs (line1, Mems[tl2], numpix)
+ call amovs (line1, Mems[tl3], numpix)
+
+ # Check the image width, do various things if the image is too small.
+
+ # Too small for anything.
+ if (numpix <= 4) {
+ call sfree (sp)
+ return
+ }
+
+ # Too small for step 5 (fixline).
+ if (numpix <= FIXWIDTH && step == 5)
+ step = 4
+
+ # Unwrap1 (step 1).
+ Mems[tl1+cstart-1] = line1[cstart]
+ do i = cstart+1, numpix {
+ diff = line1[i] - line1[i-1]
+ if (diff < -threshold1)
+ wrap = wrap + 1
+ if (diff > threshold1)
+ wrap = wrap - 1
+
+ Mems[tl1+i-1] = line1[i] + wrap * wrapval1
+ }
+ if (step == 1) {
+ call amovs (Mems[tl1], line2, numpix)
+ call sfree (sp)
+ return
+ }
+
+ # If the user wants it, step 2 (dif).
+ do i = cstart, numpix
+ Mems[tl2+i-1] = Mems[tl1+i-1] - Mems[tl1+i-2]
+
+ if (step == 2) {
+ call amovs (Mems[tl2], line2, numpix)
+ call sfree (sp)
+ return
+ }
+
+ # If the user wants it, step 3 (unwrap2).
+ wrap = 0
+ line2[cstart] = Mems[tl2+cstart-1]
+ do i = cstart+1, numpix {
+ diff = Mems[tl2+i-1] - Mems[tl2+i-2]
+ if (diff < -threshold2)
+ wrap = wrap + 1
+ if (diff > threshold2)
+ wrap = wrap - 1
+
+ line2[i] = Mems[tl2+i-1] + wrap * wrapval2
+ }
+ if (step == 3) {
+ call sfree (sp)
+ return
+ }
+
+ # If the user wants it, step 4 (reconstruct).
+ do i = cstart, numpix
+ line2[i] = line2[i-1] + line2[i]
+
+ if (step == 4) {
+ call sfree (sp)
+ return
+ }
+
+ # Again, if the user wants it, save data for step 5, (fixline).
+ sum = 0
+ do i = numpix-FIXWIDTH+1, numpix
+ sum = sum + line2[i]
+ Memi[cck+whichline-1] = int(real(sum)/real(FIXWIDTH) + .5)
+
+ call sfree (sp)
+end
diff --git a/noao/imred/vtel/vt.h b/noao/imred/vtel/vt.h
new file mode 100644
index 00000000..73d9c22a
--- /dev/null
+++ b/noao/imred/vtel/vt.h
@@ -0,0 +1,73 @@
+# Vacuum_telescope analysis package header file.
+
+# General defines common to most of the programs in this package.
+define DIM_VTFD 2048 # full disk image = 2048 x 2048 array
+define SZB_SHORT SZ_SHORT*SZB_CHAR # number of bytes per short integer
+define SZB_REAL SZ_REAL*SZB_CHAR # number of bytes per real
+define THRESHOLD 4 # limb cutoff value, squib brightness
+
+# Defines related to the tape format.
+define SZ_VTHDR 20 # number of 16-bit words in vt header
+define SZ_VTREC 5120 # number of 16-bit words in vt record
+define NUM_VTREC 750 # number of records in full disk image
+
+# Ellipse structure defines.
+define LEN_ELSTRUCT 4 # real el[LEN_ELSTRUCT]
+
+define E_XCENTER $1[1] # x-coord of center of limb ellipse
+define E_YCENTER $1[2] # y-coord of center of limb ellipse
+define E_XSEMIDIAMETER $1[3] # length of x semiaxis of limb ellipse
+define E_YSEMIDIAMETER $1[4] # length of y semiaxis of limb ellipse
+
+# Defines for readvt, etc.
+define SWTH_HIGH 512 # height of each swath
+define SWTHWID_14 1700 # width of swaths 1 and 4
+define SWTHWID_23 2048 # width of swaths 2 and 3
+define HALF_DIF 174 # one half of difference in swath widths
+define SZ_TABLE 8192 # length of lookup table (16-bit words)
+define NUM_SRSTR 16 # total # of subrasters in full disk
+define LEN_HDRDAT 10 # length of header data
+define NUM_SRSTR_X 4 # number of subrasters in x direction
+define NUM_SRSTR_Y 4 # number of subrasters in y direction
+define SRSTR_WID 512 # width of each subraster
+define IS_DATA 1 # subswath data indicator
+define DTSTRING 100 # length of date/time string
+
+# Defines for rmap, etc.
+define DIM_IN_RAS 150 # y dimension for input image subraster
+define DIM_SQUAREIM 180 # x or y dimension of daily projection
+
+# Defines for merge, etc.
+define DIM_XCARMAP 360 # x dimension of carrington map
+define SZ_WTBL 180 # size of weight table for merge
+
+# Mscan text (pixelfont) structure.
+define LEN_TXSTRUCT 10
+
+define TX_XPOS Memi[$1] # x position of start of text
+define TX_YPOS Memi[$1+1] # y position of start of text
+define TX_VALUE Memi[$1+2] # value to write text with
+define PRINT_TEXT Memi[$1+3] # to text, or not to text (1=yes,0=no)
+define ZERO_BGND Memi[$1+4] # fill background w/ VALU? (1=yes,0=no)
+define BGND_VALU Memi[$1+5] # background value to use
+
+# Vacuum telescope header structure.
+define VT_LENHSTRUCT 10
+
+define VT_HMONTH Memi[$1] # month of observation (1-12)
+define VT_HDAY Memi[$1+1] # day of observation (1-31)
+define VT_HYEAR Memi[$1+2] # year (two digits)
+define VT_HTIME Memi[$1+3] # time (seconds since midnight)
+define VT_HWVLNGTH Memi[$1+4] # wavelength (angstroms)
+define VT_HOBSTYPE Memi[$1+5] # observation type (0,1,2,3,or 4)
+define VT_HAVINTENS Memi[$1+6] # average intensity
+define VT_HNUMCOLS Memi[$1+7] # number of columns
+define VT_HINTGPIX Memi[$1+8] # integrations per pixel
+define VT_HREPTIME Memi[$1+9] # repetition time
+
+# I/O buffer structure.
+define VT_LENBSTRUCT 3
+
+define VT_BUFP Memi[$1] # pointer, top of i/o buf
+define VT_BP Memi[$1+1] # pointer, current position in i/o buf
+define VT_BUFBOT Memi[$1+2] # pointer, current bottom of i/o buf
diff --git a/noao/imred/vtel/vtblink.cl b/noao/imred/vtel/vtblink.cl
new file mode 100644
index 00000000..d9e51a61
--- /dev/null
+++ b/noao/imred/vtel/vtblink.cl
@@ -0,0 +1,150 @@
+#{ VTBLINK -- Blink successive frames of daily grams to check registration.
+
+# imname1,s,a,,,,Name of first image
+# imname2,s,a,,,,Name of next image
+# z1,r,h,-3000.0,,,Minimum graylevel to be displayed.
+# z2,r,h,3000.0,,,Maximum graylevel to be displayed.
+
+{
+ real zz1, zz2, offset, currentoffset
+ char im1name, im2name, framelog[4]
+ int im1long, im2long, currentframe, offscreenflag
+
+ # initialize
+ print (" ")
+ print (" ")
+ print ("vtblink vtblink vtblink vtblink vtblink vtblink vtblink")
+ print (" ")
+ currentframe = 1
+ offscreenflag = 0
+ currentoffset = .72 # Start at the right side of the screen.
+ framelog[1] = "none"
+ framelog[2] = "none"
+ framelog[3] = "none"
+ framelog[4] = "none"
+
+ # Get the gray scale.
+ zz1 = z1
+ zz2 = z2
+
+ # Get the first frame from the user, display it, allow user to window.
+ im1name = imname1
+ if (im1name == "end") {
+ bye
+ }
+ while (!access(im1name//".imh") && im1name != "end") {
+ print (im1name, "not accessible, try again")
+ im1name = imname1
+ if (im1name == "end") {
+ bye
+ }
+ }
+ imgets (im1name, "L_ZERO")
+ im1long = real(imgets.value)
+ print ("Longitude of first image is ", im1long)
+ print ("Displaying frame.")
+ display (im1name, currentframe, xcenter=currentoffset, zrange=no,
+ zscale=no, z1=zz1, z2=zz2)
+ framelog[currentframe] = im1name
+ frame (currentframe)
+ print ("Now, please window this frame for the desired color table.")
+ window
+
+ # Make all the color tables of the other 3 frames the same as this.
+ print ("Equalizing color tables of 4 frames, Please wait.")
+ lumatch (2, currentframe)
+ lumatch (3, currentframe)
+ lumatch (4, currentframe)
+
+ # Get the next frame from the user.
+ im2name = imname2
+ while (im2name == "stat") {
+ print ("Frame 1 contains image ", framelog[1])
+ print ("Frame 2 contains image ", framelog[2])
+ print ("Frame 3 contains image ", framelog[3])
+ print ("Frame 4 contains image ", framelog[4])
+ im2name = imname2
+ }
+ if (im2name == "end") {
+ bye
+ }
+ while (!access(im2name//".imh") && im2name != "end") {
+ print (im2name, "not accessible, try again")
+ im2name = imname2
+ if (im2name == "end") {
+ bye
+ }
+ }
+ imgets (im2name, "L_ZERO")
+ im2long = real(imgets.value)
+ print ("Longitude of this image is ", im2long)
+
+ # While the user does not enter 'end' for the image name, keep going.
+ # Also check the offscreenflag and exit if it becomes set.
+ while (im2name != 'end' && offscreenflag != 1) {
+
+ # Calculate the offset. Subsequent images in general have smaller
+ # longitudes, that is, longitude decreases with time.
+ # If the new image has a larger longitude then fix up offset.
+ if (im1long < im2long) {
+ offset = real((im2long - 360) - im1long)/512.
+ } else {
+ offset = real(im2long - im1long)/512.
+ }
+
+ # If we are getting too close to the left side, tell the user to restart.
+ if ((currentoffset+offset) <= .18) {
+ print("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
+ print("* The next image would overlap the edge of the *")
+ print("* screen. Please restart the program with the last *")
+ print("* image. *")
+ print("*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*")
+ offscreenflag = 1
+ }
+
+ # Display the next image and blink it with the previously displayed
+ # image.
+ if (offscreenflag != 1) {
+ print ("Displaying frame.")
+ display (im2name, mod(currentframe,4)+1,
+ xcenter=currentoffset+offset, zscale=no, zrange=no,
+ z1=zz1, z2=zz2)
+ framelog[mod(currentframe,4)+1] = im2name
+
+ # Return the user to the cl so s/he can do whatever s/he wants.
+ print(" ")
+ print("You are now in the cl, type 'bye' to return to vtlbink")
+ cl()
+ print(" ")
+
+ # Update currentframe and print it out, update the offset.
+ currentframe = mod(currentframe,4)+1
+ print ("The next frame to be used for display is frame ",
+ mod(currentframe,4)+1)
+ currentoffset += offset
+
+ # Move image2 to image1 and then get a new image2 and loop back.
+ im1name = im2name
+ im1long = im2long
+ im2name = imname2
+ while (im2name == "stat") {
+ print ("Frame 1 contains image ", framelog[1])
+ print ("Frame 2 contains image ", framelog[2])
+ print ("Frame 3 contains image ", framelog[3])
+ print ("Frame 4 contains image ", framelog[4])
+ im2name = imname2
+ }
+ while (!access(im2name//".imh") && im2name != "end") {
+ print (im2name, "not accessible, try again")
+ im2name = imname2
+ if (im2name == "end") {
+ bye
+ }
+ }
+ if (im2name != "end") {
+ imgets (im2name, "L_ZERO")
+ im2long = real(imgets.value)
+ }
+ }
+ }
+}
diff --git a/noao/imred/vtel/vtblink.par b/noao/imred/vtel/vtblink.par
new file mode 100644
index 00000000..def7c1eb
--- /dev/null
+++ b/noao/imred/vtel/vtblink.par
@@ -0,0 +1,4 @@
+imname1,s,a,,,,Name of first image
+imname2,s,a,,,,Name of next image
+z1,r,h,-3000.0,,,Minimum graylevel to be displayed.
+z2,r,h,3000.0,,,Maximum graylevel to be displayed.
diff --git a/noao/imred/vtel/vtel.cl b/noao/imred/vtel/vtel.cl
new file mode 100644
index 00000000..020a6455
--- /dev/null
+++ b/noao/imred/vtel/vtel.cl
@@ -0,0 +1,38 @@
+#{ VTEL -- Vacuum_telescope package.
+
+# load necessary packages
+images
+tv
+
+set vtel = "imred$vtel/"
+
+package vtel
+
+task readvt,
+ writevt,
+ unwrap,
+ quickfit,
+ getsqib,
+ putsqib,
+ mscan,
+ rmap,
+ merge,
+ destreak,
+ trim,
+ vtexamine,
+ tcopy,
+ pimtext,
+ syndico,
+ dicoplot = "vtel$x_vtel.e"
+
+# scripts
+
+task vtblink = "vtel$vtblink.cl"
+task writetape = "vtel$writetape.cl"
+task destreak5 = "vtel$destreak5.cl"
+task fitslogr = "vtel$fitslogr.cl"
+task mrotlogr = "vtel$mrotlogr.cl"
+task makeimages = "vtel$makeimages.cl"
+task makehelium = "vtel$makehelium.cl"
+
+clbye()
diff --git a/noao/imred/vtel/vtel.hd b/noao/imred/vtel/vtel.hd
new file mode 100644
index 00000000..5a9871ab
--- /dev/null
+++ b/noao/imred/vtel/vtel.hd
@@ -0,0 +1,29 @@
+# Help directory for the VACUUM package.
+
+$doc = "./doc/"
+
+vtel men=vtel$vtel.men, src=vtel$vtel.cl
+destreak hlp=doc$destreak.hlp, src=vtel$destreak.x
+destreak5 hlp=doc$destreak5.hlp, src=vtel$destreak5.cl
+readvt hlp=doc$readvt.hlp, src=vtel$readvt.x
+writevt hlp=doc$writevt.hlp, src=vtel$writevt.x
+rmap hlp=doc$rmap.hlp, src=vtel$rmap.x
+vtblink hlp=doc$vtblink.hlp, src=vtel$vtblink.cl
+quickfit hlp=doc$quickfit.hlp, src=vtel$quickfit.x
+merge hlp=doc$merge.hlp, src=vtel$merge.x
+dicoplot hlp=doc$dicoplot.hlp, src=vtel$dicoplot.x
+unwrap hlp=doc$unwrap.hlp, src=vtel$unwrap.x
+getsqib hlp=doc$getsqib.hlp, src=vtel$getsqib.x
+putsqib hlp=doc$putsqib.hlp, src=vtel$putsqib.x
+trim hlp=doc$trim.hlp, src=vtel$trim.x
+mscan hlp=doc$mscan.hlp, src=vtel$mscan.x
+vtexamine hlp=doc$vtexamine.hlp, src=vtel$vtexamine.x
+tcopy hlp=doc$tcopy.hlp, src=vtel$tcopy.x
+pimtext hlp=doc$pimtext.hlp, src=vtel$pimtext.x
+fitslogr hlp=doc$fitslogr.hlp, src=vtel$fitslogr.cl
+mrotlogr hlp=doc$mrotlogr.hlp, src=vtel$mrotlogr.cl
+makeimages hlp=doc$makeimages.hlp, src=vtel$makeimages.cl
+makehelium hlp=doc$makehelium.hlp, src=vtel$makehelium.cl
+writetape hlp=doc$writetape.hlp, src=vtel$writetape.cl
+syndico hlp=doc$syndico.hlp, src=vtel$syndico.x
+revisions sys=Revisions
diff --git a/noao/imred/vtel/vtel.men b/noao/imred/vtel/vtel.men
new file mode 100644
index 00000000..df98fc59
--- /dev/null
+++ b/noao/imred/vtel/vtel.men
@@ -0,0 +1,23 @@
+ destreak - Destreak He 10830 grams.
+ destreak5 - First pass processing CL script for 10830 grams.
+ dicoplot - Make dicomed plots of carrington maps.
+ fitslogr - Make a log of certain header parameters from a FITS tape.
+ getsqib - Extract the squibby brightness image from a full disk scan.
+ makehelium - Cl script for processing destreaked 10830 grams (second pass).
+ makeimages - Cl script for processing magnetograms into projected maps.
+ merge - Merge daily grams into a Carrington map.
+ mrotlogr - Log some header parameters from a FITS rotation map tape.
+ mscan - Read all sector scans on a tape and put them into images.
+ pimtext - Put text directly into images using a pixel font.
+ putsqib - Merge a squibby brightness image into a full disk image.
+ quickfit - Fit an ellipse to the solar limb.
+ readvt - Read a full disk tape and produce an IRAF image.
+ rmap - Map a full disk image into a 180 by 180 flat image.
+ syndico - Make dicomed print of daily grams 18 cm across.
+ tcopy - Tape to tape copy routine.
+ trim - Set all pixels outside the limb to 0.0 (use sqib for limb).
+ unwrap - Remove effects of data wraparound on continuum scans.
+ vtblink - Blink daily grams on the IIS to check for registration.
+ vtexamine - Examine a vacuum telescope tape, print headers and profile.
+ writetape - Cl script to write 5 full disk grams to tape.
+ writevt - Write an IRAF image to tape in vacuum telescope format.
diff --git a/noao/imred/vtel/vtel.par b/noao/imred/vtel/vtel.par
new file mode 100644
index 00000000..dde78dd5
--- /dev/null
+++ b/noao/imred/vtel/vtel.par
@@ -0,0 +1 @@
+version,s,h,"8Jun87"
diff --git a/noao/imred/vtel/vtexamine.par b/noao/imred/vtel/vtexamine.par
new file mode 100644
index 00000000..39283d0e
--- /dev/null
+++ b/noao/imred/vtel/vtexamine.par
@@ -0,0 +1,3 @@
+input,s,q,,,,Input file descriptor
+headers,b,h,yes,,,Print out header data
+files,s,q,,,,List of files to be examined
diff --git a/noao/imred/vtel/vtexamine.x b/noao/imred/vtel/vtexamine.x
new file mode 100644
index 00000000..2b482bbe
--- /dev/null
+++ b/noao/imred/vtel/vtexamine.x
@@ -0,0 +1,195 @@
+include <error.h>
+include <fset.h>
+include <printf.h>
+include <mach.h>
+include "vt.h"
+
+define MAX_RANGES 100
+
+# VTEXAMINE -- Examine a vacuum telescope tape. Decode and print the
+# header and tell the user info about number and length of records
+# on the tape.
+
+procedure t_vtexamine()
+
+char input[SZ_FNAME] # input template
+char files[SZ_LINE] # which files to examine
+bool headers # print headers?
+
+char tapename[SZ_FNAME]
+int filerange[2 * MAX_RANGES + 1]
+int nfiles, filenumber, nrecords
+
+bool clgetb()
+int decode_ranges(), get_next_number()
+int vtexamine(), mtfile(), mtneedfileno()
+errchk vtexamine
+
+begin
+ call fseti (STDOUT, F_FLUSHNL, YES)
+
+ # Find out if user wants to see header info.
+ headers = clgetb ("headers")
+
+ # Get input file(s)
+ call clgstr ("input", input, SZ_FNAME)
+ if (mtfile (input) == NO || mtneedfileno (input) == NO)
+ call strcpy ("1", files, SZ_LINE)
+ else
+ call clgstr ("files", files, SZ_LINE)
+
+ if (decode_ranges (files, filerange, MAX_RANGES, nfiles) == ERR)
+ call error (0, "Illegal file number list.")
+ call printf ("\n")
+
+ # Loop over files.
+ filenumber = 0
+ while (get_next_number (filerange, filenumber) != EOF) {
+
+ # Assemble the appropriate tape file name.
+ call strcpy (input, tapename, SZ_FNAME)
+ if (mtfile(input) == YES && mtneedfileno (input) == YES)
+ call mtfname (input, filenumber, tapename, SZ_FNAME)
+
+ iferr {
+ nrecords = vtexamine (tapename, headers)
+ } then {
+ call eprintf ("Error reading file: %s\n")
+ call pargstr (tapename)
+ call erract (EA_WARN)
+ next
+ } else if (nrecords == 0) {
+ call printf ("Tape at EOT\n")
+ break
+ }
+
+ } # End while.
+end
+
+
+# VTEXAMINE -- Examine a tape (or disk) file. Report the size and
+# number of records and, if requested, decode and print the header
+# information.
+
+int procedure vtexamine (input, headers)
+
+char input[ARB] # input file name
+bool headers
+
+int in, bufsize, totrecords
+int nrecords, totbytes, lastrecsize
+int recsize
+bool trufls
+pointer hs, sp
+pointer pchar, hpchar
+
+int mtopen(), fstati(), get_next_record()
+errchk mtopen, close, get_next_record
+
+begin
+ call smark (sp)
+ call salloc (hs, VT_LENHSTRUCT, TY_STRUCT)
+
+ in = mtopen (input, READ_ONLY, 0)
+ bufsize = fstati (in, F_BUFSIZE)
+
+ call malloc (pchar, bufsize, TY_CHAR)
+ call malloc (hpchar, bufsize, TY_SHORT)
+
+ call printf ("File %s: ")
+ call pargstr (input)
+
+ totrecords = 0
+ nrecords = 0
+ totbytes = 0
+ lastrecsize = 0
+
+
+ # First read the header file.
+ recsize = get_next_record (in, Memc[pchar], bufsize, recsize,
+ SZ_VTHDR * SZB_SHORT/SZB_CHAR)
+ if (recsize == EOF)
+ return (totrecords)
+ call amovs (Memc[pchar], Mems[hpchar], SZ_VTHDR * SZB_SHORT/SZB_CHAR)
+
+ nrecords = nrecords + 1
+ totrecords = totrecords + 1
+ totbytes = totbytes + recsize
+ lastrecsize = recsize
+ trufls = TRUE
+ if (headers)
+ call decodeheader (hpchar, hs, trufls)
+ call printf ("\n")
+
+ # Loop through the rest of the records.
+ while (get_next_record (in, Memc[pchar], bufsize, recsize,
+ lastrecsize) != EOF) {
+
+ if (recsize == lastrecsize)
+ nrecords = nrecords + 1
+ else {
+ call printf ("\t %d %d-byte records\n")
+ call pargi (nrecords)
+ call pargi (lastrecsize)
+ nrecords = 1
+ lastrecsize = recsize
+ }
+
+ totrecords = totrecords + 1
+ totbytes = totbytes + recsize
+
+ } # End while.
+
+ if (nrecords > 0 ) {
+ call printf ("\t %d %d-byte records\n")
+ call pargi (nrecords)
+ call pargi (lastrecsize)
+ }
+
+ # Print total number of records and bytes.
+ call printf ("\t Total %d records, %d bytes\n")
+ call pargi (totrecords)
+ call pargi (totbytes)
+
+ call mfree (pchar, TY_CHAR)
+ call mfree (hpchar, TY_SHORT)
+ call sfree (sp)
+ call close (in)
+
+ return (totrecords)
+end
+
+
+# GET_NEXT_RECORD -- Read the next record from tape (or disk) and,
+# if an error is found, patch up the data as best we can and use it.
+# Also, tell the user about the error.
+
+int procedure get_next_record(fd, buffer, bufsize, recsize, lastbufsize)
+
+int bufsize
+char buffer[bufsize]
+int recsize, lastbufsize
+pointer fd
+
+int read(), fstati()
+bool eofflag
+errchk read
+
+begin
+ eofflag = false
+ iferr {
+ if (read (fd, buffer, bufsize) == EOF)
+ eofflag = true
+ recsize = fstati (fd, F_SZBBLK)
+ } then {
+ call fseti (fd, F_VALIDATE, lastbufsize)
+ recsize = read (fd, buffer, bufsize)
+ recsize = fstati (fd, F_SZBBLK)
+ }
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (buffer, 1, buffer, 1, SZ_VTHDR*SZB_SHORT)
+ if (eofflag)
+ return (EOF)
+ else
+ return (recsize)
+end
diff --git a/noao/imred/vtel/writetape.cl b/noao/imred/vtel/writetape.cl
new file mode 100644
index 00000000..76bb23f2
--- /dev/null
+++ b/noao/imred/vtel/writetape.cl
@@ -0,0 +1,45 @@
+#{ WRITETAPE -- Write five images to a vacuum telescope tape. The
+# script accepts the name of the mag tape device and the general input
+# image filename from the user. Writetape appends a digit [1-5] to the
+# file name for each file to be written.
+
+# getmtape,s,a,,,,Mag tape device to write to
+# getname,s,a,,,,Root filename for the 5 images
+# magtape,s,h
+# imname,s,h
+
+{
+
+ imname = getname
+ magtape = getmtape
+
+ if (access(imname//"1.imh")) {
+ writevt (imname//"1", magtape//"1600[1]")
+ } else {
+ print (imname//"1 not accessable")
+ }
+
+ if (access(imname//"2.imh")) {
+ writevt (imname//"2", magtape//"1600[2]")
+ } else {
+ print (imname//"2 not accessable")
+ }
+
+ if (access(imname//"3.imh")) {
+ writevt (imname//"3", magtape//"1600[3]")
+ } else {
+ print (imname//"3 not accessable")
+ }
+
+ if (access(imname//"4.imh")) {
+ writevt (imname//"4", magtape//"1600[4]")
+ } else {
+ print (imname//"4 not accessable")
+ }
+
+ if (access(imname//"5.imh")) {
+ writevt (imname//"5", magtape//"1600[5]")
+ } else {
+ print (imname//"5 not accessable")
+ }
+}
diff --git a/noao/imred/vtel/writetape.par b/noao/imred/vtel/writetape.par
new file mode 100644
index 00000000..863a283d
--- /dev/null
+++ b/noao/imred/vtel/writetape.par
@@ -0,0 +1,5 @@
+
+getmtape,s,a,,,,Mag tape device to write to
+getname,s,a,,,,Root filename for the 5 images
+magtape,s,h
+imname,s,h
diff --git a/noao/imred/vtel/writevt.par b/noao/imred/vtel/writevt.par
new file mode 100644
index 00000000..de11cb13
--- /dev/null
+++ b/noao/imred/vtel/writevt.par
@@ -0,0 +1,4 @@
+imagefile,s,q,,,,Image file descriptor
+outputfile,s,q,,,,Output file descriptor
+verbose,b,h,no,,,Print out header data and give progress reports
+new_tape,b,q,,,,Are you using a new tape?
diff --git a/noao/imred/vtel/writevt.x b/noao/imred/vtel/writevt.x
new file mode 100644
index 00000000..390884b2
--- /dev/null
+++ b/noao/imred/vtel/writevt.x
@@ -0,0 +1,232 @@
+include <error.h>
+include <mach.h>
+include <fset.h>
+include "vt.h"
+
+define SZ_TABLE 8192 # size of lookup table (data)
+
+# WRITEVT -- Write an IRAF image (vacuum telescope full disk image) out to
+# tape in a format identical to the format produced by the vacuum telescope.
+
+procedure t_writevt()
+
+char imagefile[SZ_FNAME] # name of image to be written
+char outputfile[SZ_FNAME] # output file name (tape)
+bool verbose # verbose flag
+
+int obsdate
+int x1, y1, subraster, outfd
+int one
+pointer table
+pointer srp, im, hs, sp
+
+int imgeti(), mtopen()
+int mtfile(), mtneedfileno()
+bool clgetb()
+pointer imgs2s(), immap()
+errchk immap, imgs2s, mtopen
+
+begin
+ call smark (sp)
+ call salloc (table, SZ_TABLE, TY_SHORT)
+ call salloc (hs, VT_LENHSTRUCT, TY_STRUCT)
+
+ # Get the image name and the verbose flag from the cl.
+ call clgstr ("imagefile", imagefile, SZ_FNAME)
+ verbose = clgetb ("verbose")
+
+ # Get the output file from the cl.
+ call clgstr ("outputfile", outputfile, SZ_FNAME)
+
+ # See if the outputfile is mag tape, if not, error.
+ if (mtfile (outputfile) == NO)
+ call error (1, "Outputfile should be magnetic tape.")
+
+ # If no tape file number is given, then ask whether the tape
+ # is blank or contains data. If blank then start at [1], else
+ # start at [EOT].
+
+ if (mtneedfileno(outputfile) == YES)
+ if (!clgetb ("new_tape"))
+ call mtfname (outputfile, EOT, outputfile, SZ_FNAME)
+ else
+ call mtfname (outputfile, 1, outputfile, SZ_FNAME)
+
+ if (verbose) {
+ call printf ("outputfile name = %s\n")
+ call pargstr (outputfile)
+ }
+
+ # Open the image file and the output file.
+ im = immap (imagefile, READ_ONLY, 0)
+ outfd = mtopen (outputfile, WRITE_ONLY, SZ_VTREC)
+
+ # Get date and time from the header.
+ obsdate = imgeti (im, "OBS_DATE")
+ VT_HMONTH(hs) = obsdate/10000
+ VT_HDAY(hs) = obsdate/100 - 100 * (obsdate/10000)
+ VT_HYEAR(hs) = obsdate - 100 * (obsdate/100)
+ VT_HTIME(hs) = imgeti (im, "OBS_TIME")
+ VT_HWVLNGTH(hs) = imgeti(im, "wv_lngth")
+ VT_HOBSTYPE(hs) = imgeti (im, "obs_type")
+ VT_HAVINTENS(hs) = imgeti (im, "av_intns")
+ VT_HNUMCOLS(hs) = imgeti (im, "num_cols")
+ VT_HINTGPIX(hs) = imgeti (im, "intg/pix")
+ VT_HREPTIME(hs) = imgeti (im, "rep_time")
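+ # The OBS_DATE keyword above is packed as MMDDYY; e.g. (hypothetical value)
+ # 102588 decodes to month 10, day 25, year 88.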
+
+ # Write header data to tape.
+ call writeheader (outfd, hs, verbose)
+
+ # Set up the lookup table for data subswaths.
+ one = 1
+ call amovks (one, Mems[table], SZ_TABLE)
+ call aclrs (Mems[table], HALF_DIF)
+ call aclrs (Mems[table + SWTHWID_14 + HALF_DIF], HALF_DIF)
+ call aclrs (Mems[table + SWTHWID_23 * 3], HALF_DIF)
+ call aclrs (Mems[table + SZ_TABLE - HALF_DIF], HALF_DIF)
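+ # The cleared ranges flag subswaths that putsubraster will skip; only the
+ # table entries left set are actually written from the image.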
+
+ # Write the image data to tape.
+ do subraster = 1, NUM_SRSTR {
+
+ # Calculate position of bottom left corner of this subraster.
+ x1 = ((NUM_SRSTR_X - 1) - mod((subraster - 1), NUM_SRSTR_X)) *
+ SRSTR_WID + 1
+ y1 = ((NUM_SRSTR_Y - 1) - ((subraster - mod((subraster - 1),
+ NUM_SRSTR_Y)) / NUM_SRSTR_Y)) * SWTH_HIGH + 1
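+ # Subraster 1 thus begins at the largest x and y offsets of the image,
+ # i.e. the tape subrasters run opposite to the IRAF pixel order.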
+
+ # Get subraster.
+ srp = imgs2s (im, x1, x1+(SRSTR_WID - 1), y1, y1+(SWTH_HIGH - 1))
+ iferr (call putsubraster (outfd, Mems[srp], SRSTR_WID,
+ SWTH_HIGH, Mems[table], subraster))
+ call eprintf ("Error in putsubraster, subraster = %d\n")
+ call pargi (subraster)
+ if (verbose) {
+ call printf("%d%% done\n")
+ call pargi ((subraster*100)/NUM_SRSTR)
+ call flush (STDOUT)
+ }
+ }
+
+ # Close output file and unmap image.
+ call close (outfd)
+ call imunmap (im)
+ call sfree (sp)
+end
+
+
+# WRITEHEADER -- Write the header information to the output, pack the date
+# and time, and, if the 'verbose' flag is set, display some of the header
+# values to the user.
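+# The packed header occupies SZ_VTHDR short words: words 1-2 hold the BCD
+# date, words 3-4 the split time, words 5-10 the remaining parameters, and
+# the rest are zero filler.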
+
+procedure writeheader(outfd, hs, verbose)
+
+int outfd # output file descriptor
+pointer hs # header data structure pointer
+bool verbose # verbose flag
+
+int i
+short hbuf[SZ_VTHDR]
+int fstati()
+errchk write
+
+begin
+ # Pack the date and time.  The constants below are explained in the
+ # description of the image header and how it is packed.  If any changes
+ # are made there, the following code will have to be rewritten.
+
+ call bitpak (VT_HMONTH(hs)/10, hbuf[1], 13, 4)
+ call bitpak ((VT_HMONTH(hs)-(VT_HMONTH(hs)/10)*10), hbuf[1], 9, 4)
+ call bitpak (VT_HDAY(hs)/10, hbuf[1], 5, 4)
+ call bitpak ((VT_HDAY(hs)-(VT_HDAY(hs)/10)*10), hbuf[1], 1, 4)
+ call bitpak (VT_HYEAR(hs)/10, hbuf[2], 13, 4)
+ call bitpak ((VT_HYEAR(hs)-(VT_HYEAR(hs)/10)*10), hbuf[2], 9, 4)
+ call bitpak (VT_HTIME(hs)/2**15, hbuf[3], 1, 2)
+ call bitpak ((VT_HTIME(hs)-(VT_HTIME(hs)/2**15)*2**15), hbuf[4], 1, 15)
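+ # Worked example (values are hypothetical): for month 12 and day 25 the
+ # BCD digits 1, 2, 2, 5 are packed into hbuf[1] at bit offsets 13, 9, 5,
+ # and 1, four bits each; the time is split into its high 2 bits (hbuf[3])
+ # and its low 15 bits (hbuf[4]).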
+
+ # Put other parameters in appropriate places.
+ hbuf[5] = VT_HWVLNGTH(hs)
+ hbuf[6] = VT_HOBSTYPE(hs)
+ hbuf[7] = VT_HAVINTENS(hs)
+ hbuf[8] = VT_HNUMCOLS(hs)
+ hbuf[9] = VT_HINTGPIX(hs)
+ hbuf[10] = VT_HREPTIME(hs)
+
+ # Zero the remaining (unused) header words.
+ for (i = 11 ; i <= SZ_VTHDR ; i = i + 1)
+ hbuf[i] = 0
+
+ if (verbose) {
+ call printf ("\nmonth/day/year = %d/%d/%d\n")
+ call pargi (VT_HMONTH(hs))
+ call pargi (VT_HDAY(hs))
+ call pargi (VT_HYEAR(hs))
+ call printf ("time = %d seconds since midnight\n")
+ call pargi (VT_HTIME(hs))
+ call printf ("wavelength = %d\nobservation type = %d\n")
+ call pargi (VT_HWVLNGTH(hs))
+ call pargi (VT_HOBSTYPE(hs))
+ call flush (STDOUT)
+ }
+
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (hbuf, 1, hbuf, 1, SZ_VTHDR*SZB_SHORT)
+ call write (outfd, hbuf, SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ if (fstati (outfd, F_NCHARS) != SZ_VTHDR*SZB_SHORT/SZB_CHAR)
+ call error (0, "error when writing header")
+ call flush (outfd)
+end
+
+
+# PUTSUBRASTER -- Write the data for this subraster to the output, checking
+# the lookup table to see whether each subswath contains data to be written.
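+# Subswaths are written from column nx down to 1, and each subswath is
+# reversed in y before it is written, so the tape record order is the mirror
+# of the IRAF pixel order.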
+
+procedure putsubraster (outfd, array, nx, ny, table, subraster)
+
+int outfd # output file descriptor
+int subraster # subraster number
+int nx # size of the data array (x)
+int ny # size of the data array (y)
+short array[nx, ny] # data array
+short table[SZ_TABLE] # subswath lookup table
+
+int i, subswath, tableindex
+pointer sp, bufpointer
+errchk writesubswath
+
+begin
+ call smark (sp)
+ call salloc (bufpointer, ny, TY_SHORT)
+
+ do subswath = nx, 1, -1 {
+ tableindex = (subraster - 1) * nx + ((nx + 1) - subswath)
+ if (table[tableindex] == IS_DATA) {
+ do i = ny, 1, -1
+ Mems[bufpointer + ny - i] = array[subswath,i]
+ call writesubswath (outfd, Mems[bufpointer], ny)
+ } else
+ next
+ }
+
+ call sfree(sp)
+end
+
+
+# WRITESUBSWATH -- Write data to the file whose logical unit is outfd,
+# swapping the bytes in each data word when the host byte order requires it.
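+# Note that bswap2 swaps the buffer in place, so the caller's copy of the
+# data is byte swapped as a side effect.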
+
+procedure writesubswath (outfd, buf, buflength)
+
+int outfd # output file descriptor
+int buflength # length of data buffer
+short buf[buflength] # data buffer
+
+int fstati()
+errchk write
+
+begin
+ if (BYTE_SWAP2 == YES)
+ call bswap2 (buf, 1, buf, 1, buflength * SZB_SHORT)
+ call write (outfd, buf, buflength*SZB_SHORT/SZB_CHAR)
+ if (fstati (outfd, F_NCHARS) != buflength*SZB_SHORT/SZB_CHAR)
+ call error (0, "eof encountered when reading subswath")
+end
diff --git a/noao/imred/vtel/x_vtel.x b/noao/imred/vtel/x_vtel.x
new file mode 100644
index 00000000..942afcf0
--- /dev/null
+++ b/noao/imred/vtel/x_vtel.x
@@ -0,0 +1,16 @@
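+# X_VTEL -- Task declarations for the vtel package, binding each task name
+# to its SPP entry procedure.
+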
+task readvt = t_readvt,
+ writevt = t_writevt,
+ unwrap = t_unwrap,
+ quickfit = t_quickfit,
+ getsqib = t_getsqib,
+ putsqib = t_putsqib,
+ rmap = t_rmap,
+ merge = t_merge,
+ destreak = t_destreak,
+ trim = t_trim,
+ dicoplot = t_dicoplot,
+ vtexamine = t_vtexamine,
+ tcopy = t_tcopy,
+ mscan = t_mscan,
+ syndico = t_syndico,
+ pimtext = t_pimtext