Commits
81 81 | ''' |
82 82 | |
83 83 | ########################################################################## |
84 84 | ########################################################################## |
85 85 | |
86 86 | # Imports # |
87 87 | import os |
88 88 | import glob |
89 89 | import subprocess |
90 90 | import unittest |
91 - | import numpy |
92 91 | import shutil |
93 - | import scipy |
94 - | import matplotlib.pyplot as pyplot |
95 92 | import json |
96 93 | import pickle |
97 94 | import copy |
95 + | import numpy |
96 + | import scipy |
97 + | import matplotlib.pyplot as pyplot |
98 98 | |
99 99 | from casatestutils.imagerhelpers import TestHelpers |
100 100 | th = TestHelpers() |
101 101 | from casatestutils import generate_weblog |
102 102 | from casatestutils import add_to_dict |
103 103 | from casatestutils import stats_dict |
104 104 | from casatestutils.stakeholder import almastktestutils |
105 105 | |
106 106 | from casatools import ctsys, image |
107 107 | from casatasks import tclean, immoments#, imview |
142 142 | # self.delData() |
143 143 | |
144 144 | def getExpdicts(self, testname): |
145 145 | ''' read the json file containing exp_dicts (fiducial metric values)
146 146 | for a specific test ''' |
147 147 | self.exp_dicts = almastktestutils.read_testcase_expdicts(self.expdict_jsonfile, testname, self.refversion) |
148 148 | |
149 149 | # Separate functions here, for special-case tests that need their own MS. |
150 150 | def prepData(self, msname=None, inpmask=None): |
151 151 | |
152 - | if msname != None: |
152 + | if msname is not None: |
153 153 | self.msfile = msname |
154 - | if inpmask != None: |
154 + | if inpmask is not None: |
155 155 | self.inpmask = inpmask |
156 156 | |
157 157 | def delData(self, msname=None): |
158 158 | del_files = [self.img_subdir] |
159 - | if msname != None: |
159 + | if msname is not None: |
160 160 | self.msfile=msname |
161 - | if (os.path.exists(self.msfile)): |
161 + | if os.path.exists(self.msfile): |
162 162 | del_files.append(self.msfile) |
163 163 | img_files = glob.glob(self.img+'*') |
164 164 | del_files += img_files |
165 165 | for f in del_files: |
166 166 | shutil.rmtree(f) |
167 167 | |
168 168 | def check_dict_vals_beam(self, exp_dict, act_dict, suffix, epsilon=0.01): |
169 169 | """ Compares expected dictionary with actual dictionary. Useful for comparing the restoring beam. |
170 170 | |
171 171 | Parameters |
232 232 | |
233 233 | def image_stats(self, image, fit_region=None, field_regions=None, masks=None): |
234 234 | """ function that takes an image file and returns a statistics |
235 235 | dictionary |
236 236 | """ |
237 237 | self._myia.open(image) |
238 238 | imagename=os.path.basename(image) |
239 239 | stats_dict = {} |
240 240 | |
241 241 | statistics = self._myia.statistics() |
242 - | |
243 242 | imshape = self._myia.shape() |
244 243 | npol = imshape[2] |
245 244 | statisticsperpol={} |
246 245 | if npol > 1: |
247 246 | _rg = rgtool() |
248 247 | stokes = list(self._myia.coordsys().torecord()['stokes1']['stokes']) |
249 248 | for pol in stokes: |
250 - | statisticsperpol[pol] = self._myia.statistics(region=_rg.box([0,0,stokes.index(pol),0],[imshape[0]-1, imshape[1]-1, stokes.index(pol), 0])) |
249 + | statisticsperpol[pol] = self._myia.statistics(region= \ |
250 + | _rg.box([0,0,stokes.index(pol),0],[imshape[0]-1, imshape[1]-1, stokes.index(pol), 0])) |
251 251 | # Return data chunk; transpose to make channel selection easier |
252 252 | chunk = numpy.transpose(self._myia.getchunk(dropdeg=True)) |
253 253 | |
254 254 | # stats returned for all images |
255 255 | im_size = self._myia.boundingbox()['imageShape'].tolist() |
256 256 | print('im_size=',im_size) |
257 257 | #stats_dict['npts'] = im_size[0]*im_size[1]*im_size[3] |
258 258 | stats_dict['npts'] = im_size[0]*im_size[1]*im_size[2]*im_size[3] |
259 259 | stats_dict['npts_unmasked'] = statistics['npts'][0] |
260 260 | stats_dict['npts_real'] = numpy.count_nonzero(~numpy.isnan(chunk)) |
274 274 | max_loc = [stats_dict['max_val_pos'][0], \ |
275 275 | stats_dict['max_val_pos'][1]] |
276 276 | stats_dict['min_val'] = statistics['min'][0] |
277 277 | stats_dict['min_val_pos'] = statistics['minpos'].tolist() |
278 278 | stats_dict['im_rms'] = statistics['rms'][0] |
279 279 | # per polarization values |
280 280 | if npol > 1: |
281 281 | # this is duplicated info but for completeness... |
282 282 | for pol in stokes: |
283 283 | maxkey = 'max_val_'+pol |
284 - | rmskey = 'rms_val_'+pol |
284 + | maxposkey = 'max_val_pos_' + pol |
285 + | rmskey = 'im_rms_'+pol |
285 286 | stats_dict[maxkey] = statisticsperpol[pol]['max'][0] |
287 + | stats_dict[maxposkey] = statisticsperpol[pol]['maxpos'].tolist() |
286 288 | stats_dict[rmskey] = statisticsperpol[pol]['rms'][0] |
287 289 | |
288 290 | |
289 291 | # stats returned if a region file is given |
290 - | if fit_region != None: |
292 + | if fit_region is not None: |
291 293 | if '_cube' in imagename: |
292 294 | if '.pb' in imagename: |
293 295 | fit_region = fit_region + ', range=[%schan,%schan]'\ |
294 296 | % (int(im_size[3]/2), int(im_size[3]/2)) |
295 297 | if '.psf' in imagename: |
296 298 | # using chan 1 as first because ia.fitcomponents fits |
297 299 | # every channel if chan=0 |
298 300 | fit_regions = [(fit_region + ', range=[%schan,%schan]' \ |
299 301 | % (1, 1)), \ |
300 302 | (fit_region + ', range=[%schan,%schan]' \ |
336 338 | stats_dict['regn_sum'] = self._myia.statistics( \ |
337 339 | region=fit_region)['sum'][0] |
338 340 | else: |
339 341 | for pol in stokes: |
340 342 | regnsumkey = 'regn_sum_'+pol |
341 343 | per_pol_region = fit_region+f' corr=[{pol}]' |
342 344 | stats_dict[regnsumkey] = self._myia.statistics( \ |
343 345 | region=per_pol_region)['sum'][0] |
344 346 | if ('image' in imagename and 'mosaic_cube_eph' not in imagename) or 'pb' in imagename or ('psf' in imagename and 'cube' not in imagename): |
345 347 | try: |
346 - | if npol == 1: |
347 - | fit_dict = self._myia.fitcomponents( \ |
348 - | region=fit_region)['results']['component0'] |
349 - | stats_dict['fit'] = [fit_dict['peak']['value'], \ |
350 - | fit_dict['shape']['majoraxis']['value'], \ |
351 - | fit_dict['shape']['minoraxis']['value']] |
352 - | stats_dict['fit_loc_chan'] = fit_dict['spectrum']['channel'] |
353 - | stats_dict['fit_loc_freq'] = fit_dict['spectrum']['frequency']['m0']['value'] |
354 - | stats_dict['fit_pix'] = fit_dict['pixelcoords'].tolist() |
355 - | else: # mulitple polarization case |
348 + | |
349 + | fit_dict = self._myia.fitcomponents( \ |
350 + | region=fit_region)['results']['component0'] |
351 + | stats_dict['fit'] = [fit_dict['peak']['value'], \ |
352 + | fit_dict['shape']['majoraxis']['value'], \ |
353 + | fit_dict['shape']['minoraxis']['value']] |
354 + | stats_dict['fit_loc_chan'] = fit_dict['spectrum']['channel'] |
355 + | stats_dict['fit_loc_freq'] = fit_dict['spectrum']['frequency']['m0']['value'] |
356 + | stats_dict['fit_pix'] = fit_dict['pixelcoords'].tolist() |
357 + | if npol > 1 : # multiple polarization case
356 358 | for pol in stokes: |
357 359 | per_pol_region = fit_region + f' corr=[{pol}]' |
358 360 | fit_dict = self._myia.fitcomponents( \ |
359 361 | region=per_pol_region)['results']['component0'] |
360 - | fitky = 'fit_' + pol |
362 + | fitky = 'fit_' + pol |
361 363 | fitlocchanky = 'fit_loc_chan_' + pol |
362 364 | fitlocfreqky = 'fit_loc_freq_' + pol |
363 365 | fitpixky = 'fit_pix_' + pol |
364 366 | stats_dict[fitky] = [fit_dict['peak']['value'], \ |
365 367 | fit_dict['shape']['majoraxis']['value'], \ |
366 368 | fit_dict['shape']['minoraxis']['value']] |
367 369 | stats_dict[fitlocchanky] = fit_dict['spectrum']['channel'] |
368 370 | stats_dict[fitlocfreqky] = fit_dict['spectrum']['frequency']['m0']['value'] |
369 371 | stats_dict[fitpixky] = fit_dict['pixelcoords'].tolist() |
370 372 | except KeyError: |
387 389 | if 'mosaic' in imagename: |
388 390 | stats_dict['rms_per_field'] = [] |
389 391 | for region in field_regions: |
390 392 | stats_dict['rms_per_field'].append( \ |
391 393 | self._myia.statistics(region=region)['rms'][0]) |
392 394 | |
393 395 | |
394 396 | # stats returned if not .pb(.tt0), .sumwt(.tt0), or .mask |
395 397 | # if 'pb' not in image and 'sumwt' not in image and not image.endswith('.mask'): |
396 398 | stats_dict['im_sum'] = statistics['sum'][0] |
399 + | if npol > 1: |
400 + | for pol in stokes: |
401 + | imsumkey = 'im_sum_'+pol |
402 + | stats_dict[imsumkey] = statisticsperpol[pol]['sum'][0] |
397 403 | |
398 404 | if image.endswith('.mask'): |
399 405 | stats_dict['mask_pix'] = numpy.count_nonzero(chunk) |
400 406 | stats_dict['mask_regns'] = scipy.ndimage.label(chunk)[1] |
401 407 | stats_dict['mask'] = ~numpy.array(chunk, dtype=bool) |
402 408 | |
403 409 | if 'pb' in imagename: |
404 410 | pb_mask_02 = chunk>0.2 |
405 411 | pb_mask_05 = chunk>0.5 |
406 412 | if 'cube' in image: |
526 532 | """ |
527 533 | try: |
528 534 | import casatasks as __casatasks |
529 535 | casaversion = __casatasks.version_string() |
530 536 | del __casatasks |
531 537 | except: |
532 538 | casaversion = '' |
533 539 | |
534 540 | if casaversion !='': |
535 541 | casaversion = '_' + casaversion |
536 - | if type(indict) != dict: |
542 + | if not isinstance(indict, dict): |
537 543 | print("indict is not a dict. Saved file may not be in correct format") |
538 544 | nestedDict={} |
539 545 | nestedDict[topkey]=indict |
540 546 | print("Saving %s dictionaries", len(indict)) |
541 547 | if outformat == 'pickle': |
542 548 | # writing to pickle: note if writing this way (without protocol=2) |
543 549 | # in casa6 and read in casa5 it will fail |
544 550 | with open(outfilename+casaversion+'.pickle', 'wb') as outf: |
545 551 | pickle.dump(nestedDict, outf) |
546 552 | elif outformat== 'JSON': |
3548 3554 | pbcor=False, weighting='briggs', robust=0.5, npixels=0, niter=0, \ |
3549 3555 | threshold='0.0mJy', nsigma=0.0, usemask='auto' |
3550 3556 | '-multithresh', sidelobethreshold=1.25, noisethreshold=5.0, \ |
3551 3557 | lownoisethreshold=2.0, negativethreshold=0.0, minbeamfrac=0.1, \ |
3552 3558 | growiterations=75, dogrowprune=True, minpercentchange=1.0, \ |
3553 3559 | fastnoise=False, savemodel='none', parallel=self.parallel, |
3554 3560 | verbose=True) |
3555 3561 | |
3556 3562 | # move files to iter1 |
3557 3563 | print('Copying iter0 files to iter1') |
3558 - | file_list = glob.glob(file_name+'0*') |
3559 3564 | self.copy_products(file_name+'0', file_name+'1') |
3560 3565 | |
3561 3566 | print("STARTING: iter1 routine") |
3562 3567 | |
3563 3568 | # iter1 (restart) |
3564 3569 | tclean(vis=self.msfile, field='NGC5363', spw=['0:113.893653412' |
3565 3570 | '~114.629981537GHz;114.8809581~115.758887787GHz,1:99.90983902' |
3566 3571 | '~100.494799957GHz;100.68327652~101.77312027GHz'], \ |
3567 3572 | antenna=['0,1,2,3,4,5,6,7,8'], scan=['6,9'], \ |
3568 3573 | intent='OBSERVE_TARGET#ON_SOURCE', datacolumn='data', \ |
4993 4998 | |
4994 4999 | test_name = self._testMethodName |
4995 5000 | file_name = self.remove_prefix(test_name, 'test_')+'.iter' |
4996 5001 | img = os.getcwd()+'/'+file_name+'1' |
4997 5002 | mydatapath='/stash/users/ttsutsum/casadev/stktests/almacalpol/' |
4998 5003 | #mydatapath='/export/home/murasame2/casadev/imagerRefact/stktestdev/cas14462/' |
4999 5004 | self.prepData([mydatapath+'uid___A002_Xfafcc0_X70fe.ms', |
5000 5005 | mydatapath+'uid___A002_Xfafcc0_X6adf.ms']) |
5001 5006 | #self.getExpdicts(test_name) |
5002 5007 | |
5003 - | os.system('cp -r '+mydatapath+'test_cal_mfs_IQUV.iter1.cleanmask .') |
5008 + | os.system('cp -r '+mydatapath+'cal_mfs_IQUV.iter1.cleanmask .') |
5004 5009 | |
5005 5010 | print("\nSTARTING: iter0 routine") |
5006 5011 | |
5007 5012 | # iter0 routine |
5008 5013 | tclean(vis=self.msfile, field='J0522-3627', \ |
5009 5014 | spw=['0,1,2,3,4', '0,1,2,3,4'], \ |
5010 5015 | antenna=['0,1,2,3,4,5,6,7,8,9&', '0,1,2,3,4,5,6,7,8,9&'], \ |
5011 5016 | scan=['6,28,48', '3,9,31,51'], \ |
5012 5017 | intent='CALIBRATE_POLARIZATION#ON_SOURCE', datacolumn='corrected', \ |
5013 5018 | imagename=file_name+'0', imsize=[256, 256], cell=['0.88arcsec'], \ |