#
# IO.PY
# functions to read input catalogues and to save point source collections.
#
# date: 2017-12-21
# author: GIUSEPPE PUGLISI
#
# Copyright (C) 2017 Giuseppe Puglisi giuspugl@sissa.it
#
import healpy as hp
import numpy as np
import h5py as h5
from PointSources import *
def write_ps_selection_to_hdf5(ps_list, filename):
    """
    Save a list of :class:`PointSources` into an HDF5 binary file.
    """
f=h5.File(filename,"w")
n_sources=len(ps_list)
f.create_dataset('N_sources',np.shape(n_sources), dtype=h5.h5t.STD_I32BE,data=n_sources)
for p,idx in zip(ps_list,np.arange(n_sources)):
g=f.create_group("PointSource_"+str(idx))
d=dict(p)
for k in d.keys():
g.create_dataset(k, data= d[k])
f.close()
def read_ps_selection_from_hdf5(filename):
    """
    Read a list of :class:`PointSources` from an HDF5 binary file.
    """
f=h5.File(filename,"r")
n_sources=f['N_sources'][...]
ps_list=[]
for idx in xrange(n_sources):
g=f["PointSource_"+str(idx)]
d={k:v[...] for k,v in g.items()}
ps_list.append(PointSource(None,None,d))
f.close()
return ps_list
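# A minimal round-trip sketch for the two HDF5 helpers above ('selection.hdf5' is a
# hypothetical filename and `ps_list` an existing list of point sources):
#   write_ps_selection_to_hdf5(ps_list, 'selection.hdf5')
#   ps_list_restored = read_ps_selection_from_hdf5('selection.hdf5')
#   assert len(ps_list_restored) == len(ps_list)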
def project_sources2map(ps_list, nside, omega_beam, polarization=False, forecast_pol=None, muK=1e6):
    """
    Project the list of :class:`PointSources` onto a HEALPix intensity map
    (and, if ``polarization`` is True, onto Q and U maps as well).
    """
source_map={'I': np.zeros(hp.nside2npix(nside))}
if polarization:
source_map={'I':np.zeros(hp.nside2npix(nside)),'Q': np.zeros(hp.nside2npix(nside)),'U':np.zeros(hp.nside2npix(nside))}
cres=0
for p in ps_list:
if forecast_pol is not None:
p.forecast_polarization(forecast_pol)
center=hp.ang2vec(np.deg2rad(90 - p.glat),np.deg2rad(p.glon) )
        if p.resolved:
            # resolved (extended) sources: use the Gaussian-fit flux and the source effective size
            cres += 1
            flux = p.gauflux
            radius = np.deg2rad(p.omega_eff / 60.)  # omega_eff is in arcmin
        else:
            # unresolved sources: use the detection flux and the beam size
            flux = p.detflux
            radius = np.deg2rad(omega_beam / 60.)  # omega_beam is in arcmin
        listpix = hp.query_disc(nside, center, radius)
        # radius is already in radians: approximate the source solid angle as pi * radius**2 (sr)
        T_b = flux2brightness_temp(flux, np.pi * radius**2)
        source_map['I'][listpix] = T_b * muK
        if polarization:
            tpol = flux2brightness_temp(p.fpol, np.pi * radius**2)
            source_map['Q'][listpix] = tpol * np.cos(2. * p.polangle) * muK
            source_map['U'][listpix] = - tpol * np.sin(2. * p.polangle) * muK  # for this - sign check the Planck 2015 PCCS2 paper
print "extended sources %d"%cres
return [ i for i in source_map.values()]
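# A minimal usage sketch for project_sources2map (the nside, beam size in arcmin and
# output filename are hypothetical; `ps_list` comes from search_in_catalogue):
#   maps = project_sources2map(ps_list, nside=256, omega_beam=30., polarization=True)
#   hp.write_map('ps_IQU.fits', maps)  # healpy accepts a list of I, Q, U maps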
def search_in_catalogue(catalogue, minlat, maxlat, minlong, maxlong, coord='EQ',
                        pol_sensitive=True, LFI=False, verbose=True):
    r"""
    Look for point sources within :math:`b_{min}<b<b_{max}` and
    :math:`\ell_{min}<\ell<\ell_{max}` in a catalogue.

    **Returns**

    - `counter`: {int}
        number of sources found
    - `point_sources`: {list}
        list of the point sources found within the ranges
    """
if pol_sensitive and not LFI:
dict_catalog={'id':0, 'glon':1,'glat':2, 'ra':3, 'dec':4, 'detflux':5,'deterr':6, 'gauflux':11,'gauerr':12,\
'omega_eff':19,'polflux':20,'polerr':21,'polangle':22,'polang_err':23,'resolved':37,'ext_val':38}
elif pol_sensitive and LFI:
dict_catalog={'id':0, 'glon':1,'glat':2, 'ra':3, 'dec':4, 'detflux':5,'deterr':6, 'gauflux':11,'gauerr':12,\
'omega_eff':19,'polflux':20,'polerr':21,'polangle':22,'polang_err':23,'resolved':30,'ext_val':31}
else:
dict_catalog={'id':0, 'glon':1,'glat':2, 'ra':3, 'dec':4, 'detflux':5,'deterr':6, 'gauflux':11,'gauerr':12,\
'omega_eff':19,'resolved':20,'ext_val':21}
n_sources=len(catalogue[0])
counter=0
found=[]
#
if coord=='EQ':
x1,x2=(catalogue[dict_catalog['ra']]),(catalogue[dict_catalog['dec']])
limits=lambda glong,glat: ((glong <= minlong or maxlong<= glong) and (minlat<=glat and glat<=maxlat))
else:
x1,x2=(catalogue[dict_catalog['glon']]),(catalogue[dict_catalog['glat']])
limits=lambda glong,glat: ((minlong <= glong <=maxlong) and (minlat<=glat <=maxlat))
for idx,glong,glat in zip(xrange(n_sources), x1,x2):
if limits(glong,glat):
found.append(idx)
counter+=1
else:
continue
if verbose :
print " Found %d sources within deg=(%.1f,%.1f),ra=(%.1f,%.1f) ."%(counter,(minlat),(maxlat),(minlong),(maxlong))
point_sources=[]
for f in found:
point_sources.append(PointSources(f,catalogue,dict_catalog,pol_sensitive=pol_sensitive))
#print catalogue[dict_catalog['polerr']][f],catalogue[dict_catalog['polang_err']][f],catalogue[dict_catalog['deterr']][f],catalogue[dict_catalog['gauerr']][f]
return counter,point_sources
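# A minimal usage sketch for search_in_catalogue (the latitude/longitude cuts are
# hypothetical; any `coord` other than 'EQ' selects galactic coordinates):
#   n_found, ps_list = search_in_catalogue(catalogue, minlat=-2., maxlat=2.,
#                                          minlong=30., maxlong=40., coord='GAL')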
def read_nvss_catalog(filename, **kwargs):
    """
    Read the NVSS catalogue from the ``|``-separated ASCII table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
for l in f.readlines():
cols=l.split('|',27)[1:-1]
c+=1
if c==3:
nfields=len(cols)
dict_catalog= {key: val for key,val in zip(cols,xrange(nfields))}
catalog={key: [] for key in dict_catalog.keys()}
if c>3:
for k,i in dict_catalog.items():
if i==0:
catalog[k].append(cols[i])
                elif i == 1:  # ra in sexagesimal 'hh mm ss', converted to degrees
                    hh, mm, ss = cols[i].split(' ', 3)
                    angle = (int(hh) * 15. + float(mm) * 15. / 60. + float(ss) * 15. / 3600.)
                    catalog[k].append(angle)
                elif i == 2:  # dec in sexagesimal 'dd mm ss', converted to degrees
                    dd, mm, ss = cols[i].split(' ', 3)
                    # keep the sign explicit so that declinations like '-00 30 00' stay negative
                    s = -1. if dd.strip().startswith('-') else 1.
                    angle = s * (abs(int(dd)) + float(mm) / 60. + float(ss) / 3600.)
                    catalog[k].append(angle)
else:
try:
catalog[k].append(float(cols[i]))
except ValueError:
catalog[k].append(np.nan)
if kwargs['verbose']:
print "Read %d total sources from %s. "%(c -3 ,filename)
f.close()
return catalog
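# A minimal usage sketch for read_nvss_catalog ('nvss.txt' is a hypothetical filename;
# the returned dictionary is keyed by the column names taken from the table header):
#   nvss = read_nvss_catalog('nvss.txt', verbose=True)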
def read_number_counts(filename, **kwargs):
    """
    Read tabulated number counts from the tab-separated ASCII table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
#print np.loadtxt(filename, comments='#', delimiter=' ' ,skiprows=3, unpack=False)
for l in f.readlines():
if c==1:
cols=l.split('\t',6)
nfields=len(cols)
dictd= {val:key for key,val in zip(cols,xrange(nfields))}
data={key: [] for key in dictd.values()}
if c>1:
v=np.fromstring(l,sep=' ')
for i in xrange(len(v)):
data[dictd[i]].append(v[i])
c+=1
    if kwargs['verbose']:
        print "Read %d lines from %s " % (c, filename)
f.close()
return data
def read_spass_nvss_catalog(filename, **kwargs):
    """
    Read the S-PASS/NVSS cross-matched catalogue from the ``;``-separated table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
fields=[]
for l in f.readlines():
li=l.strip()
if not li.startswith("#") and not li.startswith('---') and not li.startswith('deg'):
cols=l.split(';')[2:-3]
#print cols
if c==1:
#print cols
fields= cols
nfields=len(fields)
dict_catalog= {k:v for k, v in zip(fields,xrange(nfields))}
catalog= {k: [] for k in fields}
#print catalog
elif c>1:
v=[np.fromstring(i,sep=';') for i in cols]
#print v, len(v)==nfields #fields.append(cols[4])
for i,k in zip(v,fields):
if k==fields[0]:
catalog[k].append(cols[0])
else:
catalog[k].append(i)
c+=1
f.close()
#print catalog['NVSS']
for k in catalog.keys():
if k=='NVSS': continue
catalog[k]=np.concatenate(catalog[k])
if kwargs['verbose']: print "Read %d total sources from %s "%(len(catalog['NVSS']) , filename)
return catalog,dict_catalog
def read_galluzzi_fpol_dat(filename, **kwargs):
    """
    Read source flags, polarization fractions and their errors (converted
    from percent) from the ASCII table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
    arr = np.loadtxt(filename)
    flag = np.array([a[0] for a in arr])
    pi = np.array([a[1] / 100. for a in arr])
    errpi = np.array([a[2] / 100. for a in arr])
    if kwargs['verbose']:
        print "Read %d lines from %s " % (len(flag), filename)
    return flag, pi, errpi
def read_jvas_catalog(filename, **kwargs):
    """
    Read the JVAS polarization catalogue from the whitespace-separated table in
    `filename`; rows with a polarization fraction of 100 per cent or more are skipped.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
dic={'ra':0, 'dec':1, 'z':2, 'I':3, 'eI':4, 'P':5, 'eP':6, 'fpol':7, 'efpol':8, 'PA':9,'ePA':10}
loadfile =np.loadtxt(filename)
dicdata={i:[] for i in dic.keys()}
for row in loadfile:
if row[dic['fpol']] >=100:
if kwargs['verbose']:
print "Skipping Weird value of Pi",row[dic['fpol']]
continue
for i in dic.keys():
dicdata[i].append(row[dic[i]])
dicdata={i:np.array(dicdata[i]) for i in dic.keys()}
if kwargs['verbose']:
print "Read %d lines from %s"%(len(dicdata['ra']), filename)
return dicdata
def read_atca_catalog(filename, **kwargs):
    """
    Read the ATCA 20 GHz (AT20G) catalogue from the ``;``-separated table in `filename`;
    rows with polarization fractions above 100 per cent at 5, 8 or 20 GHz are skipped.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
fields=[]
for l in f.readlines():
li=l.strip()
if not li.startswith("#") and not li.startswith('---') and not li.startswith('deg'):
cols=l.split(';')
if c==1:
fields= cols
nfields=len(fields)
dict_catalog= {k:v for k, v in zip(fields,xrange(nfields))}
catalog= {k: [] for k in fields}
elif c>2:
v=[np.fromstring(i,sep=';') for i in cols]
if (v[dict_catalog['m5\n']] >100 or v[dict_catalog['m8']]>100
or v[dict_catalog['m20']]>100) :
if kwargs['verbose']: print "Skipping Weird value of Pi",v[dict_catalog['m5\n']], v[dict_catalog['m8']], v[dict_catalog['m20']]
continue
for k,i in dict_catalog.items():
if i==0 or i==9:
catalog[k].append(cols[i])
elif i ==1 : #ra
hh,mm,ss= cols[i].split(' ', 3)
angle=(int(hh)*15. + float(mm)*15./60. +float(ss)*15./3600)
catalog[k].append(angle )
                    elif i == 2:  # dec in sexagesimal 'dd mm ss', converted to degrees
                        dd, mm, ss = cols[i].split(' ', 3)
                        # keep the sign explicit so that declinations like '-00 30 00' stay negative
                        s = -1. if dd.strip().startswith('-') else 1.
                        angle = s * (abs(int(dd)) + float(mm) / 60. + float(ss) / 3600.)
                        catalog[k].append(angle)
else:
try:
catalog[k].append(float(cols[i]))
except ValueError:
catalog[k].append(np.nan)
c+=1
    catal = {k: np.array(catalog[k]) for k in catalog.keys()}
if kwargs['verbose']:
print "Read %d rows from the AT20G catalog"%c
f.close()
return catal,dict_catalog
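# A minimal usage sketch for read_atca_catalog ('at20g.dat' is a hypothetical filename;
# the second return value maps each column name to its index in the table):
#   at20g, at20g_cols = read_atca_catalog('at20g.dat', verbose=True)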
class bash_colors:
    """
    Helper class to colour terminal output via ANSI escape sequences.
    """
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
    def blue(self, string):
        return self.OKBLUE + str(string) + self.ENDC

    def green(self, string):
        return self.OKGREEN + str(string) + self.ENDC

    def warning(self, string):
        return self.WARNING + str(string) + self.ENDC

    def fail(self, string):
        return self.FAIL + str(string) + self.ENDC

    def bold(self, string):
        return self.BOLD + str(string) + self.ENDC

    def underline(self, string):
        return self.UNDERLINE + str(string) + self.ENDC
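# A minimal usage sketch for bash_colors (assuming a terminal that honours ANSI
# escape codes):
#   col = bash_colors()
#   print col.warning("low polarized flux")
#   print col.bold(col.green("done"))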
def read_IRAM_catalog(filename, **kwargs):
    """
    Read an IRAM catalogue from the ``;``-separated table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
fields=[]
for l in f.readlines():
li=l.strip()
if not li.startswith("#") and not li.startswith('---') and not li.startswith('deg'):
cols=l.split(';')
if c==0:
fields= cols
nfields=len(fields)
dict_catalog= {k:v for k, v in zip(fields,xrange(nfields))}
catalog={k:[] for k in fields}
else:
v=[np.fromstring(i,sep=';') for i in cols]
strings=['Name', 'l_mL','l_mC' ]
for k,i in dict_catalog.items():
                    if k in strings:
catalog[k].append(cols[i])
else:
catalog[k].append(v[i][0])
c+=1
    catal = {k: np.array(catalog[k]) for k in catalog.keys()}
    if kwargs['verbose']:
        print "Read %d rows from the IRAM catalog" % c
f.close()
return catal,dict_catalog
def read_VLA_catalog(filename, **kwargs):
    """
    Read a VLA follow-up catalogue (cross-matched with AT20G) from the ``;``-separated
    table in `filename`.
    """
    if 'verbose' not in kwargs:
        kwargs['verbose'] = False
f=open(filename,'r')
c=0
fields=[]
for l in f.readlines():
li=l.strip()
if not li.startswith("#") and not li.startswith('---') and not li.startswith('deg'):
cols=l.split(';')
if c==0:
fields= cols
nfields=len(fields)
dict_catalog= {k:v for k, v in zip(fields,xrange(nfields))}
catalog={k:[] for k in fields}
else:
v=[np.fromstring(i,sep=';') for i in cols]
strings=['AT20G', 'm_AT20G' , 'l_pC','l_pX','l_pK','l_pQ','Class\n' ]
for k,i in dict_catalog.items():
                    if k in strings:
catalog[k].append(cols[i])
else:
catalog[k].append(v[i][0])
c+=1
    catal = {k: np.array(catalog[k]) for k in catalog.keys()}
    if kwargs['verbose']:
        print "Read %d rows from the VLA catalog" % c
f.close()
return catal,dict_catalog