This notebook presents the merge of the VISTA pristine catalogues (VHS, VIDEO and VIKING), one step in producing the HELP master catalogue on XMM-LSS.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
import os
import time
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from pymoc import MOC
from herschelhelp_internal.masterlist import merge_catalogues, nb_merge_dist_plot, specz_merge
from herschelhelp_internal.utils import coords_to_hpidx, ebv, gen_help_id, inMoc
TMP_DIR = os.environ.get('TMP_DIR', "./data_tmp")
OUT_DIR = os.environ.get('OUT_DIR', "./data")
SUFFIX = os.environ.get('SUFFIX', time.strftime("_%Y%m%d"))
try:
os.makedirs(OUT_DIR)
except FileExistsError:
pass
#candels = Table.read("{}/CANDELS.fits".format(TMP_DIR)) # 1.1
#cfht_wirds = Table.read("{}/CFHT-WIRDS.fits".format(TMP_DIR)) # 1.3
#cfhtls_wide = Table.read("{}/CFHTLS-WIDE.fits".format(TMP_DIR)) # 1.4a
#cfhtls_deep = Table.read("{}/CFHTLS-DEEP.fits".format(TMP_DIR)) # 1.4b
#We no longer use CFHTLenS as it is the same raw data set as CFHTLS-WIDE
# cfhtlens = Table.read("{}/CFHTLENS.fits".format(TMP_DIR)) # 1.5
#decals = Table.read("{}/DECaLS.fits".format(TMP_DIR)) # 1.6
#servs = Table.read("{}/SERVS.fits".format(TMP_DIR)) # 1.8
#swire = Table.read("{}/SWIRE.fits".format(TMP_DIR)) # 1.7
#hsc_wide = Table.read("{}/HSC-WIDE.fits".format(TMP_DIR)) # 1.9a
#hsc_deep = Table.read("{}/HSC-DEEP.fits".format(TMP_DIR)) # 1.9b
#hsc_udeep = Table.read("{}/HSC-UDEEP.fits".format(TMP_DIR)) # 1.9c
#ps1 = Table.read("{}/PS1.fits".format(TMP_DIR)) # 1.10
#sxds = Table.read("{}/SXDS.fits".format(TMP_DIR)) # 1.11
#sparcs = Table.read("{}/SpARCS.fits".format(TMP_DIR)) # 1.12
#dxs = Table.read("{}/UKIDSS-DXS.fits".format(TMP_DIR)) # 1.13
#uds = Table.read("{}/UKIDSS-UDS.fits".format(TMP_DIR)) # 1.14
#vipers = Table.read("{}/VIPERS.fits".format(TMP_DIR)) # 1.15
vhs = Table.read("{}/VISTA-VHS.fits".format(TMP_DIR)) # 1.16
video = Table.read("{}/VISTA-VIDEO.fits".format(TMP_DIR)) # 1.17
viking = Table.read("{}/VISTA-VIKING.fits".format(TMP_DIR)) # 1.18
We merge the VISTA catalogues one by one, starting with VHS because it covers the whole field.
At every step, we look at the distribution of the distances separating the sources from one catalogue to the other (within a maximum radius) to determine the best cross-matching radius.
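For reference, here is a minimal sketch of the kind of nearest-neighbour separation histogram that nb_merge_dist_plot displays (the real helper also shows the expected level of chance alignments); the separation_histogram function and its arguments are illustrative, not part of herschelhelp_internal.
# Illustrative sketch only: histogram of nearest-neighbour separations
# between two coordinate sets, using astropy's match_to_catalog_sky.
# Relies on the u (astropy.units) and plt (matplotlib.pyplot) imports above.
def separation_histogram(coords_master, coords_new, max_radius=5 * u.arcsec):
    """Plot the distribution of distances (arcsec) from each source in
    coords_new to its nearest neighbour in coords_master."""
    _, sep2d, _ = coords_new.match_to_catalog_sky(coords_master)
    close = sep2d < max_radius
    plt.hist(sep2d[close].arcsec, bins=50)
    plt.xlabel("Separation [arcsec]")
    plt.ylabel("Number of sources")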
master_catalogue = vhs
master_catalogue['vhs_ra'].name = 'ra'
master_catalogue['vhs_dec'].name = 'dec'
nb_merge_dist_plot(
SkyCoord(master_catalogue['ra'], master_catalogue['dec']),
SkyCoord(video['video_ra'], video['video_dec'])
)
# Given the graph above, we use 0.8 arc-second radius
master_catalogue = merge_catalogues(master_catalogue, video, "video_ra", "video_dec", radius=0.8*u.arcsec)
nb_merge_dist_plot(
SkyCoord(master_catalogue['ra'], master_catalogue['dec']),
SkyCoord(viking['viking_ra'], viking['viking_dec'])
)
# Given the graph above, we use 0.8 arc-second radius
master_catalogue = merge_catalogues(master_catalogue, viking, "viking_ra", "viking_dec", radius=0.8*u.arcsec)
When we merge the catalogues, astropy masks the non-existent values (e.g. when a row comes from only one catalogue and has no counterpart in the others, the columns coming from the other catalogues are masked for that row). We set the fill values to NaN for flux, magnitude and stellarity columns, False for flag columns, and -1 for ID columns; a toy illustration of this filling follows the next cell.
for col in master_catalogue.colnames:
if "m_" in col or "merr_" in col or "f_" in col or "ferr_" in col or "stellarity" in col:
master_catalogue[col] = master_catalogue[col].astype(float)
master_catalogue[col].fill_value = np.nan
elif "flag" in col:
master_catalogue[col].fill_value = 0
elif "id" in col:
master_catalogue[col].fill_value = -1
master_catalogue = master_catalogue.filled()
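As a toy illustration of the astropy filling behaviour used above (the "f_demo", "flag_demo" and "demo_id" columns are made up for this example only):
# Toy example of masked-column filling; not part of the pipeline.
from astropy.table import MaskedColumn
toy = Table(masked=True)
toy['f_demo'] = MaskedColumn([1.0, 2.0], mask=[False, True])
toy['flag_demo'] = MaskedColumn([True, False], mask=[False, True])
toy['demo_id'] = MaskedColumn([42, 7], mask=[False, True])
toy['f_demo'].fill_value = np.nan
toy['flag_demo'].fill_value = False
toy['demo_id'].fill_value = -1
print(toy.filled())  # masked entries become nan, False and -1 respectively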
# Since this is not the final merged catalogue, we rename the columns to make them unique.
master_catalogue['ra'].name = 'vircam_ra'
master_catalogue['dec'].name = 'vircam_dec'
master_catalogue['flag_merged'].name = 'vircam_flag_merged'
master_catalogue[:10].show_in_notebook()
master_catalogue.add_column(Column(data=(np.char.array(master_catalogue['vhs_id'].astype(str))
+ np.char.array(master_catalogue['video_id'].astype(str) )
+ np.char.array(master_catalogue['viking_id'].astype(str))),
name="vircam_intid"))
id_names = []
for col in master_catalogue.colnames:
if '_id' in col:
id_names += [col]
if '_intid' in col:
id_names += [col]
print(id_names)
According to Mattia Vaccari, VIDEO is deeper than VIKING, which is in turn deeper than VHS.
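The per-band loop below applies this priority; in essence, the selection reduces to the boolean logic sketched here (the has_* arrays are hypothetical stand-ins for the ~np.isnan() tests used below).
# Depth-priority sketch: use VIDEO where available, else VIKING, else VHS.
has_video = np.array([True, False, False, False])
has_viking = np.array([True, True, False, False])
has_vhs = np.array([True, True, True, False])
use_video = has_video
use_viking = has_viking & ~has_video
use_vhs = has_vhs & ~has_video & ~has_viking
# Source 0 takes VIDEO, source 1 VIKING, source 2 VHS; source 3 has no VISTA flux.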
vista_origin = Table()
vista_origin.add_column(master_catalogue['vircam_intid'])
vista_stats = Table()
vista_stats.add_column(Column(data=['y','j','h','k','z'], name="Band"))
for col_name in ["VIDEO", "VIKING", "VHS",
                 "use VIDEO", "use VIKING", "use VHS",
                 "VIDEO ap", "VIKING ap", "VHS ap",
                 "use VIDEO ap", "use VIKING ap", "use VHS ap"]:
    vista_stats.add_column(Column(data=np.full(5, 0), name=col_name))
vista_bands = ['y','j','h','k','z'] # Lowercase naming convention (k is Ks)
for band in vista_bands:
#print('For VISTA band ' + band + ':')
# VISTA total flux
has_video = ~np.isnan(master_catalogue['f_video_' + band])
has_viking = ~np.isnan(master_catalogue['f_viking_' + band])
if band == 'z':
has_vhs = np.full(len(master_catalogue), False, dtype=bool)
else:
has_vhs = ~np.isnan(master_catalogue['f_vhs_' + band])
#print("{} sources with VIDEO flux".format(np.sum(has_video)))
#print("{} sources with VIKING flux".format(np.sum(has_viking)))
#print("{} sources with VHS flux".format(np.sum(has_vhs)))
#print("{} sources with VIDEO, VIKING, and VHS flux".format(np.sum(has_video & has_viking & has_vhs)))
use_video = has_video
use_viking = has_viking & ~has_video
use_vhs = has_vhs & ~has_video & ~has_viking
#print("{} sources for which we use VIDEO".format(np.sum(use_video)))
#print("{} sources for which we use VIKING".format(np.sum(use_viking)))
#print("{} sources for which we use VHS".format(np.sum(use_vhs)))
f_vista = np.full(len(master_catalogue), np.nan)
f_vista[use_video] = master_catalogue['f_video_' + band][use_video]
f_vista[use_viking] = master_catalogue['f_viking_' + band][use_viking]
if not (band == 'z'):
f_vista[use_vhs] = master_catalogue['f_vhs_' + band][use_vhs]
ferr_vista = np.full(len(master_catalogue), np.nan)
ferr_vista[use_video] = master_catalogue['ferr_video_' + band][use_video]
ferr_vista[use_viking] = master_catalogue['ferr_viking_' + band][use_viking]
if not (band == 'z'):
ferr_vista[use_vhs] = master_catalogue['ferr_vhs_' + band][use_vhs]
m_vista = np.full(len(master_catalogue), np.nan)
m_vista[use_video] = master_catalogue['m_video_' + band][use_video]
m_vista[use_viking] = master_catalogue['m_viking_' + band][use_viking]
if not (band == 'z'):
m_vista[use_vhs] = master_catalogue['m_vhs_' + band][use_vhs]
merr_vista = np.full(len(master_catalogue), np.nan)
merr_vista[use_video] = master_catalogue['merr_video_' + band][use_video]
merr_vista[use_viking] = master_catalogue['merr_viking_' + band][use_viking]
if not (band == 'z'):
merr_vista[use_vhs] = master_catalogue['merr_vhs_' + band][use_vhs]
flag_vista = np.full(len(master_catalogue), False, dtype=bool)
flag_vista[use_video] = master_catalogue['flag_video_' + band][use_video]
flag_vista[use_viking] = master_catalogue['flag_viking_' + band][use_viking]
if not (band == 'z'):
flag_vista[use_vhs] = master_catalogue['flag_vhs_' + band][use_vhs]
master_catalogue.add_column(Column(data=f_vista, name="f_vista_" + band))
master_catalogue.add_column(Column(data=ferr_vista, name="ferr_vista_" + band))
master_catalogue.add_column(Column(data=m_vista, name="m_vista_" + band))
master_catalogue.add_column(Column(data=merr_vista, name="merr_vista_" + band))
master_catalogue.add_column(Column(data=flag_vista, name="flag_vista_" + band))
old_video_and_viking_columns = ['f_video_' + band,
'f_viking_' + band,
'ferr_video_' + band,
'ferr_viking_' + band,
'm_video_' + band,
'm_viking_' + band,
'merr_video_' + band,
'merr_viking_' + band,
'flag_video_' + band,
'flag_viking_' + band]
old_vhs_columns = ['f_vhs_' + band,
'ferr_vhs_' + band,
'm_vhs_' + band,
'merr_vhs_' + band,
'flag_vhs_' + band]
if not (band == 'z'):
old_columns = old_video_and_viking_columns + old_vhs_columns
else:
old_columns = old_video_and_viking_columns
master_catalogue.remove_columns(old_columns)
origin = np.full(len(master_catalogue), ' ', dtype='<U6')  # '<U6' so that "VIKING" is not truncated
origin[use_video] = "VIDEO"
origin[use_viking] = "VIKING"
origin[use_vhs] = "VHS"
vista_origin.add_column(Column(data=origin, name= 'f_vista_' + band ))
# VISTA Aperture flux
has_ap_video = ~np.isnan(master_catalogue['f_ap_video_' + band])
has_ap_viking = ~np.isnan(master_catalogue['f_ap_viking_' + band])
if (band == 'z'):
has_ap_vhs = np.full(len(master_catalogue), False, dtype=bool)
else:
has_ap_vhs = ~np.isnan(master_catalogue['f_ap_vhs_' + band])
#print("{} sources with VIDEO aperture flux".format(np.sum(has_ap_video)))
#print("{} sources with VIKING aperture flux".format(np.sum(has_ap_viking)))
#print("{} sources with VHS aperture flux".format(np.sum(has_ap_vhs)))
#print("{} sources with VIDEO, VIKING and VHS aperture flux".format(np.sum(has_ap_video & has_ap_viking & has_ap_vhs)))
use_ap_video = has_ap_video
use_ap_viking = has_ap_viking & ~has_ap_video
use_ap_vhs = has_ap_vhs & ~has_ap_video & ~has_ap_viking
#print("{} sources for which we use VIDEO aperture fluxes".format(np.sum(use_ap_video)))
#print("{} sources for which we use VIKING aperture fluxes".format(np.sum(use_ap_viking)))
#print("{} sources for which we use VHS aperture fluxes".format(np.sum(use_ap_vhs)))
f_ap_vista = np.full(len(master_catalogue), np.nan)
f_ap_vista[use_ap_video] = master_catalogue['f_ap_video_' + band][use_ap_video]
f_ap_vista[use_ap_viking] = master_catalogue['f_ap_viking_' + band][use_ap_viking]
if not (band == 'z'):
f_ap_vista[use_ap_vhs] = master_catalogue['f_ap_vhs_' + band][use_ap_vhs]
ferr_ap_vista = np.full(len(master_catalogue), np.nan)
ferr_ap_vista[use_ap_video] = master_catalogue['ferr_ap_video_' + band][use_ap_video]
ferr_ap_vista[use_ap_viking] = master_catalogue['ferr_ap_viking_' + band][use_ap_viking]
if not (band == 'z'):
ferr_ap_vista[use_ap_vhs] = master_catalogue['ferr_ap_vhs_' + band][use_ap_vhs]
m_ap_vista = np.full(len(master_catalogue), np.nan)
m_ap_vista[use_ap_video] = master_catalogue['m_ap_video_' + band][use_ap_video]
m_ap_vista[use_ap_viking] = master_catalogue['m_ap_viking_' + band][use_ap_viking]
if not (band == 'z'):
m_ap_vista[use_ap_vhs] = master_catalogue['m_ap_vhs_' + band][use_ap_vhs]
merr_ap_vista = np.full(len(master_catalogue), np.nan)
merr_ap_vista[use_ap_video] = master_catalogue['merr_ap_video_' + band][use_ap_video]
merr_ap_vista[use_ap_viking] = master_catalogue['merr_ap_viking_' + band][use_ap_viking]
if not (band == 'z'):
merr_ap_vista[use_ap_vhs] = master_catalogue['merr_ap_vhs_' + band][use_ap_vhs]
master_catalogue.add_column(Column(data=f_ap_vista, name="f_ap_vista_" + band))
master_catalogue.add_column(Column(data=ferr_ap_vista, name="ferr_ap_vista_" + band))
master_catalogue.add_column(Column(data=m_ap_vista, name="m_ap_vista_" + band))
master_catalogue.add_column(Column(data=merr_vista, name="merr_ap_vista_" + band))
ap_old_video_and_viking_columns = ['f_ap_video_' + band,
'f_ap_viking_' + band,
'ferr_ap_video_' + band,
'ferr_ap_viking_' + band,
'm_ap_video_' + band,
'm_ap_viking_' + band,
'merr_ap_video_' + band,
'merr_ap_viking_' + band]
ap_old_vhs_columns = ['f_ap_vhs_' + band,
'ferr_ap_vhs_' + band,
'm_ap_vhs_' + band,
'merr_ap_vhs_' + band]
if not (band == 'z'):
ap_old_columns = ap_old_video_and_viking_columns + ap_old_vhs_columns
else:
ap_old_columns = ap_old_video_and_viking_columns
master_catalogue.remove_columns(ap_old_columns)
origin_ap = np.full(len(master_catalogue), ' ', dtype='<U6')  # '<U6' so that "VIKING" is not truncated
origin_ap[use_ap_video] = "VIDEO"
origin_ap[use_ap_viking] = "VIKING"
origin_ap[use_ap_vhs] = "VHS"
vista_origin.add_column(Column(data=origin_ap, name= 'f_ap_vista_' + band ))
vista_stats['VIDEO'][vista_stats['Band'] == band] = np.sum(has_video)
vista_stats['VIKING'][vista_stats['Band'] == band] = np.sum(has_viking)
vista_stats['VHS'][vista_stats['Band'] == band] = np.sum(has_vhs)
vista_stats['use VIDEO'][vista_stats['Band'] == band] = np.sum(use_video)
vista_stats['use VIKING'][vista_stats['Band'] == band] = np.sum(use_viking)
vista_stats['use VHS'][vista_stats['Band'] == band] = np.sum(use_vhs)
vista_stats['VIDEO ap'][vista_stats['Band'] == band] = np.sum(has_ap_video)
vista_stats['VIKING ap'][vista_stats['Band'] == band] = np.sum(has_ap_viking)
vista_stats['VHS ap'][vista_stats['Band'] == band] = np.sum(has_ap_vhs)
vista_stats['use VIDEO ap'][vista_stats['Band'] == band] = np.sum(use_ap_video)
vista_stats['use VIKING ap'][vista_stats['Band'] == band] = np.sum(use_ap_viking)
vista_stats['use VHS ap'][vista_stats['Band'] == band] = np.sum(use_ap_vhs)
for col in master_catalogue.colnames:
if 'vista' in col:
master_catalogue[col].name = col.replace('vista', 'vircam')
For each band, the table below shows how many objects have fluxes from each survey, for both total and aperture photometry.
vista_stats.show_in_notebook()
vista_origin.write("{}/xmm-lss_vista_fluxes_origins{}.fits".format(OUT_DIR, SUFFIX), overwrite=True)
columns = ["help_id", "field", "ra", "dec", "hp_idx"]
bands = [column[5:] for column in master_catalogue.colnames if 'f_ap' in column]
for band in bands:
columns += ["f_ap_{}".format(band), "ferr_ap_{}".format(band),
"m_ap_{}".format(band), "merr_ap_{}".format(band),
"f_{}".format(band), "ferr_{}".format(band),
"m_{}".format(band), "merr_{}".format(band),
"flag_{}".format(band)]
columns += ["stellarity", "stellarity_origin", "flag_cleaned", "flag_merged", "flag_gaia", "flag_optnir_obs", "flag_optnir_det",
"zspec", "zspec_qual", "zspec_association_flag", "ebv"]
# We check which columns in the master catalogue are not part of the final column list.
print("Columns not in the final list: {}".format(set(master_catalogue.colnames) - set(columns)))
master_catalogue.write("{}/vircam_merged_catalogue_xmm-lss.fits".format(TMP_DIR), overwrite=True)