This notebook presents the merge of the various pristine catalogues to produce the HELP master catalogue on XMM-LSS.
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
import os
import time
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from pymoc import MOC
from herschelhelp_internal.masterlist import merge_catalogues, nb_merge_dist_plot, specz_merge
from herschelhelp_internal.utils import coords_to_hpidx, ebv, gen_help_id, inMoc
TMP_DIR = os.environ.get('TMP_DIR', "./data_tmp")
OUT_DIR = os.environ.get('OUT_DIR', "./data")
SUFFIX = os.environ.get('SUFFIX', time.strftime("_%Y%m%d"))
try:
os.makedirs(OUT_DIR)
except FileExistsError:
pass
#candels = Table.read("{}/CANDELS.fits".format(TMP_DIR)) # 1.1
#cfht_wirds = Table.read("{}/CFHT-WIRDS.fits".format(TMP_DIR)) # 1.3
#cfhtls_wide = Table.read("{}/CFHTLS-WIDE.fits".format(TMP_DIR)) # 1.4a
#cfhtls_deep = Table.read("{}/CFHTLS-DEEP.fits".format(TMP_DIR)) # 1.4b
#We no longer use CFHTLenS as it is the same raw data set as CFHTLS-WIDE
# cfhtlens = Table.read("{}/CFHTLENS.fits".format(TMP_DIR)) # 1.5
#decals = Table.read("{}/DECaLS.fits".format(TMP_DIR)) # 1.6
#servs = Table.read("{}/SERVS.fits".format(TMP_DIR)) # 1.8
#swire = Table.read("{}/SWIRE.fits".format(TMP_DIR)) # 1.7
#hsc_wide = Table.read("{}/HSC-WIDE.fits".format(TMP_DIR)) # 1.9a
#hsc_deep = Table.read("{}/HSC-DEEP.fits".format(TMP_DIR)) # 1.9b
#hsc_udeep = Table.read("{}/HSC-UDEEP.fits".format(TMP_DIR)) # 1.9c
#ps1 = Table.read("{}/PS1.fits".format(TMP_DIR)) # 1.10
#sxds = Table.read("{}/SXDS.fits".format(TMP_DIR)) # 1.11
#sparcs = Table.read("{}/SpARCS.fits".format(TMP_DIR)) # 1.12
dxs = Table.read("{}/UKIDSS-DXS.fits".format(TMP_DIR)) # 1.13
uds = Table.read("{}/UKIDSS-UDS.fits".format(TMP_DIR)) # 1.14
#vipers = Table.read("{}/VIPERS.fits".format(TMP_DIR)) # 1.15
#vhs = Table.read("{}/VISTA-VHS.fits".format(TMP_DIR)) # 1.16
#video = Table.read("{}/VISTA-VIDEO.fits".format(TMP_DIR)) # 1.17
#viking = Table.read("{}/VISTA-VIKING.fits".format(TMP_DIR)) # 1.18
We first merge the optical catalogues and then add the infrared ones. We would normally start with PanSTARRS because it covers the whole field, but in this run only the UKIDSS catalogues (DXS and UDS) are merged, starting with DXS.
At every step, we look at the distribution of the distances separating the sources from one catalogue to the other (within a maximum radius) to determine the best cross-matching radius.
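For reference, a similar separation distribution can be computed directly with astropy; the following is only an illustrative sketch using the DXS and UDS coordinates read above, not part of the HELP helper functions.
coords_dxs = SkyCoord(dxs['dxs_ra'], dxs['dxs_dec'])
coords_uds = SkyCoord(uds['uds_ra'], uds['uds_dec'])
# Nearest DXS neighbour of every UDS source; d2d holds the on-sky separations.
_, d2d, _ = coords_uds.match_to_catalog_sky(coords_dxs)
plt.hist(d2d.arcsec[d2d < 5 * u.arcsec], bins=50)
plt.xlabel("Nearest neighbour separation (arcsec)")
plt.ylabel("Number of UDS sources")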
master_catalogue = dxs
master_catalogue['dxs_ra'].name = 'ra'
master_catalogue['dxs_dec'].name = 'dec'
nb_merge_dist_plot(
SkyCoord(master_catalogue['ra'], master_catalogue['dec']),
SkyCoord(uds['uds_ra'], uds['uds_dec'])
)
# Given the graph above, we use 0.8 arc-second radius
master_catalogue = merge_catalogues(master_catalogue, uds, "uds_ra", "uds_dec", radius=0.8*u.arcsec)
When we merge the catalogues, astropy masks the non-existent values (e.g. when a row comes from only one catalogue and has no counterpart in the other, the columns from the other catalogue are masked for that row). We replace the masked values with NaN for float columns, False (0) for flag columns, and -1 for ID columns.
for col in master_catalogue.colnames:
if "m_" in col or "merr_" in col or "f_" in col or "ferr_" in col or "stellarity" in col:
master_catalogue[col] = master_catalogue[col].astype(float)
master_catalogue[col].fill_value = np.nan
elif "flag" in col:
master_catalogue[col].fill_value = 0
elif "id" in col:
master_catalogue[col].fill_value = -1
master_catalogue = master_catalogue.filled()
# Since this is not the final merged catalogue, we rename the columns to make them unique.
master_catalogue['ra'].name = 'ukidss_ra'
master_catalogue['dec'].name = 'ukidss_dec'
master_catalogue['flag_merged'].name = 'ukidss_flag_merged'
master_catalogue[:10].show_in_notebook()
# Build an internal UKIDSS identifier by concatenating the DXS and UDS identifiers
# (a missing identifier shows up as -1 after the fill above).
master_catalogue.add_column(Column(data=(np.char.array(master_catalogue['dxs_id'].astype(str))
                                         + np.char.array(master_catalogue['uds_id'].astype(str))),
                                   name="ukidss_intid"))
id_names = []
for col in master_catalogue.colnames:
if '_id' in col:
id_names += [col]
if '_intid' in col:
id_names += [col]
print(id_names)
There is no overlap between UDS and DXS, so we simply merge the two sets of columns.
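As a quick sanity check of this assumption, one can count the sources that have a K-band measurement from both surveys before combining the columns (a minimal, illustrative check; the count should be zero, and the same check can be repeated for the J band).
# Illustrative check: with no overlap between DXS and UDS coverage, no source should have
# a K-band flux from both surveys.
both_k = ~np.isnan(master_catalogue['f_ukidss_k']) & ~np.isnan(master_catalogue['f_uds_k'])
print("Sources with both DXS and UDS K-band fluxes: {}".format(np.sum(both_k)))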
# The H band is only in UDS, so we can simply rename its columns.
for col in master_catalogue.colnames:
if 'uds_h' in col:
master_catalogue[col].name = col.replace('uds_h', 'ukidss_h')
has_uds_k = ~np.isnan(master_catalogue['f_uds_k'])
has_uds_j = ~np.isnan(master_catalogue['f_uds_j'])
has_ukidss_k = ~np.isnan(master_catalogue['f_ukidss_k'])
has_ukidss_j = ~np.isnan(master_catalogue['f_ukidss_j'])
master_catalogue['f_ukidss_k'][has_uds_k] = master_catalogue['f_uds_k'][has_uds_k]
master_catalogue['ferr_ukidss_k'][has_uds_k] = master_catalogue['ferr_uds_k'][has_uds_k]
master_catalogue['m_ukidss_k'][has_uds_k] = master_catalogue['m_uds_k'][has_uds_k]
master_catalogue['merr_ukidss_k'][has_uds_k] = master_catalogue['merr_uds_k'][has_uds_k]
master_catalogue['flag_ukidss_k'][has_uds_k] = master_catalogue['flag_uds_k'][has_uds_k]
master_catalogue['f_ukidss_j'][has_uds_j] = master_catalogue['f_uds_j'][has_uds_j]
master_catalogue['ferr_ukidss_j'][has_uds_j] = master_catalogue['ferr_uds_j'][has_uds_j]
master_catalogue['m_ukidss_j'][has_uds_j] = master_catalogue['m_uds_j'][has_uds_j]
master_catalogue['merr_ukidss_j'][has_uds_j] = master_catalogue['merr_uds_j'][has_uds_j]
master_catalogue['flag_ukidss_j'][has_uds_j] = master_catalogue['flag_uds_j'][has_uds_j]
has_ap_uds_k = ~np.isnan(master_catalogue['f_ap_uds_k'])
has_ap_uds_j = ~np.isnan(master_catalogue['f_ap_uds_j'])
has_ap_ukidss_k = ~np.isnan(master_catalogue['f_ap_ukidss_k'])
has_ap_ukidss_j = ~np.isnan(master_catalogue['f_ap_ukidss_j'])
master_catalogue['f_ap_ukidss_k'][has_ap_uds_k] = master_catalogue['f_ap_uds_k'][has_ap_uds_k]
master_catalogue['ferr_ap_ukidss_k'][has_ap_uds_k] = master_catalogue['ferr_ap_uds_k'][has_ap_uds_k]
master_catalogue['m_ap_ukidss_k'][has_ap_uds_k] = master_catalogue['m_ap_uds_k'][has_ap_uds_k]
master_catalogue['merr_ap_ukidss_k'][has_ap_uds_k] = master_catalogue['merr_ap_uds_k'][has_ap_uds_k]
master_catalogue['f_ap_ukidss_j'][has_ap_uds_j] = master_catalogue['f_ap_uds_j'][has_ap_uds_j]
master_catalogue['ferr_ap_ukidss_j'][has_ap_uds_j] = master_catalogue['ferr_ap_uds_j'][has_ap_uds_j]
master_catalogue['m_ap_ukidss_j'][has_ap_uds_j] = master_catalogue['m_ap_uds_j'][has_ap_uds_j]
master_catalogue['merr_ap_ukidss_j'][has_ap_uds_j] = master_catalogue['merr_ap_uds_j'][has_ap_uds_j]
master_catalogue.remove_columns(['f_uds_j','ferr_uds_j','m_uds_j','merr_uds_j','flag_uds_j',
'f_uds_k','ferr_uds_k','m_uds_k','merr_uds_k','flag_uds_k',
'f_ap_uds_j','ferr_ap_uds_j','m_ap_uds_j','merr_ap_uds_j',
'f_ap_uds_k','ferr_ap_uds_k','m_ap_uds_k','merr_ap_uds_k'])
ukidss_origin = Table()
ukidss_origin.add_column(master_catalogue['ukidss_intid'])
origin = np.full(len(master_catalogue), ' ', dtype='<U5')
origin[has_uds_k] = "UDS"
origin[has_ukidss_k] = "DXS"
ukidss_origin.add_column(Column(data=origin, name= 'f_ukidss_k' ))
origin = np.full(len(master_catalogue), ' ', dtype='<U5')
origin[has_uds_j] = "UDS"
origin[has_ukidss_j] = "DXS"
ukidss_origin.add_column(Column(data=origin, name= 'f_ukidss_j' ))
origin_ap = np.full(len(master_catalogue), ' ', dtype='<U5')
origin_ap[has_ap_uds_k] = "UDS"
origin_ap[has_ap_ukidss_k] = "DXS"
ukidss_origin.add_column(Column(data=origin_ap, name= 'f_ap_ukidss_k' ))
origin_ap = np.full(len(master_catalogue), ' ', dtype='<U5')
origin_ap[has_ap_uds_j] = "UDS"
origin_ap[has_ap_ukidss_j] = "DXS"
ukidss_origin.add_column(Column(data=origin_ap, name= 'f_ap_ukidss_j' ))
ukidss_origin.write("{}/xmm-lss_ukidss_fluxes_origins{}.fits".format(OUT_DIR, SUFFIX), overwrite=True)
We produce a table associating each HELP identifier with the identifiers of the sources in the pristine catalogues. This can be used to easily get additional information from them.
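A minimal sketch of such a table is given below; it assumes that a help_id column has already been generated (for instance with gen_help_id) and reuses the id_names list gathered earlier. The output filename is only illustrative.
# Illustrative sketch: keep the HELP identifier alongside the per-catalogue identifiers.
# This assumes help_id is present in the master catalogue; adapt the list to the columns
# that actually exist at this stage.
crossids_columns = ['help_id'] + id_names
crossids = master_catalogue[crossids_columns]
crossids.write("{}/xmm-lss_ukidss_crossids{}.fits".format(OUT_DIR, SUFFIX), overwrite=True)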
For convenience, we also cross-match the master list with the SDSS catalogue and add the objID associated with each source, if any. TODO: should we correct the astrometry with respect to Gaia positions?
columns = ["help_id", "field", "ra", "dec", "hp_idx"]
bands = [column[5:] for column in master_catalogue.colnames if 'f_ap' in column]
for band in bands:
columns += ["f_ap_{}".format(band), "ferr_ap_{}".format(band),
"m_ap_{}".format(band), "merr_ap_{}".format(band),
"f_{}".format(band), "ferr_{}".format(band),
"m_{}".format(band), "merr_{}".format(band),
"flag_{}".format(band)]
columns += ["stellarity", "stellarity_origin", "flag_cleaned", "flag_merged", "flag_gaia", "flag_optnir_obs", "flag_optnir_det",
"zspec", "zspec_qual", "zspec_association_flag", "ebv"]
# We check for columns in the master catalogue that will not be saved to disk.
print("Columns that will not be saved: {}".format(set(master_catalogue.colnames) - set(columns)))
master_catalogue.write("{}/ukidss_merged_catalogue_xmm-lss.fits".format(TMP_DIR), overwrite=True)