Evaluating footprints for various survey modes in baseline survey v4
This notebook uses the scheduler’s input target footprint to describe the on-sky goal footprint for the various survey modes. MAF is then used with the simulation output to measure the typical number of visits per filter in each of these areas.
# Import required python modules
import os
from IPython.display import display, Markdown, HTML
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
import pandas as pd
import sqlite3
from astropy.coordinates import SkyCoord
import astropy.units as u
import rubin_scheduler.scheduler.utils as sched_utils
from rubin_scheduler.utils import ddf_locations
from rubin_sim.data import get_baseline
import rubin_sim.maf as maf
version = 'v4.0'
The current survey ‘target map’ is defined in the class CurrentAreaMap.
nside = 64
sky = sched_utils.CurrentAreaMap(nside=nside)
# footprints contains the goal weights per filter, for the sky (all survey modes together)
# labels contains the label (survey mode) for each healpix.
footprints, labels = sky.return_maps()
# Each healpix is only labelled with a single survey mode label
np.unique(labels)
# Make a plot of the WFD-level coverage
pix = np.where((labels == "euclid_overlap") | (labels == "lowdust") | (labels == "virgo"))
pix2 = np.where((labels == "LMC_SMC") | (labels == "bulgy"))
fp = np.zeros(hp.nside2npix(nside))
fp[pix] = 1
fp[pix2] = 0.8
fp[np.where(labels == "")] = hp.UNSEEN
plt.figure(figsize=(8, 6))
hp.mollview(fp, title=None, cbar=False)
plt.title(f"WFD Footprint {version}", fontsize='x-large', fontweight='regular', color='black')
plt.figtext(0.51, 0.3, 'Low-dust\n WFD', fontsize='x-large', fontweight='bold', color='black')
#plt.figtext(0.31, 0.6, 'NES', fontsize='x-large', fontweight='bold', color='black')
plt.figtext(0.68, 0.3, 'GP\n WFD', fontsize='large', fontweight='bold', color='black')
plt.figtext(0.9, 0.62, "Virgo", fontsize='large', fontweight='bold', color='black')
plt.figtext(0.38, 0.18, "LMC SMC", fontsize='large', fontweight='bold', color='black')
#plt.figtext(0.18, 0.4, 'Dusty\n Plane', fontsize='x-large', fontweight='bold', color='black')
#plt.figtext(0.55, 0.1, 'SCP', fontsize='x-large', fontweight='bold', color='black')
#plt.figtext(0.36, 0.3, 'DDFs', fontsize='x-large', fontweight='bold', color='black')
#hp.graticule()
plt.savefig('WFD_footprint.png', format='png')
# Total area in "WFD" above
pix2area = hp.nside2pixarea(nside, degrees=True)
len(np.where(fp > 0.7)[0]) * pix2area
# Make a plot of the Minisurvey areas
fp = np.zeros(hp.nside2npix(nside))
fp[np.where(labels == "dusty_plane")] = 0.9
fp[np.where(labels == "nes")] = 1
fp[np.where(labels == "scp")] = 0.6
fp[np.where(labels == "")] = hp.UNSEEN
plt.figure(figsize=(8, 6))
hp.mollview(fp, title=None, cbar=False)
plt.title(f"Minisurvey Footprint {version}", fontsize='x-large', fontweight='regular', color='black')
#plt.figtext(0.51, 0.3, 'Low-dust\n WFD', fontsize='x-large', fontweight='bold', color='black')
plt.figtext(0.31, 0.6, 'NES', fontsize='x-large', fontweight='bold', color='black')
#plt.figtext(0.68, 0.3, 'GP\n WFD', fontsize='large', fontweight='bold', color='black')
#plt.figtext(0.9, 0.62, "Virgo", fontsize='large', fontweight='bold', color='black')
#plt.figtext(0.38, 0.18, "LMC SMC", fontsize='large', fontweight='bold', color='black')
plt.figtext(0.18, 0.3, 'Dusty\n Plane', rotation=75, fontsize='x-large', fontweight='bold', color='black')
plt.figtext(0.55, 0.13, 'SCP', fontsize='x-large', fontweight='bold', color='black')
#plt.figtext(0.36, 0.3, 'DDFs', fontsize='x-large', fontweight='bold', color='black')
#hp.graticule()
plt.savefig("Minisurvey_footprint.png", format="png")
# minisurvey areas
pix2area = hp.nside2pixarea(nside, degrees=True)
len(np.where(fp > 0.5)[0]) * pix2area
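As a cross-check, the area of every labelled region can be tabulated directly from the labels map. The short sketch below uses a pandas value_counts; the empty label marks sky outside any survey footprint.
# Tabulate the number of healpixels and the area (sq deg) per region label.
# Sketch only; uses `labels` and `nside` from the cells above.
pix2area = hp.nside2pixarea(nside, degrees=True)
region_areas = pd.Series(labels).value_counts().rename("n_healpix").to_frame()
region_areas["area_sq_deg"] = (region_areas["n_healpix"] * pix2area).round(0)
region_areas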
# Describe the DDF locations
ddfs = ddf_locations()
ddfs
pdfs = {}
for ddf_name in ddfs:
    ra = ddfs[ddf_name][0]
    dec = ddfs[ddf_name][1]
    coord = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame="icrs")
    eclip_lat = coord.barycentrictrueecliptic.lat.deg
    eclip_lon = coord.barycentrictrueecliptic.lon.deg
    gal_lon = coord.galactic.l.deg
    gal_lat = coord.galactic.b.deg
    pdfs[ddf_name] = [ra, dec, gal_lon, gal_lat, eclip_lon, eclip_lat]
d = pd.DataFrame(pdfs, index=['RA', 'Dec', 'Gal l', 'Gal b', 'Eclip l', 'Eclip b']).round(2)
print(d.to_markdown(tablefmt="rst"))
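For reference, the DDF centers can also be over-plotted on the footprint. This is a minimal sketch using healpy’s projscatter and projtext, drawn over a simple binary map rebuilt from the labels array.
# Over-plot the DDF centers on the survey footprint (sketch only).
# Rebuild a binary map from `labels`: 1 inside any survey region, UNSEEN outside.
fp_ddf = np.where(labels == "", hp.UNSEEN, 1.0)
hp.mollview(fp_ddf, title=f"DDF locations {version}", cbar=False)
for ddf_name in ddfs:
    ra, dec = ddfs[ddf_name][0], ddfs[ddf_name][1]
    hp.projscatter(ra, dec, lonlat=True, color="red", s=20)
    hp.projtext(ra, dec, ddf_name, lonlat=True, fontsize="small")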
Query the database directly to see the “scheduler_note” information, which describes how the different observations were acquired.
opsdb = '/Users/lynnej/opsim/fbs_4.0/baseline_v4.0_10yrs.db'  # local copy; get_baseline() can also locate the current baseline
conn = sqlite3.connect(opsdb)
query = "select count(*) as cc, scheduler_note from observations group by scheduler_note"
d = pd.read_sql(query, conn)
nn = list(d.scheduler_note.unique())
print([n for n in nn if 'ToO' not in n])
# Calculate the total number of visits and the fraction of visits in each of the above modes
total = d.cc.sum()
d['fraction'] = d.cc / total * 100
display(HTML(d.query('not scheduler_note.str.contains("ToO")').to_html()))
# The pairs are generally related to WFD, but also include pairs in the NES and galactic plane
# Pairs are acquired during relatively constant weather conditions, in 'regular' observing
norm = total
print('all pairs (but not triplet pairs)')
allpairs = d.query('scheduler_note.str.contains("pair")').cc.sum()
allpairs / norm
# The "blob_long" (pairs) and "long" (third visits) survey modes together make up the *triplet* visits
blob_long_sum = d.query('scheduler_note.str.contains("blob_long")').cc.sum()
blob_long_all = d.query('scheduler_note.str.contains("long")').cc.sum()
thirds = blob_long_all - blob_long_sum
print('triplet pairs, thirds, all triplet visits (out of all visits)')
print(blob_long_sum / norm, thirds / norm, blob_long_all / norm)
print()
# Fraction of pair sequences that received a third visit; this should be about 3%
print("triplet visits (out of all *pairs*)")
print((blob_long_all - blob_long_sum) / ((allpairs + blob_long_sum)/2))
# Visits per DDF
d.query('scheduler_note.str.contains("DD")')
print('percent of survey in DDF visits (excluding RGES)')
d.query('scheduler_note.str.contains("DD") and ~scheduler_note.str.contains("RGES")').fraction.sum()
# Twilight NEO visits are for the near-sun twilight microsurvey
print('percent of survey in near-sun twilight visits')
d.query('scheduler_note.str.contains("twilight")').cc.sum()/2 / total * 100
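The same accounting can be gathered into one coarse breakdown. The sketch below buckets the scheduler_note values into the categories discussed above; the bucket names are just illustrative labels, not scheduler terminology.
# Bucket the scheduler_note values into coarse visit categories and sum the fractions.
# Sketch only; the category names are illustrative.
def note_category(note):
    if 'DD' in note:
        return 'DDF'
    if 'twi' in note:
        return 'near-sun twilight'
    if 'ToO' in note:
        return 'ToO'
    if 'long' in note:
        return 'triplets (pairs + thirds)'
    if 'pair' in note:
        return 'pairs'
    return 'other (greedy, etc.)'
d.groupby(d.scheduler_note.apply(note_category)).fraction.sum().round(1)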
To evaluate the typical number of visits per pointing in each of the footprint areas above, we can calculate the number of visits per pointing (in all filters and per filter) with MAF, and then use the healpix ‘label’ map above to identify the relevant healpixels for each area.
filterlist, colors, orders, sqls, info_labels = maf.batches.filter_list(all=True)
opsim_name = os.path.split(opsdb)[-1].replace('.db', '')
print('Running metrics on baseline run:', opsim_name)
# Define the metric bundles
bundles = {}
nvisit_metric = maf.CountMetric(col='observationStartMJD', metric_name='NVisits')
coadd_metric = maf.Coaddm5Metric()
depth_metric = maf.MedianMetric(col='fiveSigmaDepth', metric_name='Median m5')
slicer = maf.HealpixSlicer(nside=nside)
not_special_modes = 'scheduler_note not like "%DD%" and scheduler_note not like "%twi%"'
for f in filterlist:
    if f != 'all':
        sql_add = ' and ' + not_special_modes
    else:
        sql_add = ''  # not_special_modes
    bundles[f'nvisits_{f}'] = maf.MetricBundle(metric=nvisit_metric,
                                               slicer=slicer,
                                               constraint=sqls[f] + sql_add,
                                               run_name=opsim_name,
                                               info_label=info_labels[f],
                                               plot_dict={'color': colors[f], 'xlabel': 'N Visits'})
    bundles[f'coadd_{f}'] = maf.MetricBundle(metric=coadd_metric,
                                             slicer=slicer,
                                             constraint=sqls[f] + sql_add,
                                             run_name=opsim_name,
                                             info_label=info_labels[f],
                                             plot_dict={'color': colors[f], 'xlabel': 'Coadd m5'})
    bundles[f'visit_depth_{f}'] = maf.MetricBundle(metric=depth_metric,
                                                   slicer=slicer,
                                                   constraint=sqls[f] + sql_add,
                                                   run_name=opsim_name,
                                                   info_label=info_labels[f],
                                                   plot_dict={'color': colors[f], 'xlabel': 'Median visit m5'})
# run the metrics
out_dir = 'metric_data'
g = maf.MetricBundleGroup(bundles, opsdb, out_dir=out_dir, results_db=None, verbose=True)
g.run_all()
# Make a plot with the number of visits
ph = maf.PlotHandler(out_dir=out_dir, fig_format='png', thumbnail=False)
ph.set_metric_bundles([bundles['nvisits_all']])
user_plot_dict = {'extend': 'max', 'color_min': 0, 'color_max': 1000, 'figsize': (8, 7)}
ph.plot(maf.HealpixSkyMap(), plot_dicts=user_plot_dict)
plt.figtext(0.51, 0.45, 'Low-dust\n WFD', fontsize='xx-large', fontweight='bold', color='black')
plt.figtext(0.28, 0.6, 'NES', fontsize='xx-large', fontweight='bold', color='black')
plt.figtext(0.9, 0.62, "Virgo", fontsize='large', fontweight='bold', color='black')
plt.figtext(0.71, 0.35, 'GP\n WFD', fontsize='x-large', fontweight='bold', color='black')
plt.figtext(0.72, 0.44, 'Roman', fontsize='large', rotation=0, fontweight='bold', color='black')
plt.figtext(0.18, 0.4, 'Dusty\n Plane', fontsize='x-large', rotation=75, fontweight='bold', color='black')
plt.figtext(0.56, 0.26, 'SCP', fontsize='xx-large', fontweight='bold', color='black')
plt.figtext(0.36, 0.38, 'DDFs', fontsize='x-large', fontweight='bold', color='black')
plt.title(f'{opsim_name}', fontsize='xx-large', fontweight='normal')
# Save the figure separately, because otherwise the figtext labels aren't saved.
plt.savefig(f'{opsim_name}_nvisits.png',
facecolor='w', edgecolor='w', bbox_inches='tight',
dpi=270, format='png')
Now let’s compare these metric results in the subsets of areas described above: low-dust WFD, GP WFD, NES, SCP, and the dusty plane.
regions = [region for region in np.unique(labels) if len(region) > 0]
print(regions)
nvisits = {}
coadd_m5 = {}
visit_depth = {}
for region in regions:
    pix = np.where(labels == region)
    nvisits[region] = {}
    coadd_m5[region] = {}
    visit_depth[region] = {}
    for f in filterlist:
        nvisits[region][f] = np.median(bundles[f'nvisits_{f}'].metric_values.filled(0)[pix])
        coadd_m5[region][f] = np.median(bundles[f'coadd_{f}'].metric_values.filled(np.nan)[pix])
        visit_depth[region][f] = np.median(bundles[f'visit_depth_{f}'].metric_values.filled(np.nan)[pix])
# Number of visits
nvis = pd.DataFrame(nvisits)
region_order = ['lowdust', 'euclid_overlap', 'virgo', 'bulgy', 'LMC_SMC', 'dusty_plane', 'nes', 'scp']
nn = [k for k in regions if k not in region_order]
region_order += nn
nvis.loc['ugrizy'] = nvis.loc[['u', 'g', 'r', 'i', 'z', 'y']].sum()
nvis[region_order]
# Median single visit depth
single_m5 = pd.DataFrame(visit_depth)
single_m5.round(2)[region_order]
# Coadded depth
coadd_m5 = pd.DataFrame(coadd_m5)
coadd_m5.round(2)[region_order]
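For inclusion in a report, these summary tables can be written out in the same way as the DDF table earlier; a small sketch follows (the CSV filenames are just illustrative).
# Save the per-region summary tables to disk and print one as an rst-style table.
# Sketch only; the output filenames are illustrative.
for name, table in [('nvisits', nvis), ('single_m5', single_m5), ('coadd_m5', coadd_m5)]:
    table[region_order].to_csv(f'{opsim_name}_{name}_by_region.csv')
print(nvis[region_order].round(1).to_markdown(tablefmt="rst"))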
We could also plot just a subset of regions of interest.
regions = ['lowdust', 'euclid_overlap', 'virgo', 'bulgy', 'LMC_SMC']
pix = np.zeros(hp.nside2npix(nside))
for r in regions:
    pix[np.where(labels == r)] = 1
data = bundles['nvisits_all'].metric_values.data * pix
# Let's make a temporary metric bundle, just so we can use the same plotting tools
temp_bundle = maf.MetricBundle(nvisit_metric, slicer, None,
run_name=opsim_name,
info_label="WFD",
plot_dict={'color_min': 400, 'color_max':1000,
'x_min': 0, 'x_max':1000,
'extend': 'both'})
temp_bundle.metric_values = np.ma.MaskedArray(data=data,
mask=pix-1)
temp_bundle.plot()
regions = ['dusty_plane', 'nes', 'scp']
pix = np.zeros(hp.nside2npix(nside))
for r in regions:
    pix[np.where(labels == r)] = 1
data = bundles['nvisits_all'].metric_values.data * pix
# Let's make a temporary metric bundle, just so we can use the same plotting tools
temp_bundle = maf.MetricBundle(nvisit_metric, slicer, None,
run_name=opsim_name,
info_label="Minisurveys",
plot_dict={'color_min': 0, 'color_max':350,
'extend': 'max'})
temp_bundle.metric_values = np.ma.MaskedArray(data=data,
mask=pix-1)
temp_bundle.plot()
# Generate twilight microsurvey footprint
m = maf.CountMetric('observationStartMJD', metric_name="NVisits")
s = maf.HealpixSlicer(nside=64)
constraint = 'scheduler_note like "%twi%"'
b = maf.MetricBundle(m, s, constraint, run_name=opsim_name, info_label="near sun twilight")
g = maf.MetricBundleGroup({0: b}, opsdb)
g.run_all()
ph.set_metric_bundles([b])
fig = ph.plot(maf.HealpixSkyMap(),
plot_dicts={'title': f"{opsim_name} NearSun Twilight Survey",
'xlabel': "NVisits", "n_ticks": 7,
"color_min": 0, "color_max":90, "extend":"max"})
conn = sqlite3.connect(opsdb)
query = "select * from observations"
visits = pd.read_sql(query, conn)
visits.query('scheduler_note.str.contains("twi")')['filter'].unique()
plt.figure(figsize=(8, 5))
tt = visits.query('scheduler_note.str.contains("twi")')
tto = visits.query('scheduler_note.str.contains("twi") == False')
counts, bin_edges, patches = plt.hist(tto.solarElong, bins=100, label="All other visits")
counts, bin_edges, patches = plt.hist(tt.solarElong, bins=bin_edges, label="Twilight Near-Sun", color='orange')
plt.legend()
plt.xlabel("Solar elongation (deg)", fontsize='x-large')
plt.ylabel("Number of observations", fontsize='x-large')
plt.title(f"{opsim_name}", fontsize='x-large')
plt.savefig(f"{opsim_name}_solar_elongation_twi.png")
visits.query('scheduler_note.str.contains("twilight")')['airmass'].describe()
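The airmass distribution of the twilight visits can also be summarized per band, using the twilight subset selected above (the observations table in this baseline names the band column ‘filter’).
# Airmass of the near-sun twilight visits, summarized per band.
# Uses the `tt` twilight subset from above; the band column is named 'filter' in this schema.
tt.groupby('filter')['airmass'].describe().round(2)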