{
"cells": [
{
"cell_type": "markdown",
"id": "c2ef8125",
"metadata": {},
"source": [
"# Photometry Testbench"
]
},
{
"cell_type": "markdown",
"id": "6be237e9",
"metadata": {},
"source": [
"## First do all of the imports to nab the data we need"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "51fb157a",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"from banzai import settings\n",
"from banzai.lco import LCOFrameFactory\n",
"import banzai.main\n",
"\n",
"import requests\n",
"\n",
"\n",
"os.environ['OPENTSDB_PYTHON_METRICS_TEST_MODE'] = 'True'\n",
"\n",
"settings.fpack=True\n",
"settings.db_address = os.getenv('BANZAI_DB_ADDRESS')\n",
"settings.reduction_level = 91\n",
"settings.no_file_cache=True\n",
"\n",
"# set up the context object.\n",
"context = banzai.main.parse_args(settings, parse_system_args=False)\n",
"\n",
"reduced_basename = 'tfn1m014-fa20-20240306-0175-e91'\n",
"frame_record = requests.get(f'https://archive-api.lco.global/frames/?basename_exact={reduced_basename}', headers=context.RAW_DATA_AUTH_HEADER).json()['results'][0]\n",
"frame_record['frameid'] = frame_record['id']\n",
"\n",
"frame_factory = LCOFrameFactory()\n",
"\n",
"image = frame_factory.open(frame_record, context)"
]
},
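{
"cell_type": "markdown",
"id": "3c1f0a9e",
"metadata": {},
"source": [
"A quick sanity check on the frame we just opened. This is a minimal sketch that only touches the `data`, `uncertainty`, `mask`, and `pixel_scale` attributes the stage below already relies on."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4d2e1b8f",
"metadata": {},
"outputs": [],
"source": [
"# Minimal sanity check on the opened frame. These are the same attributes\n",
"# the SourceDetector stage below consumes.\n",
"print('data shape:', image.data.shape)\n",
"print('uncertainty shape:', image.uncertainty.shape)\n",
"print('masked pixels:', (image.mask != 0).sum())\n",
"print('pixel scale (arcsec/pixel):', image.pixel_scale)"
]
},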
{
"cell_type": "markdown",
"id": "62b83ee5",
"metadata": {},
"source": [
"## Now set up the photometry stage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4b082a87",
"metadata": {},
"outputs": [],
"source": [
"from urllib.parse import urljoin\n",
"\n",
"import numpy as np\n",
"from astropy.table import Table\n",
"from requests import HTTPError\n",
"\n",
"from banzai.utils import stats, array_utils\n",
"from banzai.utils.photometry_utils import get_reference_sources, match_catalogs, to_magnitude, fit_photometry\n",
"from banzai.stages import Stage\n",
"from banzai.data import DataTable\n",
"from banzai import logs\n",
"\n",
"from photutils.background import Background2D\n",
"from skimage import measure\n",
"from photutils.segmentation import make_2dgaussian_kernel, detect_sources, deblend_sources, SourceCatalog\n",
"from astropy.convolution import convolve\n",
"from astropy.convolution.kernels import CustomKernel\n",
"\n",
"logger = logs.get_logger()\n"
]
},
{
"cell_type": "markdown",
"id": "6722f089",
"metadata": {},
"source": [
"## Define the utils we need"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1c357e9",
"metadata": {},
"outputs": [],
"source": [
"def radius_of_contour(contour, source):\n",
" x = contour[:, 1]\n",
" y = contour[:, 0]\n",
" x_center = (source.bbox_xmax - source.bbox_xmin + 1) / 2.0 - 0.5\n",
" y_center = (source.bbox_ymax - source.bbox_ymin + 1) / 2.0 - 0.5\n",
"\n",
" return np.percentile(np.sqrt((x - x_center)**2.0 + (y - y_center) ** 2.0), 90)\n",
"\n",
"\n",
"def flag_sources(sources, source_labels, segmentation_map, mask, flag, mask_value):\n",
" affected_sources = np.unique(segmentation_map.data[mask == mask_value])\n",
" sources['flag'][np.in1d(source_labels, affected_sources)] |= flag\n",
"\n",
"\n",
"def flag_deblended(sources, catalog, segmentation_map, deblended_seg_map, flag_value=2):\n",
" # By default deblending appends labels instead of reassigning them so we can just use the\n",
" # extras in the deblended map\n",
" deblended_sources = np.unique(deblended_seg_map.data[deblended_seg_map > np.max(segmentation_map)])\n",
" # Get the sources that were originally blended\n",
" original_blends = np.unique(segmentation_map.data[deblended_seg_map > np.max(segmentation_map)])\n",
" deblended_sources = np.hstack([deblended_sources, original_blends])\n",
" sources['flag'][np.in1d(catalog.labels, deblended_sources)] |= flag_value\n",
"\n",
"\n",
"def flag_edge_sources(image, sources, flag_value=8):\n",
" ny, nx = image.shape\n",
" # Check 4 points on the kron aperture, one on each side of the major and minor axis\n",
" minor_xmin = sources['x'] - sources['b'] * sources['kronrad'] * np.sin(np.deg2rad(sources['theta']))\n",
" minor_xmax = sources['x'] + sources['b'] * sources['kronrad'] * np.sin(np.deg2rad(sources['theta']))\n",
" minor_ymin = sources['y'] - sources['b'] * sources['kronrad'] * np.cos(np.deg2rad(sources['theta']))\n",
" minor_ymax = sources['y'] + sources['b'] * sources['kronrad'] * np.cos(np.deg2rad(sources['theta']))\n",
" major_ymin = sources['y'] - sources['a'] * sources['kronrad'] * np.cos(np.deg2rad(sources['theta']))\n",
" major_ymax = sources['y'] + sources['a'] * sources['kronrad'] * np.cos(np.deg2rad(sources['theta']))\n",
" major_xmin = sources['x'] - sources['a'] * sources['kronrad'] * np.sin(np.deg2rad(sources['theta']))\n",
" major_xmax = sources['x'] + sources['a'] * sources['kronrad'] * np.sin(np.deg2rad(sources['theta']))\n",
"\n",
" # Note we are already 1 indexed here\n",
" sources_off = np.logical_or(minor_xmin < 1, major_xmin < 1)\n",
" sources_off = np.logical_or(sources_off, minor_ymin < 1)\n",
" sources_off = np.logical_or(sources_off, major_ymin < 1)\n",
" sources_off = np.logical_or(sources_off, minor_xmax > nx)\n",
" sources_off = np.logical_or(sources_off, major_xmax > nx)\n",
" sources_off = np.logical_or(sources_off, minor_ymax > ny)\n",
" sources_off = np.logical_or(sources_off, major_ymax > ny)\n",
" sources['flag'][sources_off] |= flag_value"
]
},
{
"cell_type": "markdown",
"id": "ea5a1813",
"metadata": {},
"source": [
"## Now define the Source Extractor Stage"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9819bfdb",
"metadata": {},
"outputs": [],
"source": [
"class SourceDetector(Stage):\n",
" threshold = 2.5\n",
" min_area = 9\n",
"\n",
" def __init__(self, runtime_context):\n",
" super(SourceDetector, self).__init__(runtime_context)\n",
"\n",
" def do_stage(self, image):\n",
" try:\n",
" data = image.data.copy()\n",
" error = image.uncertainty\n",
"\n",
" # From what I can piece together, the background estimator makes a low resolution mesh set by box size\n",
" # (32, 32) here and then applies a filter to the low resolution image. The filter size is 3x3 here.\n",
" # The defaults we use here are a mesh creator is from source extractor which is a mode estimator.\n",
" # The default filter that works on the mesh image is a median filter.\n",
" bkg = Background2D(data, (32, 32), filter_size=(3, 3))\n",
" data -= bkg.background\n",
"\n",
" # Convolve the image with a 2D Guassian, but with the normalization SEP uses as\n",
" # that is correct.\n",
" # The default kernel used by Source Extractor is [[1,2,1], [2,4,2], [1,2,1]]\n",
" # The kernel corresponds to fwhm = 1.9 which we adopt here\n",
" kernel = make_2dgaussian_kernel(1.9, size=3)\n",
" convolved_data = convolve(data / (error * error), kernel)\n",
"\n",
" # We include the correct match filter normalization here that is not included in\n",
" # vanilla source extractor\n",
" kernel_squared = CustomKernel(kernel.array * kernel.array)\n",
" normalization = np.sqrt(convolve(1 / (error * error), kernel_squared))\n",
" convolved_data /= normalization\n",
" logger.info('Running image segmentation', image=image)\n",
" # Do an initial source detection\n",
" segmentation_map = detect_sources(convolved_data, self.threshold, npixels=self.min_area)\n",
"\n",
" logger.info('Deblending sources', image=image)\n",
" # Note that nlevels here is DEBLEND_NTHRESH in source extractor which is 32 by default\n",
" deblended_seg_map = deblend_sources(convolved_data, segmentation_map,\n",
" npixels=self.min_area, nlevels=32,\n",
" contrast=0.005, progress_bar=False,\n",
" nproc=1, mode='sinh')\n",
" \n",
" \n",
" ####\n",
" # NEED TO PROFILE THIS CODE #\n",
" ####\n",
" \n",
" \n",
" logger.info('Finished deblending. Saving catalog')\n",
" # Convert the segmentation map to a source catalog\n",
" catalog = SourceCatalog(data, deblended_seg_map, convolved_data=convolved_data, error=error,\n",
" background=bkg.background, progress_bar=True)\n",
" \n",
" logger.info('Finished converting to catalog')\n",
"\n",
" sources = Table({'x': catalog.xcentroid + 1.0, 'y': catalog.ycentroid + 1.0,\n",
" 'xwin': catalog.xcentroid_win + 1.0, 'ywin': catalog.ycentroid_win + 1.0,\n",
" 'xpeak': catalog.maxval_xindex + 1, 'ypeak': catalog.maxval_yindex + 1,\n",
" 'peak': catalog.max_value,\n",
" 'a': catalog.semimajor_sigma.value, 'b': catalog.semiminor_sigma.value,\n",
" 'theta': catalog.orientation.to('deg').value, 'ellipticity': catalog.ellipticity.value,\n",
" 'kronrad': catalog.kron_radius.value,\n",
" 'flux': catalog.kron_flux, 'fluxerr': catalog.kron_fluxerr,\n",
" 'x2': catalog.covar_sigx2.value, 'y2': catalog.covar_sigy2.value,\n",
" 'xy': catalog.covar_sigxy.value,\n",
" 'background': catalog.background_mean})\n",
" \n",
" logger.info('Got sources table')\n",
" \n",
" # This section took 5min40s out of 8.5min\n",
" \n",
" for r in range(1, 7):\n",
" radius_arcsec = r / image.pixel_scale\n",
" sources[f'fluxaper{r}'], sources[f'fluxerr{r}'] = catalog.circular_photometry(radius_arcsec)\n",
" \n",
" logger.info('Calculating flux radii')\n",
" \n",
" # this loop huuuurts - took 5min30s or so\n",
" for r in [0.25, 0.5, 0.75]:\n",
" sources['fluxrad' + f'{r:.2f}'.lstrip(\"0.\")] = catalog.fluxfrac_radius(r)\n",
" \n",
" logger.info('Done calculating flux radii')\n",
"\n",
" sources['flag'] = 0\n",
"\n",
" # Flag = 1 for sources with bad pixels\n",
" flag_sources(sources, catalog.labels, deblended_seg_map, image.mask, flag=1, mask_value=1)\n",
" # Flag = 2 for sources that are deblended\n",
" flag_deblended(sources, catalog, segmentation_map, deblended_seg_map, flag_value=2)\n",
" # Flag = 4 for sources that have saturated pixels\n",
" flag_sources(sources, catalog.labels, deblended_seg_map, image.mask, flag=4, mask_value=2)\n",
" # Flag = 8 if kron aperture falls off the image\n",
" flag_edge_sources(image, sources, flag_value=8)\n",
" # Flag = 16 if source has cosmic ray pixels\n",
" flag_sources(sources, catalog.labels, deblended_seg_map, image.mask, flag=16, mask_value=8)\n",
" \n",
" logger.info('Finished flagging')\n",
"\n",
" rows_with_nans = array_utils.find_nans_in_table(sources)\n",
" catalog = catalog[~rows_with_nans]\n",
" sources = sources[~rows_with_nans]\n",
"\n",
" # Cut individual bright pixels. Often cosmic rays\n",
" not_individual_bright_pixels = sources['fluxrad50'] > 0.5\n",
" catalog = catalog[not_individual_bright_pixels]\n",
" sources = sources[not_individual_bright_pixels]\n",
"\n",
" # Cut sources that are less than 2 pixels wide\n",
" thin_sources = np.logical_or((catalog.bbox_ymax - catalog.bbox_ymin) < 1.5,\n",
" (catalog.bbox_xmax - catalog.bbox_xmin) < 1.5)\n",
" catalog = catalog[~thin_sources]\n",
" sources = sources[~thin_sources]\n",
" \n",
" logger.info('Finished misc. source cleanup')\n",
"\n",
" # Calculate the FWHMs of the stars:\n",
" sources['fwhm'] = np.nan\n",
" sources['fwtm'] = np.nan\n",
" # Here we estimate contours\n",
" for source, row in zip(sources, catalog):\n",
" if source['flag'] == 0:\n",
" for ratio, keyword in zip([0.5, 0.1], ['fwhm', 'fwtm']):\n",
" contours = measure.find_contours(data[row.bbox_ymin: row.bbox_ymax + 1,\n",
" row.bbox_xmin: row.bbox_xmax + 1],\n",
" ratio * source['peak'])\n",
" if contours:\n",
" # If there are multiple contours like a donut might have take the outer\n",
" contour_radii = [radius_of_contour(contour, row) for contour in contours]\n",
" source[keyword] = 2.0 * np.nanmax(contour_radii)\n",
" \n",
" logger.info('Finished calculating FWHMs')\n",
"\n",
" # Add the units and description to the catalogs\n",
" sources['x'].unit = 'pixel'\n",
" sources['x'].description = 'X coordinate of the object'\n",
" sources['y'].unit = 'pixel'\n",
" sources['y'].description = 'Y coordinate of the object'\n",
" sources['xwin'].unit = 'pixel'\n",
" sources['xwin'].description = 'Windowed X coordinate of the object'\n",
" sources['ywin'].unit = 'pixel'\n",
" sources['ywin'].description = 'Windowed Y coordinate of the object'\n",
" sources['xpeak'].unit = 'pixel'\n",
" sources['xpeak'].description = 'X coordinate of the peak'\n",
" sources['ypeak'].unit = 'pixel'\n",
" sources['ypeak'].description = 'Windowed Y coordinate of the peak'\n",
" sources['flux'].unit = 'count'\n",
" sources['flux'].description = 'Flux within a Kron-like elliptical aperture'\n",
" sources['fluxerr'].unit = 'count'\n",
" sources['fluxerr'].description = 'Error on the flux within Kron aperture'\n",
" sources['peak'].unit = 'count'\n",
" sources['peak'].description = 'Peak flux (flux at xpeak, ypeak)'\n",
" for diameter in [1, 2, 3, 4, 5, 6]:\n",
" sources['fluxaper{0}'.format(diameter)].unit = 'count'\n",
" sources['fluxaper{0}'.format(diameter)].description = 'Flux from fixed circular aperture: {0}\" diameter'.format(diameter)\n",
" sources['fluxerr{0}'.format(diameter)].unit = 'count'\n",
" sources['fluxerr{0}'.format(diameter)].description = 'Error on Flux from circular aperture: {0}\"'.format(diameter)\n",
"\n",
" sources['background'].unit = 'count'\n",
" sources['background'].description = 'Average background value in the aperture'\n",
" sources['fwhm'].unit = 'pixel'\n",
" sources['fwhm'].description = 'FWHM of the object'\n",
" sources['fwtm'].unit = 'pixel'\n",
" sources['fwtm'].description = 'Full-Width Tenth Maximum'\n",
" sources['a'].unit = 'pixel'\n",
" sources['a'].description = 'Semi-major axis of the object'\n",
" sources['b'].unit = 'pixel'\n",
" sources['b'].description = 'Semi-minor axis of the object'\n",
" sources['theta'].unit = 'degree'\n",
" sources['theta'].description = 'Position angle of the object'\n",
" sources['kronrad'].unit = 'pixel'\n",
" sources['kronrad'].description = 'Kron radius used for extraction'\n",
" sources['ellipticity'].description = 'Ellipticity'\n",
" sources['fluxrad25'].unit = 'pixel'\n",
" sources['fluxrad25'].description = 'Radius containing 25% of the flux'\n",
" sources['fluxrad50'].unit = 'pixel'\n",
" sources['fluxrad50'].description = 'Radius containing 50% of the flux'\n",
" sources['fluxrad75'].unit = 'pixel'\n",
" sources['fluxrad75'].description = 'Radius containing 75% of the flux'\n",
" sources['x2'].unit = 'pixel^2'\n",
" sources['x2'].description = 'Variance on X coordinate of the object'\n",
" sources['y2'].unit = 'pixel^2'\n",
" sources['y2'].description = 'Variance on Y coordinate of the object'\n",
" sources['xy'].unit = 'pixel^2'\n",
" sources['xy'].description = 'XY covariance of the object'\n",
" sources['flag'].description = 'Bit mask of extraction/photometry flags'\n",
"\n",
" sources.sort('flux')\n",
" sources.reverse()\n",
" \n",
" logger.info('Finished adding metadata')\n",
"\n",
" # Save some background statistics in the header\n",
" mean_background = stats.sigma_clipped_mean(bkg.background, 5.0)\n",
" image.meta['L1MEAN'] = (mean_background,\n",
" '[counts] Sigma clipped mean of frame background')\n",
"\n",
" median_background = np.median(bkg.background)\n",
" image.meta['L1MEDIAN'] = (median_background,\n",
" '[counts] Median of frame background')\n",
"\n",
" std_background = stats.robust_standard_deviation(bkg.background)\n",
" image.meta['L1SIGMA'] = (std_background,\n",
" '[counts] Robust std dev of frame background')\n",
"\n",
" # Save some image statistics to the header\n",
" good_objects = sources['flag'] == 0\n",
" for quantity in ['fwhm', 'ellipticity', 'theta']:\n",
" good_objects = np.logical_and(good_objects, np.logical_not(np.isnan(sources[quantity])))\n",
" if good_objects.sum() == 0:\n",
" image.meta['L1FWHM'] = ('NaN', '[arcsec] Frame FWHM in arcsec')\n",
" image.meta['L1FWTM'] = ('NaN', 'Ratio of FWHM to Full-Width Tenth Max')\n",
"\n",
" image.meta['L1ELLIP'] = ('NaN', 'Mean image ellipticity (1-B/A)')\n",
" image.meta['L1ELLIPA'] = ('NaN', '[deg] PA of mean image ellipticity')\n",
" else:\n",
" seeing = np.nanmedian(sources['fwhm'][good_objects]) * image.pixel_scale\n",
" image.meta['L1FWHM'] = (seeing, '[arcsec] Frame FWHM in arcsec')\n",
" image.meta['L1FWTM'] = (np.nanmedian(sources['fwtm'][good_objects] / sources['fwhm'][good_objects]),\n",
" 'Ratio of FWHM to Full-Width Tenth Max')\n",
"\n",
" mean_ellipticity = stats.sigma_clipped_mean(sources['ellipticity'][good_objects], 3.0)\n",
" image.meta['L1ELLIP'] = (mean_ellipticity, 'Mean image ellipticity (1-B/A)')\n",
"\n",
" mean_position_angle = stats.sigma_clipped_mean(sources['theta'][good_objects], 3.0)\n",
" image.meta['L1ELLIPA'] = (mean_position_angle, '[deg] PA of mean image ellipticity')\n",
"\n",
" logging_tags = {key: float(image.meta[key]) for key in ['L1MEAN', 'L1MEDIAN', 'L1SIGMA',\n",
" 'L1FWHM', 'L1ELLIP', 'L1ELLIPA']}\n",
" \n",
" logger.info('Extracted sources', image=image, extra_tags=logging_tags)\n",
" # adding catalog (a data table) to the appropriate images attribute.\n",
" image.add_or_update(DataTable(sources, name='CAT'))\n",
" except Exception:\n",
" logger.error(logs.format_exception(), image=image)\n",
" return image"
]
},
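{
"cell_type": "markdown",
"id": "7a5f4e5c",
"metadata": {},
"source": [
"Before running the stage, here is a minimal synthetic sketch of the matched-filter S/N image that `do_stage` builds above: convolve the data weighted by the inverse variance, then divide by the proper normalization. All of the `demo` arrays and names are toy data invented for this check."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8b6a5f4d",
"metadata": {},
"outputs": [],
"source": [
"# Toy demonstration of the matched-filter S/N construction from do_stage,\n",
"# using constant unit errors and a single injected fake source.\n",
"rng = np.random.default_rng(42)\n",
"error_demo = np.ones((64, 64))\n",
"data_demo = rng.normal(0.0, 1.0, size=(64, 64))\n",
"data_demo[32, 32] += 50.0  # inject a fake point source\n",
"\n",
"demo_kernel = make_2dgaussian_kernel(1.9, size=3)\n",
"snr_demo = convolve(data_demo / (error_demo * error_demo), demo_kernel)\n",
"demo_kernel_squared = CustomKernel(demo_kernel.array * demo_kernel.array)\n",
"snr_demo /= np.sqrt(convolve(1 / (error_demo * error_demo), demo_kernel_squared))\n",
"print('peak S/N near the fake source:', snr_demo[30:35, 30:35].max())"
]
},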
{
"cell_type": "code",
"execution_count": null,
"id": "012a544e",
"metadata": {},
"outputs": [],
"source": [
"photometry_stage = SourceDetector(context)\n",
"image = photometry_stage.do_stage(image)"
]
},
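{
"cell_type": "markdown",
"id": "9c7b6a3e",
"metadata": {},
"source": [
"If the stage succeeded, the background and seeing statistics it computed are now in the frame header. This just reads back the keywords written in `do_stage` above."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0d8c7b2f",
"metadata": {},
"outputs": [],
"source": [
"# Read back the header keywords the stage wrote above\n",
"for key in ['L1MEAN', 'L1MEDIAN', 'L1SIGMA', 'L1FWHM', 'L1FWTM', 'L1ELLIP', 'L1ELLIPA']:\n",
"    print(key, '=', image.meta[key])"
]
},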
{
"cell_type": "code",
"execution_count": null,
"id": "a4ba9fbf",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "banzai-venv",
"language": "python",
"name": "banzai-venv"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}