import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import matplotlib.patheffects as pe
import datetime
import json
import os

# Collect flash hull areas and flash hours across the processed JSON files
# (json_dir, the directory of flash-parameter JSON files, is defined elsewhere).
all_flash_areas = {}
all_flash_hours = {}
for fname in os.listdir(json_dir):
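    # Sketch of a possible loop body (not from the original gist, which is truncated here).
    # It assumes each JSON file holds a list of flash-parameter dicts with 'hull_area' and
    # 'start_time' keys; both key names and the ISO-format timestamps are assumptions.
    with open(os.path.join(json_dir, fname), 'r') as f:
        flashes = json.load(f)
    day_key = os.path.splitext(fname)[0]
    all_flash_areas[day_key] = [fl['hull_area'] for fl in flashes]
    all_flash_hours[day_key] = [
        datetime.datetime.fromisoformat(fl['start_time']).hour for fl in flashes
    ]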
import json
import os

import matplotlib.pyplot as plt

# Accumulate flash statistics into distance buckets
# (max_dist, bucket_size and json_dir are defined elsewhere in the script).
n_buckets = int(max_dist // bucket_size)
param_names = ('duration', 'hull_area', 'mean_power', 'n_sources')
param_units = ('(ms)', '(square km)', '(dBW)', '')
stats_buckets = [{param: [] for param in param_names} for i in range(n_buckets)]
for fname in os.listdir(json_dir):
    with open(os.path.join(json_dir, fname), 'r') as f:
        data = json.load(f)
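        # Possible continuation (an assumption -- the original body is truncated): drop each
        # flash's parameters into a bucket indexed by its distance from the network center.
        # The 'dist_from_center' key (in km) is hypothetical; the real JSON layout may differ.
        for flash in data:
            bucket = int(flash['dist_from_center'] // bucket_size)
            if bucket < n_buckets:
                for param in param_names:
                    stats_buckets[bucket][param].append(flash[param])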
from clustering_algorithm.cluster_flashes import load_dat, to_ecef, cluster, get_flash_params, merge_flashes
import numpy as np
import os
from datetime import date, timedelta
import json
from pathlib import Path

# data_dir (defined elsewhere) holds the gzipped LMA .dat files to process.
all_files = os.listdir(data_dir)
for f_idx, fname in enumerate(all_files):
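    # Sketch of a plausible per-file pipeline (the original loop body is truncated here; the
    # scale values, the ECEF column fill, the output directory and the json.dump call are all
    # illustrative assumptions, not the gist's actual code):
    print(f'{f_idx + 1}/{len(all_files)}: {fname}')
    sources, start_time = load_dat(os.path.join(data_dir, fname))
    sources[:, 1:4] = to_ecef(sources[:, 7:10])   # assumed: x,y,z filled from init lat/lon/alt columns
    flashes, n_removed = cluster(sources, xyz_scale=3000.0, t_scale=0.15, grid_max=18)
    flashes = merge_flashes(flashes)
    flash_params = get_flash_params(flashes, start_time)
    out_dir = Path('flash_params')                # hypothetical output location
    out_dir.mkdir(exist_ok=True)
    with open(out_dir / (Path(fname).stem + '.json'), 'w') as out_f:
        json.dump(flash_params, out_f, default=str)   # default=str keeps NumPy scalars serialisable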
from scipy.spatial import ConvexHull

def get_flash_params(flashes: List[array_type], start_time: datetime.datetime) -> List[Dict]:
    flash_params: List[Dict] = []
    north_pole = np.array([90.0, 0.0, 0.0])   # north pole in the same (lat, lon, alt) layout
    for flash in flashes:
        # Distinct (grid_lat, grid_lon) cells touched by this flash (columns 5:7 of each source).
        grid_points = np.unique(flash[:, 5:7], axis=0).astype('int64')
        n_sources = len(flash)
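        # The remaining parameters are truncated above; one simple possibility (not necessarily
        # what the original computes -- in particular the hull here is taken over raw ECEF x/y
        # rather than a proper local tangent plane, which the north_pole vector hints at):
        duration_ms = (np.max(flash[:, 0]) - np.min(flash[:, 0])) * 1e3
        hull_area_km2 = ConvexHull(flash[:, 1:3]).volume / 1e6   # for a 2-D hull, .volume is its area (m^2)
        flash_params.append({
            'duration': duration_ms,
            'hull_area': hull_area_km2,
            'mean_power': float(np.mean(flash[:, 4])),
            'n_sources': int(n_sources),
            'start_time': (start_time + datetime.timedelta(seconds=float(flash[0, 0]))).isoformat(),
        })
    return flash_params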
def merge_flashes(flashes: List[array_type], t_threshold: float=0.15, xyz_threshold: float=3000.0) -> List[array_type]:
    # Sort flashes by the time of their first source; flash_merges[j] will hold merge
    # information for flash j (initialized to -1.0, i.e. "no merge").
    flashes_sorted = sorted(flashes, key=lambda x: x[0, 0])
    flash_merges = np.zeros((len(flashes), 2)) - 1.0
    for j in range(1, len(flashes_sorted)):
        branch_flash = flashes_sorted[j]
        for i in range(j):
            base_flash = flashes_sorted[i]
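            # Plausible merge test (an assumption -- the original condition is truncated):
            # link the branch flash to the base flash if it starts within t_threshold seconds
            # of the base flash's last source and within xyz_threshold meters of it in space.
            dt = branch_flash[0, 0] - base_flash[-1, 0]
            dxyz = np.linalg.norm(branch_flash[0, 1:4] - base_flash[-1, 1:4])
            if 0.0 <= dt <= t_threshold and dxyz <= xyz_threshold:
                flash_merges[j] = (i, dt)
                break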
from sklearn.cluster import DBSCAN

# sources[time, x, y, z, power, grid_lat, grid_lon, init_lat, init_lon, init_alt]
def cluster(sources: array_type, xyz_scale: float, t_scale: float, grid_max: int, min_samples: int=10, epsilon: float=1.0, max_duration: float=3.0) -> Tuple[List[array_type], int]:
    min_t = np.min(sources[:, 0])
    max_t = np.max(sources[:, 0])
    time_start = min_t
    all_flashes: List[array_type] = []
    total_removed = 0
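    # Sketch of a possible main loop (an assumption -- the original is truncated): run DBSCAN on
    # max_duration-long windows of sources in scaled (t, x, y, z) space, keep each cluster as a
    # flash and count noise points as removed. Flashes split across window edges would later be
    # rejoined by merge_flashes. Any filtering tied to grid_max is omitted from this sketch.
    while time_start < max_t:
        in_window = (sources[:, 0] >= time_start) & (sources[:, 0] < time_start + max_duration)
        window = sources[in_window]
        if len(window) >= min_samples:
            feats = np.column_stack([window[:, 0] / t_scale, window[:, 1:4] / xyz_scale])
            labels = DBSCAN(eps=epsilon, min_samples=min_samples).fit_predict(feats)
            total_removed += int(np.sum(labels == -1))
            for label in sorted(set(labels) - {-1}):
                all_flashes.append(window[labels == label])
        else:
            total_removed += len(window)
        time_start += max_duration
    return all_flashes, total_removed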
from clustering_algorithm.cluster_flashes import load_dat, to_ecef
import numpy as np

center_geo = np.array([40.4463980, -104.6368130, 1000.00])   # COLMA center (lat, lon, alt m)
lma_center = to_ecef(np.expand_dims(center_geo, axis=0))
n_grid_points, grid_spacing = 18, 0.2559                     # grid size and spacing (degrees)
grid_start_lat = center_geo[0] - (n_grid_points / 2) * grid_spacing
grid_start_lon = center_geo[1] - (n_grid_points / 2) * grid_spacing
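For illustration only (this code is not part of the original snippet, and the index convention is an assumption), the 18 x 18 grid defined above could be used to assign integer grid indices to a source location like this:

lat, lon = 41.2, -103.9                                      # example source location (degrees)
grid_lat = int((lat - grid_start_lat) // grid_spacing)
grid_lon = int((lon - grid_start_lon) // grid_spacing)
in_grid = 0 <= grid_lat < n_grid_points and 0 <= grid_lon < n_grid_points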
def to_ecef(spatial_data: array_type) -> array_type:
    # Convert geodetic rows of (lat deg, lon deg, alt m) to Earth-centered Earth-fixed coordinates.
    rad_lat = spatial_data[:, 0] * np.pi / 180
    rad_lon = spatial_data[:, 1] * np.pi / 180
    altitudes = spatial_data[:, 2]
    a = 6378137.0          # WGS84 semi-major axis (m)
    b = 6356752.314245     # WGS84 semi-minor axis (m)
    e2 = 1 - b**2 / a**2   # first eccentricity squared
    cot = 1 / np.tan(rad_lat)
    # Prime-vertical radius of curvature N(lat), using sin^2(lat) = 1 / (1 + cot^2(lat)).
    n_phi = a / np.sqrt(1 - (e2 / (1 + np.square(cot))))
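    # The function is truncated above; the standard geodetic-to-ECEF formulas complete the
    # conversion (the stacked return layout is an assumption):
    x = (n_phi + altitudes) * np.cos(rad_lat) * np.cos(rad_lon)
    y = (n_phi + altitudes) * np.cos(rad_lat) * np.sin(rad_lon)
    z = ((1 - e2) * n_phi + altitudes) * np.sin(rad_lat)
    return np.column_stack([x, y, z])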
import numpy as np
from typing import List, Dict, Tuple
import gzip
import datetime

# Alias for the float64 arrays used throughout this module.
array_type = np.ndarray[tuple[int, ...], np.dtype[np.float64]]

def load_dat(gz_file_path: str, min_stations: int=7, max_chi_squared: float=1.0, max_altitude: float=20e3) -> Tuple[array_type, datetime.datetime]:
    with gzip.open(gz_file_path, 'rt') as f:
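        # Sketch of a plausible parser body (an assumption -- the original is truncated). It
        # assumes the common XLMA-style .dat layout: header lines including 'Data start time:',
        # a '*** data ***' marker, then rows of
        #   time(s of day)  lat  lon  alt(m)  reduced chi^2  power(dBW)  station mask (hex).
        # The original likely also builds the wider 10-column layout expected by cluster().
        kept = []
        start_time = None
        in_data = False
        for line in f:
            if not in_data:
                if line.startswith('Data start time:'):
                    stamp = line.split(':', 1)[1].strip()
                    start_time = datetime.datetime.strptime(stamp, '%m/%d/%y %H:%M:%S')
                elif '*** data ***' in line:
                    in_data = True
                continue
            cols = line.split()
            t, lat, lon, alt, chi2, power = (float(c) for c in cols[:6])
            n_stations = bin(int(cols[6], 16)).count('1')   # set bits in the station mask
            if n_stations >= min_stations and chi2 <= max_chi_squared and alt <= max_altitude:
                kept.append((t, lat, lon, alt, power))
    return np.array(kept), start_time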
cutoff_a = 2.5
l_u_copy = deepcopy(all_residues_dict["l_u"])
close_irmsds = []
# For each top-scoring pose, convert its grid translation to real-space units, transform the
# "l_u" residues accordingly, and compute the interface RMSD of the resulting pose.
for score_rank in 1:length(top_scores)
    rotation_idx, t, score = top_scores[score_rank]
    translation = voxel_size * ([t[1], t[2], t[3]] - [center_val+1, center_val+1, center_val+1])
    all_residues_dict["l_u"] = apply_transformation(l_u_copy, get_rotation(rotation_vectors[rotation_idx, :]), translation)
    i_rmsd = calc_interface_rmsd(all_residues_dict)