Skip to content

Instantly share code, notes, and snippets.

########## CODE#################################
from skyfield.api import load as sky_load, wgs84
from random import choice, sample

# Parse each year's TLE catalogue into a list of EarthSatellite objects.
satellites2012 = sky_load.tle_file('data/TLE/tle2012.txt')
# NOTE: the 2017 archive unzips into a nested directory, hence the doubled path.
satellites2017 = sky_load.tle_file('data/TLE/tle2017.txt/tle2017.txt')

# One combined catalogue spanning both epochs (may contain the same
# satellite number twice — deduplication happens downstream).
satellites = [*satellites2012, *satellites2017]
from skyfield.api import EarthSatellite, load
from datetime import datetime
import math

# Two-line element set for the ISS (NORAD catalogue number 25544).
# These strings are runtime data parsed by skyfield — kept byte-for-byte.
line1, line2 = (
    '1 25544U 98067A 14020.93268519 .00009878 00000-0 18200-3 0 5082',
    '2 25544 51.6498 109.4756 0003572 55.9686 274.8005 15.49815350868473',
)

# Skyfield timescale, used to construct UTC times for propagation.
ts = load.timescale()
@MaxPowerWasTaken
MaxPowerWasTaken / gist:71366e8db01e2d07883548b4844a7700
Created June 26, 2019 18:25
DeepSpeech/TF/cudnn error after removing miniconda from my machine
I Restored variables from most recent checkpoint at /home/mepstein/.local/share/deepspeech/ldc93s1/train-1158, step 1158
I STARTING Optimization
I Training epoch 0...
+ [ ! -f DeepSpeech.py ]
+ [ ! -f data/ldc93s1/ldc93s1.csv ]
+ [ -d ]
+ python -c from xdg import BaseDirectory as xdg; print(xdg.save_data_path("deepspeech/ldc93s1"))
+ checkpoint_dir=/home/mepstein/.local/share/deepspeech/ldc93s1
+ export CUDA_VISIBLE_DEVICES=0
+ python -u DeepSpeech.py --noshow_progressbar --train_files data/ldc93s1/ldc93s1.csv --test_files data/ldc93s1/ldc93s1.csv --train_batch_size 1 --test_batch_size 1 --n_hidden 100 --epochs 200 --checkpoint_dir /home/mepstein/.local/share/deepspeech/ldc93s1
@MaxPowerWasTaken
MaxPowerWasTaken / gist:27a578bd7592077c9658af5981aab996
Created June 26, 2019 15:08
cuDNN error from running DeepSpeech.py with cuda10.0 / cudnn7.5 / tf-gpu1.13.1
# created for issue https://github.com/mozilla/DeepSpeech/issues/2211
# this log was produced setting environment var 'LD_DEBUG' = 'all', to help diagnose
# why retraining a DeepSpeech model keeps failing for trivial/small data, unless I set
# environment var 'TF_FORCE_GPU_ALLOW_GROWTH' = 'true'...
(base) mepstein@pop-os:~/DeepSpeech$ source dsenv/bin/activate
(dsenv) (base) mepstein@pop-os:~/DeepSpeech$ ./bin/run-ldc93s1.sh
+ [ ! -f DeepSpeech.py ]
+ [ ! -f data/ldc93s1/ldc93s1.csv ]
import numpy as np
import pandas as pd
def check_var_size_requirements(df):
''' for integer columns, display smallest int type which could safely store values'''
# Iterate through int columns
# (pandas' read_* is good about assigning some int type if all vals are ints and no NaNs)
int_types = ['int', np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64]