Get cat.bin
import CoreML
# Vanilla CTC and CTC via cross-entropy produce equal losses, and their gradients are equal as well.
# This reformulation makes it easier to experiment with modifications of CTC.
# References on CTC regularization:
# "A Novel Re-weighting Method for Connectionist Temporal Classification", Li et al., https://arxiv.org/abs/1904.10619
# "Focal CTC Loss for Chinese Optical Character Recognition on Unbalanced Datasets", Feng et al., https://www.hindawi.com/journals/complexity/2019/9345861/
# "Improved training for online end-to-end speech recognition systems", Kim et al., https://arxiv.org/abs/1711.02212
import torch
import torch.nn.functional as F

## generate example data
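# A minimal sketch of the equivalence claim above (assumptions: the example
# shapes, blank index 0, and the use of F.ctc_loss as the vanilla reference are
# illustrative, not the gist's exact test). Recover the CTC posteriors gamma
# from the vanilla gradient, then check that cross-entropy against the detached
# posteriors yields the same gradient.
torch.manual_seed(0)
T, B, C = 16, 2, 8                                    # time, batch, classes
logits = torch.randn(T, B, C, requires_grad=True)
targets = torch.randint(1, C, (B, 5))
input_lengths = torch.full((B,), T, dtype=torch.long)
target_lengths = torch.full((B,), 5, dtype=torch.long)

log_probs = F.log_softmax(logits, dim=-1)
vanilla = F.ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='sum')
grad_vanilla, = torch.autograd.grad(vanilla, logits, retain_graph=True)

# For sum reduction, d(ctc)/d(logits) = softmax(logits) - gamma, so:
gamma = log_probs.exp() - grad_vanilla

# Cross-entropy against the detached posteriors reproduces the same gradient.
crossentropy = -(gamma.detach() * F.log_softmax(logits, dim=-1)).sum()
grad_crossentropy, = torch.autograd.grad(crossentropy, logits)
assert torch.allclose(grad_vanilla, grad_crossentropy, atol=1e-5)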
" Append this to your vimrc file | |
""""""""""""""""""""""""""""""""" | |
" auto-update the timestamp right before saving a file | |
" The Timestamp format is : Sat 07 Dec 2013 12:51:00 AM CST | |
" Within the 20 first lines, the matched lines are : | |
" Last [Cc]hange(d) | |
" Changed | |
" Last [Mm]odified | |
" Modified | |
" Last [Uu]pdate(d) |
from typing import Optional
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from baal.active import ActiveLearningDataset
class AlternateIterator: |
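    # Sketch of a plausible body (assumption: the original implementation is
    # truncated here, so the names and behaviour below are illustrative):
    # alternate batches between two DataLoaders, e.g. a labelled set and a pool.
    def __init__(self, dl_1: DataLoader, dl_2: DataLoader):
        self.dl_1 = dl_1
        self.dl_2 = dl_2

    def __iter__(self):
        # Yield one batch from each loader in turn until the shorter is exhausted.
        for batch_1, batch_2 in zip(self.dl_1, self.dl_2):
            yield batch_1
            yield batch_2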
#!/bin/bash
#
# Install a specific version of a Homebrew formula
#
# Usage: brewv.sh formula_name desired_version
#
# Notes:
# - this will unshallow your brew repo copy. It might take some time the first
#   time you call this script
# - it will uninstall (instead of unlink) all your other versions of the formula. |
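
# A sketch of the approach described above (hedged: exact Homebrew internals
# vary across brew versions, so treat this as an outline rather than a drop-in
# script).
formula="$1"
version="$2"
repo="$(brew --repository homebrew/core)"

# Unshallow the tap so the full formula history is searchable (slow the first time).
git -C "$repo" fetch --unshallow 2>/dev/null || true

# Find the most recent commit of the formula file that mentions the desired version.
sha="$(git -C "$repo" log --format='%H' -S "$version" -- "Formula/$formula.rb" | head -n 1)"
[ -n "$sha" ] || { echo "version $version not found for $formula" >&2; exit 1; }

# Uninstall (not just unlink) other versions, install from the pinned revision,
# then restore the tap to its current state.
brew uninstall --ignore-dependencies "$formula" 2>/dev/null || true
git -C "$repo" checkout "$sha" -- "Formula/$formula.rb"
brew install "$formula"
git -C "$repo" checkout HEAD -- "Formula/$formula.rb"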
import torch
import torch.nn as nn
from torch.autograd import Variable

# Print the PyTorch version; the version used in this gist is 0.3.0.post4.
print(torch.__version__)

# There are three steps to demonstrate a multi-head network:
# 1. build the network |
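
# A minimal sketch of step 1 (assumption: the layer sizes and the two-head
# layout are illustrative; a shared trunk feeds two task-specific output heads).
class MultiHeadNet(nn.Module):
    def __init__(self, in_dim=16, hidden=32, out_a=4, out_b=2):
        super(MultiHeadNet, self).__init__()
        self.trunk = nn.Sequential(nn.Linear(in_dim, hidden), nn.ReLU())
        self.head_a = nn.Linear(hidden, out_a)   # e.g. classification head
        self.head_b = nn.Linear(hidden, out_b)   # e.g. auxiliary head

    def forward(self, x):
        h = self.trunk(x)
        return self.head_a(h), self.head_b(h)

# Usage: both heads share the trunk's features in a single forward pass.
out_a, out_b = MultiHeadNet()(Variable(torch.randn(8, 16)))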
import time
from itertools import count

import matplotlib.pyplot as plt


def plot_timings(loader, n_batches, model_time=0.2, max_time=2.5):
    # Visualize when each DataLoader worker delivers its batches over time.
    fig, ax = plt.subplots()
    ax.set_axisbelow(True)
    ax.yaxis.grid(which="major", color='black', linewidth=1)
    zero_time = time.time()
    worker_ids = {}
    worker_count = count()
import torch
import torch.nn.functional as F


def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (..., vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
    """
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
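        # Completion sketch (the truncated remainder, following the standard
        # top-k / nucleus filtering implementation; assumes 1-D logits):
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Remove tokens with cumulative probability above the threshold, shifted
        # right so the first token above the threshold is kept as well.
        sorted_indices_to_remove = cumulative_probs > top_p
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits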
import onnx
import struct
import torch
import torch.nn as nn
import torchvision as tv
import warnings

# enum DataType {
# UNDEFINED = 0; |
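# The listing above is the start of ONNX's TensorProto.DataType enum. The same
# mapping is also exposed programmatically, which is handy when decoding raw
# tensor bytes with struct, e.g.:
#     onnx.TensorProto.DataType.Name(onnx.TensorProto.FLOAT)  # -> 'FLOAT' (= 1)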