Install MLX LM and openai:

pip install mlx-lm openai
from typing import Callable, Tuple | |
import operator | |
from functools import reduce | |
from itertools import product | |
import mlx.core as mx | |
def _interpolate( | |
x: mx.array, scale_factor: Tuple, indices_fn: Callable, align_corners: bool = False | |
): |
This is a short article on a common type of not-yet-supported operation in MLX: ops where the output shape depends on the input data. Here's an outline:
import time | |
import mlx.core as mx | |
import mlx.nn as nn | |
from dataclasses import dataclass | |
from typing import Dict, Optional, Tuple, Union | |
@dataclass | |
class ModelArgs: |
import numpy as np | |
def fleiss_kappa(ratings): | |
""" | |
Args: | |
ratings: An N x R numpy array. N is the number of | |
samples and R is the number of reviewers. Each | |
entry (n, r) is the category assigned to example | |
n by reviewer r. | |
Returns: |
""" | |
Author: Awni Hannun

This is an example CTC decoder written in Python. The code is
intended to be a simple example and is not designed to be
especially efficient.

The algorithm is a prefix beam search for a model trained
with the CTC loss function.
import time | |
import torch | |
import torch.nn as nn | |
from torch.autograd import Variable | |
def attend_bmm(eh, dhx): | |
dhx = dhx.unsqueeze(1) | |
pax = torch.bmm(eh, dhx.transpose(1,2)).squeeze(dim=2) | |
ax = nn.functional.softmax(pax) |
import torch | |
import torch.autograd as autograd | |
import numpy as np | |
np.random.seed(11) | |
for size in range(1, 2000, 1): | |
a = np.random.randint(0, 2, size).astype(np.uint8) | |
av = autograd.Variable(torch.ByteTensor(a)) |