Skip to content

Instantly share code, notes, and snippets.

import argparse
import glob
import tqdm
import json
from transformers import LlamaTokenizer
from nltk import tokenize
# Load the Llama-2-7B tokenizer from a local HF checkpoint directory.
# NOTE(review): path is machine-specific ('/home/models/...') — presumably
# needs adjusting per host; verify the directory contains tokenizer files.
pretrained_model_path: str = '/home/models/Llama-2-7b-hf'
# Module-level singleton used by the rest of the script(s) pasted below.
tokenizer = LlamaTokenizer.from_pretrained(pretrained_model_path)
import json
import multiprocessing
import pathlib
import ebooklib
import typer
from ebooklib import epub
from markdownify import markdownify as md
from transformers import LlamaTokenizer
from nltk import tokenize
import json
import multiprocessing
import pathlib
import ebooklib
import typer
from ebooklib import epub
from markdownify import markdownify as md
from transformers import LlamaTokenizer
from nltk import tokenize
@ewof
ewof / quant_autogptq.py
Created December 15, 2023 15:37
It's just TheBloke's script, with VMware/open-instruct support
#
# A wrapper script to quantise models with GPTQ, from one of various datasets
#
import time
import os
import logging
import random
from datasets import load_dataset