Let's check out the PR:
git fetch origin pull/625/head:dbrx
git switch dbrx
pip install -vvv --no-build-isolation -e .
Download the model:
File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/trl/trainer/utils.py", line 338, in __call__
to_pad = [torch.LongTensor(ex[k]) for ex in features]
File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/trl/trainer/utils.py", line 338, in <listcomp>
to_pad = [torch.LongTensor(ex[k]) for ex in features]
File "/root/miniconda3/envs/py3.10/lib/python3.10/site-packages/trl/trainer/utils.py", line 338, in <listcomp>
to_pad = [torch.LongTensor(ex[k]) for ex in features]
TypeError: 'NoneType' object cannot be interpreted as an integer
to_pad = [torch.LongTensor(ex[k]) for ex in features]
TypeError: 'NoneType' object cannot be interpreted as an integer
return inner_training_loop(
Let's check out the PR:
git fetch origin pull/625/head:dbrx
git switch dbrx
pip install -vvv --no-build-isolation -e .
Download the model:
# Build an inference-only Llama model shell without allocating real weights.
# NOTE(review): the tokenizer comes from the 7b repo while the config comes
# from the 70b repo — confirm this mismatch is intentional.
from transformers import LlamaConfig as LC, LlamaForCausalLM as LLM, LlamaTokenizer as LT
from accelerate import init_empty_weights, load_checkpoint_and_dispatch
import torch

lt = LT.from_pretrained("NousResearch/Llama-2-7b-hf")
c = LC.from_pretrained("NousResearch/Llama-2-70b-hf")

# Extend the context window and raise the RoPE base for long-context use.
# NOTE(review): 32764 looks like a typo for 32768 — confirm against the source.
c.max_position_embeddings = 32764
c.rope_theta = 1000000

# Instantiate on the meta device: no memory is used for weights yet; they are
# expected to be loaded later (e.g. via load_checkpoint_and_dispatch).
with init_empty_weights():
    m = LLM(c)

m = m.half().eval()      # fp16, evaluation mode
m.requires_grad_(False)  # freeze all parameters — inference only
#!/bin/sh

# log FORMAT [ARGS...]
# Print a printf-style formatted message to stderr.
log() {
	format="$1"; shift
	# shellcheck disable=SC2059  # $format is deliberately used as the format string
	>&2 printf "$format\n" "$@"
}
usage() { | |
>&2 cat <<EOF |
You can use the ssacli
(Smart Storage Administrator command line interface) tool to manage any supported HP Smart Array Controller in your Proxmox host without needing to reboot your server to access Smart Storage Administrator in the BIOS. That means no host downtime when managing your storage.
The CLI is not as convenient as the GUI provided by the BIOS or desktop utilities, but it still allows you to fully manage your controller, physical disks, and logical drives on the fly with no Proxmox host downtime.
ssacli replaces the older hpssacli, but shares the same syntax and adds support for newer servers and controllers.
[
  {
    "tourBlurb" : "Big Sur is big country. The Big Sur Retreat takes you to the most majestic part of the Pacific Coast and shows you the secret trails.",
    "tourName" : "Big Sur Retreat",
    "tourPackage" : "Backpack Cal",
    "tourBullets" : "\"Accommodations at the historic Big Sur River Inn, Privately guided hikes through any of the 5 surrounding national parks, Picnic lunches prepared by the River Inn kitchen, Complimentary country breakfast, Admission to the Henry Miller Library and the Point Reyes Lighthouse \"",
    "tourRegion" : "Central Coast",
    "tourDifficulty" : "Medium",
    "tourLength" : 3,
    "tourPrice" : 750,
# Spark NLP image-classification pipeline setup.
from sparknlp.annotator import *
from sparknlp.base import *
from pyspark.ml import Pipeline

# Wrap raw images from the DataFrame's "image" column into Spark NLP's
# internal image schema for downstream annotators.
imageAssembler = ImageAssembler() \
    .setInputCol("image") \
    .setOutputCol("image_assembler")
imageClassifier = ViTForImageClassification \ |
# Load a ViT image classifier and place it on the GPU when one is available.
from transformers import ViTFeatureExtractor, ViTForImageClassification
from transformers import pipeline
import torch

# Prefer the first CUDA device, fall back to CPU.
device = "cuda:0" if torch.cuda.is_available() else "cpu"
print(device)

feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
model = model.to(device)
# Stream a dataset through the classification pipeline at increasing batch
# sizes to compare throughput.
# NOTE(review): `tqdm` and `dataset` are not defined in this snippet — they
# must come from surrounding context; confirm before running standalone.
from transformers import pipeline

# device=-1 pins the pipeline to CPU regardless of where `model` currently lives.
pipe = pipeline("image-classification", model=model, feature_extractor=feature_extractor, device=-1)

for batch_size in [1, 8, 32, 64, 128]:
    print("-" * 30)
    print(f"Streaming batch_size={batch_size}")
    # Iterate only to drive the generator; outputs are discarded (timing run).
    for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)):
        pass
# Build a CPU-only ViT image-classification pipeline.
from transformers import ViTFeatureExtractor, ViTForImageClassification
from transformers import pipeline

feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224')
model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
# device=-1 selects CPU.
pipe = pipeline("image-classification", model=model, feature_extractor=feature_extractor, device=-1)