-
on source machine (behind the network), run:
ssh -N -R 2222:localhost:22 YOURUSER@YOURSERVER
-
on YOURSERVER, run:
ssh -l SOURCEUSER -p 2222 localhost
-
If you want to automate it, do something like...
-----
2023-06-30 23:25:38.764871: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX512F AVX512_VNNI AVX512_BF16 AVX_VNNI
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-06-30 23:25:38.925571: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
--------------------------------------------------------------------------
WARNING: No preset parameters were found for the device that Open MPI
detected:
#!/bin/bash
# -----
# WARNING: this does NOT work on Lambda Labs H100 instances... yet
# -----
# Pin transformers to a known-good commit, plus the companion library
# versions known to work with it, per the upstream discussion:
# From: https://huggingface.co/tiiuae/falcon-40b/discussions/38#6479de427c18dca75e9a0903
#
# NOTE: stray "| |" column separators from the original paste were removed;
# they made every line an invalid shell pipeline.
set -e  # stop at the first failed install rather than continuing with a broken env
pip install git+https://www.github.com/huggingface/transformers@2e2088f24b60d8817c74c32a0ac6bb1c5d39544d
pip install huggingface-hub==0.15.1
pip install tokenizers==0.13.3
pip install safetensors==0.3.1
"""Smoke-test: load a tokenizer and the GPT-J-6B causal-LM checkpoint."""
from transformers import AutoTokenizer, AutoModelForCausalLM
import time  # kept: may be used by code outside this chunk

print("Loading tokenizer...")
# NOTE(review): the tokenizer is loaded from "gpt2" while the model is
# "EleutherAI/gpt-j-6B"; GPT-J publishes its own tokenizer with a different
# vocab size — confirm this mismatch is intentional before relying on it.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
print("Loading model...")
# Downloads a multi-GB checkpoint on first run; requires network access.
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
print("Model Loaded..!")
#!/usr/bin/env python
"""Flag definitions for a docx-to-text extraction script.

Stray "| |" separators from the original paste were removed; they made
every line a SyntaxError.
"""
import os

import docx2txt
from absl import app
from absl import flags

FLAGS = flags.FLAGS

# Destination file for the extracted text.
flags.DEFINE_string("output", "output.txt", "The path of final output.")
# Input path; no default — presumably required by the (unseen) main(). Verify.
flags.DEFINE_string("source", None, "The source path to process.")
number of messages 2:
fingerprint 388: 4, 1024: 8
number of messages 4:
fingerprint 388: 4, 448: 6, 1024: 8, 1025: 8
number of messages 9:
fingerprint 274: 2, 388: 4, 448: 6, 752: 2, 817: 8, 906: 8, 924: 8, 1024: 8, 1025: 8
number of messages 14:
fingerprint 274: 2, 388: 4, 448: 6, 520: 8, 532: 8, 564: 8, 752: 2, 817: 8, 906: 8, 924: 8, 937: 8, 996: 8, 1024: 8, 1025: 8
number of messages 18:
fingerprint 274: 2, 388: 4, 448: 6, 520: 8, 532: 8, 564: 8, 752: 2, 817: 8, 820: 8, 906: 8, 924: 8, 926: 3, 937: 8, 995: 8, 996: 8, 1000: 8, 1024: 8, 1025: 8
#!/usr/bin/env python
# Setup (run manually once, outside this script):
#!pip install sklearn xgboost
#!wget https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv
#
# Stray "| |" separators from the original paste were removed; they made
# every import line a SyntaxError.
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pprint
#!/bin/sh
# Fetch build dependencies (via EPEL) and unpack Mono 2.10.8 sources on
# CentOS/RHEL 6. Stray "| |" paste residue removed — it broke every line.
ulimit -v unlimited
wget http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
rpm -Uvh epel-release-6-8.noarch.rpm
# BUG FIX: "-w" is not a yum option; "-y" (assume-yes) is what an
# unattended install script needs here.
yum -y install bison gettext glib2 freetype fontconfig libpng libpng-devel libX11 libX11-devel glib2-devel libgdi* libexif glibc-devel urw-fonts java unzip gcc gcc-c++ automake autoconf libtool make bzip2 wget
cd /usr/local/src
wget http://download.mono-project.com/sources/mono/mono-2.10.8.tar.gz
# http://download.mono-project.com/sources/mono/mono-3.6.0.tar.bz2
tar zxvf mono-2.10.8.tar.gz
cd mono-2.10.8
# Modified from: https://github.com/xeb/fastText-docker/blob/master/Dockerfile
# To build & run, do:
# docker build -t fasttext-py3.5 -f Dockerfile.py35 .
# docker run --rm -it fasttext-py3.5 ./eval.py
#
# NOTE: stray "| |" paste residue removed; after "FROM python:3.5" it was
# parsed as part of the image reference and broke the build.
FROM python:3.5
RUN apt-get update && apt-get install -y \ | |
build-essential \ | |
wget \ | |
git \ |