Get the processor core affinity for a process (the cores on which it is allowed to run):
taskset -cp <PID>
Example:
[root@user]# taskset -cp 74515
pid 74515's current affinity list: 0-7
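Set the processor core affinity for a process (the core list 0-3 and the PID below are illustrative):
taskset -cp <core-list> <PID>
Example:
[root@user]# taskset -cp 0-3 74515
pid 74515's current affinity list: 0-7
pid 74515's new affinity list: 0-3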
# Correlation Matrix Heatmap
import matplotlib.pyplot as plt
import seaborn as sns

f, ax = plt.subplots(figsize=(10, 6))
corr = wines.corr()
hm = sns.heatmap(corr.round(2), annot=True, ax=ax, cmap="coolwarm", fmt='.2f',
                 linewidths=.05)
f.subplots_adjust(top=0.93)
t = f.suptitle('Wine Attributes Correlation Heatmap', fontsize=14)
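The snippet assumes a wines DataFrame is already in scope. A minimal sketch of one way to build it from the UCI wine-quality CSVs (the URLs and the concatenation are assumptions, not part of the original):

import pandas as pd

# hypothetical setup: the original does not show where `wines` comes from
base = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/'
red = pd.read_csv(base + 'winequality-red.csv', sep=';')
white = pd.read_csv(base + 'winequality-white.csv', sep=';')
wines = pd.concat([red, white], ignore_index=True)  # numeric columns feed wines.corr()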
import os
import numpy as np

train_dir = 'training_data'
val_dir = 'validation_data'
test_dir = 'test_data'

train_files = np.concatenate([cat_train, dog_train])
validate_files = np.concatenate([cat_val, dog_val])
test_files = np.concatenate([cat_test, dog_test])

# create the train/validation directories if they don't exist yet
os.makedirs(train_dir, exist_ok=True)
os.makedirs(val_dir, exist_ok=True)
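The fragment stops before the files are copied into place. A minimal sketch of the likely next step with shutil (the copy loop, and creating test_dir, are assumptions not shown above):

import shutil

os.makedirs(test_dir, exist_ok=True)  # assumed; the fragment only creates train/val
for fn in train_files:
    shutil.copy(fn, train_dir)
for fn in validate_files:
    shutil.copy(fn, val_dir)
for fn in test_files:
    shutil.copy(fn, test_dir)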
from nltk.parse.stanford import StanfordDependencyParser

sdp = StanfordDependencyParser(path_to_jar='E:/stanford/stanford-parser-full-2015-04-20/stanford-parser.jar',
                               path_to_models_jar='E:/stanford/stanford-parser-full-2015-04-20/stanford-parser-3.5.2-models.jar')
sentence = 'The quick brown fox jumps over the lazy dog'  # example input; not defined in the original fragment
result = list(sdp.raw_parse(sentence))

# print the dependency tree
dep_tree = [parse.tree() for parse in result][0]
print(dep_tree)
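Each parse in result is an nltk DependencyGraph, so the typed dependencies can also be inspected directly; a small usage sketch:

# list (governor, relation, dependent) triples from the first parse
for governor, relation, dependent in result[0].triples():
    print(governor, relation, dependent)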
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.ERROR)

# train_and_evaluate_with_sentence_encoder is a user-defined helper (not shown in this fragment)
results = {}
results["nnlm-en-dim128"] = train_and_evaluate_with_sentence_encoder(
    "https://tfhub.dev/google/nnlm-en-dim128/1", path='/storage/models/nnlm-en-dim128_f/')
results["nnlm-en-dim128-with-training"] = train_and_evaluate_with_sentence_encoder(
    "https://tfhub.dev/google/nnlm-en-dim128/1", train_module=True, path='/storage/models/nnlm-en-dim128_t/')
from keras.preprocessing import text
from keras.utils import np_utils
from keras.preprocessing import sequence

tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(norm_bible)
word2id = tokenizer.word_index

# build vocabulary of unique words
word2id['PAD'] = 0
from keras.preprocessing import text

tokenizer = text.Tokenizer()
tokenizer.fit_on_texts(norm_bible)
word2id = tokenizer.word_index
id2word = {v: k for k, v in word2id.items()}

vocab_size = len(word2id) + 1
embed_size = 100
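With the vocabulary built, the usual next step is to turn each document into a sequence of word ids; a short sketch, assuming norm_bible is the corpus used above:

wids = [[word2id[w] for w in text.text_to_word_sequence(doc)] for doc in norm_bible]
print('Vocabulary size:', vocab_size)
print('Sample word ids:', wids[0][:10])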
# note: the legacy Merge layer only exists in older Keras (1.x / early 2.x);
# newer Keras replaced it with layers such as Dot (see the sketch below)
from keras.layers import Merge
from keras.layers.core import Dense, Reshape
from keras.layers.embeddings import Embedding
from keras.models import Sequential

# build skip-gram architecture: embed a single target word
word_model = Sequential()
word_model.add(Embedding(vocab_size, embed_size,
                         embeddings_initializer="glorot_uniform",
                         input_length=1))
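The fragment stops after the word model. A sketch of the rest of the architecture, using the functional API's Dot layer as a modern stand-in for the legacy Merge import above (the layer sizes follow the fragment; everything else is an assumption):

from keras.layers import Input, Dot, Dense, Reshape, Embedding
from keras.models import Model

word_input = Input(shape=(1,))
context_input = Input(shape=(1,))
# embed target and context words, then flatten (batch, 1, embed) -> (batch, embed)
word_emb = Reshape((embed_size,))(Embedding(vocab_size, embed_size, input_length=1)(word_input))
context_emb = Reshape((embed_size,))(Embedding(vocab_size, embed_size, input_length=1)(context_input))

# dot product scores each (word, context) pair; sigmoid gives P(real skip-gram pair)
score = Dot(axes=-1)([word_emb, context_emb])
output = Dense(1, activation='sigmoid')(score)

model = Model(inputs=[word_input, context_input], outputs=output)
model.compile(loss='binary_crossentropy', optimizer='rmsprop')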
%%time
import json
import numpy as np

sample_test_data = test_images
sample_test_labels = test_labels
IMG_DIMS = (32, 32)

# stack the grayscale images into 3 channels, resize each image, and scale to [0, 1]
sample_test_data_processed = (np.array([resize_image_array(img,
                                                           img_size_dims=IMG_DIMS)
                                        for img in np.stack([sample_test_data]*3,
                                                            axis=-1)])) / 255.

# build a TF Serving REST request; the "instances" key follows the standard
# predict-API payload format (the original line was truncated at this point)
data = json.dumps({"signature_name": "serving_default",
                   "instances": sample_test_data_processed.tolist()})
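To actually query the served model, the request is POSTed to the model's REST endpoint; a sketch, where the host, port, and model name cifar10 are assumptions:

import requests

# hypothetical endpoint; substitute your serving host and model name
url = 'http://localhost:8501/v1/models/cifar10:predict'
headers = {"content-type": "application/json"}
response = requests.post(url, data=data, headers=headers)
predictions = json.loads(response.text)['predictions']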
# set java path
import os
java_path = r'C:\Program Files\Java\jdk1.8.0_102\bin\java.exe'
os.environ['JAVAHOME'] = java_path

from nltk.parse.stanford import StanfordParser
scp = StanfordParser(path_to_jar='E:/stanford/stanford-parser-full-2015-04-20/stanford-parser.jar',
                     path_to_models_jar='E:/stanford/stanford-parser-full-2015-04-20/stanford-parser-3.5.2-models.jar')
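A small usage sketch mirroring the dependency-parser example above (the sentence is illustrative):

sentence = 'The quick brown fox jumps over the lazy dog'
result = list(scp.raw_parse(sentence))
# print the constituency parse tree
print(result[0])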