코드 예제 모음 (Code snippet collection)
Created
August 4, 2021 06:12
-
-
Save sunrise2575/e5dd9c5c10c3deb42087c185842bcdb5 to your computer and use it in GitHub Desktop.
Code snippet collection
Python multiprocessing (non-blocking)
import multiprocessing


def work():
    """Child-process entry point: prints a message and returns."""
    print("μμ νλ‘μΈμ€")


# The __main__ guard is required for multiprocessing: on spawn-based
# platforms (Windows, macOS default) the child re-imports this module,
# and an unguarded Process(...).start() would recurse endlessly.
if __name__ == "__main__":
    t = multiprocessing.Process(target=work)
    t.start()
    print("λΆλͺ¨ νλ‘μΈμ€")  # parent runs concurrently with the child
    t.join()
Javascript coroutine (non-blocking)
async function main() {
    // Helper returning a promise; its executor runs synchronously at the
    // moment of the call, so the child's log appears after the parent's.
    const child = () =>
        new Promise((resolve) => {
            console.log("μμ μ½λ£¨ν΄");
            resolve();
        });

    console.log("λΆλͺ¨ μ½λ£¨ν΄");
    await child();
    return 0;
}
main()
Rust thread (non-blocking)
use std::thread;

/// Spawns a child thread, prints from the parent, then waits for the child.
fn main() {
    let t = thread::spawn(move || {
        println!("μμ μ°λ λ");
    });
    println!("λΆλͺ¨ μ°λ λ");
    // join() returns a #[must_use] Result; unwrap propagates a child-thread
    // panic instead of silently discarding it as the original did.
    t.join().unwrap();
}
Tensorflow single-GPU & multi-GPU matrix multiplication
Python 3.7
TensorFlow 1.13
Numpy 1.16
To use TensorFlow 1.13, the three versions listed above (Python, NumPy, TensorFlow) must match exactly.
# NOTE: CUDA_VISIBLE_DEVICES must be set before TensorFlow is imported,
# which is why `import tensorflow` deliberately comes last here.
import numpy as np
import datetime
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'  # expose exactly two GPUs to TF
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
def singleGPU(A, B, n: int):
    """Time n chained self-matmuls of A and B on one GPU (TF1 graph mode).

    A, B: square TF tensors created by the caller (on CPU).
    n: number of repeated matmul steps applied to each operand.
    Prints the wall-clock time of a single Session.run of the final sum.
    """
    print("Single GPU")
    # Both operand chains are placed on the same device, so the two
    # matmul chains execute on gpu:0 without cross-device parallelism.
    with tf.device('/gpu:0'):
        a = tf.identity(A)
        b = tf.identity(B)
        for _ in range(n):
            a = tf.matmul(a, a)
            b = tf.matmul(b, b)
    with tf.device('/cpu:0'):
        total = tf.add(a, b)  # renamed from `sum`: don't shadow the builtin
    t_start = datetime.datetime.now()
    with tf.Session() as sess:
        sess.run(total)
    t_end = datetime.datetime.now()
    print(t_end - t_start)
def multiGPU(A, B, n: int):
    """Time n chained self-matmuls of A and B split across two GPUs.

    A, B: square TF tensors created by the caller (on CPU).
    n: number of repeated matmul steps applied to each operand.
    A's chain runs on gpu:0 and B's on gpu:1, so the two chains can
    execute in parallel; prints the wall-clock time of one Session.run.
    """
    print("multi GPU")
    with tf.device('/gpu:0'):
        a = tf.identity(A)
        for _ in range(n):
            a = tf.matmul(a, a)
    with tf.device('/gpu:1'):
        b = tf.identity(B)
        for _ in range(n):
            b = tf.matmul(b, b)
    with tf.device('/cpu:0'):
        total = tf.add(a, b)  # renamed from `sum`: don't shadow the builtin
    t_start = datetime.datetime.now()
    with tf.Session() as sess:
        sess.run(total)
    t_end = datetime.datetime.now()
    print(t_end - t_start)
def main():
    """Build two random 16384x16384 matrices on the CPU and benchmark the
    single-GPU and multi-GPU matmul variants on them."""
    size = 1 << 14
    with tf.device('/cpu:0'):
        mat_a = tf.random.uniform((size, size), dtype=tf.float32)
        mat_b = tf.random.uniform((size, size), dtype=tf.float32)
    depth = 10
    print("A: {}, B: {}".format(mat_a.shape, mat_b.shape))
    singleGPU(mat_a, mat_b, depth)
    multiGPU(mat_a, mat_b, depth)


main()
Example of training on sequential data with a 1-d convolution.
If each element of the sequence is a vector rather than a scalar, increase `in_channels` in MyModel to match the vector dimension.
Normalizing the input values to the range -1 to 1 before feeding them in improves performance considerably.
import torch
class MyModel(torch.nn.Module):
    """1-d CNN over a scalar sequence: one Conv1d feature layer followed by
    a kernel-size-1 Conv1d acting as a per-timestep linear head.

    forward consumes only the first ``time_length - 1`` timesteps of its
    input, so the output length is ``time_length - 1 - kernel_size + 1``.
    """

    def __init__(self, time_length, kernel_size):
        super().__init__()
        self.time_length = time_length
        self.conv = torch.nn.Conv1d(
            in_channels=1, out_channels=32, kernel_size=kernel_size)
        self.fc = torch.nn.Conv1d(
            in_channels=32, out_channels=1, kernel_size=1)
        self.relu = torch.nn.ReLU()

    def forward(self, x):
        # Use only the first time_length-1 timesteps; the final step of x
        # never reaches the network.
        trimmed = x[:, :, :self.time_length - 1]
        features = self.relu(self.conv(trimmed))
        return self.fc(features)
def main_():
    """Train MyModel to regress a shifted slice of synthetic ramp sequences.

    Builds batch_size sequences [b, b+1, ..., b+time_length-1], uses the
    slice starting at kernel_size as the target, trains with L1 loss and
    SGD, and logs train/test loss every 25 epochs plus the final epoch.
    """
    batch_size = 100
    time_length = 32
    kernel_size = 16
    total_epoch = 350
    # Row b is the ramp [b, ..., b+time_length-1], shaped (batch, 1, time).
    x = torch.Tensor([[range(b, b + time_length)] for b in range(batch_size)])
    y = x[:, :, kernel_size:]
    cut = int(0.8 * batch_size)  # 80/20 train/test split
    x_train, y_train = x[:cut], y[:cut]
    x_test, y_test = x[cut:], y[cut:]
    model = MyModel(time_length=time_length, kernel_size=kernel_size)
    loss_fn = torch.nn.L1Loss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    # BUG FIX: loop bound was a hard-coded 350, drifting from total_epoch.
    for epoch in range(total_epoch):
        # train
        y_train_hat = model(x_train)
        loss_train = loss_fn(y_train_hat, y_train)
        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()
        # test — evaluation needs no gradient bookkeeping
        with torch.no_grad():
            y_test_hat = model(x_test)
            loss_test = loss_fn(y_test_hat, y_test)
        # BUG FIX: the original condition `total_epoch == 25 - 1` was
        # always False; the intent was to also log the final epoch.
        if epoch % 25 == 0 or epoch == total_epoch - 1:
            print("epoch {:4d}, train loss: {:.3f}, test loss: {:.3f}".format(
                epoch, loss_train.item(), loss_test.item()))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Python threading (non-blocking; caution: because of the GIL this effectively behaves single-threaded for CPU work)
GIL (Global Interpreter Lock)
In Python, the lock over all resources within a single process is managed globally, so only one thread at a time holds control and executes. Because of the GIL, only one thread performs computation at any moment, so multi-threaded CPU-bound code runs in roughly the same time as single-threaded code. The GIL applies to CPU execution: while one thread finishes its CPU work and is performing I/O, another thread can execute its CPU work concurrently.