Skip to content

Instantly share code, notes, and snippets.

@yukw777
yukw777 / game-v0-training-results.ipynb
Created December 7, 2017 04:18
Game v0 Training Results
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
@yukw777
yukw777 / game-v1-training-results.ipynb
Created December 7, 2017 05:20
Game v1 Training Results
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
/*
This server simply bombards each websocket connection with a message every millisecond, up to 20k messages.
How to run:
1. Run Pushpin locally with the default publish port and the backend port set to 8080.
ex: pushpin --merge-output --port=17999 --route="* localhost:8080,over_http" --config pushpin.conf
2. Start this server in another terminal
ex: node loadtest.js
@yukw777
yukw777 / pl_result_ex.py
Created August 27, 2020 15:30
PyTorch Lightning Result Example
def training_step(self, batch: DataPoint, batch_idx: int) -> pl.TrainResult:
planes, target_move, target_val = batch
pred_move, pred_val = self(planes)
mse_loss, cross_entropy_loss, loss = self.loss(
pred_move, pred_val, target_move, target_val
)
result = pl.TrainResult(minimize=loss)
result.log("train_loss", loss, prog_bar=True)
result.log_dict(
{
@yukw777
yukw777 / pl_metrics_ex.py
Created August 27, 2020 15:34
PyTorch Lightning Metrics Example
def training_step(self, batch: DataPoint, batch_idx: int) -> pl.TrainResult:
planes, target_move, target_val = batch
pred_move, pred_val = self(planes)
mse_loss, cross_entropy_loss, loss = self.loss(
pred_move, pred_val, target_move, target_val
)
result = pl.TrainResult(minimize=loss)
result.log("train_loss", loss, prog_bar=True)
result.log_dict(
{
@yukw777
yukw777 / pl_data_module_ex.py
Created August 27, 2020 15:35
PyTorch Lightning Data Module Example
class DataModule(pl.LightningDataModule):
def __init__(
self,
train_data_dir: str,
val_data_dir: str,
test_data_dir: str,
train_dataloader_conf: Optional[DictConfig] = None,
val_dataloader_conf: Optional[DictConfig] = None,
test_dataloader_conf: Optional[DictConfig] = None,
):
@yukw777
yukw777 / hydra_package_directive_ex.yaml
Last active August 27, 2020 16:12
Hydra Package Directive Example
# @package _group_
# Hydra package directive: places this config under the config group's own
# package (`_group_`) when it is composed into the final config tree.
# Fully qualified class for Hydra to instantiate via hydra.utils.instantiate.
_target_: leela_zero_pytorch.network.NetworkLightningModule
# Keyword arguments forwarded to the target's constructor.
# NOTE(review): the two keys below are presumably nested under network_conf —
# indentation appears to have been lost in this paste; confirm against the gist.
network_conf:
residual_channels: 32
residual_layers: 8
@yukw777
yukw777 / big.yaml
Last active August 27, 2020 15:40
Hydra Object Instantiation Configuration Example
# @package _group_
# Hydra package directive: places this config under the config group's own
# package (`_group_`) when it is composed into the final config tree.
# Fully qualified class for Hydra to instantiate via hydra.utils.instantiate.
_target_: leela_zero_pytorch.network.NetworkLightningModule
# "big" network variant: wider (128 channels) but shallower (6 residual layers)
# than the sibling example config in this listing (32 channels / 8 layers).
# NOTE(review): the two keys below are presumably nested under network_conf —
# indentation appears to have been lost in this paste; confirm against the gist.
network_conf:
residual_channels: 128
residual_layers: 6
@yukw777
yukw777 / hydra_obj_inst_ex.py
Created August 27, 2020 15:41
Hydra Object Instantiation Example
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> Trainer:
logger.info(f"Training with the following config:\n{OmegaConf.to_yaml(cfg)}")
network = instantiate(cfg.network, cfg.train)
@yukw777
yukw777 / hydra_compose_api_unit_test_ex.py
Created August 27, 2020 15:43
Hydra Compose API Unit Tests Example
@pytest.mark.parametrize("network_size", ["small", "big", "huge"])
def test_train_network_size(monkeypatch, tmp_path, capsys, network_size):
with initialize(config_path="../leela_zero_pytorch/conf"):
cfg = compose(
config_name="config",
overrides=[
f"+network={network_size}",
"data.train_data_dir=tests/test-data",
"data.train_dataloader_conf.batch_size=2",
"data.val_data_dir=tests/test-data",