Start a new tmux session:
tmux
Start a new tmux session with a name:
tmux new -s myname
import torch
import torch.nn as nn


def create_child_class(parent_class):
    """Build and return a subclass of *parent_class* with a learnable scale.

    Parameters
    ----------
    parent_class : type
        The base class (e.g. an ``nn.Module`` subclass) to derive from.

    Returns
    -------
    type
        The generated ``MyLora`` subclass.
    """

    class MyLora(parent_class):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Child-specific initialization: a learnable per-module scale,
            # initialized to 1 so the wrapped module starts unmodified.
            self.scale = nn.Parameter(torch.ones(1))

    # Bug fix: the factory previously fell off the end and returned None,
    # so callers could never instantiate the generated class.
    return MyLora
| """ | |
| This program reproduces an issue where: | |
| - An Op's inputs are all DTensor | |
| - The Op's outputs are all DTensor | |
| - One output is not used by the downstream | |
| - Hence the gradients should be all zeros for the unused output | |
| Run with: torchrun --nnodes 1 --nproc-per-node 4 dtensor_unused_output.py | |
| """ |
| import torch | |
| import torch.nn as nn | |
| class SimpleLinear(nn.Module): | |
| """A simple module with just a linear layer.""" | |
| def __init__(self, dim): | |
| super().__init__() | |
| self.linear = nn.Linear(dim, dim) |
'''Solution to the Cartpole problem using Policy Gradients in Tensorflow.'''
# written October 2016 by Sam Greydanus
# inspired by gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
| import numpy as np | |
| import gym | |
| import tensorflow as tf | |
# Hyperparameters for the CartPole policy-gradient agent.
n_obs = 4   # dimensionality of the observation vector
h = 128     # width of the hidden layer
'''Solves Pong with Policy Gradients in Tensorflow.'''
# written October 2016 by Sam Greydanus
# inspired by karpathy's gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
| import numpy as np | |
| import gym | |
| import tensorflow as tf | |
# Hyperparameters for the Pong policy-gradient agent.
n_obs = 80 * 80  # dimensionality of observations (flattened 80x80 grid)
h = 200          # number of hidden layer neurons
#!/bin/sh
# dropbox service
#
# Linux accounts that should each run their own Dropbox client;
# edit this list before installing the service.
DROPBOX_USERS="user1 user2"

# Path to the Dropbox daemon binary (relative path, resolved per user).
DAEMON=.dropbox-dist/dropbox
| start() { | |
| echo "Starting dropbox..." | |
| for dbuser in $DROPBOX_USERS; do |