@tyjeon24
Created May 20, 2023 08:24
Understanding a PyTorch regression model quickly.
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1. What is this code?\n",
"* A simple NeuralNet model using pyTorch.\n",
"### 2. How can I use this?\n",
"* Just run and see the output, and modify as you want."
]
},
{
"cell_type": "code",
"execution_count": 82,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from sklearn.datasets import make_regression\n",
"\n",
"class NeuralNet(torch.nn.Module):\n",
" def __init__(self, input_size, hidden_size):\n",
" super(NeuralNet, self).__init__()\n",
" self.input_size = input_size\n",
" self.hidden_size = hidden_size\n",
" self.linear_1 = torch.nn.Linear(self.input_size, self.hidden_size)\n",
" self.relu = torch.nn.ReLU()\n",
" self.linear_2 = torch.nn.Linear(self.hidden_size, 1)\n",
" \n",
" def forward(self, input_tensor):\n",
" linear1 = self.linear_1(input_tensor)\n",
" relu = self.relu(linear1)\n",
" linear2 = self.linear_2(relu)\n",
" return linear2\n",
"\n"
]
},
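{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the model class above (a minimal sketch, independent of the dataset prepared below): feed a random batch through an untrained `NeuralNet` and confirm the output shape is `(batch_size, 1)`. The `dummy_model` and `dummy_input` names are only illustrative."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sanity check: 8 random samples with 50 features each\n",
"# (50 matches the n_features used below; any size works as long as it\n",
"# matches the input_size passed to NeuralNet).\n",
"dummy_model = NeuralNet(input_size=50, hidden_size=128)\n",
"dummy_input = torch.randn(8, 50)\n",
"print(dummy_model(dummy_input).shape)  # torch.Size([8, 1])"
]
},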
{
"cell_type": "code",
"execution_count": 83,
"metadata": {},
"outputs": [],
"source": [
"# [1] Data preparation\n",
"X, y = make_regression(\n",
" n_samples=5000, # 5000 data\n",
" n_features=50, # 50 columns = 50 dimensions\n",
" noise=1, # randomness(standard deviation)\n",
" random_state=42\n",
" )\n",
"\n",
"X, y = torch.FloatTensor(X), torch.FloatTensor(y)\n",
"\n",
"X_train = X[:4000]\n",
"X_test = X[4000:]\n",
"y_train = y[:4000]\n",
"y_test = y[4000:]"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"# [2] Hyperparameters\n",
"number_of_x_features = X.shape[1]\n",
"model = NeuralNet(number_of_x_features, 128)\n",
"learning_rate = 0.03\n",
"criterion = torch.nn.MSELoss()\n",
"epochs = 2000\n",
"optimizer = torch.optim.Adam(model.parameters(), lr = 0.03)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Before Training, test loss is 35319.47265625\n",
"Train loss at 0 is 38226.875\n",
"Train loss at 500 is 1.2112720012664795\n",
"Train loss at 1000 is 0.18726365268230438\n",
"Train loss at 1500 is 0.04877258837223053\n"
]
}
],
"source": [
"# [3] Train\n",
"model.eval()\n",
"test_loss_before = criterion(model(X_test).squeeze(), y_test)\n",
"print('Before Training, test loss is {}'.format(test_loss_before.item()))\n",
"\n",
"for epoch in range(epochs):\n",
" model.train()\n",
" optimizer.zero_grad()\n",
" train_output = model(X_train)\n",
" train_loss = criterion(train_output.squeeze(), y_train)\n",
" if epoch % 500 == 0:\n",
" print('Train loss at {} is {}'.format(epoch, train_loss.item()))\n",
" train_loss.backward()\n",
" optimizer.step()"
]
},
{
"cell_type": "code",
"execution_count": 86,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"After Training, test loss is 51.258384704589844\n"
]
}
],
"source": [
"# [4] Evaluate\n",
"model.eval()\n",
"test_loss = criterion(torch.squeeze(model(X_test)), y_test)\n",
"print('After Training, test loss is {}'.format(test_loss.item()))"
]
}
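,
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3. Extra: making predictions\n",
"* A minimal sketch of using the trained model for prediction: compare a few test predictions against their true targets. It assumes the cells above have already been run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# [5] Predict (illustrative sketch; assumes the trained model, X_test and y_test from above)\n",
"model.eval()\n",
"with torch.no_grad():\n",
"    predictions = model(X_test[:5]).squeeze()\n",
"print('Predictions:', predictions.tolist())\n",
"print('Targets:    ', y_test[:5].tolist())"
]
}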
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}