
@beader
Created January 16, 2018 06:05
NTU Machine Learning 2017 Fall - Assignment 2
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"X_TRAIN_FILE = './X_train'\n",
"Y_TRAIN_FILE = './Y_train'\n",
"X_TEST_FILE = './X_test'\n",
"X_TEST_ANS = './correct_answer.csv'"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"x_train = pd.read_csv(X_TRAIN_FILE, header=0).values\n",
"y_train = pd.read_csv(Y_TRAIN_FILE, header=0).label.values\n",
"x_test = pd.read_csv(X_TEST_FILE, header=0).values\n",
"y_test = pd.read_csv(X_TEST_ANS, header=0).label.values"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"scrolled": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(32561, 106)\n",
"(32561,)\n",
"(16281, 106)\n"
]
}
],
"source": [
"print(x_train.shape)\n",
"print(y_train.shape)\n",
"print(x_test.shape)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def accuracy_score(y_true, y_pred):\n",
" return np.mean(y_true == y_pred)\n",
"\n",
"def train_valid_split(X, y, valid_size=0.2):\n",
" assert len(X) == len(y)\n",
" valid_size = int(len(X) * valid_size) if valid_size < 1 else int(valid_size)\n",
" random_indexer = np.arange(len(X))\n",
" np.random.shuffle(random_indexer)\n",
" return (X[random_indexer][valid_size:], y[random_indexer][valid_size:],\n",
" X[random_indexer][:valid_size], y[random_indexer][:valid_size])\n",
"\n",
"def normalize(*Xs):\n",
" X_all = np.concatenate(Xs)\n",
" mu = X_all.mean(axis=0, keepdims=True)\n",
" sigma = X_all.std(axis=0, keepdims=True)\n",
" Xs = tuple((X - mu) / sigma for X in Xs)\n",
" return Xs if len(Xs) > 1 else Xs[0]\n",
"\n",
"class LogisticClassifier:\n",
" \n",
" def __init__(self, lr=0.1, batch_size=64, n_epoch=100, l1=0, l2=0):\n",
" self._batch_size = batch_size\n",
" self._lr = lr\n",
" self._l1 = l1\n",
" self._l2 = l2\n",
" self._n_epoch = n_epoch\n",
" \n",
" def _shuffle(self, X, y):\n",
" assert len(X) == len(y)\n",
" indexer = np.arange(len(X))\n",
" np.random.shuffle(indexer)\n",
" return X[indexer], y[indexer]\n",
" \n",
" def _sigmoid(self, z):\n",
" z = np.clip(z, -18, 18)\n",
" return 1 / (1 + np.exp(-z))\n",
" \n",
" def _init_weights(self, dim):\n",
" return np.zeros((dim, )), np.zeros((1,))\n",
" \n",
" def _gen_batch(self, X, y):\n",
" batch_size = self._batch_size\n",
" num_steps = len(X) // batch_size\n",
" for step in range(num_steps):\n",
" yield (X[step * batch_size: (step + 1) * batch_size],\n",
" y[step * batch_size: (step + 1) * batch_size])\n",
" \n",
" def fit(self, X, y, report_every_n_epoch=1):\n",
" print('training logistic classfier, bath_size=%d, learning_rate=%.2f' % \n",
" (self._batch_size, self._lr))\n",
" w, b = self._init_weights(X.shape[1])\n",
" for epoch in range(1, self._n_epoch + 1):\n",
" X_train, y_train = self._shuffle(X, y)\n",
" step = 0\n",
" tot_loss = 0.0\n",
" for input_x, input_y in self._gen_batch(X_train, y_train):\n",
" hypo = self._sigmoid(np.dot(input_x, w) + b)\n",
" cross_ent_loss = -( np.dot(input_y, np.log(hypo)) +\n",
" np.dot(1 - input_y, np.log(1 - hypo)) )\n",
" tot_loss += cross_ent_loss\n",
" w_grad = np.mean(-1 * (input_y - hypo)[:, np.newaxis] * input_x, axis=0)\n",
" w_grad = w_grad + self._l1 * np.sign(w) + self._l2 * w\n",
" b_grad = np.mean(-1 * (input_y - hypo))\n",
" b_grad = b_grad + self._l1 * np.sign(b) + self._l2 * b\n",
"\n",
" w = w - self._lr * w_grad\n",
" b = b - self._lr * b_grad\n",
" step += 1\n",
" if epoch % report_every_n_epoch == 0:\n",
" print('epoch %d, loss: %.5f' % (epoch, tot_loss / step / self._batch_size))\n",
" print('finish!')\n",
" self.w_ = w\n",
" self.b_ = b\n",
" \n",
" def predict(self, X):\n",
" probs = self.predict_prob(X)\n",
" y_ = np.around(probs)\n",
" return y_\n",
" \n",
" def predict_prob(self, X):\n",
" logit = np.dot(X, self.w_) + self.b_\n",
" probs = self._sigmoid(logit)\n",
" return probs"
]
},
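{
"cell_type": "markdown",
"metadata": {},
"source": [
"For reference, `fit` runs mini-batch gradient descent on the cross-entropy. With $\\hat{y} = \\sigma(Xw + b)$ and batch size $m$, each update uses\n",
"\n",
"$$\\nabla_w = \\frac{1}{m} X^\\top (\\hat{y} - y) + \\lambda_1\\,\\operatorname{sign}(w) + \\lambda_2 w, \\qquad \\nabla_b = \\frac{1}{m} \\sum_{i=1}^{m} (\\hat{y}_i - y_i),$$\n",
"\n",
"where $\\lambda_1$ and $\\lambda_2$ are the `l1` / `l2` constructor arguments (the same penalty is also applied to $b$). The reported loss is the plain average cross-entropy $-\\frac{1}{m} \\sum_i \\big[ y_i \\log \\hat{y}_i + (1 - y_i) \\log (1 - \\hat{y}_i) \\big]$, without the penalty terms."
]
},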
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"x_train, x_test = normalize(x_train, x_test)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"tr_x, tr_y, va_x, va_y = train_valid_split(x_train, y_train, valid_size=0.2)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"lr = LogisticClassifier(n_epoch=200, l1=0.001)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training logistic classfier, bath_size=64, learning_rate=0.10\n",
"epoch 10, loss: 0.32192\n",
"epoch 20, loss: 0.32200\n",
"epoch 30, loss: 0.32155\n",
"epoch 40, loss: 0.32184\n",
"epoch 50, loss: 0.32182\n",
"epoch 60, loss: 0.32154\n",
"epoch 70, loss: 0.32161\n",
"epoch 80, loss: 0.32151\n",
"epoch 90, loss: 0.32176\n",
"epoch 100, loss: 0.32141\n",
"epoch 110, loss: 0.32159\n",
"epoch 120, loss: 0.32187\n",
"epoch 130, loss: 0.32138\n",
"epoch 140, loss: 0.32162\n",
"epoch 150, loss: 0.32190\n",
"epoch 160, loss: 0.32192\n",
"epoch 170, loss: 0.32174\n",
"epoch 180, loss: 0.32151\n",
"epoch 190, loss: 0.32153\n",
"epoch 200, loss: 0.32167\n",
"finish!\n"
]
}
],
"source": [
"lr.fit(tr_x, tr_y, report_every_n_epoch=10)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.85270067948865602"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(lr.predict(tr_x), tr_y)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.85488329238329241"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(lr.predict(va_x), va_y)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training logistic classfier, bath_size=64, learning_rate=0.10\n",
"epoch 10, loss: 0.32098\n",
"epoch 20, loss: 0.32188\n",
"epoch 30, loss: 0.32071\n",
"epoch 40, loss: 0.32135\n",
"epoch 50, loss: 0.32141\n",
"epoch 60, loss: 0.32119\n",
"epoch 70, loss: 0.32086\n",
"epoch 80, loss: 0.32151\n",
"epoch 90, loss: 0.32132\n",
"epoch 100, loss: 0.32113\n",
"epoch 110, loss: 0.32154\n",
"epoch 120, loss: 0.32149\n",
"epoch 130, loss: 0.32112\n",
"epoch 140, loss: 0.32124\n",
"epoch 150, loss: 0.32143\n",
"epoch 160, loss: 0.32121\n",
"epoch 170, loss: 0.32095\n",
"epoch 180, loss: 0.32097\n",
"epoch 190, loss: 0.32152\n",
"epoch 200, loss: 0.32079\n",
"finish!\n"
]
}
],
"source": [
"lr.fit(x_train, y_train, report_every_n_epoch=10)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"preds = lr.predict(x_test)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.8519132731404705"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy_score(preds, y_test)"
]
}
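,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The model sits at roughly 85% accuracy on the training split, the held-out validation split and the provided test answers, so the L1-regularized logistic classifier does not appear to overfit."
]
}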
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}