machine_learning_with_python
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy:87.0%.\n"
]
}
],
"source": [
"from matplotlib import pyplot as plt\n",
"import numpy as np\n",
"from sklearn.datasets import load_iris\n",
"\n",
"data = load_iris()\n",
"features = data.data\n",
"feature_names = data.feature_names\n",
"target = data.target\n",
"target_names = data.target_names\n",
"\n",
"labels = target_names[target]\n",
"\n",
"plength = features[:, 2]\n",
"\n",
"is_setosa = (labels == 'setosa')\n",
"max_setosa = plength[is_setosa].max()\n",
"min_non_setosa = plength[~is_setosa].min()\n",
"# print('Maximum of setosa: {0}.'.format(max_setosa))\n",
"# print('Minimum of others: {0}.'.format(min_non_setosa))\n",
"\n",
"features = features[~is_setosa]\n",
"labels = labels[~is_setosa]\n",
"is_virginica = (labels == 'virginica')\n",
"\n",
"def fit_model(features, labels):\n",
" best_acc = -1.0\n",
" for fi in range(features.shape[1]):\n",
" \n",
" thresh = features[:, fi]\n",
" for t in thresh:\n",
" feature_i = features[:, fi]\n",
" pred = (feature_i > t)\n",
" acc = (pred == labels).mean()\n",
" rev_acc = (pred == ~labels).mean()\n",
" if rev_acc > acc:\n",
" reverse = True\n",
" acc = rev_acc\n",
" else:\n",
" reverse = False\n",
" if acc > best_acc:\n",
" best_acc = acc\n",
" best_fi = fi\n",
" best_t = t\n",
" best_reverse = reverse\n",
" return best_t, best_fi, best_reverse\n",
"\n",
"def predict(model, features):\n",
" t, fi, reverse = model\n",
" if reverse:\n",
" return features[:, fi] <= t\n",
" else:\n",
" return features[:, fi] > t\n",
" \n",
"correct = 0.0\n",
"for ei in range(len(features)):\n",
" training = np.ones(len(features),bool)\n",
" training[ei] = False\n",
" testing = ~training\n",
" model = fit_model(features[training], is_virginica[training])\n",
" predictions = predict(model, features[testing])\n",
" correct += np.sum(predictions == is_virginica[testing])\n",
"acc = correct/float(len(features))\n",
"print('Accuracy:{0:.1%}.'.format(acc))"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Mean accuracy: 93.0%\n"
]
}
],
"source": [
"from sklearn.neighbors import KNeighborsClassifier\n",
"from sklearn.cross_validation import KFold\n",
"\n",
"classifier = KNeighborsClassifier(n_neighbors = 1)\n",
"\n",
"kf = KFold(len(features), n_folds=5, shuffle=True)\n",
"\n",
"means = []\n",
"for training, testing in kf:\n",
" classifier.fit(features[training], labels[training])\n",
" prediction = classifier.predict(features[testing])\n",
" curmean = np.mean(prediction == labels[testing])\n",
" means.append(curmean)\n",
"\n",
"print('Mean accuracy: {:.1%}'.format(np.mean(means)))"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Mean accuracy: 94.0%\n"
]
}
],
"source": [
"from sklearn.pipeline import Pipeline\n",
"from sklearn.preprocessing import StandardScaler\n",
"\n",
"classifier = KNeighborsClassifier(n_neighbors = 1)\n",
"classifier = Pipeline([('norm', StandardScaler()), ('knn', classifier)])\n",
"\n",
"kf = KFold(len(features), n_folds=5, shuffle=True)\n",
"\n",
"means = []\n",
"for training, testing in kf:\n",
" classifier.fit(features[training], labels[training])\n",
" prediction = classifier.predict(features[testing])\n",
" curmean = np.mean(prediction == labels[testing])\n",
" means.append(curmean)\n",
"\n",
"print('Mean accuracy: {:.1%}'.format(np.mean(means)))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": false
},
"outputs": [
{
"ename": "SyntaxError",
"evalue": "invalid syntax (<ipython-input-6-3d88389e28e4>, line 10)",
"output_type": "error",
"traceback": [
"\u001b[0;36m File \u001b[0;32m\"<ipython-input-6-3d88389e28e4>\"\u001b[0;36m, line \u001b[0;32m10\u001b[0m\n\u001b[0;31m return lambda doc: (english_stemmer.stem(w)) for w in analyzer(doc)\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
]
}
],
"source": [
"import os\n",
"import sys\n",
"import scipy as sp\n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"import nltk.stem\n",
"english_stemmer = nltk.stem.SnowballStemmer('english')\n",
"class StemmedCountVectorizer(CountVectorizer):\n",
" def build_analyzer(self):\n",
" analyzer = super(StemmedCountVectorizer, self).build_analyzer()\n",
" return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n",
" \n",
"\n",
"vectorizer = StemmedCountVectorizer(min_df=1, stop_words='english')\n",
"DIR = \"/home/zym/learn/machine_learning/BuildingMachineLearningSystemsWithPython/ch03/data/toy\"\n",
"posts = [open(os.path.join(DIR, f)).read() for f in os.listdir(DIR)]\n",
"X_train = vectorizer.fit_transform(posts)\n",
"num_samples, num_features = X_train.shape\n",
"\n",
"# print(\"#samples: %d, #features: %d\" % (num_samples, num_features))\n",
"# print(vectorizer.get_feature_names())\n",
"\n",
"new_post = \"imaging databases\"\n",
"new_post_vec = vectorizer.transform([new_post])\n",
"\n",
"def dist_raw(v1, v2):\n",
" delta = v1 - v2\n",
" return sp.linalg.norm(delta.toarray())\n",
"\n",
"def dist_norm(v1, v2):\n",
" v1_normalized = v1/sp.linalg.norm(v1.toarray())\n",
" v2_normalized = v2/sp.linalg.norm(v2.toarray())\n",
" delta = v1_normalized - v2_normalized\n",
" return sp.linalg.norm(delta.toarray())\n",
"\n",
"best_doc = None\n",
"best_dist = sys.maxsize\n",
"best_i = None\n",
"for i, post in enumerate(posts):\n",
" if post == new_post:\n",
" continue\n",
" post_vec = X_train.getrow(i)\n",
" d = dist_norm(post_vec, new_post_vec)\n",
" print('=== Post %i with dist=%.2f: %s' % (i, d, post))\n",
" if d < best_dist:\n",
" best_dist = d\n",
" best_i = i\n",
"\n",
"print(\"Best post is %i with dist=%.2f\" % (best_i, best_dist))\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"#samples: 3529, #features: 4712\n",
"Initialization complete\n",
"Iteration 0, inertia 5899.560\n",
"Iteration 1, inertia 3218.298\n",
"Iteration 2, inertia 3184.333\n",
"Iteration 3, inertia 3164.867\n",
"Iteration 4, inertia 3152.004\n",
"Iteration 5, inertia 3143.111\n",
"Iteration 6, inertia 3136.256\n",
"Iteration 7, inertia 3129.325\n",
"Iteration 8, inertia 3124.567\n",
"Iteration 9, inertia 3121.900\n",
"Iteration 10, inertia 3120.210\n",
"Iteration 11, inertia 3118.627\n",
"Iteration 12, inertia 3117.363\n",
"Iteration 13, inertia 3116.811\n",
"Iteration 14, inertia 3116.588\n",
"Iteration 15, inertia 3116.417\n",
"Iteration 16, inertia 3115.760\n",
"Iteration 17, inertia 3115.374\n",
"Iteration 18, inertia 3115.155\n",
"Iteration 19, inertia 3114.949\n",
"Iteration 20, inertia 3114.515\n",
"Iteration 21, inertia 3113.937\n",
"Iteration 22, inertia 3113.720\n",
"Iteration 23, inertia 3113.548\n",
"Iteration 24, inertia 3113.475\n",
"Iteration 25, inertia 3113.447\n",
"Converged at iteration 25\n",
"['ac', 'birmingham', 'host', 'kingdom', 'nntp', 'sorri', 'test', 'uk', 'unit', 'univers']\n"
]
}
],
"source": [
"import os\n",
"import sys\n",
"import scipy as sp\n",
"from sklearn.feature_extraction.text import TfidfVectorizer\n",
"import nltk.stem\n",
"english_stemmer = nltk.stem.SnowballStemmer('english')\n",
"class StemmedTfidfVectorizer(TfidfVectorizer):\n",
" def build_analyzer(self):\n",
" analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()\n",
" return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))\n",
"\n",
"import sklearn.datasets\n",
"all_data = sklearn.datasets.fetch_20newsgroups(subset='all')\n",
"# print(len(all_data.filenames))\n",
"\n",
"groups = ['comp.graphics', 'comp.os.ms-windows.misc','comp.sys.ibm.pc.hardware', \n",
" 'comp.sys.mac.hardware', 'comp.windows.x','sci.space']\n",
"\n",
"train_data = sklearn.datasets.fetch_20newsgroups(subset='train', categories=groups)\n",
"test_data = sklearn.datasets.fetch_20newsgroups(subset='test', categories=groups)\n",
"\n",
"# print(len(train_data.filenames))\n",
"# print(len(test_data.filenames))\n",
"vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5, stop_words='english', decode_error='ignore')\n",
"vectorized = vectorizer.fit_transform(train_data.data)\n",
"num_samples, num_features = vectorized.shape\n",
"\n",
"print(\"#samples: %d, #features: %d\" % (num_samples, num_features))\n",
"\n",
"num_clusters = 50\n",
"from sklearn.cluster import KMeans\n",
"km = KMeans(n_clusters=num_clusters, init='random', n_init=1, verbose=1, random_state=3)\n",
"km.fit(vectorized)\n",
"\n",
"new_post = \"\"\"\n",
"Disk drive problems. Hi, I have a problem with my hard disk.\n",
"After 1 year it is working only sporadically now.\n",
"tried to format it, but now it doesn’t boot any more.\n",
"Any ideas? Thanks.\n",
"\"\"\"\n",
"\n",
"new_post_vec = vectorizer.transform([new_post])\n",
"new_post_label = km.predict(new_post_vec)[0]\n",
"\n",
"similar_indices = (km.labels_ == new_post_label).nonzero()[0]\n",
"\n",
"similar = []\n",
"for i in similar_indices:\n",
" dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())\n",
" similar.append((dist, train_data.data[i]))\n",
" \n",
"similar = sorted(similar)\n",
"# print(len(similar))\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['ac', 'birmingham', 'host', 'kingdom', 'nntp', 'sorri', 'test', 'uk', 'unit', 'univers']\n"
]
}
],
"source": [
"post_group = zip(train_data.data, train_data.target)\n",
"all = [(len(post[0]), post[0], train_data.target_names[post[1]]) for post in post_group]\n",
"\n",
"graphics = sorted([post for post in all if post[2] == 'comp.graphics'])\n",
"# print(graphics[5])\n",
"\n",
"noise_post = graphics[5][1]\n",
"analyzer = vectorizer.build_analyzer()\n",
"# print(list(analyzer(noise_post)))\n",
"useful = set(analyzer(noise_post)).intersection(vectorizer.get_feature_names())\n",
"\n",
"print(sorted(useful))"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAAg0AAAFkCAYAAACjCwibAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAHjhJREFUeJzt3X+QZWV95/H3B5GZoEvjZjIMRKaiMeCEZF2nBYckEl0M\nRGGNiVtZWqYiUmbVRcrq3dSypqQgsJsYrDAUgppNWImMdoodY6nFyKAkMREQDEMMhnZSiZhGcQZb\noWdqcPg1z/5xTsudy8z0c/vXvdPzflXd6r7Pec6533M4zP30c36llIIkSdJMjuh3AZIk6dBgaJAk\nSVUMDZIkqYqhQZIkVTE0SJKkKoYGSZJUxdAgSZKqGBokSVIVQ4MkSapiaJAkSVV6Cg1JHkyydz+v\nD7XTlyW5Pslkkl1JNiVZ2bWME5PckmR3ku1JrkpieJEkacD1+mX9KmBVx+tXgALc3E6/BjgHeAtw\nBnAC8KnpmdtwsBk4ElgHvA24ALhitisgSZIWR+bywKok1wBvLKWclOQY4HvAeaWUT7fTTwbGgXWl\nlHuSvAH4LHB8KWWy7fNO4APAT5RSnp7b6kiSpIUy68MCSZ4PnA/c0Da9imYE4fbpPqWUbcAEcHrb\ntA64fzowtLYAQ8Aps61FkiQtvCPnMO+v03zZ/1n7/jjgyVLKzq5+O2gOZdD+3LGf6dPTvra/D0ry\n48DZwLeAPXOoWZKkw81y4KeALaWU789lQXMJDRcCny+lbJ+hX2jOe5jJwfqcDXyitjBJkvQc5wOf\nnMsCZhUakqwGXg+8uaN5O3BUkmO6RhtW8uxownbg1K7FHdf+7B6B6PQtgI0bN7JmzZrZlKxZGB0d\nZcOGDf0u47DiNl98bvPF5zZfXOPj46xfvx7a79K5mO1Iw4U0X/KbO9ruBZ4GzgSmT4Q8CVgN3Nn2\nuQv43SQrOs5rOAuYAh44yOftAVizZg1r166dZcnq1dDQkNt7kbnNF5/bfPG5zftmzof3ew4NSUJz\nmeSNpZS90+2llJ1JbgCuTvIosAu4FrijlPLVttttNOHgpiSXAMcDVwLXlVKemtOaSJKkBTWbkYbX\nAycCH9vPtFHgGWATsAy4FbhoemIpZW+Sc4GP0Iw+7AZuBC6bRR2SJGkR9RwaSilfAJ53gGlPABe3\nrwPN/xBwbq+fK0mS+svbN+uARkZG+l3CYcdtvvjc5ovPbX7omtMdIRdLkrXAvffee68nz0iS1IOt\nW7cyPDwMMFxK2TqXZTnSIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqGBkmS\nVMXQIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqGBkmSVMXQIEmSqhgaJElS\nFUODJEmqYmiQJElVDA2SJKnKkf0uQFpsExMTTE5O9ruMKitWrGD16tX9LkOSAEODDjMTExOcfPIa\n9ux5vN+lVFm+/Gi2bRs3OEgaCIYGHVYmJyfbwLARWNPvcmYwzp4965mcnDQ0SBoIhgYdptYAa/td\nhCQdUjwRUpIkVTE0SJKkKoYGSZJUxdAgSZKqGBokSVIVQ4MkSarSc2hIckKSm5JMJnk8ydeSrO3q\nc0WSh9vpX0jysq7pL0ryiSRTSR5N8qdJXjDXlZEkSQunp9CQ5FjgDuAJ4Gyai93/O/BoR59LgPcA\n7wROA3YDW5Ic1bGoT7bzngmcA5wB/PGs10KSJC24Xm/u9D+BiVLKOzra/rWrz3uBK0spnwNI8lvA\nDuDNwM1J1tAEjuFSyn1tn4uBW5L8Till+yzWQ5IkLbBeD0/8R+DvktycZEeSrUl+FCCSvARYBdw+\n3VZK2QncDZzeNq0DHp0ODK0vAgV49SzWQZIkLYJeQ8NLgXcD24CzgI8C1yZZ305fRfPlv6Nrvh3t\ntOk+j3ROLKU8A/ygo48kSRowvR6eOAK4p5Ryafv+a0lOoQkSGw8yX2jCxMHU9JEkSX3Sa2j4LjDe\n1TYO/Eb7+3aaL//j2He0YSVwX0eflZ0LSPI84EU8d4RiH6OjowwNDe3TNjIywsjISP0aSJK0RI2N\njTE2NrZP29TU1Lwtv9fQcAdwclfbybQnQ5ZSHkyyneaqiH8ASHIMzbkK17f97wKOTfLKjvMazqQJ\nG3cf7MM3bNjA2rU+mVCSpP3Z3x/SW7duZXh4eF6W32to2ADckeR9wM00YeAdwG939LkGeH+Sfwa+\nBVwJfBv4DEAp5RtJtgB/kuTdwFHAh4Axr5yQJGlw9RQaSil/l+TXgQ8AlwIPAu8tpfx5R5+rkhxN\nc9+FY4G/Bd5QSnmyY1FvBa6juWpiL7CJ5lJNSZI0oHodaaCUshnYPEOfy4HLDzL9MWD9gaZLkqTB\n47MnJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqGBkmSVMXQIEmSqhgaJElSFUODJEmq\nYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqGBkmSVMXQIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmK\noUGSJFUxNEiSpCqGBkmSVMXQIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqG\nBkmSVMXQIEmSqhgaJElSFUODJEmq0lNoSHJZkr1drwc6pi9Lcn2SySS7kmxKsrJrGScmuSXJ7iTb\nk1yVxPAiSdKAO3IW83wdOBNI+/7pjmnXAG8A3gLsBK4HPgW8BqANB5uBh4F1wAnATcCTwPtnUYsk\nSVokswkNT5dSvtfdmOQY4ELgvFLKl9q2twPjSU4rpdwDnA28HHhdKWUSuD/JpcAHklxeSnm6e7mS\nJGkwzOawwM8k+U6Sf0myMcmJbfswTQi5fbpjKWUbMAGc3jatA+5vA8O0LcAQcMosapEkSYuk19Dw\nFeACmhGDdwEvAf4myQuAVcCTpZSdXfPsaKfR/tyxn+l09JEkSQOop8MTpZQtHW+/nuQe4F+B3wT2\nHGC2AKVm8TN1GB0dZWhoaJ+2kZERRkZGKhYvSdLSNjY2xtjY2D5tU1NT87b82ZzT8COllKkk/wS8\nDPgicFSSY7pGG1by7GjCduDUrsUc1/7sHoF4jg0bNrB27dq5lCxJ0pK1vz+kt27dyvDw8Lwsf06X\nOiZ5IfDTNFdD3EtzJcWZHdNPAlYDd7ZNdwE/n2RFx2LOAqaAB5AkSQOrp5GGJB8EPkdzSOIngd+j\nCQp/XkrZmeQG4OokjwK7gGuBO0opX20XcRtNOLgpySXA8cCVwHWllKfmY4UkSdLC6PXwxIuBTwI/\nDnwP+DKwrpTy/Xb6KPAMsAlYBtwKXDQ9cyllb5JzgY/QjD7sBm4ELpv9KkiSpMXQ64mQBz3jsJTy\nBHBx+zpQn4eAc3v5XEmS1H/evlmSJFUxNEiSpCqGBkmSVMXQIEmSqhgaJElSFUODJEmqYmiQJElV\nDA2SJKmKoUGSJFUxNEiSpCqGBkmSVMX
QIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUx\nNEiSpCqGBkmSVMXQIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFUxNEiSpCqGBkmSVMXQ\nIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFWZU2hI8r4ke5Nc3dG2LMn1SSaT7EqyKcnK\nrvlOTHJLkt1Jtie5KokBRpKkATbrL+okpwK/DXyta9I1wDnAW4AzgBOAT3XMdwSwGTgSWAe8DbgA\nuGK2tUiSpIU3q9CQ5IXARuAdwGMd7ccAFwKjpZQvlVLuA94O/GKS09puZwMvB84vpdxfStkCXApc\nlOTI2a+KJElaSLMdabge+Fwp5S+72l9FM4Jw+3RDKWUbMAGc3jatA+4vpUx2zLcFGAJOmWU9kiRp\ngfX8l32S84B/TxMQuh0HPFlK2dnVvgNY1f6+qn3fPX16WvfhDkmSNAB6Cg1JXkxzzsKvlFKe6mVW\noFT0O2if0dFRhoaG9mkbGRlhZGSkh1IkSVqaxsbGGBsb26dtampq3pbf60jDMPATwL1J0rY9Dzgj\nyXuAXwWWJTmma7RhJc+OJmwHTu1a7nHtz+4RiH1s2LCBtWvX9liyJEmHh/39Ib1161aGh4fnZfm9\nntPwReDnaQ5PvKJ9/R3NSZHTvz8FnDk9Q5KTgNXAnW3TXcDPJ1nRsdyzgCnggd5XQZIkLYaeRhpK\nKbvp+mJPshv4fillvH1/A3B1kkeBXcC1wB2llK+2s9zWLuOmJJcAxwNXAtf1eMhDkiQtovm4xLH7\nPIRR4BlgE7AMuBW46EedS9mb5FzgIzSjD7uBG4HL5qEWSZK0QOYcGkop/6Hr/RPAxe3rQPM8BJw7\n18+WJEmLx1s3S5KkKt6BUfNiYmKCycnJmTv22fj4eL9LkKRDlqFBczYxMcHJJ69hz57H+12KJGkB\nGRo0Z5OTk21g2Ais6Xc5M9hM86gTSVKvDA2aR2uAQb/5locnJGm2PBFSkiRVMTRIkqQqhgZJklTF\n0CBJkqoYGiRJUhVDgyRJqmJokCRJVQwNkiSpiqFBkiRVMTRIkqQqhgZJklTF0CBJkqoYGiRJUhVD\ngyRJqmJokCRJVQwNkiSpiqFBkiRVMTRIkqQqR/a7AEkHNz4+3u8SZrRixQpWr17d7zIkLTBDgzSw\nvgscwfr16/tdyIyWLz+abdvGDQ7SEmdokAbWY8BeYCOwps+1HMw4e/asZ3Jy0tAgLXGGBmngrQHW\n9rsISfJESEmSVMfQIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKlKT6EhybuSfC3JVPu6M8mvdkxf\nluT6JJNJdiXZlGRl1zJOTHJLkt1Jtie5KonhRZKkAdfrl/VDwCXAcPv6S+AzSabvPHMNcA7wFuAM\n4ATgU9Mzt+FgM839IdYBbwMuAK6Y9RpIkqRF0dPNnUopt3Q1vT/Ju4F1Sb4DXAicV0r5EkCStwPj\nSU4rpdwDnA28HHhdKWUSuD/JpcAHklxeSnl6riskSZIWxqwPCyQ5Isl5wNHAXTQjD0cCt0/3KaVs\nAyaA09umdcD9bWCYtgUYAk6ZbS2SJGnh9Rwakvxckl3AE8CHgV8vpXwDWAU8WUrZ2TXLjnYa7c8d\n+5lORx9JkjSAZvPsiW8ArwCOpTl34eNJzjhI/wClYrk1fSRJUp/0HBra8w6+2b7dmuQ04L3AzcBR\nSY7pGm1YybOjCduBU7sWeVz7s3sE4jlGR0cZGhrap21kZISRkZHeVkKSpCVobGyMsbGxfdqmpqbm\nbfnz8ZTLI4BlwL3A08CZwKcBkpwErAbubPveBfxukhUd5zWcBUwBD8z0QRs2bGDtWp/2J0nS/uzv\nD+mtW7cyPDw8L8vvKTQk+d/A52kuvfw3wPnALwNnlVJ2JrkBuDrJo8Au4FrgjlLKV9tF3EYTDm5K\ncglwPHAlcF0p5an5WCFJkrQweh1pOA74OM2X/RTwDzSB4S/b6aPAM8AmmtGHW4GLpmcupexNci7w\nEZrRh93AjcBls18FSZK0GHq9T8M7Zpj+BHBx+zpQn4eAc3v5XEmS1H/evlmSJFUxNEiSpCqGBkmS\nVMXQIEmSqhgaJElSFUODJEmqMh93hNQCmZiYYHJycuaOfTY+Pt7vEiRJi8DQMKAmJiY4+eQ17Nnz\neL9LkSQJMDQMrMnJyTYwbATW9LucGWwGLu13EZKkBWZoGHhrgEF/SJeHJyTpcOCJkJIkqYqhQZIk\nVTE0SJKkKoYGSZJUxdAgSZKqGBokSVIVQ4MkSapiaJAkSVUMDZIkqYqhQZIkVTE0SJKkKoYGSZJU\nxdAgSZKqGBokSVIVH40taV6Mjx8aj0hfsWIFq1ev7ncZ0iHJ0CBpjr4LHMH69ev7XUiV5cuPZtu2\ncYODNAuGBklz9BiwF9gIrOlzLTMZZ8+e9UxOThoapFkwNEiaJ2uAtf0uQtIC8kRISZJUxdAgSZKq\nGBokSVIVQ4MkSapiaJAkSVV6Cg1J3pfkniQ7k+xI8ukkJ3X1WZbk+iSTSXYl2ZRkZVefE5PckmR3\nku1JrkpigJEkaYD1+kX9GuBDwKuB1wPPB25L8mMdfa4BzgHeApwBnAB8anpiGw4201zuuQ54G3AB\ncMWs1kCSJC2Knu7TUEp5Y+f7JBcAjwDDwJeTHANcCJxXSvlS2+ftwHiS00op9wBnAy8HXldKmQTu\nT3Ip8IEkl5dSnp7rSkmSpPk310MCxwIF+EH7fpgmiNw+3aGUsg2YAE5vm9YB97eBYdoWYAg4ZY71\nSJKkBTLr0JAkNIcivlxKeaBtXgU8WUrZ2dV9Rzttus+O/Uyno48kSRowc7mN9IeBnwV+qaJvaEYk\nZlLTR5Ik9cGsQkOS64A3Aq8ppTzcMWk7cFSSY7pGG1by7GjCduDUrkUe1/7sHoHYx+joKENDQ/u0\njYyMMDIy0uMaSJK09IyNjTE2NrZP29TU1Lwtv+fQ0AaGXwN+uZQy0TX5XuBp4Ezg023/k4DVwJ1t\nn7uA302youO8hrOAKeABDmLDhg2sXesDcSRJ2p/9/SG9detWhoeH52X5PYWGJB8GRoA3AbuTTI8Q\nTJVS9pRSdia5Abg6yaPALuBa4I5SylfbvrfRhIObklwCHA9cCVxXSnlq7qskSZIWQq8jDe+iOe/g\nr7va3w58vP19FHgG2AQsA24FLpruWErZm+Rc4CM0ow+7gRuBy3qsRZIkLaJe79Mw49UWpZQngIvb\n14H6PASc28tnz5cf/vCHPPHEE/346J7s2rWr3yVIkrSPuVw9cciZnJzklFNewSOPPDxzZ0mStI/D\nKjQ88sgjbWD4XzQ3pRxkf05zhEeSpMFwWIWGZ70O+IV+FzGDf+h3AZIk7cMnS0qSpCqGBkmSVMXQ\nIEmSqhgaJElSFUODJEmqYmiQJElVDA2SJKmKoUGSJFU5TG/uJOlwNj4+3u8SqqxYsYLVq1f3uwzp
\nRwwNkg4j3wWOYP369f0upMry5Uezbdu4wUEDw9Ag6TDyGLAX2Ais6XMtMxlnz571TE5OGho0MAwN\nkg5Da4C1/S5COuR4IqQkSapiaJAkSVUMDZIkqYqhQZIkVTE0SJKkKoYGSZJUxdAgSZKqGBokSVIV\nQ4MkSapiaJAkSVUMDZIkqYqhQZIkVTE0SJKkKoYGSZJUxdAgSZKqGBokSVIVQ4MkSapiaJAkSVUM\nDZIkqUrPoSHJa5J8Nsl3kuxN8qb99LkiycNJHk/yhSQv65r+oiSfSDKV5NEkf5rkBXNZEUmStLBm\nM9LwAuDvgYuA0j0xySXAe4B3AqcBu4EtSY7q6PZJYA1wJnAOcAbwx7OoRZIkLZIje52hlHIrcCtA\nkuyny3uBK0spn2v7/BawA3gzcHOSNcDZwHAp5b62z8XALUl+p5SyfVZrIkmSFtS8ntOQ5CXAKuD2\n6bZSyk7gbuD0tmkd8Oh0YGh9kWbU4tXzWY8kSZo/830i5CqaL/8dXe072mnTfR7pnFhKeQb4QUcf\nSZI0YHo+PDFLYT/nP/TaZ3R0lKGhoX3aRkZGGBkZmVt1kiQtAWNjY4yNje3TNjU1NW/Ln+/QsJ3m\ny/849h1tWAnc19FnZedMSZ4HvIjnjlDsY8OGDaxdu3beipUkaSnZ3x/SW7duZXh4eF6WP6+HJ0op\nD9KEgjOn25IcQ3Ouwp1t013AsUle2THrmTRh4+75rEeSJM2fnkca2vspvIzmSx7gpUleAfyglPIQ\ncA3w/iT/DHwLuBL4NvAZgFLKN5JsAf4kybuBo4APAWNeOSFJ0uCazeGJVwF/RXP+QQH+qG3/M+DC\nUspVSY6mue/CscDfAm8opTzZsYy3AtfRXDWxF9hEc6mmJEkaULO5T8OXmOGwRinlcuDyg0x/DFjf\n62dLkqT+8dkTkiSpiqFBkiRVMTRIkqQqhgZJklTF0CBJkqoYGiRJUhVDgyRJqmJokCRJVQwNkiSp\niqFBkiRVMTRIkqQqhgZJklTF0CBJkqoYGiRJUhVDgyRJqmJokCRJVQwNkiSpiqFBkiRVMTRIkqQq\nhgZJklTF0CBJkqoYGiRJUhVDgyRJqnJkvwuQJB3Y+Ph4v0uosmLFClavXt3vMrTADA2SNJC+CxzB\n+vXr+11IleXLj2bbtnGDwxJnaJCkgfQYsBfYCKzpcy0zGWfPnvVMTk4aGpY4Q4MkDbQ1wNp+FyEB\nnggpSZIqGRokSVIVQ4MkSapiaJAkSVUMDZIkqYqhQQcx1u8CDkNu88XnNl9sY2Nu80NV3y65THIR\n8DvAKuBrwMWllK/2qx7tzxgw0u8iDjNu88XnNp8vtXev/OhHP8rJJ5+8wNUcmHevnL2+hIYk/xn4\nI+C/APcAo8CWJCeVUib7UZMkabZ6v3vl8PDwwpUzA+9eOXv9GmkYBf64lPJxgCTvAs4BLgSu6lNN\nkqRZ6fXulaPAhgWt6MC8e+VcLHpoSPJ8YBj4/em2UkpJ8kXg9MWuR5I0X2rvXjlU2W/hHAoPAhvE\nwyj9GGlYATwP2NHVvgM40EGu5TD3/8jf/OY3298+A3x9TstaePe2PzcD/dq5vw18oqLfHe3PftZa\na9Br7dzmg17rtEOlTth/rbX7+WI71LfrwfRzm98H5JB4ENhRRy3nL/5iE8cff/ycltPx3bl8rjWl\nlDLXZfT2gcnxwHeA00spd3e0XwX8UinlF/Yzz1sZzP+rJUk6VJxfSvnkXBbQj5GGSeAZ4Liu9pU8\nd/Rh2hbgfOBbwJ4Fq0ySpKVnOfBTNN+lc7LoIw0ASb4C3F1KeW/7PsAEcG0p5YOLXpAkSZpRv66e\nuBr4syT38uwll0cDN/apHkmSNIO+hIZSys1JVgBX0Bym+Hvg7FLK9/pRjyRJmllfDk9IkqRDj8+e\nkCRJVQwNkiSpysCHhiQXJXkwyQ+TfCXJqf2uaSlLclmSvV2vB/pd11KS5DVJPpvkO+32fdN++lyR\n5OEkjyf5QpKX9aPWpWKmbZ7kY/vZ7zf3q95DXZL3Jbknyc4kO5J8OslJXX2WJbk+yWSSXUk2JVnZ\nr5oPdZXb/K+79vFnkny4l88Z6NDQ8WCry4BX0jwNc0t7EqUWztdpTlBd1b5+qb/lLDkvoDn59yLg\nOScVJbkEeA/wTuA0YDfNfn/UYha5xBx0m7c+z777vY++nL3XAB8CXg28Hng+cFuSH+vocw3NM4fe\nApwBnAB8apHrXEpqtnkB/g/P7ufHA/+jlw8Z6BMhD3A/h4do7ufgg60WQJLLgF8rpfT3xvCHiSR7\ngTeXUj7b0fYw8MFSyob2/TE0Nz57Wynl5v5UunQcYJt/DBgqpfxG/ypbuto/9B4BziilfLndp78H\nnFdK+XTb52Sae1CvK6Xc079ql4bubd62/RVwXynlv812uQM70tDxYKvbp9tKk3B8sNXC+5l2GPdf\nkmxMcmK/CzpcJHkJzV8Anfv9TuBu3O8X2mvbYd1vJPlwkn/b74KWkGNp/sr9Qft+mOaS/879fBvN\nTf7cz+dH9zafdn6S7yW5P8nvd41EzKhfN3eqMZsHW2nuvgJcAGyjGbq6HPibJD9XStndx7oOF6to\n/kff336/avHLOWx8nmZo/EHgp4E/ADYnOb0M8nDsIaAdIb4G+HIpZfr8qFXAk20g7uR+Pg8OsM2h\neYbTvwIPA/8OuAo4CfhPtcse5NBwIOHAxyQ1R6WUznuTfz3JPTQ72W8CH+tPVcL9fkF1Hfb5xyT3\nA/8CvBb4q74UtXR8GPhZ6s6Ncj+fH9Pb/Bc7G0spf9rx9h+TbAe+mOQlpZQHaxY8sIcnmN2DrTTP\nSilTwD8Bnr2/OLbT/MPpft9H7T+gk7jfz0mS64A3Aq8tpTzcMWk7cFR7bkMn9/M56trm352h+900\n/95U7+cDGxpKKU8B9wJnTre1Qy5nAnf2q67DTZIX0gzXzrTzaR60X1bb2Xe/P4bmjGj3+0WS5MXA\nj+N+P2vtl9evAa8rpUx0Tb4XeJp99/OTgNXAXYtW5BIzwzbfn1fSjOxU7+eDfnjCB1stsiQfBD5H\nc0jiJ4Hfo/mfe6yfdS0lSV5Ak+zTNr00ySuAH5RSHqI5Fvn+JP9M8zj4K4FvA5/pQ7lLwsG2efu6\njOachu1tvz+kGWGb86OED0fttf8jwJuA3UmmR86mSil7Sik7k9wAXJ3kUWAXcC1wh1dOzM5M2zzJ\nS4G3ApuB7wOvoPmO/VIp5evVH1RKGegX8F9p/uH8IU0CfVW/a1rKL5pw8O12e08AnwRe0u+6ltIL\n+GVgL83ht87X/+3ocznNyUqP03xxvazfdR/Kr4Ntc2A5cCtNYNgDfBP4CPAT/a77UH0dYFs/A/xW\nR59lNPcVmKQJDf8PWNnv2g/V10zbHHgx8Nc0l7o+TnOy+x8AL+zlcwb6Pg2SJGlwDOw5DZIkabAY\nGiRJUhVDgyRJqmJokCRJVQwNkiSpiqFBkiRVMTRIkqQ
qhgZJklTF0CBJkqoYGiRJUhVDgyRJqvL/\nASbi2o2juOxoAAAAAElFTkSuQmCC\n",
"text/plain": [
"<matplotlib.figure.Figure at 0x7f7d54fd8080>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import os\n",
"from matplotlib import pyplot as plt\n",
"from gensim import corpora, models\n",
"DIR = '/home/zym/learn/machine_learning/BuildingMachineLearningSystemsWithPython/ch04/ap'\n",
"corpus = corpora.BleiCorpus(os.path.join(DIR, 'ap.dat'), os.path.join(DIR, 'vocab.txt'))\n",
"\n",
"model = models.ldamodel.LdaModel(corpus, num_topics=100,id2word=corpus.id2word)\n",
"doc = corpus.docbyoffset(0)\n",
"topics = model[doc]\n",
"# print(topics)\n",
"\n",
"num_topics_used = [len(model[doc]) for doc in corpus]\n",
"plt.hist(num_topics_used)\n",
"plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Topic #0: \n",
"windows thanks file edu use drive software mail help does card pc know hi using need program problem looking window\n",
"Topic #1: \n",
"dog attack head drive talking human maybe disk printer drivers window problem mac advance thanks use hard work ii windows\n",
"Topic #2: \n",
"god accept read clock driving stuff nature port think error hard usually 60 computer faith know following limited peter control\n",
"Topic #3: \n",
"pp 22 18 19 11 23 26 55 van 10 31 44 27 21 38 34 48 24 armenian 37\n",
"Topic #4: \n",
"2nd math ground value said leafs display long try lunar launch monitor space version shuttle luck computer door probe moon\n",
"Topic #5: \n",
"cases 00 soon edu sale condition effective asking consider good general john stuff nhl card san cd deleted end hear\n",
"Topic #6: \n",
"mike love graphics hear heard try state time looking good performance windows marriage os crime company parts program comp men\n",
"Topic #7: \n",
"assume magi home order card right lot supposed hit better thing especially week record require use ground important course today\n",
"Topic #8: \n",
"just don people like think know good god time new does way say right make did really want year use\n",
"Topic #9: \n",
"wanted 1992 gm season 1993 wouldn vs john 30 price st want condition just excellent sale wrong file 10 really\n",
"\n"
]
}
],
"source": [
"from __future__ import print_function\n",
"from time import time\n",
"\n",
"from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer\n",
"from sklearn.decomposition import LatentDirichletAllocation\n",
"from sklearn.datasets import fetch_20newsgroups\n",
"\n",
"n_samples = 2000\n",
"n_features = 1000\n",
"n_topics = 10\n",
"n_top_words = 20\n",
"\n",
"def print_top_words(model, feature_names, n_top_words):\n",
" for topic_idx, topic in enumerate(model.components_):\n",
" print(\"Topic #%d: \" % topic_idx)\n",
" print(\" \".join([feature_names[i]\n",
" for i in topic.argsort()[:-n_top_words - 1: -1]]))\n",
" print()\n",
" \n",
"dataset = fetch_20newsgroups(shuffle=True, random_state=1,\n",
" remove=('headers', 'footers', 'quotes'))\n",
"data_samples = dataset.data[:n_samples]\n",
"tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n",
" max_features=n_features,\n",
" stop_words='english')\n",
"tfidf = tfidf_vectorizer.fit_transform(data_samples)\n",
"\n",
"\n",
"lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,\n",
" learning_method='online',\n",
" learning_offset=50.,\n",
" random_state=0)\n",
"\n",
"lda.fit(tfidf)\n",
"\n",
"tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n",
"print_top_words(lda, tfidf_feature_names, n_top_words)\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.1+"
}
},
"nbformat": 4,
"nbformat_minor": 0
}