Created
December 26, 2016 15:00
-
-
Save mrlonely001/e600aeb25c5e73e2e1bff84088deca0b to your computer and use it in GitHub Desktop.
百度百科词条爬虫 (Baidu Baike encyclopedia entry crawler)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf8 | |
import urllib2 | |
class HtmlDownloader(object):
    """Fetches the raw HTML body of a page via urllib2."""

    def download(self, url):
        """Return the page content for *url*.

        Returns None when *url* is None or when the server answers with
        a non-200 status code.
        """
        if url is None:
            return None
        resp = urllib2.urlopen(url)
        if resp.getcode() == 200:
            return resp.read()
        return None
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf8 | |
class HtmlOutputer(object):
    """Accumulates crawled records and renders them as a table in output.html."""

    def __init__(self):
        # Each record is a dict with 'url', 'title' and 'summary' keys.
        self.datas = []

    def collect_data(self, data):
        """Store one parsed record; a None record is silently ignored."""
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        """Write every collected record as a table row in output.html.

        Fixes over the original:
        - the file handle was never closed, so on Python 2 the buffered
          writes could be lost; ``with`` now guarantees flush/close.
        - a <head> charset declaration is emitted so browsers decode the
          UTF-8 Chinese text correctly.
        """
        with open('output.html', 'w') as fout:
            fout.write("<html>")
            fout.write('<head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head>')
            fout.write("<body>")
            fout.write("<table>")
            for data in self.datas:
                fout.write('<tr>')
                # encode() turns the unicode fields into UTF-8 byte strings
                # so they can be written to the byte-mode file (Python 2).
                fout.write('<td>%s</td>' % data['url'].encode('utf-8'))
                fout.write('<td>%s</td>' % data['title'].encode('utf-8'))
                fout.write('<td>%s</td>' % data['summary'].encode('utf-8'))
                fout.write('</tr>')
            fout.write("</table>")
            fout.write("</body>")
            fout.write("</html>")
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf8 | |
import bs4,urlparse,re | |
from bs4 import BeautifulSoup | |
class HtmlParser(object):
    """Extracts follow-up links plus the title/summary of a Baidu Baike page."""

    def _get_new_urls(self, page_url, soup):
        """Collect absolute URLs of every in-site entry link (/view/<id>.htm)."""
        pattern = re.compile(r"/view/\d+\.htm")
        found = set()
        for anchor in soup.find_all('a', href=pattern):
            found.add(urlparse.urljoin(page_url, anchor['href']))
        return found

    def _get_new_data(self, page_url, soup):
        """Build a record dict holding the page URL, entry title and summary.

        Title markup:   <dd class="lemmaWgt-lemmaTitle-title"><h1>...</h1></dd>
        Summary markup: <div class="lemma-summary">...</div>
        """
        title_tag = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        summary_tag = soup.find('div', class_='lemma-summary')
        return {
            'url': page_url,
            'title': title_tag.get_text(),
            'summary': summary_tag.get_text(),
        }

    def parse(self, page_url, html_cont):
        """Parse one downloaded page.

        Returns a (new_urls, record) pair, or None when either argument
        is missing.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        return self._get_new_urls(page_url, soup), self._get_new_data(page_url, soup)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
<html> | |
<head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head> | |
<body><table><tr><td>http://baike.baidu.com/item/Python</td><td>Python</td><td> | |
Python[1] | |
(英国发音:/ˈpaɪθən/ 美国发音:/ˈpaɪθɑːn/), 是一种面向对象的解释型计算机程序设计语言,由荷兰人Guido van Rossum于1989年发明,第一个公开发行版发行于1991年。Python是纯粹的自由软件, 源代码和解释器CPython遵循 GPL(GNU General Public License)协议[2] | |
。Python语法简洁清晰,特色之一是强制用空白符(white space)作为语句缩进。Python具有丰富和强大的库。它常被昵称为胶水语言,能够把用其他语言制作的各种模块(尤其是C/C++)很轻松地联结在一起。常见的一种应用情形是,使用Python快速生成程序的原型(有时甚至是程序的最终界面),然后对其中[3] | |
有特别要求的部分,用更合适的语言改写,比如3D游戏中的图形渲染模块,性能要求特别高,就可以用C/C++重写,而后封装为Python可以调用的扩展类库。需要注意的是在您使用扩展类库时可能需要考虑平台问题,某些可能不提供跨平台的实现。 | |
</td></tr><tr><td>http://baike.baidu.com/view/16068.htm</td><td>api</td><td> | |
API(Application Programming Interface,应用程序编程接口)是一些预先定义的函数,目的是提供应用程序与开发人员基于某软件或硬件得以访问一组例程的能力,而又无需访问源码,或理解内部工作机制的细节。 | |
</td></tr><tr><td>http://baike.baidu.com/view/53557.htm</td><td>系统资源</td><td> | |
当应用程序在Windows中运行时,Windows必须实时"跟踪"该应用程序的运行,并保留与之相关的许多信息,如光标、窗口的状况等,这些信息由Windows保留在一种叫堆的内存块中,堆的英文为Heap。简单地说,堆是采用特殊机制管理的内存块。由Windows的一个系统内核User.exe管理的堆叫作User资源堆(User Resource Heap),由另一个系统内核Gdi.exe管理的堆叫作GDI资源堆(Graphical Device Interface Resource Heap),User资源堆和GDI资源堆合称为系统资源堆(System Resource Heap),习惯上就把它们叫作系统资源(System Resource)。 | |
</td></tr><tr><td>http://baike.baidu.com/view/10812277.htm</td><td>百度百科:多义词</td><td> | |
百度百科里,当同一个词条名可指代含义概念不同的事物时,这个词条称为多义词。如词条“苹果”,既可以代表一种水果,也可以指代苹果公司,因此“苹果”是一个多义词。 | |
</td></tr><tr><td>http://baike.baidu.com/view/32571.htm</td><td>数据缓存</td><td> | |
</td></tr><tr><td>http://baike.baidu.com/view/757238.htm</td><td>多任务</td><td> | |
多任务处理是指用户可以在同一时间内运行多个应用程序,每个应用程序被称作一个任务.Linux、windows就是支持多任务的操作系统,比起单任务系统它的功能增强了许多。当多任务操作系统使用某种任务调度策略允许两个或更多进程并发共享一个处理器时,事实上处理器在某一时刻只会给一件任务提供服务。因为任务调度机制保证不同任务之间的切换速度十分迅速,因此给人多个任务同时运行的错觉。多任务系统中有3个功能单位:任务、进程和线程。 | |
</td></tr><tr><td>http://baike.baidu.com/view/2888099.htm</td><td>PyGTK</td><td> | |
PyGTK让你用Python轻松创建具有图形用户界面的程序.底层的GTK+提供了各式的可视元素和功能,如果需要,你能开发在GNOME桌面系统运行的功能完整的软件。[1] | |
</td></tr><tr><td>http://baike.baidu.com/view/1369367.htm</td><td>PIL</td><td> | |
太平船务有限公司(PIL)由张允中先生于1967年在新加坡成立。公司成立初期,以经营区域性的散杂货运输为主,从1983年起,首次推出了集装箱运输服务。 | |
</td></tr><tr><td>http://baike.baidu.com/view/737.htm</td><td>wiki</td><td> | |
Wiki一词来源于夏威夷语的“wee kee wee kee”, 发音wiki, 原本是“快点快点”的意思,被译为“维基”或“维客”。一种多人协作的写作工具。Wiki站点可以有多人(甚至任何访问者)维护,每个人都可以发表自己的意见,或者对共同的主题进行扩展或者探讨。Wiki也指一种超文本系统。这种超文本系统支持面向社群的协作式写作,同时也包括一组支持这种写作。 | |
</td></tr><tr><td>http://baike.baidu.com/view/38648.htm</td><td>苹果电脑</td><td> | |
苹果公司(Apple Inc.,NASDAQ:AAPL,LSE:ACP),原称苹果电脑公司(Apple Computer, Inc.)总部位于美国加利福尼亚的库比提诺,核心业务是电子科技产品,目前全球电脑市场占有率为3.8%。苹果的Apple II于1970年代助长了个人电脑革命,其后的Macintosh接力于1980年代持续发展。最知名的产品是其出品的Apple II、Macintosh电脑、iPod数位音乐播放器、iTunes音乐商店和iPhone智能手机,它在高科技企业中以创新而闻名。苹果公司于2007年1月9日旧金山的Macworld Expo上宣布改名。 | |
</td></tr></table></body></html> |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf8 | |
from baidu_baike import url_manager,html_downloader,html_parser,html_outputer | |
class SpiderMain(): | |
def __init__(self): | |
self.urls = url_manager.UrlManager() | |
self.downloader = html_downloader.HtmlDownloader() | |
self.parser = html_parser.HtmlParser() | |
self.outputer = html_outputer.HtmlOutputer() | |
def craw(self,root_url): | |
print 'begin' | |
count = 1 | |
self.urls.add_new_url(root_url) | |
while self.urls.has_new_url(): | |
try: | |
new_url = self.urls.get_new_url() | |
print 'crew %d:%s'% (count,new_url) | |
html_cont = self.downloader.download(new_url) | |
new_urls,new_data = self.parser.parse(new_url,html_cont) | |
self.urls.add_new_urls(new_urls) | |
self.outputer.collect_data(new_data) | |
#这里只爬了10个网页 | |
if count == 10: | |
break | |
count = count +1 | |
except: | |
print 'crew failed!!' | |
self.outputer.output_html() | |
if __name__ == "__main__":
    # Entry point: start the crawl from the Baidu Baike "Python" entry page.
    start_url = 'http://baike.baidu.com/item/Python'
    SpiderMain().craw(start_url)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#coding:utf8 | |
class UrlManager(object):
    """Tracks which URLs are still pending and which were already handed out."""

    def __init__(self):
        # URLs waiting to be crawled.
        self.new_urls = set()
        # URLs already handed out via get_new_url().
        self.old_urls = set()

    def add_new_url(self, url):
        """Queue one URL unless it is None or has been seen before."""
        if url is None:
            return
        if url in self.new_urls or url in self.old_urls:
            return
        self.new_urls.add(url)

    def add_new_urls(self, urls):
        """Queue a batch of URLs; None or an empty collection is ignored."""
        if urls is None or len(urls) == 0:
            return
        for candidate in urls:
            self.add_new_url(candidate)

    def has_new_url(self):
        """Return True while at least one URL is still pending."""
        return len(self.new_urls) != 0

    def get_new_url(self):
        """Pop an arbitrary pending URL and mark it as crawled."""
        url = self.new_urls.pop()
        self.old_urls.add(url)
        return url
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment