[March 20] A Simple Homemade Crawler Framework in Python

2017-03-21 08:20:32 · Source: CSDN · Author: Moon_treee


The simple framework consists of the following modules (the implied project layout is sketched right after this list):

  1. URL manager: maintains two URL sets, new_urls for addresses not yet crawled and old_urls for addresses already crawled
  2. HTML downloader: fetches HTML resources with urllib.request.urlopen (Python 3)
  3. HTML parser: parses page content with BeautifulSoup
  4. HTML outputer: wraps the crawl results into an output HTML file
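The entry script at the end of the article imports these modules from a baike_spider package, so the project presumably has a layout along these lines (reconstructed from the imports, not stated in the original; the entry-script name is hypothetical):

baike_spider/
    __init__.py          # marks the directory as a package
    url_manager.py       # UrlManager
    html_downloader.py   # HtmlDownloader
    html_parser.py       # HtmlParser
    html_outputer.py     # HtmlOutputer
spider_main.py           # SpiderMain entry script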

URL manager

class UrlManager(object):
    def __init__(self):
        self.new_urls = set()   # URLs waiting to be crawled
        self.old_urls = set()   # URLs already crawled

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
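A quick check of the manager's behavior (illustrative only, not part of the original post):

manager = UrlManager()
manager.add_new_url("http://baike.baidu.com/item/Python")
manager.add_new_url("http://baike.baidu.com/item/Python")  # duplicate, silently ignored
print(manager.has_new_url())   # True
url = manager.get_new_url()    # pops the URL and records it in old_urls
print(manager.has_new_url())   # False: nothing left to crawl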

HTML downloader

import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
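urllib sends a default User-Agent that some sites reject. A hedged variant of the same downloader that supplies a browser-like header and a timeout could look like this (the header value and timeout are assumptions, not part of the original):

import urllib.request


class HtmlDownloaderWithHeaders(object):
    def download(self, url, timeout=10):
        if url is None:
            return None
        # Request lets us attach headers before opening the URL
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        with urllib.request.urlopen(req, timeout=timeout) as response:
            if response.getcode() != 200:
                return None
            return response.read()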

HTML parser

from bs4 import BeautifulSoup
import re
import urllib.parse


class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # entry links look like /item/xxx
        links = soup.find_all('a', href=re.compile(r"/item/\w+"))
        for link in links:
            new_url = link['href']
            new_full_url = urllib.parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        # <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
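A minimal smoke test with a hand-written fragment that mimics the markup the parser expects (the HTML below is hypothetical; Baidu Baike's real pages are far larger):

sample = b'''<html><body>
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">An interpreted, general-purpose language.</div>
<a href="/item/Guido">Guido</a>
</body></html>'''

parser = HtmlParser()
urls, data = parser.parse("http://baike.baidu.com/item/Python", sample)
print(urls)             # {'http://baike.baidu.com/item/Guido'}
print(data['title'])    # Python
print(data['summary'])  # An interpreted, general-purpose language.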

HTML outputer

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('output.html', 'w', encoding='utf-8')
        fout.write('<html>')
        fout.write('<body>')
        fout.write('<table>')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'])
            fout.write('<td>%s</td>' % data['summary'])
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
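Feeding the writer one hand-made record shows the output format (the values here are made up for illustration):

outputer = HtmlOutputer()
outputer.collect_data({'url': 'http://baike.baidu.com/item/Python',
                       'title': 'Python',
                       'summary': 'An interpreted, general-purpose language.'})
outputer.output_html()   # writes output.html into the current directory

Note that the cell values are interpolated verbatim; a summary containing '<' or '&' would need to pass through html.escape() first to keep the table well-formed.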

main

from baike_spider import html_downloader
from baike_spider import html_outputer
from baike_spider import html_parser
from baike_spider import url_manager


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print("craw %d : %s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 100:   # crawl at most 100 pages
                    break
                count += 1
            except Exception as e:
                print("craw failed:", e)
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/item/Python"
    # root_url = "http://www.baidu.com"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
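With the package layout sketched earlier in place, saving this entry script beside the baike_spider directory (e.g. as spider_main.py, a hypothetical name) and running it crawls up to 100 entries starting from the Python page and writes the collected rows to output.html.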

Results
The output HTML file
