Python Web Scraping from Beginner to Giving Up (9): R


    import requests
    from requests.exceptions import RequestException
    import re
    import json
    from multiprocessing import Pool
    
    def get_one_page(url):
        headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'}
        try:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return response.text
            return None
        except RequestException:
            return None
    
    def parse_one_page(html):
        # Raw string avoids escape warnings; note the ".*?name" segment, which must be
        # a non-greedy wildcard (not ".?") to skip the markup between the image and the title.
        pattern = re.compile(r'<dd>.*?board-index.*?>(\d+).*?data-src="(.*?)".*?name"><a.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
        items = re.findall(pattern, html)
        for item in items:
            yield {
                'index': item[0],
                'image': item[1],
                'title': item[2].strip(),
                'actor': item[3].strip()[3:],   # drop the leading "主演:" label
                'time': item[4].strip()[5:],    # drop the leading "上映时间:" label
                'score': item[5].strip() + item[6].strip()  # integer part + fractional part
            }
    
    def write_to_file(content):
        with open('result.txt', 'a', encoding='utf-8') as f:
            f.write(json.dumps(content, ensure_ascii=False) + '\n')
    
    def main(offset):
        url = 'http://maoyan.com/board/4?offset=' + str(offset)
        html = get_one_page(url)
        if html is None:  # request failed, skip this page
            return
        for item in parse_one_page(html):
            print(item)
            write_to_file(item)
    
    if __name__ == '__main__':
        # Single-process alternative:
        # for i in range(10):
        #     main(i * 10)

        # Multiprocessing version: the pool crawls pages in parallel, so the
        # order of the lines written to result.txt is not guaranteed.
        pool = Pool()  # a pool of worker processes, one per CPU core by default
        pool.map(main, [i * 10 for i in range(10)])
        pool.close()
        pool.join()
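
As the comment above notes, the process pool fetches pages concurrently, so items land in result.txt in whatever order the workers finish. Below is a minimal sketch of one way to keep the ranking order, assuming the same `get_one_page`, `parse_one_page`, and `write_to_file` defined above (the `crawl_page` helper is introduced here only for illustration): each worker returns its parsed items instead of writing them, and the parent process writes everything after `pool.map` has collected the results in submission order.

    def crawl_page(offset):
        # Hypothetical worker for this sketch: fetch and parse one page,
        # returning its items instead of writing from inside the worker.
        html = get_one_page('http://maoyan.com/board/4?offset=' + str(offset))
        if html is None:
            return []
        return list(parse_one_page(html))

    if __name__ == '__main__':
        pool = Pool()
        # pool.map returns results in the order of its input list,
        # regardless of which worker process finishes first.
        pages = pool.map(crawl_page, [i * 10 for i in range(10)])
        pool.close()
        pool.join()
        for items in pages:        # pages come back as offsets 0, 10, 20, ...
            for item in items:
                write_to_file(item)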

     
