
Multiprocess Crawling

import requests
from lxml import etree
import re
import time
from multiprocessing import Pool  # import Pool from the multiprocessing library

headers = {'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'}

def get_info(url):
    html = requests.get(url,headers = headers)
    selector = etree.HTML(html.text)
    names = selector.xpath('//*[@class="article block untagged mb15 typs_hot"]/div[1]/a[2]/h3/text()')
    contents = re.findall('<div class="content">.*?<span>(.*?)</span>', html.text, re.S)  # re.S lets the pattern match across the line break inside the content block
    laughs = re.findall('<span class="stats-vote"><i class="number">(\d+)</i>', html.text, re.S)
    comments = re.findall('<i class="number">(\d+)</i> 评论', html.text, re.S)
    for name, content, laugh, comment in zip(names, contents, laughs, comments):
        info = {
            'name': name,
            'content': content,
            'laugh': laugh,
            'comment': comment
        }
        return info

if __name__ == '__main__':
    urls = ["https://www.qiushibaike.com/text/page/{}/".format(num) for num in range(0, 14)]
    start_1 = time.time()
    for url in urls:
        get_info(url)
    end_1 = time.time()
    print('Serial crawl time: ' + str(end_1 - start_1))
    start_2 = time.time()
    pool = Pool(processes=2)    # create a process pool; processes is the number of worker processes
    pool.map(get_info, urls)    # map() runs the given function on every item of the iterable, spread across the pool
    end_2 = time.time()
    print('2 processes: ' + str(end_2 - start_2))
    start_3 = time.time()
    pool = Pool(processes=4)    # create a process pool; processes is the number of worker processes
    pool.map(get_info, urls)    # map() runs the given function on every item of the iterable, spread across the pool
    end_3 = time.time()
    print('4 processes: ' + str(end_3 - start_3))
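One thing worth noting about the script above: the two Pool objects are never closed, so their worker processes stay alive until the interpreter exits. Below is a minimal sketch of the same timing block, assuming the get_info() function and urls list defined above, that uses the pool as a context manager (supported since Python 3.3) so the workers are shut down as soon as map() returns.

import time
from multiprocessing import Pool

if __name__ == '__main__':
    start_2 = time.time()
    # the with-block calls terminate() on exit, so no explicit close()/join() is needed
    with Pool(processes=2) as pool:
        results = pool.map(get_info, urls)  # get_info and urls come from the crawler code above
    end_2 = time.time()
    print('2 processes: ' + str(end_2 - start_2))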

Output:


D:\Python\venv\Scripts\python.exe D:/Python/venv/test12.py
Serial crawl time: 5.043288469314575
2 processes: 3.351191759109497
4 processes: 2.882164716720581

Process finished with exit code 0
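Relative to the serial run, 2 processes give a speedup of roughly 5.04 / 3.35 ≈ 1.5x, and 4 processes roughly 5.04 / 2.88 ≈ 1.75x. The gain is sub-linear: spawning worker processes has a fixed start-up cost, and the crawl is dominated by network I/O rather than CPU work, so adding processes helps but not proportionally.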
