From 79f297a466df4652b6e6a7bc2ea16e879dd9fcd3 Mon Sep 17 00:00:00 2001
From: jackfrued
Date: Wed, 30 May 2018 11:42:00 +0800
Subject: [PATCH] hello crawler

---
 Day66-75/code/example06.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/Day66-75/code/example06.py b/Day66-75/code/example06.py
index 237cced..a8ac96f 100644
--- a/Day66-75/code/example06.py
+++ b/Day66-75/code/example06.py
@@ -17,20 +17,22 @@ def main():
     seed_url = urljoin(base_url, 'explore')
     # Create a Redis client
     client = Redis(host='1.2.3.4', port=6379, password='1qaz2wsx')
-    # Set the user agent
+    # Set the user agent (otherwise the request will be rejected)
     headers = {'user-agent': 'Baiduspider'}
     # Send a GET request with the requests module, specifying the user agent
     resp = requests.get(seed_url, headers=headers)
     # Create a BeautifulSoup object using lxml as the parser
     soup = BeautifulSoup(resp.text, 'lxml')
     href_regex = re.compile(r'^/question')
+    # Turn URLs into SHA1 digests (fixed length and more compact)
+    hasher_proto = sha1()
     # Find all <a> tags whose href attribute starts with /question
     for a_tag in soup.find_all('a', {'href': href_regex}):
         # Get the href attribute of the <a> tag and assemble the full URL
         href = a_tag.attrs['href']
         full_url = urljoin(base_url, href)
-        # Turn URLs into SHA1 digests (fixed length and more compact)
-        hasher = sha1()
+        # Feed in the URL to generate its SHA1 digest
+        hasher = hasher_proto.copy()
         hasher.update(full_url.encode('utf-8'))
         field_key = hasher.hexdigest()
         # If the hash stored at the Redis key 'zhihu' does not yet contain this URL digest, fetch the page and cache it
--
GitLab
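
For reference, the hasher-prototype pattern this patch introduces can be exercised on its own. Below is a minimal sketch, runnable without Redis or any scraping; the question URLs are hypothetical stand-ins for the hrefs that example06.py collects. hashlib hash objects support copy(), which clones the hasher's internal state, so the loop clones one prototype instead of constructing a fresh sha1() on every iteration.

    from hashlib import sha1

    # One prototype hasher created up front, as in the patch.
    hasher_proto = sha1()

    # Hypothetical URLs, for illustration only.
    urls = [
        'https://www.zhihu.com/question/1',
        'https://www.zhihu.com/question/2',
    ]

    for full_url in urls:
        # Clone the prototype instead of calling sha1() each time.
        hasher = hasher_proto.copy()
        hasher.update(full_url.encode('utf-8'))
        # The fixed-length hex digest is what the crawler uses as the
        # field key inside the Redis hash named 'zhihu'.
        print(hasher.hexdigest())

On the Redis side, the dedup check that the trailing comment alludes to would typically be client.hexists('zhihu', field_key) in redis-py, followed by a client.hset(...) to cache the fetched page; the patch does not show that code, so treat those calls as an assumption about the surrounding file rather than part of this change.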