diff --git "a/Day66-75/02.\346\225\260\346\215\256\351\207\207\351\233\206\345\222\214\350\247\243\346\236\220.md" "b/Day66-75/02.\346\225\260\346\215\256\351\207\207\351\233\206\345\222\214\350\247\243\346\236\220.md" index 09998eac4fb030e91c2823ffe8bd14ec564fcf4d..a99a46de4b664c0cd00223e3dc8134b6c6622990 100644 --- "a/Day66-75/02.\346\225\260\346\215\256\351\207\207\351\233\206\345\222\214\350\247\243\346\236\220.md" +++ "b/Day66-75/02.\346\225\260\346\215\256\351\207\207\351\233\206\345\222\214\350\247\243\346\236\220.md" @@ -87,7 +87,7 @@ > 说明:更多内容可以参考BeautifulSoup的[官方文档]()。 -### 例子 - 获取知乎发现上的问题链接 +### 实例 - 获取知乎发现上的问题链接 ```Python from urllib.parse import urljoin diff --git "a/Day66-75/03.\345\255\230\345\202\250\346\225\260\346\215\256.md" "b/Day66-75/03.\345\255\230\345\202\250\346\225\260\346\215\256.md" index 6849a51085b3d18ba0b51864b574c77f9034ce53..d6f0875e8ee84d275ca269442c2e6218dfbabfc2 100644 --- "a/Day66-75/03.\345\255\230\345\202\250\346\225\260\346\215\256.md" +++ "b/Day66-75/03.\345\255\230\345\202\250\346\225\260\346\215\256.md" @@ -197,5 +197,63 @@ b'admin' +### 实例 - 缓存知乎发现上的链接和页面代码 + +```Python + +from hashlib import sha1 +from urllib.parse import urljoin + +import pickle +import re +import requests +import zlib + +from bs4 import BeautifulSoup +from redis import Redis + + +def main(): + # 指定种子页面 + base_url = 'https://www.zhihu.com/' + seed_url = urljoin(base_url, 'explore') + # 创建Redis客户端 + client = Redis(host='1.2.3.4', port=6379, password='1qaz2wsx') + # 设置用户代理(否则访问会被拒绝) + headers = {'user-agent': 'Baiduspider'} + # 通过requests模块发送GET请求并指定用户代理 + resp = requests.get(seed_url, headers=headers) + # 创建BeautifulSoup对象并指定使用lxml作为解析器 + soup = BeautifulSoup(resp.text, 'lxml') + href_regex = re.compile(r'^/question') + # 查找所有href属性以/question打头的a标签 + for a_tag in soup.find_all('a', {'href': href_regex}): + # 获取a标签的href属性值并组装完整的URL + href = a_tag.attrs['href'] + full_url = urljoin(base_url, href) + # 将URL处理成SHA1摘要(长度固定更简短) + hasher = sha1() + hasher.update(full_url.encode('utf-8')) + field_key = hasher.hexdigest() + # 如果Redis的键'zhihu'对应的hash数据类型中没有URL的摘要就访问页面并缓存 + if not client.hexists('zhihu', field_key): + html_page = requests.get(full_url, headers=headers).text + # 对页面进行序列化和压缩操作 + zipped_page = zlib.compress(pickle.dumps(html_page)) + # 使用hash数据类型保存URL摘要及其对应的页面代码 + client.hset('zhihu', field_key, zipped_page) + # 显示总共缓存了多少个页面 + print('Total %d question pages found.' 
% client.hlen('zhihu')) + + +if __name__ == '__main__': + main() + +``` + + + + + diff --git a/Day66-75/code/example05.py b/Day66-75/code/example05.py index e48fa940e9dc803cff970b3084f4903036b51362..33490652fc6aac99a0c145a21cec7bbfe1e5665b 100644 --- a/Day66-75/code/example05.py +++ b/Day66-75/code/example05.py @@ -50,7 +50,7 @@ def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I): # 开始执行爬虫程序 def start_crawl(seed_url, match_pattern, *, max_depth=-1): - client = redis.Redis(host='120.77.222.217', port=11223, password='1qaz2wsx') + client = redis.Redis(host='1.2.3.4', port=6379, password='1qaz2wsx') charsets = ('utf-8', 'gbk', 'gb2312') logging.info('[Redis ping]', client.ping()) url_list = [seed_url] diff --git a/Day66-75/code/example06.py b/Day66-75/code/example06.py new file mode 100644 index 0000000000000000000000000000000000000000..237ccedf8eda0c4d9ed2e681d8a780e41aba3493 --- /dev/null +++ b/Day66-75/code/example06.py @@ -0,0 +1,49 @@ + +from hashlib import sha1 +from urllib.parse import urljoin + +import pickle +import re +import requests +import zlib + +from bs4 import BeautifulSoup +from redis import Redis + + +def main(): + # 指定种子页面 + base_url = 'https://www.zhihu.com/' + seed_url = urljoin(base_url, 'explore') + # 创建Redis客户端 + client = Redis(host='1.2.3.4', port=6379, password='1qaz2wsx') + # 设置用户代理 + headers = {'user-agent': 'Baiduspider'} + # 通过requests模块发送GET请求并指定用户代理 + resp = requests.get(seed_url, headers=headers) + # 创建BeautifulSoup对象并指定使用lxml作为解析器 + soup = BeautifulSoup(resp.text, 'lxml') + href_regex = re.compile(r'^/question') + # 查找所有href属性以/question打头的a标签 + for a_tag in soup.find_all('a', {'href': href_regex}): + # 获取a标签的href属性值并组装完整的URL + href = a_tag.attrs['href'] + full_url = urljoin(base_url, href) + # 将URL处理成SHA1摘要(长度固定更简短) + hasher = sha1() + hasher.update(full_url.encode('utf-8')) + field_key = hasher.hexdigest() + # 如果Redis的键'zhihu'对应的hash数据类型中没有URL的摘要就访问页面并缓存 + if not client.hexists('zhihu', field_key): + html_page = requests.get(full_url, headers=headers).text + # 对页面进行序列化和压缩操作 + zipped_page = zlib.compress(pickle.dumps(html_page)) + # 使用hash数据类型保存URL摘要及其对应的页面代码 + client.hset('zhihu', field_key, zipped_page) + # 显示总共缓存了多少个页面 + print('Total %d question pages found.' % client.hlen('zhihu')) + + +if __name__ == '__main__': + main() +
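
The patch above only writes to the cache: example06.py (and the matching section in 03.存储数据.md) pickles a page, compresses it with zlib and stores it in the Redis hash `zhihu` under the SHA-1 digest of its URL. As a companion, here is a minimal sketch of the reverse path, reading a cached page back. The helper name `load_cached_page` and the sample question URL are invented for illustration; the connection parameters simply reuse the placeholder values from the patch.

```Python
from hashlib import sha1

import pickle
import zlib

from redis import Redis


def load_cached_page(client, full_url):
    """Return the cached HTML for full_url, or None if it has not been cached."""
    # Hash the URL the same way example06.py does when it stores the page
    field_key = sha1(full_url.encode('utf-8')).hexdigest()
    zipped_page = client.hget('zhihu', field_key)
    if zipped_page is None:
        return None
    # Undo the compression and pickling applied before hset
    return pickle.loads(zlib.decompress(zipped_page))


if __name__ == '__main__':
    # Placeholder connection values taken from the patch, not a real server
    client = Redis(host='1.2.3.4', port=6379, password='1qaz2wsx')
    page = load_cached_page(client, 'https://www.zhihu.com/question/123456')
    print(page[:200] if page else 'Not cached yet.')
```

Storing `zlib.compress(pickle.dumps(html))` rather than raw text keeps the hash compact; the trade-off is that every read needs the matching decompress/unpickle step shown here.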
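Both example05.py and example06.py still hard-code the Redis host, port and password; the patch merely swaps a real address for the obvious placeholders 1.2.3.4:6379. If the goal is to keep real credentials out of the repository entirely, one common alternative (not part of this patch) is to read the connection settings from environment variables. A minimal sketch, with assumed variable names REDIS_HOST, REDIS_PORT and REDIS_PASSWORD:

```Python
import os

from redis import Redis


def redis_from_env():
    """Build a Redis client from environment variables (variable names are assumptions)."""
    return Redis(host=os.environ.get('REDIS_HOST', 'localhost'),
                 port=int(os.environ.get('REDIS_PORT', '6379')),
                 password=os.environ.get('REDIS_PASSWORD'))


if __name__ == '__main__':
    client = redis_from_env()
    print(client.ping())
```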