diff --git a/NO49/csdn/__init__.py b/NO49/csdn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/NO49/csdn/__pycache__/__init__.cpython-37.pyc b/NO49/csdn/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..47e3e977e94e290bdfb595c76609db6f858368c6
Binary files /dev/null and b/NO49/csdn/__pycache__/__init__.cpython-37.pyc differ
diff --git a/NO49/csdn/__pycache__/settings.cpython-37.pyc b/NO49/csdn/__pycache__/settings.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..07cb0a3c217ae06a271fb15f57b1255823f7b347
Binary files /dev/null and b/NO49/csdn/__pycache__/settings.cpython-37.pyc differ
diff --git a/NO49/csdn/items.py b/NO49/csdn/items.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffc1ebb5d85b66d753723c02e132d3db87f1760c
--- /dev/null
+++ b/NO49/csdn/items.py
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class CsdnItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    pass
diff --git a/NO49/csdn/middlewares.py b/NO49/csdn/middlewares.py
new file mode 100644
index 0000000000000000000000000000000000000000..d41669ba8b2b0e5783f93e1476e89f546a68ddd0
--- /dev/null
+++ b/NO49/csdn/middlewares.py
@@ -0,0 +1,105 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class CsdnSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class CsdnDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
diff --git a/NO49/csdn/pipelines.py b/NO49/csdn/pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..797e40e1a425e318ed0cf34e7289b10635a17279
--- /dev/null
+++ b/NO49/csdn/pipelines.py
@@ -0,0 +1,13 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class CsdnPipeline:
+    def process_item(self, item, spider):
+        return item
diff --git a/NO49/csdn/settings.py b/NO49/csdn/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9e335e2d5e12a0d872e04c33228ea82156e0257
--- /dev/null
+++ b/NO49/csdn/settings.py
@@ -0,0 +1,89 @@
+# Scrapy settings for csdn project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'csdn'
+
+SPIDER_MODULES = ['csdn.spiders']
+NEWSPIDER_MODULE = 'csdn.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'csdn (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = False
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'csdn.middlewares.CsdnSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'csdn.middlewares.CsdnDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'csdn.pipelines.CsdnPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+LOG_LEVEL = 'WARNING'
\ No newline at end of file
diff --git a/NO49/csdn/spiders/__init__.py b/NO49/csdn/spiders/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebd689ac51d69c5e1dbbe80083c2b20a39f8bb79
--- /dev/null
+++ b/NO49/csdn/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/NO49/csdn/spiders/__pycache__/__init__.cpython-37.pyc b/NO49/csdn/spiders/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e59b106278422b8a5a0a868a4e93e19d765db03
Binary files /dev/null and b/NO49/csdn/spiders/__pycache__/__init__.cpython-37.pyc differ
diff --git a/NO49/csdn/spiders/__pycache__/c.cpython-37.pyc b/NO49/csdn/spiders/__pycache__/c.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1911ee1c875ac5ce1e947940a58e5c921fdafb53
Binary files /dev/null and b/NO49/csdn/spiders/__pycache__/c.cpython-37.pyc differ
diff --git a/NO49/csdn/spiders/c.py b/NO49/csdn/spiders/c.py
new file mode 100644
index 0000000000000000000000000000000000000000..c80676b71f46d365fcd79463943f09c5ec6cb94a
--- /dev/null
+++ b/NO49/csdn/spiders/c.py
@@ -0,0 +1,15 @@
+import scrapy
+from scrapy.selector import Selector
+from scrapy.http import HtmlResponse
+
+
+class CSpider(scrapy.Spider):
+    name = 'c'
+    allowed_domains = ['csdn.net']
+    start_urls = ['https://blog.csdn.net/rank/list/column']
+
+    def parse(self, response):
+        # keep the downloaded body when rebuilding; an empty HtmlResponse has nothing to select
+        response = HtmlResponse(url=self.start_urls[0], body=response.body, encoding='utf-8')
+        ret = Selector(response=response).xpath("//title").get()
+        print(ret)
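
Review note (not part of the diff): parse() above only exists to demonstrate the Selector and HtmlResponse classes; the original version rebuilt the response as HtmlResponse(url=...) with no body, so the XPath query always returned None, and the hunk above keeps the downloaded body when reconstructing. In everyday spiders neither class needs to be constructed by hand, because the response Scrapy passes in is already selector-aware. A minimal sketch of the idiomatic equivalent, same spider, no manual wrappers:

    import scrapy


    class CSpider(scrapy.Spider):
        name = 'c'
        allowed_domains = ['csdn.net']
        start_urls = ['https://blog.csdn.net/rank/list/column']

        def parse(self, response):
            # the response object supports .xpath()/.css() directly
            title = response.xpath('//title/text()').get()
            self.logger.warning('page title: %s', title)

With LOG_LEVEL = 'WARNING' set in NO49/csdn/settings.py, running scrapy crawl c from NO49/ suppresses Scrapy's INFO output, so only warning-level lines like the one above (and any print calls) reach the console.
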
diff --git a/NO49/scrapy.cfg b/NO49/scrapy.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..1e233dd5b417006ad1196770702e2e12663234c0
--- /dev/null
+++ b/NO49/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = csdn.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = csdn
diff --git a/NO50/mid_test/__init__.py b/NO50/mid_test/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/NO50/mid_test/__pycache__/__init__.cpython-37.pyc b/NO50/mid_test/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01af5c33c2a9711c85b0b92ef1817b8c8ff832af
Binary files /dev/null and b/NO50/mid_test/__pycache__/__init__.cpython-37.pyc differ
diff --git a/NO50/mid_test/__pycache__/middlewares.cpython-37.pyc b/NO50/mid_test/__pycache__/middlewares.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a00480fedd51d31ee9910fa214d45b0dc4862b0
Binary files /dev/null and b/NO50/mid_test/__pycache__/middlewares.cpython-37.pyc differ
diff --git a/NO50/mid_test/__pycache__/settings.cpython-37.pyc b/NO50/mid_test/__pycache__/settings.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bfddf19eeac2b212cdf06c3f4ca9f75b4d3bd64
Binary files /dev/null and b/NO50/mid_test/__pycache__/settings.cpython-37.pyc differ
diff --git a/NO50/mid_test/items.py b/NO50/mid_test/items.py
new file mode 100644
index 0000000000000000000000000000000000000000..decd00cc065fdd893b63a995c9d83597cc590b80
--- /dev/null
+++ b/NO50/mid_test/items.py
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class MidTestItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    pass
diff --git a/NO50/mid_test/middlewares.py b/NO50/mid_test/middlewares.py
new file mode 100644
index 0000000000000000000000000000000000000000..948c364c4d0653e075847f349fb803d56397e179
--- /dev/null
+++ b/NO50/mid_test/middlewares.py
@@ -0,0 +1,117 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class MidTestSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class MidTestDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class MyUserAgentMiddleware(object):
+
+    def process_request(self, request, spider):
+        request.headers.setdefault('User-Agent',
+                                   'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36')
+
+        return None
+
+    def process_response(self, request, response, spider):
+        print(response)  # just print the response to show the hook fired
+
+        return response
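
Review note (not part of the diff): MyUserAgentMiddleware pins one Chrome UA string through request.headers.setdefault(). The same process_request hook is commonly used to rotate the user agent per request; a minimal sketch, assuming a custom USER_AGENT_LIST entry in settings.py (not a built-in Scrapy setting):

    import random


    class RandomUserAgentMiddleware:

        def __init__(self, user_agents):
            self.user_agents = user_agents

        @classmethod
        def from_crawler(cls, crawler):
            # USER_AGENT_LIST is an assumed custom setting we define ourselves
            return cls(crawler.settings.getlist('USER_AGENT_LIST'))

        def process_request(self, request, spider):
            if self.user_agents:
                # assign rather than setdefault so every request gets a fresh pick
                request.headers['User-Agent'] = random.choice(self.user_agents)
            return None

Wiring it up works the same way as in the settings file further down: point the DOWNLOADER_MIDDLEWARES entry at this class instead of MyUserAgentMiddleware.
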
diff --git a/NO50/mid_test/pipelines.py b/NO50/mid_test/pipelines.py
new file mode 100644
index 0000000000000000000000000000000000000000..1540988c4247b1f0702ed35a3669be90af847f1f
--- /dev/null
+++ b/NO50/mid_test/pipelines.py
@@ -0,0 +1,13 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class MidTestPipeline:
+    def process_item(self, item, spider):
+        return item
diff --git a/NO50/mid_test/settings.py b/NO50/mid_test/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e617373070d44d5c4a75e466c2a65762f9637c
--- /dev/null
+++ b/NO50/mid_test/settings.py
@@ -0,0 +1,90 @@
+# Scrapy settings for mid_test project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'mid_test'
+
+SPIDER_MODULES = ['mid_test.spiders']
+NEWSPIDER_MODULE = 'mid_test.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'mid_test (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'mid_test.middlewares.MidTestSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+DOWNLOADER_MIDDLEWARES = {
+    'mid_test.middlewares.MyUserAgentMiddleware': 543
+
+}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'mid_test.pipelines.MidTestPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
+# LOG_LEVEL = 'WARNING'
\ No newline at end of file
diff --git a/NO50/mid_test/spiders/__init__.py b/NO50/mid_test/spiders/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebd689ac51d69c5e1dbbe80083c2b20a39f8bb79
--- /dev/null
+++ b/NO50/mid_test/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/NO50/mid_test/spiders/__pycache__/__init__.cpython-37.pyc b/NO50/mid_test/spiders/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef0fcab0eaaccee2ec4a50275730a110136914bc
Binary files /dev/null and b/NO50/mid_test/spiders/__pycache__/__init__.cpython-37.pyc differ
diff --git a/NO50/mid_test/spiders/__pycache__/hbin.cpython-37.pyc b/NO50/mid_test/spiders/__pycache__/hbin.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6099ec2991867f4beb036a8f2f498cd60f53965a
Binary files /dev/null and b/NO50/mid_test/spiders/__pycache__/hbin.cpython-37.pyc differ
diff --git a/NO50/mid_test/spiders/hbin.py b/NO50/mid_test/spiders/hbin.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f366fd3549a11dcad8db6284caa1b028cd15b0c
--- /dev/null
+++ b/NO50/mid_test/spiders/hbin.py
@@ -0,0 +1,10 @@
+import scrapy
+
+
+class HbinSpider(scrapy.Spider):
+    name = 'hbin'
+    allowed_domains = ['httpbin.org']
+    start_urls = ['http://httpbin.org/get']
+
+    def parse(self, response):
+        print(response.text)
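
Review note (not part of the diff): http://httpbin.org/get echoes the request back as JSON, so this spider doubles as a check that the downloader middleware took effect. Assuming httpbin.org is reachable, running scrapy crawl hbin from NO50/ should print a body roughly of the form

    {
      "args": {},
      "headers": {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) ... Chrome/95.0.4638.54 Safari/537.36",
        ...
      },
      ...
    }

with the Chrome string set in process_request; disable the middleware and the default Scrapy/VERSION user agent appears instead.
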
diff --git a/NO50/scrapy.cfg b/NO50/scrapy.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..40c7afaab08c3e40e0fc3c3ce88c389caf5db181
--- /dev/null
+++ b/NO50/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = mid_test.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = mid_test
diff --git a/README.md b/README.md
index db6e141d639be152c18103da03722a1ecaeec06d..dc457702448d121cd01c7ef833570a86fa7039d7 100644
--- a/README.md
+++ b/README.md
@@ -92,4 +92,6 @@
 46. [How can you get by recognizing only the Volkswagen logo? Hurry up and collect every car logo with python to study them](https://dream.blog.csdn.net/article/details/120988302)
 47. [Is practicing Python crawling on iQIYI probing the edge of the law? A study of crawler techniques](https://dream.blog.csdn.net/article/details/121007901)
 48. [A programmer helps a friend in another industry: python crawling of feed-additive data, collection + backup](https://dream.blog.csdn.net/article/details/121028282)
+49. [CSDN hot lists and Huawei Cloud blogs both make good practice targets for Python scrapy crawlers](https://dream.blog.csdn.net/article/details/121066927)
+50. [Pure crawler knowledge: how much do you know about python scrapy downloader middleware?](https://dream.blog.csdn.net/article/details/121083780)