Commit e3c56027 authored by H hjdhnx

Updated the py sources

Parent 076eb2ce
3.9.29beta1
\ No newline at end of file
3.9.29beta2
\ No newline at end of file
......@@ -54,6 +54,7 @@
- [X] alist.js updated; added home-page recommendations
- [X] Xianren (仙人) mode logic optimized; added a ver parameter with values 0, 1, 2: the default 0 auto-detects, 1 forces drpy.min.js, 2 forces drpy2.min.js (see the sketch after this list)
- [X] Mourning (默哀) mode optimized
- [X] Updated some of the py sources
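As a rough illustration of the ver switch described above, here is a minimal sketch; the function name `pick_engine` and the detection flag are hypothetical, not the project's actual code:

```python
# Hypothetical sketch of the ver switch: 0 = auto-detect, 1 = force drpy.min.js, 2 = force drpy2.min.js
def pick_engine(ver=0, looks_like_drpy2=False):
    if ver == 1:
        return 'drpy.min.js'
    if ver == 2:
        return 'drpy2.min.js'
    # ver == 0: fall back to automatic detection (the heuristic flag is an assumption here)
    return 'drpy2.min.js' if looks_like_drpy2 else 'drpy.min.js'
```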
###### 2022/12/2
- [X] js0/js1 API updated with a new feature: pagecount (specify the total page count for individual categories)
```json
......
This diff is collapsed.
......@@ -5,11 +5,7 @@ import sys
sys.path.append('..')
from base.spider import Spider
import json
from requests import session, utils
import os
import time
import base64
import threading
class Spider(Spider):
box_video_type = ''
......@@ -34,17 +30,19 @@ class Spider(Spider):
def homeContent(self, filter):
result = {}
cateManual = {
"动态": "动态",
"UP": "UP",
"关注": "关注",
"追番": "追番",
"追剧": "追剧",
"收藏": "收藏",
"历史记录": "历史记录",
# ———————— Customizable UP hosts below; put the UID after the colon ————————
#"虫哥说电影": "29296192",
# ———————— Customizable keywords below; results are shown via search ————————
"宅舞": "宅舞",
"cosplay": "cosplay",
"周杰伦": "周杰伦",
"狗狗": "汪星人",
"猫咪": "喵星人",
"请自定义关键词": "美女",
# ———————— Customizable UP hosts below; put the UID after the colon ————————
"徐云流浪中国": "697166795",
# "虫哥说电影": "29296192",
#"狗狗": "汪星人",
#"猫咪": "喵星人",
}
classes = []
for k in cateManual:
......@@ -56,7 +54,11 @@ class Spider(Spider):
if (filter):
filters = {}
for lk in cateManual:
if not cateManual[lk].isdigit():
if lk in self.bilibili.config['filter']:
filters.update({
cateManual[lk]: self.bilibili.config['filter'][lk]
})
elif not cateManual[lk].isdigit():
link = cateManual[lk]
filters.update({
link: [{"key": "order", "name": "排序",
......@@ -73,18 +75,90 @@ class Spider(Spider):
# User cookies; fill them in inside py_bilibili, nothing to change here
cookies = ''
userid = ''
def getCookie(self):
self.cookies = self.bilibili.getCookie()
self.userid = self.bilibili.userid
return self.cookies
def homeVideoContent(self):
result = {}
videos = self.bilibili.get_dynamic(1)['list'][0:3]
result['list'] = videos
return result
def get_up_videos(self, tid, pg):
def get_follow(self, pg, order):
if len(self.cookies) <= 0:
self.getCookie()
result = {}
url = 'https://api.bilibili.com/x/space/arc/search?mid={0}&pn={1}&ps=10'.format(tid, pg)
ps = 10
url = 'https://api.bilibili.com/x/relation/followings?vmid={0}&order=desc&order_type={3}&ps={1}&pn={2}'.format(self.userid, ps, pg, order)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
follow = []
for f in jo['data']['list']:
mid = f['mid']
title = str(f['uname']).strip()
img = str(f['face']).strip()
remark = ''
if f['special'] == 1:
remark = '特别关注'
follow.append({
"vod_id": str(mid) + '_mid',
"vod_name": title,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
total = jo['data']['total']
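# pages = ceil(total / ps): divmod gives (quotient, remainder); add one extra page when there is a remainder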
pc = divmod(total, ps)
if pc[1] != 0:
pc = pc[0] + 1
else:
pc = pc[0]
result['list'] = follow
result['page'] = pg
result['pagecount'] = pc
result['limit'] = 2
result['total'] = 999999
return result
def get_up_archive(self, pg, order):
mid = self.bilibili.up_mid
if mid.isdigit():
return self.get_up_videos(mid, pg, order)
else:
return {}
get_up_videos_mid = ''
get_up_videos_pc = 1
def get_up_videos(self, mid, pg, order):
result = {}
ps = 10
order2 = ''
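# The API has no 'oldest' sort; emulate it by requesting 'pubdate' order, mapping page pg to (pagecount - pg + 1), and reversing the returned items further down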
if order == 'oldest':
order2 = order
order = 'pubdate'
if order2 and int(pg) == 1:
url = 'https://api.bilibili.com/x/space/arc/search?mid={0}&pn={1}&ps={2}&order={3}'.format(mid, pg, ps, order)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
total = jo['data']['page']['count']
pc = divmod(total, ps)
if pc[1] != 0:
pc = pc[0] + 1
else:
pc = pc[0]
self.get_up_videos_mid = mid
self.get_up_videos_pc = pc
tmp_pg = pg
if order2:
tmp_pg = self.get_up_videos_pc - int(pg) + 1
url = 'https://api.bilibili.com/x/space/arc/search?mid={0}&pn={1}&ps={2}&order={3}'.format(mid, tmp_pg, ps, order)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
......@@ -99,20 +173,88 @@ class Spider(Spider):
videos.append({
"vod_id": aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
if order2:
videos.reverse()
if int(pg) == 1:
info = {}
self.bilibili.get_up_info(mid, info)
gotoUPHome={
"vod_id": str(mid) + '_mid',
"vod_name": info['name'] + " 个人主页",
"vod_pic": info['face'] + '@672w_378h_1c.jpg',
"vod_remarks": info['following'] + ' 投稿:' + str(info['vod_count'])
}
videos.insert(0, gotoUPHome)
pc = self.get_up_videos_pc
if self.get_up_videos_mid != mid:
total = jo['data']['page']['count']
pc = divmod(total, ps)
if pc[1] != 0:
pc = pc[0] + 1
else:
pc = pc[0]
self.get_up_videos_mid = mid
self.get_up_videos_pc = pc
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['pagecount'] = pc
result['limit'] = 2
result['total'] = 999999
return result
def get_zhui(self, pg, mode):
result = {}
if len(self.cookies) <= 0:
self.getCookie()
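# type=1 returns followed bangumi (追番), type=2 returns followed dramas (追剧); see categoryContent below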
url = 'https://api.bilibili.com/x/space/bangumi/follow/list?type={2}&pn={1}&ps=10&vmid={0}'.format(self.userid, pg, mode)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
videos = []
vodList = jo['data']['list']
for vod in vodList:
aid = str(vod['season_id']).strip()
title = vod['title']
img = vod['cover'].strip()
remark = ''
if 'index_show' in vod['new_ep']:
remark = vod['new_ep']['index_show']
videos.append({
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 2
result['total'] = 999999
return result
def categoryContent(self, tid, pg, filter, extend):
self.box_video_type = "分区"
if tid.isdigit():
return self.get_up_videos(tid, pg)
order = 'pubdate'
if 'order' in extend:
order = extend['order']
return self.get_up_videos(tid, pg, order)
elif tid == "关注":
order = 'attention'
if 'order' in extend:
order = extend['order']
return self.get_follow(pg, order)
elif tid == "UP":
order = 'pubdate'
if 'order' in extend:
order = extend['order']
return self.get_up_archive(pg, order)
elif tid == "追番":
return self.get_zhui(pg, 1)
elif tid == "追剧":
return self.get_zhui(pg, 2)
else:
result = self.bilibili.categoryContent(tid, pg, filter, extend)
return result
......@@ -120,48 +262,87 @@ class Spider(Spider):
def cleanSpace(self, str):
return str.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', '')
def detailContent(self, array):
if self.box_video_type == "搜索":
mid = array[0]
# Fetch the UP host's video list; ps is the number of videos per page (default 20) to speed up loading
url = 'https://api.bilibili.com/x/space/arc/search?mid={0}&pn=1&ps=20'.format(mid)
rsp = self.fetch(url, headers=self.header)
con = threading.Condition()
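# Shared condition used by get_up_vod: threads wait until their page number n is at the head of nList, so each page's playUrl is appended to urlList in page order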
def get_up_vod(self, mid, n, nList, urlList):
# Fetch the UP host's video list
url = 'https://api.bilibili.com/x/space/arc/search?mid={0}&ps=50&pn={1}'.format(mid, n)
try:
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jRoot = json.loads(content)
jo = jRoot['data']['list']['vlist']
url2 = "https://api.bilibili.com/x/web-interface/card?mid={0}".format(mid)
rsp2 = self.fetch(url2, headers=self.header)
jRoot2 = json.loads(rsp2.text)
jo2 = jRoot2['data']['card']
name = jo2['name'].replace("<em class=\"keyword\">", "").replace("</em>", "")
pic = jo2['face']
desc = jo2['Official']['desc'] + " " + jo2['Official']['title']
except:
with self.con:
nList.remove(n)
self.con.notifyAll()
return
jRoot = json.loads(content)
jo = jRoot['data']['list']['vlist']
if len(jo) == 0:
with self.con:
nList.remove(n)
self.con.notifyAll()
return
playUrl = ''
vodItems = []
for tmpJo in jo:
aid = tmpJo['aid']
part = tmpJo['title'].replace("#", "-")
url = '{0}${1}_cid'.format(part, aid)
vodItems.append(url)
playUrl = '#'.join(vodItems)
with self.con:
while True:
if n == nList[0]:
urlList.append(playUrl)
nList.remove(n)
self.con.notifyAll()
break
else:
self.con.wait()
def detailContent(self, array):
if 'mid' in array[0]:
arrays = array[0].split("_")
mid = arrays[0]
self.bilibili.up_mid = mid
info = {}
i = threading.Thread(target=self.bilibili.get_up_info, args=(mid, info, ))
i.start()
# Fetch at most the most recent 2 pages of uploads
pn = 3
urlList = []
#nList = []
#for n in range(pn):
# n += 1
# nList.append(n)
# with self.con:
# if threading.active_count() > 10:
# self.con.wait()
# t = threading.Thread(target=self.get_up_vod, args=(mid, n, nList, urlList, ))
# t.start()
while True:
_count = threading.active_count()
# Count active threads; increase this threshold if no results come back, decrease it if too few do
if _count <= 2:
break
vod = {
"vod_id": mid,
"vod_name": name + " " + "个人主页",
"vod_pic": pic,
"type_name": "最近投稿",
"vod_year": "",
"vod_name": info['name'] + " 个人主页",
"vod_pic": info['face'],
"vod_area": "bilidanmu",
"vod_remarks": "", # 不会显示
'vod_tags': 'mv', # 不会显示
"vod_actor": "粉丝数:" + self.bilibili.zh(jo2['fans']),
"vod_director": name,
"vod_content": desc
"vod_tags": 'mv', # 不会显示
"vod_actor": "粉丝数:" + info['fans'] + " 投稿数:" + info['vod_count'] + " 点赞数:" +info['like_num'],
"vod_director": info['name'] + ' UID:' +str(mid) + " " + info['following'],
"vod_content": info['desc'],
'vod_play_from': '更多视频在我的哔哩——UP标签,按上键刷新查看'
}
playUrl = ''
for tmpJo in jo:
eid = tmpJo['aid']
url3 = "https://api.bilibili.com/x/web-interface/view?aid=%s" % str(eid)
rsp3 = self.fetch(url3)
jRoot3 = json.loads(rsp3.text)
cid = jRoot3['data']['cid']
part = tmpJo['title'].replace("#", "-")
playUrl = playUrl + '{0}${1}_{2}#'.format(part, eid, cid)
vod['vod_play_from'] = 'B站'
vod['vod_play_url'] = playUrl
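# Pseudo-episode entries for following/unfollowing this UP, encoded with the '_follow' id suffix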
first = '点击相应按钮可以关注/取关$' + str(mid) + '_mid'
follow = '关注$' + str(mid) + '_1_mid_follow'
unfollow = '取消关注$' + str(mid) + '_2_mid_follow'
doWhat = [first, follow, unfollow]
urlList = doWhat + urlList
vod['vod_play_url'] = '#'.join(urlList)
result = {
'list': [
......@@ -173,7 +354,6 @@ class Spider(Spider):
return self.bilibili.detailContent(array)
def searchContent(self, key, quick):
self.box_video_type = "搜索"
if len(self.cookies) <= 0:
self.getCookie()
url = 'https://api.bilibili.com/x/web-interface/search/type?search_type=bili_user&keyword={0}'.format(key)
......@@ -183,14 +363,14 @@ class Spider(Spider):
videos = []
vodList = jo['data']['result']
for vod in vodList:
aid = str(vod['mid']) # str(vod["res"][0]["aid"])
mid = str(vod['mid'])
title = "UP主:" + vod['uname'].strip() + " ☜" + key
img = 'https:' + vod['upic'].strip()
remark = "粉丝数" + self.bilibili.zh(vod['fans'])
videos.append({
"vod_id": aid,
"vod_id": mid + '_mid',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result = {
......
......@@ -10,7 +10,6 @@ import os
import time
import base64
class Spider(Spider):
def getDependence(self):
return ['py_bilibili']
......@@ -40,8 +39,6 @@ class Spider(Spider):
"纪录片": "3",
"综艺": "7",
"全部": "全部",
"追番": "追番",
"追剧": "追剧",
"时间表": "时间表",
# ———————— Customizable keywords below; results are shown as film/TV search results ————————
# "喜羊羊": "喜羊羊"
......@@ -60,11 +57,9 @@ class Spider(Spider):
# User cookies
cookies = ''
userid = ''
def getCookie(self):
self.cookies = self.bilibili.getCookie()
self.userid = self.bilibili.userid
return self.cookies
# Convert numbers above 10,000 into units of 万 (10k) and 亿 (100M)
......@@ -82,100 +77,87 @@ class Spider(Spider):
def homeVideoContent(self):
result = {}
videos = self.get_rank(1)['list'][0:5]
for i in [4, 2, 5, 3, 7]:
videos += self.get_rank2(i)['list'][0:5]
videos = self.get_rank2(tid=4, pg=1)['list'][0:3]
#videos = self.get_rank(tid=1, pg=1)['list'][0:5]
#for i in [4, 2, 5, 3, 7]:
# videos += self.get_rank2(tid=i, pg=1)['list'][0:5]
result['list'] = videos
return result
def get_rank(self, tid):
def get_rank(self, tid, pg):
ps=9
pg_max= int(pg) * ps
pg_min= pg_max - ps
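# The rank API returns the full list in one response, so it is paged client-side by slicing [pg_min:pg_max] into chunks of ps items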
result = {}
url = 'https://api.bilibili.com/pgc/web/rank/list?season_type={0}&day=3'.format(tid)
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
videos = []
vodList = jo['result']['list']
pc = int(len(vodList) / ps) + 1
vodList = vodList[pg_min:pg_max]
for vod in vodList:
aid = str(vod['season_id']).strip()
title = vod['title'].strip()
img = vod['cover'].strip()
remark = vod['new_ep']['index_show']
remark = ''
if 'index_show' in vod['new_ep']:
remark = vod['new_ep']['index_show']
videos.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = 1
result['pagecount'] = 1
result['limit'] = 90
result['page'] = pg
result['pagecount'] = pc
result['limit'] = 2
result['total'] = 999999
return result
def get_rank2(self, tid):
def get_rank2(self, tid, pg):
ps=9
pg_max= int(pg) * ps
pg_min= pg_max - ps
result = {}
url = 'https://api.bilibili.com/pgc/season/rank/web/list?season_type={0}&day=3'.format(tid)
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
videos = []
vodList = jo['data']['list']
pc = int(len(vodList) / ps) + 1
vodList = vodList[pg_min:pg_max]
for vod in vodList:
aid = str(vod['season_id']).strip()
title = vod['title'].strip()
img = vod['cover'].strip()
remark = vod['new_ep']['index_show']
remark = ''
if 'index_show' in vod['new_ep']:
remark = vod['new_ep']['index_show']
videos.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = 1
result['pagecount'] = 1
result['limit'] = 90
result['page'] = pg
result['pagecount'] = pc
result['limit'] = 2
result['total'] = 999999
return result
def get_zhui(self, pg, mode):
result = {}
if len(self.cookies) <= 0:
self.getCookie()
url = 'https://api.bilibili.com/x/space/bangumi/follow/list?type={2}&follow_status=0&pn={1}&ps=10&vmid={0}'.format(self.userid, pg, mode)
rsp = self.fetch(url, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
videos = []
vodList = jo['data']['list']
for vod in vodList:
aid = str(vod['season_id']).strip()
title = vod['title']
img = vod['cover'].strip()
remark = vod['new_ep']['index_show'].strip()
videos.append({
"vod_id": aid,
"vod_name": title,
"vod_pic": img,
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['total'] = 999999
return result
def get_all(self, tid, pg, order, season_status, extend):
result = {}
if len(self.cookies) <= 0:
self.getCookie()
url = 'https://api.bilibili.com/pgc/season/index/result?order={2}&pagesize=10&type=1&season_type={0}&page={1}&season_status={3}'.format(tid, pg, order, season_status)
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
videos = []
......@@ -186,22 +168,22 @@ class Spider(Spider):
img = vod['cover'].strip()
remark = vod['index_show'].strip()
videos.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['limit'] = 2
result['total'] = 999999
return result
def get_timeline(self, tid, pg):
result = {}
url = 'https://api.bilibili.com/pgc/web/timeline/v2?season_type={0}&day_before=2&day_after=4'.format(tid)
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
......@@ -213,9 +195,9 @@ class Spider(Spider):
img = vod['cover'].strip()
remark = vod['pub_index'] + ' ' + vod['follows'].replace('系列', '')
videos1.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
videos2 = []
......@@ -229,9 +211,9 @@ class Spider(Spider):
date = str(time.strftime("%m-%d %H:%M", time.localtime(vod['pub_ts'])))
remark = date + " " + vod['pub_index']
videos2.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos2 + videos1
......@@ -246,9 +228,9 @@ class Spider(Spider):
if len(self.cookies) <= 0:
self.getCookie()
if tid == "1":
return self.get_rank(tid=tid)
return self.get_rank(tid=tid, pg=pg)
elif tid in {"2", "3", "4", "5", "7"}:
return self.get_rank2(tid=tid)
return self.get_rank2(tid=tid, pg=pg)
elif tid == "全部":
tid = '1' # the "全部" (All) tab defaults to showing the most-played bangumi
order = '2'
......@@ -260,10 +242,6 @@ class Spider(Spider):
if 'season_status' in extend:
season_status = extend['season_status']
return self.get_all(tid, pg, order, season_status, extend)
elif tid == "追番":
return self.get_zhui(pg, 1)
elif tid == "追剧":
return self.get_zhui(pg, 2)
elif tid == "时间表":
tid = 1
if 'tid' in extend:
......@@ -277,69 +255,20 @@ class Spider(Spider):
return str.replace('\n', '').replace('\t', '').replace('\r', '').replace(' ', '')
def detailContent(self, array):
aid = array[0]
url = "https://api.bilibili.com/pgc/view/web/season?season_id={0}".format(aid)
rsp = self.fetch(url, headers=self.header)
jRoot = json.loads(rsp.text)
jo = jRoot['result']
id = jo['season_id']
title = jo['title']
pic = jo['cover']
# areas = jo['areas']['name']  replaced with bilidanmu so that danmaku are displayed
typeName = jo['share_sub_title']
date = jo['publish']['pub_time'][0:4]
dec = jo['evaluate']
remark = jo['new_ep']['desc']
stat = jo['stat']
# The actor and director fields are used to show video stats, including the following:
status = "弹幕: " + self.zh(stat['danmakus']) + " 点赞: " + self.zh(stat['likes']) + " 投币: " + self.zh(
stat['coins']) + " 追番追剧: " + self.zh(stat['favorites'])
if 'rating' in jo:
score = "评分: " + str(jo['rating']['score']) + ' ' + jo['subtitle']
else:
score = "暂无评分" + ' ' + jo['subtitle']
vod = {
"vod_id": id,
"vod_name": title,
"vod_pic": pic,
"type_name": typeName,
"vod_year": date,
"vod_area": "bilidanmu",
"vod_remarks": remark,
"vod_actor": status,
"vod_director": score,
"vod_content": dec
}
ja = jo['episodes']
playUrl = ''
for tmpJo in ja:
aid = tmpJo['aid']
cid = tmpJo['cid']
part = tmpJo['title'].replace("#", "-")
playUrl = playUrl + '{0}${1}_{2}#'.format(part, aid, cid)
vod['vod_play_from'] = 'B站'
vod['vod_play_url'] = playUrl
result = {
'list': [
vod
]
}
return result
return self.bilibili.ysContent(array)
def searchContent(self, key, quick):
if len(self.cookies) <= 0:
self.getCookie()
url1 = 'https://api.bilibili.com/x/web-interface/search/type?search_type=media_bangumi&keyword={0}'.format(
key) # bangumi search
rsp1 = self.fetch(url1, cookies=self.cookies)
rsp1 = self.fetch(url1, headers=self.header, cookies=self.cookies)
content1 = rsp1.text
jo1 = json.loads(content1)
rs1 = jo1['data']
url2 = 'https://api.bilibili.com/x/web-interface/search/type?search_type=media_ft&keyword={0}'.format(
key) # film/TV search
rsp2 = self.fetch(url2, cookies=self.cookies)
rsp2 = self.fetch(url2, headers=self.header, cookies=self.cookies)
content2 = rsp2.text
jo2 = json.loads(content2)
rs2 = jo2['data']
......@@ -356,9 +285,9 @@ class Spider(Spider):
img = vod['cover'].strip() # vod['eps'][0]['cover'].strip() was the old, incorrect way
remark = vod['index_show']
videos.append({
"vod_id": aid,
"vod_id": 'ss' + aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result = {
......@@ -367,46 +296,7 @@ class Spider(Spider):
return result
def playerContent(self, flag, id, vipFlags):
result = {}
ids = id.split("_")
header = {
"Referer": "https://www.bilibili.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
}
url = 'https://api.bilibili.com/pgc/player/web/playurl?qn=116&aid={0}&cid={1}'.format(ids[0], ids[1])
if len(self.cookies) <= 0:
self.getCookie()
self.bilibili.post_history(ids[0], ids[1]) # report playback history back to the account
rsp = self.fetch(url, cookies=self.cookies, headers=header)
jRoot = json.loads(rsp.text)
if jRoot['message'] != 'success':
print("需要大会员权限才能观看")
return {}
jo = jRoot['result']
ja = jo['durl']
maxSize = -1
position = -1
for i in range(len(ja)):
tmpJo = ja[i]
if maxSize < int(tmpJo['size']):
maxSize = int(tmpJo['size'])
position = i
url = ''
if len(ja) > 0:
if position == -1:
position = 0
url = ja[position]['url']
result["parse"] = 0
result["playUrl"] = ''
result["url"] = url
result["header"] = {
"Referer": "https://www.bilibili.com",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36"
}
result["contentType"] = 'video/x-flv'
return result
return self.bilibili.playerContent(flag, id, vipFlags)
config = {
"player": {},
......
......@@ -5,11 +5,6 @@ import sys
sys.path.append('..')
from base.spider import Spider
import json
import requests
from requests import session, utils
import time
import base64
class Spider(Spider):
def getDependence(self):
......@@ -22,7 +17,10 @@ class Spider(Spider):
def homeContent(self, filter):
result = {}
cateManual = {
"我的关注": "我的关注",
"观看记录": "观看记录",
"推荐": "推荐",
"热门": "热门",
"网游": "2",
"手游": "3",
"单机": "6",
......@@ -32,8 +30,6 @@ class Spider(Spider):
"赛事": "13",
"电台": "5",
"虚拟": "9",
"我的关注": "我的关注",
"观看记录": "观看记录",
}
classes = []
......@@ -49,7 +45,6 @@ class Spider(Spider):
# User cookies
cookies = ''
userid = ''
def getCookie(self):
self.cookies = self.bilibili.getCookie()
......@@ -83,19 +78,22 @@ class Spider(Spider):
def get_live_userInfo(self, uid):
url = 'https://api.live.bilibili.com/live_user/v1/Master/info?uid=%s' % uid
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
return jo['data']["info"]["uname"]
def homeVideoContent(self,):
return self.get_hot(1)
result = {}
videos = self.get_hot(1)['list'][0:3]
result['list'] = videos
return result
def get_recommend(self, pg):
result = {}
url = 'https://api.live.bilibili.com/xlive/web-interface/v1/webMain/getList?platform=web&page=%s' % pg
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
......@@ -109,20 +107,20 @@ class Spider(Spider):
videos.append({
"vod_id": aid + '&live',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['limit'] = 2
result['total'] = 999999
return result
def get_hot(self, pg):
result = {}
url = 'https://api.live.bilibili.com/room/v1/room/get_user_recommend?page=%s&page_size=20' % pg
rsp = self.fetch(url, cookies=self.cookies)
url = 'https://api.live.bilibili.com/room/v1/room/get_user_recommend?page=%s&page_size=10' % pg
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
......@@ -136,13 +134,13 @@ class Spider(Spider):
videos.append({
"vod_id": aid + '&live',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['limit'] = 2
result['total'] = 999999
return result
......@@ -150,7 +148,7 @@ class Spider(Spider):
result = {}
url = 'https://api.live.bilibili.com/xlive/web-interface/v1/second/getList?platform=web&parent_area_id=%s&area_id=%s&sort_type=online&page=%s' % (
parent_area_id, area_id, pg)
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
......@@ -164,20 +162,20 @@ class Spider(Spider):
videos.append({
"vod_id": aid + '&live',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['limit'] = 2
result['total'] = 999999
return result
def get_fav(self, pg):
result = {}
url = 'https://api.live.bilibili.com/xlive/web-ucenter/v1/xfetter/GetWebList?page=%s&page_size=10' % pg
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
videos = []
......@@ -190,20 +188,20 @@ class Spider(Spider):
videos.append({
"vod_id": aid + '&live',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
result['page'] = pg
result['pagecount'] = 9999
result['limit'] = 90
result['limit'] = 2
result['total'] = 999999
return result
def get_history(self):
result = {}
url = 'https://api.bilibili.com/x/web-interface/history/cursor?ps=10&type=live'
rsp = self.fetch(url, cookies=self.cookies)
url = 'https://api.bilibili.com/x/web-interface/history/cursor?ps=21&type=live'
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
content = rsp.text
jo = json.loads(content)
if jo['code'] == 0:
......@@ -217,7 +215,7 @@ class Spider(Spider):
videos.append({
"vod_id": aid + '&live',
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
result['list'] = videos
......@@ -239,6 +237,8 @@ class Spider(Spider):
return self.get_live(pg=pg, parent_area_id=parent_area_id, area_id=area_id)
if tid == "推荐":
return self.get_recommend(pg)
if tid == "热门":
return self.get_hot(pg)
if tid == "我的关注":
return self.get_fav(pg)
if tid == "观看记录":
......@@ -250,8 +250,8 @@ class Spider(Spider):
def detailContent(self, array):
arrays = array[0].split("&")
aid = arrays[0]
url = "https://api.live.bilibili.com/room/v1/Room/get_info?room_id=%s" % aid
room_id = arrays[0]
url = "https://api.live.bilibili.com/room/v1/Room/get_info?room_id=%s" % room_id
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
jRoot = json.loads(rsp.text)
if jRoot.get('code') == 0:
......@@ -259,27 +259,34 @@ class Spider(Spider):
title = jo['title'].replace("<em class=\"keyword\">", "").replace("</em>", "")
pic = jo.get("user_cover")
desc = jo.get('description')
dire = self.get_live_userInfo(jo["uid"])
typeName = jo.get("area_name")
live_status = str(jo.get('live_status')).replace("0", "未开播").replace("1", "").replace("2", "")
live_time = str(jo.get('live_time'))[5: 16]
uid = str(jo["uid"])
info = {}
self.bilibili.get_up_info(uid, info)
dire = self.get_live_userInfo(uid)
self.bilibili.up_mid = uid
typeName = jo['parent_area_name'] + '--' + jo['area_name']
if jo['live_status'] == 0:
live_status = "未开播"
else:
live_status = "开播时间:" + jo['live_time']
remark = '在线人数:' + str(jo['online']).strip()
vod = {
"vod_id": aid,
"vod_id": room_id,
"vod_name": title,
"vod_pic": pic,
"type_name": typeName,
"vod_year": "",
"vod_area": "bilidanmu",
"vod_remarks": remark,
"vod_actor": "主播:" + dire + "  " + "房间号:" + aid + "  " + live_status,
"vod_director": "关注:" + self.zh(jo.get('attention')) + "  " + "开播时间:" + live_time,
"vod_actor": "关注:" + self.zh(jo.get('attention')) + " 房间号:" + room_id + " UID:" + uid,
"vod_director": dire + '  ' + info['following'] + "  " + live_status,
"vod_content": desc,
}
playUrl = 'flv线路原画$platform=web&quality=4_' + aid + '#flv线路高清$platform=web&quality=3_' + aid + '#h5线路原画$platform=h5&quality=4_' + aid + '#h5线路高清$platform=h5&quality=3_' + aid
playUrl = 'flv线路原画$platform=web&quality=4_' + room_id + '#flv线路高清$platform=web&quality=3_' + room_id + '#h5线路原画$platform=h5&quality=4_' + room_id + '#h5线路高清$platform=h5&quality=3_' + room_id
vod['vod_play_from'] = 'B站'
vod['vod_play_url'] = playUrl
vod['vod_play_from'] = 'B站$$$关注/取关'
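# The second play source ('关注/取关') is an action, not a stream: its id ends in '_follow' and is handled by do_follow in playerContent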
secondP = info['followActName'] + '$' + str(uid) + '_' + str(info['followAct']) + '_follow#'
vod['vod_play_url'] = playUrl + '$$$' + secondP
result = {
'list': [
vod
......@@ -309,7 +316,7 @@ class Spider(Spider):
videos1.append({
"vod_id": aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
videos2 = []
......@@ -323,7 +330,7 @@ class Spider(Spider):
videos2.append({
"vod_id": aid,
"vod_name": title,
"vod_pic": img,
"vod_pic": img + '@672w_378h_1c.jpg',
"vod_remarks": remark
})
videos = videos1 + videos2
......@@ -335,13 +342,14 @@ class Spider(Spider):
def playerContent(self, flag, id, vipFlags):
result = {}
ids = id.split("_")
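# id is either '<query params>_<room_id>' for a live stream or '<uid>_<action>_follow' for a follow/unfollow request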
if 'follow' in ids:
self.bilibili.do_follow(ids[0], ids[1])
return result
url = 'https://api.live.bilibili.com/room/v1/Room/playUrl?cid=%s&%s' % (ids[1], ids[0])
# raise Exception(url)
if len(self.cookies) <= 0:
self.getCookie()
rsp = self.fetch(url, cookies=self.cookies)
rsp = self.fetch(url, headers=self.header, cookies=self.cookies)
jRoot = json.loads(rsp.text)
if jRoot['code'] == 0:
......
......@@ -67,32 +67,40 @@ class Spider(Spider): # 元类 默认的元类 type
}
return result
def getCookie(self,url,header):
def getCookie(self,url):
header = {
"Referer": 'https://czspp.com/',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}
session = requests.session()
rsp = session.get(url)
nurl = 'https://czspp.com' + self.regStr(rsp.text, 'src=\"(.*?)\"')
nrsp = session.get(nurl, headers=header)
key = self.regStr(nrsp.text, 'var key=\"(.*?)\"')
avalue = self.regStr(nrsp.text, 'value=\"(.*?)\"')
c = ''
for i in range(0, len(avalue)):
a = avalue[i]
b = ord(a)
c = c + str(b)
value = hashlib.md5(c.encode()).hexdigest()
session.get('https://czspp.com/a20be899_96a6_40b2_88ba_32f1f75f1552_yanzheng_ip.php?type=96c4e20a0e951f471d32dae103e83881&key={0}&value={1}'.format(key,value), headers=header)
return session
if '人机验证' in rsp.text:
append = self.regStr(rsp.text, 'src=\"(/.*?)\"')
nurl = 'https://czspp.com' + append
nrsp = session.get(nurl, headers=header)
key = self.regStr(nrsp.text, 'var key=\"(.*?)\"')
avalue = self.regStr(nrsp.text, 'value=\"(.*?)\"')
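# Challenge answer: md5 hex digest of the decimal character codes of `value` concatenated into one string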
c = ''
for i in range(0, len(avalue)):
a = avalue[i]
b = ord(a)
c = c + str(b)
value = hashlib.md5(c.encode()).hexdigest()
session.get('https://czspp.com/a20be899_96a6_40b2_88ba_32f1f75f1552_yanzheng_ip.php?type=96c4e20a0e951f471d32dae103e83881&key={0}&value={1}'.format(key, value), headers=header)
return session.get(url, headers=header)
elif '检测中' in rsp.text:
append = self.regStr(rsp.text, 'href =\"(/.*?)\"')
session.get('https://czspp.com{0}'.format(append), headers=header)
return session.get(url, headers=header)
else:
return rsp
def categoryContent(self, tid, pg, filter, extend):
result = {}
url = 'https://czspp.com/{0}/page/{1}'.format(tid,pg)
header = {
"Connection": "keep-alive",
"Referer": url,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}
session = self.getCookie(url,header)
rsp = session.get(url, headers=header)
rsp = self.getCookie(url)
root = self.html(self.cleanText(rsp.text))
aList = root.xpath("//div[contains(@class,'bt_img mi_ne_kd mrb')]/ul/li")
videos = []
......@@ -121,13 +129,7 @@ class Spider(Spider): # 元类 默认的元类 type
def detailContent(self, array):
tid = array[0]
url = 'https://czspp.com/movie/{0}.html'.format(tid)
header = {
"Connection": "keep-alive",
"Referer": url,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}
session = self.getCookie(url, header)
rsp = session.get(url, headers=header)
rsp = self.getCookie(url)
root = self.html(self.cleanText(rsp.text))
node = root.xpath("//div[@class='dyxingq']")[0]
pic = node.xpath(".//div[@class='dyimg fl']/img/@src")[0]
......@@ -198,13 +200,7 @@ class Spider(Spider): # 元类 默认的元类 type
def searchContent(self, key, quick):
url = 'https://czspp.com/xssearch?q={0}'.format(urllib.parse.quote(key))
header = {
"Connection": "keep-alive",
"Referer": url,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}
session = self.getCookie(url, header)
rsp = session.get(url, headers=header)
rsp = self.getCookie(url)
root = self.html(self.cleanText(rsp.text))
vodList = root.xpath("//div[contains(@class,'mi_ne_kd')]/ul/li/a")
videos = []
......@@ -247,14 +243,8 @@ class Spider(Spider): # 元类 默认的元类 type
def playerContent(self, flag, id, vipFlags):
result = {}
url = 'https://czspp.com/v_play/{0}.html'.format(id)
header = {
"Connection": "keep-alive",
"Referer": url,
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36"
}
session = self.getCookie(url, header)
rsp = self.getCookie(url)
pat = '\\"([^\\"]+)\\";var [\\d\\w]+=function dncry.*md5.enc.Utf8.parse\\(\\"([\\d\\w]+)\\".*md5.enc.Utf8.parse\\(([\\d]+)\\)'
rsp = session.get(url, headers=header)
html = rsp.text
content = self.regStr(html, pat)
if content == '':
......