Commit ce94403e authored by hjdhnx

Upgraded

Parent a460d935
@@ -11,7 +11,7 @@
var rule = {
title:'JRKAN直播',
-host:'http://www.jrskan.com',
+host:'http://jrkankan.com',
// JRKAN backup domains: jrkan365.com / jrkankan.com / jryyds.com / jrsbxj.com
// JRKAN URL announcement site: qiumi1314.com
url:'/fyclass',
@@ -34,6 +34,6 @@ var rule = {
// 一级:'.loc_match .d-touch;li&&Text;img&&src;.lab_time&&Text;a:eq(0)&&href',//play.sportsteam365.com
一级:'.loc_match:eq(2) ul;li:gt(1):lt(4)&&Text;img&&src;li:lt(2)&&Text;a:eq(1)&&href',//play.sportsteam333.com
// 一级:'.loc_match .d-touch;li&&Text;img&&src;.lab_time&&Text;a:eq(2)&&href',//play.sportsteam666.com
-二级:{title:'.sub_list li:lt(2)&&Text;.sub_list li:eq(0)&&Text',img:'img&&src',desc:';;;.lab_team_home&&Text;.lab_team_away&&Text',content:'.sub_list ul&&Text',tabs:'',tab_text:'',lists:'.sub_channel a',list_text:'a&&data-group',list_url:'a&&data-play'},
+二级:{title:'.sub_list li:lt(2)&&Text;.sub_list li:eq(0)&&Text',img:'img&&src',desc:';;;.lab_team_home&&Text;.lab_team_away&&Text',content:'.sub_list ul&&Text',tabs:'',tab_text:'',lists:'.sub_channel a',list_text:'a&&Text',list_url:'a&&data-play'},
搜索:'',
}
\ No newline at end of file
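For context, the only change in the hunk above is `list_text`: the channel label now comes from each link's visible text rather than its `data-group` attribute. A rough sketch of what that extraction amounts to, assuming the usual meaning of the `lists`/`list_text`/`list_url` trio and using plain regex over a made-up `.sub_channel` snippet instead of drpy's own parser:

```python
import re

# Made-up channel markup; the real page may differ.
html = '''
<div class="sub_channel">
  <a data-group="线路1" data-play="https://example.com/live1.m3u8">高清</a>
  <a data-group="线路2" data-play="https://example.com/live2.m3u8">蓝光</a>
</div>
'''

# lists:'.sub_channel a'  -> one playable entry per <a>
# list_text:'a&&Text'     -> label = visible text (previously the data-group attribute)
# list_url:'a&&data-play' -> playable url = data-play attribute
for group, play, text in re.findall(r'<a data-group="(.*?)" data-play="(.*?)">(.*?)</a>', html):
    print(f'{text} -> {play}  (old label: {group})')
```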
-3.9.15beta1
\ No newline at end of file
+3.9.15beta2
\ No newline at end of file
@@ -17,7 +17,7 @@ var rule = {
limit:6,
double:false,
//推荐:'*',
一级:"js:pdfa=jsp.pdfa;pdfa=jsp.pdfa;pd=jsp.pd;let d=[];log(input);let html=request(input);let list=pdfa(html,'.text_list li');let burl=input.match(/(.*)\\/.*?.html/)[1];log(burl);MY_URL=burl;print(list);list.forEach(function(it){d.push({title:pdfh(it,'a&&Text'),desc:pdfh(it,'.date&&Text'),url:pd(it,'a&&href')})});setResult(d)",
一级:'js:pdfa=jsp.pdfa;pdfa=jsp.pdfa;pd=jsp.pd;let d=[];log(input);let html=request(input);let list=pdfa(html,".text_list li");let burl=input.match(/(.*)\\/.*?.html/)[1];log(burl);MY_URL=burl;print(list);list.forEach(function(it){let title=pdfh(it,"a&&Text");d.push({title:title,desc:pdfh(it,".date&&Text"),url:pd(it,"a&&href")+"@@"+title})});setResult(d);',
// 一级:'.text_list li;a&&Text;;.date&&Text;a&&href',
二级:'*',
搜索:'',
......
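The rewritten 一级 script above appends `"@@"+title` to every detail link, which lets a later step recover the list title from the URL itself. A minimal sketch of that round trip, with a made-up URL and a hypothetical `split_detail_url` helper standing in for drpy's real plumbing:

```python
# Made-up list item, shaped like the one the rewritten 一级 step pushes via d.push(...).
item = {
    'title': '第08周 课堂录像',
    'desc': '2022-10-20',
    'url': 'https://example.com/video/12345.html' + '@@' + '第08周 课堂录像',
}

def split_detail_url(url):
    """Hypothetical helper: recover (real_url, title) from an '@@'-suffixed link."""
    real_url, _, title = url.partition('@@')
    return real_url, title

real_url, title = split_detail_url(item['url'])
print(real_url)  # https://example.com/video/12345.html
print(title)     # 第08周 课堂录像
```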
// 道长's drpy repository https://gitcode.net/qq_32394351/dr_py
// 道长's drpy Android local setup guide https://gitcode.net/qq_32394351/dr_py/-/blob/master/%E5%AE%89%E5%8D%93%E6%9C%AC%E5%9C%B0%E6%90%AD%E5%BB%BA%E8%AF%B4%E6%98%8E.md
// 道长's drpy rule writing: template rule documentation https://gitcode.net/supertlo/dr_py#%E6%A8%A1%E6%9D%BF%E8%A7%84%E5%88%99%E8%AF%B4%E6%98%8E
// 道长's drpy rule writing: boilerplate template https://gitcode.net/qq_32394351/dr_py/-/raw/master/js/%E6%A8%A1%E6%9D%BF.js
// 道长's drpy rule writing: video tutorial http://101.34.67.237:5244/%E6%95%99%E8%82%B2/drpy
// 道长's drpy rule writing: video tutorial (m3u8 segments) https://freedrpy.run.goorm.io/txt/jc/playlist.m3u8
// 海阔 (Haikuo) downloads https://haikuo.lanzoui.com/u/GoldRiver
// Pluto Player official TG https://t.me/PlutoPlayer
// Pluto Player official TG channel https://t.me/PlutoPlayerChannel
var rule = {
title:'爱车MV',
host:'https://www.ichemv.com',
homeUrl:'/mv/',
url:'/mv/fyclass_fypage.html',
searchUrl:'/search.php?key=**',
searchable:2,
quickSearch:0,
class_parse:'.m_bor li;a&&Text;a&&href;/mv/(\\d+)_1.html',
headers:{
'User-Agent':'MOBILE_UA'
},
timeout:5000,
play_parse:true,
lazy:'',
limit:6,
double:false,
推荐:'*',
一级:'.mv_list li;.mv_name&&Text;.pic img&&src;.mv_p a:eq(0)&&Text;a&&href',
二级:'*',
搜索:'.play_xg li;.name&&Text;*;*;*',
}
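The 一级 line above is a positional selector rule; as far as the template documentation linked in the header describes it, the semicolon-separated fields are list, title, image, subtitle and link. A small sketch of splitting such a rule into named parts (the field names are my own labels, not drpy API):

```python
# Split the 爱车MV 一级 rule into its positional parts.
rule = '.mv_list li;.mv_name&&Text;.pic img&&src;.mv_p a:eq(0)&&Text;a&&href'

# Assumed field order: list selector, title, image, subtitle/desc, link.
fields = ['list', 'title', 'img', 'desc', 'url']
for name, selector in zip(fields, rule.split(';')):
    print(f'{name:>5}: {selector}')
```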
js:
// Expanded form of the 一级 script from the diff above: builds the list items for this source.
pdfa = jsp.pdfa;
pd = jsp.pd;
let d = [];
log(input);
let html = request(input);
// each .text_list li is one list item
let list = pdfa(html, '.text_list li');
// strip the trailing /xxx.html segment to get the base url used for joining relative links
let burl = input.match(/(.*)\/.*?.html/)[1];
log(burl);
MY_URL = burl;
print(list);
list.forEach(function(it) {
    let title = pdfh(it, 'a&&Text');
    d.push({
        title: title,
        desc: pdfh(it, '.date&&Text'),
        // carry the title along after '@@' so a later step can recover it from the url
        url: pd(it, 'a&&href') + '@@' + title
    });
});
setResult(d);
\ No newline at end of file
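One detail worth noting in the script above: `MY_URL` is reset to everything before the final `/xxx.html` segment so that relative hrefs join against the right base. A quick illustration of that regex with a made-up list-page URL:

```python
import re

# Made-up list-page URL; only the shape matters here.
input_url = 'https://example.com/shipin/list_2.html'

# Same idea as the JS: input.match(/(.*)\/.*?.html/)[1]
base_url = re.match(r'(.*)/.*?\.html', input_url).group(1)
print(base_url)  # https://example.com/shipin
```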
@@ -53,6 +53,7 @@
- [X] Fixed JRKAN直播: smart URL joining in the detail (二级) step produced broken links
- [X] Rewrote the 学生录像 list (一级) step in js (urljoin itself is fine; the site's URL structure is the problem, so this was the only option; js1 only)
- [X] Improved filtering for the 荐片 source
+- [X] 3.9.15beta2 Tweaked the JRKAN直播 and 学生直播 detail (二级) steps; added 爱车MV
###### 2022/10/21
- [X] Sources gained a new play_json property, along with matching sources such as 哔哩直播 and jrs看直播
- [X] Fixed the 俊版 build being unable to search (the app shell still needs to fully solve the console.log issue; it cannot print large payloads)
......
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : update.py
# Author: DaShenHan&道长-----先苦后甜,任凭晚风拂柳颜------
# Date : 2022/9/6
import re
from time import time as getTime
import sys
import requests
import os
import zipfile
import shutil # https://blog.csdn.net/weixin_33130113/article/details/112336581
from utils.log import logger
from utils.web import get_interval
from utils.htmlParser import jsoup
headers = {
    'Referer': 'https://gitcode.net/',
    # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
    'X-T5-Auth': 'ZjQxNDIh',
    'User-Agent': 'baiduboxapp',
}
proxies={"https":"http://cloudnproxy.baidu.com:443","http":"http://cloudnproxy.baidu.com:443"}
def getHotSuggest(url='http://4g.v.sogou.com/hotsugg'):
    jsp = jsoup(url)
    pdfh = jsp.pdfh
    pdfa = jsp.pdfa
    pd = jsp.pd
    try:
        r = requests.get(url,headers=headers,timeout=2)
        html = r.text
        data = pdfa(html,'ul.hot-list&&li')
        suggs = [{'title':pdfh(dt,'a&&Text'),'url':pd(dt,'a&&href')} for dt in data]
        # print(html)
        # print(suggs)
        return suggs
    except Exception:
        return []
def getLocalVer():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    version_path = os.path.join(base_path, f'js/version.txt')
    if not os.path.exists(version_path):
        with open(version_path,mode='w+',encoding='utf-8') as f:
            version = '1.0.0'
            f.write(version)
    else:
        with open(version_path,encoding='utf-8') as f:
            version = f.read()
    return version
def getOnlineVer():
    ver = '1.0.1'
    msg = ''
    try:
        r = requests.get('https://gitcode.net/qq_32394351/dr_py/-/raw/master/js/version.txt',timeout=(2,2),proxies=proxies)
        ver = r.text
    except Exception as e:
        # print(f'{e}')
        msg = f'{e}'
        logger.info(msg)
    return ver,msg
def checkUpdate():
    local_ver = getLocalVer()
    online_ver,msg = getOnlineVer()
    if local_ver != online_ver:
        return True
    return False
def del_file(filepath):
    """
    Delete every file directly under the given directory (subdirectories are left in place).
    :param filepath: directory path
    :return:
    """
    del_list = os.listdir(filepath)
    for f in del_list:
        file_path = os.path.join(filepath, f)
        if os.path.isfile(file_path):
            os.remove(file_path)
def copytree(src, dst, ignore=None):
    if ignore is None:
        ignore = []
    dirs = os.listdir(src)  # everything in the source directory, files and subdirectories alike
    logger.info(f'{dirs}')
    for dir in dirs:  # walk each entry
        from_dir = os.path.join(src, dir)  # source path to copy from
        to_dir = os.path.join(dst, dir)  # destination path to copy to
        if os.path.isdir(from_dir):  # it is a directory
            if not os.path.exists(to_dir):  # create the destination directory if it does not exist
                os.mkdir(to_dir)
            copytree(from_dir, to_dir,ignore)  # recurse into the subdirectory and copy its files
        elif os.path.isfile(from_dir):  # plain file: copy it directly
            if ignore:
                regxp = '|'.join(ignore).replace('\\','/')  # build the exclusion regex
                to_dir_str = str(to_dir).replace('\\','/')
                if not re.search(rf'{regxp}', to_dir_str, re.M):
                    shutil.copy(from_dir, to_dir)  # copy the file
            else:
                shutil.copy(from_dir, to_dir)  # copy the file
def force_copy_files(from_path, to_path, exclude_files=None):
    # print(f'开始拷贝文件{from_path}=>{to_path}')
    if exclude_files is None:
        exclude_files = []
    logger.info(f'开始拷贝文件{from_path}=>{to_path}')
    if not os.path.exists(to_path):
        os.makedirs(to_path,exist_ok=True)
    try:
        if sys.version_info < (3, 8):
            copytree(from_path, to_path,exclude_files)
        else:
            if len(exclude_files) > 0:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True,ignore=shutil.ignore_patterns(*exclude_files))
            else:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True)
    except Exception as e:
        logger.info(f'拷贝文件{from_path}=>{to_path}发生错误:{e}')
def copy_to_update():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, f'tmp')
    dr_path = os.path.join(tmp_path, f'dr_py-master')
    if not os.path.exists(dr_path):
        # print(f'升级失败,找不到目录{dr_path}')
        logger.info(f'升级失败,找不到目录{dr_path}')
        return False
    # never overwrite super or base
    paths = ['js','models','controllers','libs','static','templates','utils','txt','jiexi','py','whl']
    exclude_files = ['txt/pycms0.json','txt/pycms1.json','txt/pycms2.json','base/rules.db','utils/update.py']
    for path in paths:
        force_copy_files(os.path.join(dr_path, path),os.path.join(base_path, path),exclude_files)
    try:
        shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py'))  # copy app.py into place
    except Exception as e:
        logger.info(f'更新app.py发生错误:{e}')
    logger.info(f'升级程序执行完毕,全部文件已拷贝覆盖')
    return True
def download_new_version():
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, f'tmp')
    os.makedirs(tmp_path,exist_ok=True)
    url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
    # tmp_files = os.listdir(tmp_path)
    # for tp in tmp_files:
    #     print(f'清除缓存文件:{tp}')
    #     os.remove(os.path.join(tmp_path, tp))
    del_file(tmp_path)
    msg = ''
    try:
        # print(f'开始下载:{url}')
        logger.info(f'开始下载:{url}')
        r = requests.get(url,headers=headers,timeout=(20,20),proxies=proxies)
        rb = r.content
        download_path = os.path.join(tmp_path, 'dr_py.zip')
        with open(download_path,mode='wb+') as f:
            f.write(rb)
        # print(f'开始解压文件:{download_path}')
        logger.info(f'开始解压文件:{download_path}')
        f = zipfile.ZipFile(download_path, 'r')  # open the downloaded archive
        for file in f.namelist():
            f.extract(file, tmp_path)  # extraction destination
        f.close()
        # print('解压完毕,开始升级')
        logger.info('解压完毕,开始升级')
        ret = copy_to_update()
        logger.info(f'升级完毕,结果为:{ret}')
        # print(f'升级完毕,结果为:{ret}')
        msg = '升级成功'
    except Exception as e:
        msg = f'升级失败:{e}'
    logger.info(f'系统升级共计耗时:{get_interval(t1)}毫秒')
    return msg
def download_lives(live_url:str):
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    live_path = os.path.join(base_path, f'base/直播.txt')
    logger.info(f'尝试同步{live_url}远程内容到{live_path}')
    try:
        r = requests.get(live_url,headers=headers,timeout=3)
        auto_encoding = r.apparent_encoding
        if auto_encoding.lower() in ['utf-8','gbk','gb2312','gb18030']:
            r.encoding = auto_encoding
        # print(r.encoding)
        html = r.text
        # print(len(html))
        if re.search(r'cctv|\.m3u8',html,re.M|re.I) and len(html) > 1000:
            logger.info(f'直播源同步成功,耗时{get_interval(t1)}毫秒')
            with open(live_path,mode='w+',encoding='utf-8') as f:
                f.write(html)
            return True
        else:
            logger.info(f'直播源同步失败,远程文件看起来不是直播源。耗时{get_interval(t1)}毫秒')
            return False
    except Exception as e:
        logger.info(f'直播源同步失败,耗时{get_interval(t1)}毫秒\n{e}')
        return False
\ No newline at end of file
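A minimal driver sketch for the helpers in this file, assuming it is run from a dr_py checkout where `utils.update` is importable; the live-source URL below is a placeholder, not a real endpoint:

```python
# Hypothetical driver; not part of update.py itself.
from utils.update import checkUpdate, download_new_version, download_lives, getHotSuggest

if __name__ == '__main__':
    print(getHotSuggest())          # hot-search suggestions, [] on any error

    if checkUpdate():               # local js/version.txt differs from the remote one
        print(download_new_version())

    ok = download_lives('https://example.com/lives.txt')  # placeholder URL
    print('lives synced:', ok)
```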