@littleya
Last active February 20, 2024 00:39
u2@share
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: ayasakinagi
Email: xyz.wang@outlook.com
Environment: Python 3
Dependencies: deluge_client
Usage: edit the host, port, username, password, limitSize and dataPath variables
'''
import os
import time
import logging
from deluge_client import DelugeRPCClient
# Deluge host address
host = '127.0.0.1'
# Deluge daemon port
port = 58846
# Deluge daemon username, found in ~/.config/deluge/auth
username = ''
# Deluge daemon password (not the WebUI password)
password = ''
# size limit for the Deluge data directory, default 1 TB
limitSize = 1000000000000
# Deluge data directory
dataPath = '/media/sdad1/littleya/private/deluge/data/'
logPath = '/tmp/delugeDelete.log'
# init log
handler1 = logging.FileHandler(logPath)
handler2 = logging.StreamHandler()
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(message)s'
formatter = logging.Formatter(fmt)
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger = logging.getLogger('log')
logger.addHandler(handler1)
logger.addHandler(handler2)
logger.setLevel(logging.DEBUG)
def main():
    client = DelugeRPCClient(host, port, username, password)
    client.connect()
    # get each torrent's status
    resList = []
    if client.connected:
        torrentList = client.call('core.get_session_state')
        for i in torrentList:
            torrentStatus = client.call('core.get_torrent_status', i, ['is_finished', 'total_size', 'total_peers', 'peers'])
            if not torrentStatus[b'is_finished']:
                continue
            elif torrentStatus[b'total_peers'] == 0 or len(torrentStatus[b'peers']) == 0:
                total_speed = 0
            else:
                total_speed = 0
                for j in torrentStatus[b'peers']:
                    total_speed += j[b'up_speed']
            resList.append([bytes.decode(i), total_speed, torrentStatus[b'is_finished'], torrentStatus[b'total_size']])
    else:
        logger.error('Connect error')
    # get the download folder's size
    # dataSize = client.call('core.get_path_size', dataPath)
    dataSize = int(os.popen("du -sB 1KB " + dataPath + " | awk '{print $1}'").readlines()[0]) * 1000
    # sort by upload speed (ties broken by size)
    resList = sorted(resList, key=lambda x: x[3])
    resList = sorted(resList, key=lambda x: x[1])
    logger.info('Finished torrents: ' + str(resList))
    logger.info("All torrents' size: " + str(dataSize) + '(' + str(round(dataSize/1000/1000/1000, 3)) + 'GB)')
    # delete the torrent with the lowest upload speed
    if dataSize > limitSize:
        selectedTorrent = resList[0]
        logger.info('Deleting ' + str(selectedTorrent[0]) + ', size is ' + str(selectedTorrent[3]) + '(' + str(round(selectedTorrent[3]/1000/1000/1000, 3)) + 'GB)')
        client.call('core.remove_torrent', selectedTorrent[0], True)
        torrentList = client.call('core.get_session_state')
        if selectedTorrent[0].encode() not in torrentList:
            logger.info('Delete success')
        else:
            logger.error('Delete error')


if __name__ == '__main__':
    while True:
        try:
            main()
        except Exception as e:
            logger.error(e)
        time.sleep(150)
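
Before leaving the deletion loop running unattended, it is worth confirming that the RPC credentials work at all. A minimal sketch, reusing the host, port, username and password variables and the same deluge_client calls as the script above:

# Sketch: one-off connectivity check for the Deluge daemon.
from deluge_client import DelugeRPCClient

client = DelugeRPCClient(host, port, username, password)
client.connect()
if client.connected:
    # core.get_session_state returns the info-hashes of all torrents in the session
    print('Connected, torrents in session:', len(client.call('core.get_session_state')))
else:
    print('Could not connect; check ~/.config/deluge/auth and the daemon port')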
#!/bin/bash
procName="deluged"

main()
{
    num=`ps -ef | grep -v grep | grep $procName | wc -l`
    if [ $num -eq 0 ]
    then
        deluged
        echo `date "+%Y-%m-%d %H:%M:%S"` - deluged restart
    else
        echo `date "+%Y-%m-%d %H:%M:%S"` - deluged running
    fi
}

while :
do
    main
    sleep 10
done
"""A simple python script to get the seed torrent info
Python version:
3.5 or above
Requirements:
lxml
Usage:
1. save the webpage as file:
https://{u2_domain}/getusertorrentlistajax.php?userid={id}&type=seeding
2. run the script:
python3 ./get_seed_torrent_info.py ./saved_webpage_file
OutPut:
{torrent type}\t{torrent name}\t{torrent size(in GiB)}\t{torrent link}
Note:
The script only tested in u2
"""
import sys

from lxml import etree


def get_html_file_content(file):
    with open(file, 'r') as f:
        return ''.join(f.readlines())


def parse_html(content):
    ehtml = etree.HTML(content)
    for top_tr in ehtml.xpath('/html/body/table/tbody/tr')[1:]:
        classification = ' '.join(top_tr.xpath('./td')[0].xpath('./a/text()'))
        name = ''.join(top_tr.xpath('./td')[1].xpath(
            './table/tbody/tr')[0].xpath('./td/a/b/text()'))
        name = name.replace('\n', '')
        link = top_tr.xpath('./td')[1].xpath('./table/tbody/tr')[0].xpath(
            './td/a/@href')[0]
        size = top_tr.xpath('./td')[2].xpath('./text()')
        if 'MiB' in size:
            size = float(size[0]) / 1024
        elif 'GiB' in size:
            size = float(size[0])
        elif 'TiB' in size:
            size = float(size[0]) * 1024
        yield f'{classification}\t{name}\t{size}\t{link}'


def main():
    for line in parse_html(get_html_file_content(sys.argv[1])):
        print(line)


if __name__ == '__main__':
    main()
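
Saving the page by hand can be skipped if you have a valid login cookie. A sketch that fetches the same ajax page with requests and feeds it to parse_html; the u2.dmhy.org domain and cookie header mirror the other scripts in this gist, and uid/cookie are placeholders you must fill in:

# Sketch: fetch the seeding list directly instead of saving the page manually.
import requests

uid = ''
cookie = ''
url = f'https://u2.dmhy.org/getusertorrentlistajax.php?userid={uid}&type=seeding'
page = requests.get(url, headers={'cookie': cookie}, timeout=10).text
for line in parse_html(page):
    print(line)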
'''
Match .torrent files against the data directories they describe.
Dependencies:
    pip install progress torrentool
'''
import argparse
import copy
import gzip
import logging
import os
import pickle
import sys
from progress.counter import Counter
from torrentool.api import Torrent
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger('torrent-parse')


class Tree(object):

    __slots__ = ['name', 'children', 'children_mapping']

    def __init__(self, name, children=None):
        self.name = name
        # avoid a shared mutable default for children
        self.children = children if children is not None else []
        self.children_mapping = {}

    def get_children_name(self):
        return self.children_mapping.keys()

    def build_children_mapping(self):
        for child in self.children:
            self.children_mapping[child.name] = child

    def get_child_by_name(self, name):
        return self.children_mapping.get(name, None)


class DirTree(Tree):

    __slots__ = ['absname']

    def __init__(self, dir_name, children=None):
        self.absname = os.path.abspath(dir_name)
        super(DirTree, self).__init__(os.path.basename(self.absname), children)

    @staticmethod
    def build_from_dir_name(dir_name):
        dir_tree = DirTree(dir_name)
        file_list = {f: os.path.isdir(os.path.join(dir_tree.absname, f))
                     for f in os.listdir(dir_tree.absname)}
        children = []
        for f, is_dir in file_list.items():
            if is_dir:
                children.append(DirTree.build_from_dir_name(
                    os.path.join(dir_tree.absname, f)))
            else:
                children.append(
                    DirTree(os.path.join(dir_tree.absname, f)))
        dir_tree.children = children
        dir_tree.build_children_mapping()
        return dir_tree
class TorrentTree(Tree):

    @staticmethod
    def build_from_torrent_files(torrent_file):
        torrent = Torrent.from_file(torrent_file)
        torrent_tree_root = TorrentTree(torrent.name)

        def build_from_relative_name(torrent_tree, relative_name):
            if not relative_name:
                return
            fname = relative_name.split('/')[0]
            relative_name_split = '/'.join(relative_name.split('/')[1:])
            if fname == torrent_tree.name:
                build_from_relative_name(torrent_tree, relative_name_split)
            elif fname in torrent_tree.get_children_name():
                build_from_relative_name(
                    torrent_tree.get_child_by_name(fname),
                    relative_name_split)
            else:
                child_torrent_tree = TorrentTree(fname)
                children = copy.deepcopy(torrent_tree.children)
                children.append(child_torrent_tree)
                torrent_tree.children = children
                torrent_tree.build_children_mapping()
                build_from_relative_name(
                    child_torrent_tree, relative_name_split)

        for relative_name in [f.name for f in torrent.files]:
            build_from_relative_name(torrent_tree_root, relative_name)
        return torrent_tree_root
def is_sub_tree(dir_tree, torrent_tree):
    for ttree in torrent_tree.children:
        if not ttree.children:
            pass
        elif ttree.name in dir_tree.get_children_name():
            dtree = dir_tree.get_child_by_name(ttree.name)
            # propagate a mismatch found deeper in the tree
            if not is_sub_tree(dtree, ttree):
                return False
        else:
            return False
    return True


def find_path(dir_tree, torrent_tree):
    queue = []
    queue.append(dir_tree)
    while queue:
        dtree = queue.pop(0)
        if torrent_tree.name == dtree.name:
            if is_sub_tree(dtree, torrent_tree):
                return dtree.absname
        else:
            queue.extend(dtree.children)
def get_torrent_files(path):
    f_list = []
    for root, subfolders, files in os.walk(path):
        for f in files:
            if f.endswith('torrent'):
                f_list.append(os.path.join(os.path.abspath(root), f))
    return f_list


def parse_single_dir(torrent, data):
    counter = Counter('')
    success_count = 0
    result = []
    failed = []
    dir_tree = DirTree.build_from_dir_name(os.path.abspath(data))
    for f in get_torrent_files(torrent):
        counter.message = (f'Processing {f}; Find count: {len(result)}; '
                           f'Success count: {success_count}; Failed '
                           f'load torrents count: {len(failed)}; ')
        try:
            torrent_tree = TorrentTree.build_from_torrent_files(f)
            path = find_path(dir_tree, torrent_tree)
            if path:
                result.append((f, path))
                LOG.info(f'Found a pair: {f}, {path}')
                success_count += 1
        except Exception as e:
            LOG.error(f'Failed to parse torrent: {f}, error: {e}')
            failed.append(f)
        counter.update()
    result_format = '\n'.join([' '.join(x) for x in result])
    failed_format = '\n'.join(failed)
    LOG.info(f'\nResult:\n{result_format}\n\nFailed:\n{failed_format}')
def parse_multiple_dir(torrent, data_list, cache):
    torrent_files = get_torrent_files(torrent)
    torrent_tree_mapping = {}
    total_result = []
    total_failed = []
    counter = Counter('')
    # Load cache file
    LOG.info(f'Loading cache file {cache}')
    if cache and os.path.exists(cache):
        for f in [os.path.join(cache, f) for f in os.listdir(cache)]:
            with gzip.open(f, 'rb') as fin:
                mapping = pickle.load(fin)
                torrent_tree_mapping.update(mapping)
        LOG.info(f'Loaded {len(torrent_tree_mapping)} torrent tree data from '
                 f'cache file {cache}.')
    # Generate torrent tree data
    torrent_success_count = len(torrent_tree_mapping)
    torrent_failed_count = 0
    LOG.info('Generating torrent tree data')
    for f in torrent_files:
        counter.message = (f'Generating torrent tree data for {f}; '
                           f'Success count: {torrent_success_count}; Failed '
                           f'load torrents count: {torrent_failed_count}; ')
        if f not in torrent_tree_mapping.keys():
            try:
                torrent_tree = TorrentTree.build_from_torrent_files(f)
                torrent_tree_mapping.update({f: torrent_tree})
                torrent_success_count += 1
            except Exception as e:
                LOG.error(f'Failed to parse torrent: {f}, error: {e}')
                torrent_failed_count += 1
                total_failed.append(f)
        else:
            continue
        counter.update()

    # Save cache file
    def chunks(lst, n):
        for i in range(0, len(lst), n):
            yield lst[i: i + n]

    if cache:
        LOG.info(f'Dumping cache file {cache}')
        if not os.path.exists(cache):
            os.mkdir(cache)
        suffix = 0
        for keys in chunks(list(torrent_tree_mapping.keys()), 1000):
            f = os.path.join(cache, f'{os.path.basename(cache)}.{suffix}')
            mapping = {k: torrent_tree_mapping[k] for k in keys}
            with gzip.open(f, 'wb') as output:
                pickle.dump(mapping, output, protocol=pickle.HIGHEST_PROTOCOL)
            suffix += 1
        LOG.info(f'Dumped {len(torrent_tree_mapping)} torrent tree data to '
                 f'cache file {cache}.')
    for data in data_list:
        LOG.info(f'\n\nStart parse for data: {data}\n')
        counter = Counter('')
        success_count = 0
        result = []
        dir_tree = DirTree.build_from_dir_name(os.path.abspath(data))
        for f, torrent_tree in torrent_tree_mapping.items():
            counter.message = (f'Parsing {f}; Find count: {len(result)}; '
                               f'Success count: {success_count}; ')
            path = find_path(dir_tree, torrent_tree)
            if path:
                result.append((f, path))
                LOG.info(f'Found a pair: {f}, {path}')
                success_count += 1
            counter.update()
        total_result += result
    result_format = '\n'.join([' '.join(x) for x in total_result])
    failed_format = '\n'.join(total_failed)
    LOG.info(f'\nResult:\n{result_format}\n\nFailed:\n{failed_format}')
if __name__ == '__main__':
    parse = argparse.ArgumentParser()
    parse.add_argument('--torrent', '-t', help='The torrent path')
    parse.add_argument('--data', '-d', help='The data path')
    parse.add_argument('--data-file', '-f',
                       help='A file containing multiple data paths')
    parse.add_argument('--cache', '-c',
                       help='Use a cache directory to speed things up. If the '
                            'cache does not exist, the script will create it. '
                            'Note that this can use a lot of memory.')
    args = parse.parse_args()
    if args.data and not args.data_file and not args.cache:
        parse_single_dir(args.torrent, args.data)
        sys.exit(0)
    data_list = []
    if args.data:
        data_list.append(os.path.abspath(args.data))
    if args.data_file:
        with open(args.data_file, 'r') as f:
            for line in f.read().split('\n'):
                if line:
                    data_list.append(os.path.abspath(line))
    parse_multiple_dir(args.torrent, data_list, args.cache)
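
For a single torrent the whole flow reduces to building both trees and asking find_path for a match. A short sketch, with hypothetical paths:

# Sketch: match one .torrent file against one data directory by hand.
dir_tree = DirTree.build_from_dir_name('/media/data')                   # hypothetical data dir
torrent_tree = TorrentTree.build_from_torrent_files('example.torrent')  # hypothetical torrent
path = find_path(dir_tree, torrent_tree)
print(path or 'no matching directory found')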
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: ayasakinagi
Email: xyz.wang@outlook.com
Environment: Python 3
Dependencies: requests, beautifulsoup4, lxml
Usage: edit the uid, cookie, utime, uploadTimeInterval and type variables
'''
import re
import time
import logging
import requests
from bs4 import BeautifulSoup as BS
logPath = '/tmp/u2.33x.log'
# set to your uid
uid = ''
# set to your cookie
cookie = ''
# desired promotion duration in hours
utime = 24
# only apply magic to torrents uploaded within uploadTimeInterval, default one hour
uploadTimeInterval = 3600
# 1 overrides existing 2x promotions, 2 leaves them alone
type = 1
header = {
    'dnt': '1',
    'accept-encoding': 'gzip, deflate',
    'accept-language': 'zh-CN,zh;q=0.8',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'cache-control': 'max-age=0',
    'authority': 'u2.dmhy.org',
    'cookie': cookie
}
# init log
handler1 = logging.FileHandler(logPath)
handler2 = logging.StreamHandler()
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(message)s'
formatter = logging.Formatter(fmt)
handler1.setFormatter(formatter)
handler2.setFormatter(formatter)
logger = logging.getLogger('log')
logger.addHandler(handler1)
logger.addHandler(handler2)
logger.setLevel(logging.DEBUG)
# get ucoinNum
def getUcoinNum():
    url = 'https://u2.dmhy.org/userdetails.php?id=' + str(uid)
    page = requests.get(url, headers=header).text
    soup = BS(page, 'lxml')
    ucoinNum = soup.find_all('span', {'class': 'ucoin-notation'})[1]['title']
    return ucoinNum
def main():
    logger.info("Start Cap")
    # Get downloading torrents
    url = 'https://u2.dmhy.org/getusertorrentlistajax.php?userid=' + uid + '&type=leeching'
    page = requests.get(url, headers=header).text
    logger.info('Get downloading torrent page success')
    soup = BS(page, 'lxml')
    td = soup.find('table', {'border': 1}).children
    idList = []
    for i in td:
        i = str(i)
        if type == 1:
            # check 2x promotions: pro_custom is custom, pro_free2up is 2x free, pro_2up is 2x, pro_50pctdown2up is 2x 50% off
            if 'pro_custom' not in i:
                # get id
                rg = re.compile('id=*?(\\d+)', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    id = int(rg.group(1))
                    idList.append(id)
            elif 'pro_custom' in i:
                # get id
                rg = re.compile('id=*?(\\d+)', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    id = int(rg.group(1))
                # get uploadRatio
                uploadRatio = None
                rg = re.compile('arrowup.*?<b>([+-]?\\d*\\.\\d+)(?![-+0-9\\.])', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    uploadRatio = float(rg.group(1))
                # get downloadRatio
                downloadRatio = None
                rg = re.compile('arrowdown.*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    downloadRatio = float(rg.group(1))
                # add id to the list
                if uploadRatio and uploadRatio < 2:
                    idList.append(id)
        elif type == 2:
            # check 2x promotions: pro_custom is custom, pro_free2up is 2x free, pro_2up is 2x, pro_50pctdown2up is 2x 50% off
            if 'pro_custom' not in i and 'pro_free2up' not in i and 'pro_2up' not in i and 'pro_50pctdown2up' not in i:
                # get id
                rg = re.compile('id=*?(\\d+)', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    id = int(rg.group(1))
                    idList.append(id)
            elif 'pro_custom' in i:
                # get id
                rg = re.compile('id=*?(\\d+)', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    id = int(rg.group(1))
                # get uploadRatio
                uploadRatio = None
                rg = re.compile('arrowup.*?<b>([+-]?\\d*\\.\\d+)(?![-+0-9\\.])', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    uploadRatio = float(rg.group(1))
                # get downloadRatio
                downloadRatio = None
                rg = re.compile('arrowdown.*?([+-]?\\d*\\.\\d+)(?![-+0-9\\.])', re.IGNORECASE | re.DOTALL).search(i)
                if rg:
                    downloadRatio = float(rg.group(1))
                # add id to the list
                if uploadRatio and uploadRatio < 2:
                    idList.append(id)
        else:
            logger.error('Type error, set type to a valid value (1 or 2)')
    # check upload time
    diffList = []
    for i in idList:
        url = 'https://u2.dmhy.org/details.php?id=' + str(i)
        page = requests.get(url, headers=header).text
        soup = BS(page, 'lxml')
        uploadTime = time.strptime(soup.find('time')['title'], '%Y-%m-%d %H:%M:%S')
        uploadTimeUTC = time.mktime(uploadTime) - 28800
        # localtimeUTC = time.time() + time.timezone
        localtimeUTC = time.mktime(time.gmtime())
        diff = localtimeUTC - uploadTimeUTC
        if diff > uploadTimeInterval:
            diffList.append(i)
        time.sleep(2)
    idList = list(set(idList).difference(diffList))
    logger.info('Need 2.33x: ' + str(idList))
    # Get magic page
    for i in idList:
        # get form data
        url = 'https://u2.dmhy.org/promotion.php?action=magic&torrent=' + str(i)
        page = requests.get(url, headers=header).text
        soup = BS(page, 'lxml')
        data = {}
        data['action'] = soup.find('input', {'name': 'action'})['value']
        data['divergence'] = soup.find('input', {'name': 'divergence'})['value']
        data['base_everyone'] = soup.find('input', {'name': 'base_everyone'})['value']
        data['base_self'] = soup.find('input', {'name': 'base_self'})['value']
        data['base_other'] = soup.find('input', {'name': 'base_other'})['value']
        data['torrent'] = soup.find('input', {'name': 'torrent'})['value']
        data['tsize'] = soup.find('input', {'name': 'tsize'})['value']
        data['ttl'] = soup.find('input', {'name': 'ttl'})['value']
        # get ucoinCost
        # user: ALL applies to everyone, SELF to yourself, OTHER to another user
        # start: 0 means take effect immediately
        # hours: how long the magic lasts, 24-360 hours
        # promotion: 2 is free, 3 is 2x, 4 is 2x free, 5 is 50% off, 6 is 2x 50% off, 7 is 30% off, 8 is other (this choice requires the ur and dr parameters, both default 1)
        # comment: the magic incantation, optional
        data['user'] = 'SELF'
        data['user_other'] = ''
        data['start'] = 0
        data['hours'] = utime
        data['promotion'] = 8
        data['ur'] = 2.33
        data['dr'] = 1
        data['comment'] = ''
        url = 'https://u2.dmhy.org/promotion.php?test=1'
        page = requests.post(url, headers=header, data=data).text
        soup = BS(page, 'lxml')
        ucoinCost = soup.find('span', {'class': '\\"ucoin-notation\\"'})['title'][2:-2]
        logger.info('Torrent ' + str(i) + "'s ucoinCost: " + ucoinCost + ', now your ucoin num is ' + getUcoinNum())
        # Magic
        url = 'https://u2.dmhy.org/promotion.php?action=magic&torrent=' + str(i)
        page = requests.post(url, headers=header, data=data)
        if page.status_code == 200:
            logger.info('Torrent ' + str(i) + ' 2.33x success, now your ucoin num is ' + getUcoinNum())
        else:
            logger.info('Error, try again later')


if __name__ == '__main__':
    while True:
        try:
            main()
        except Exception as e:
            logger.error(e)
        time.sleep(150)
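
The promotion codes documented in main() cover more than the 2.33x custom magic. A sketch of a plain free promotion, assuming data already holds the hidden form fields scraped in main() and torrent_id is a placeholder:

# Sketch: request a free promotion (promotion=2) instead of the 2.33x custom one.
torrent_id = 12345          # placeholder
data['user'] = 'SELF'       # SELF: applies only to yourself
data['start'] = 0           # 0: takes effect immediately
data['hours'] = 24          # duration, valid range 24-360 hours
data['promotion'] = 2       # 2: free; ur/dr are only required for promotion=8
data['comment'] = ''
url = 'https://u2.dmhy.org/promotion.php?action=magic&torrent=' + str(torrent_id)
requests.post(url, headers=header, data=data)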
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Author: ayasakinagi
Email: xyz.wang@outlook.com
Environment: Python 3
Dependencies: requests, deluge_client, lxml, paramiko
Usage: edit the settings in config
    - push: push settings; if several are enabled, only one takes effect, priority deluge > remotedir > localdir
        - localdir: local directory
            - enable: True to use a local directory, False to disable
            - path: local directory path (use an absolute path)
        - remotedir: remote directory
            - enable: True to use a remote directory, False to disable
            - hostname: remote host address (domain name)
            - port: ssh port
            - username: login username
            - password: password
            - path: remote directory (use an absolute path)
        - deluge: delugeRPCClient; tick Preferences->Daemon->Allow Remote Connections to enable remote connections, then restart the deluge process (can be done in the Connection Manager)
            - enable: True to push via delugeRPCClient, False to disable
            - hostname: remote host address (domain name)
            - port: deluge daemon port, shown in Preferences->Daemon
            - username: deluge daemon username, found in ~/.config/deluge/auth
            - password: deluge daemon password (not the WebUI password), found in ~/.config/deluge/auth
    - siteconfig: if several are enabled, only one takes effect, priority webpage > rss
        - passkey: used to download torrents
        - webpage: scrape torrents from the front page
            - enable: True to enable, False to disable
            - url: front page address
            - cookie: used to log in
        - rss: fetch torrents from the RSS feed
            - enable: True to enable, False to disable
            - url: rss feed address
    - rule: rules, only applied to the webpage scraping method
        - enable: True to enable rules, False to disable
        - rules: the concrete rules
            - name: match the title
                - enable: True to enable this rule, False to disable it
                - exclude: drop torrents whose title matches a keyword; separate multiple keywords with |
            - type: match the category
                - enable: True to enable this rule, False to disable it
                - exclude: drop torrents whose category matches a keyword; separate multiple keywords with |
            - size: match the size
                - enable: True to enable this rule, False to disable it
                - limit: drop torrents larger than limit, in GiB
            - time: match the upload time
                - enable: True to enable this rule, False to disable it
                - limit: drop torrents uploaded more than limit ago, in minutes
            - magic: promotions, several can be configured
                - enable: True to enable this promotion, False to disable it
                - keyword: promotion keyword, found in the page's class attribute; separate multiple keywords with |
                - seeder: if positive or 0, download torrents with at least this many seeders; if negative, download torrents with at most that many seeders (absolute value)
                - leecher: same as above
    - tmpdir: temporary directory for torrent files, default /tmp
    - timeout: interval between two fetches, in seconds
'''
import re
import os
import time
import shutil
import paramiko
import logging
import requests
from lxml import etree
from deluge_client import DelugeRPCClient
config = {
    "push": {
        "local": {
            "enable": False,
            "path": ""
        },
        "remote": {
            "enable": False,
            "hostname": "",
            "port": 22,
            "username": "",
            "password": "",
            "path": ""
        },
        "deluge": {
            "enable": True,
            "hostname": "",
            "port": 58846,
            "username": "",
            "password": ""
        },
    },
    "siteconfig": {
        "passkey": "",
        "webpage": {
            "enable": False,
            "url": "https://u2.dmhy.org/torrents.php",
            "cookie": ""
        },
        "rss": {
            "enable": True,
            "url": ""
        },
    },
    "rule": {
        "enable": False,
        "rules": {
            "name": {
                "enable": False,
                "exclude": ""
            },
            "type": {
                "enable": False,
                "exclude": ""
            },
            "size": {
                "enable": False,
                "limit": 600
            },
            "time": {
                "enable": False,
                "limit": 60
            },
            "magic": {
                "enable": True,
                "free": {
                    "enable": True,
                    "keyword": "pro_free|pro_free2up",
                    "seeder": -2,
                    "leecher": 0
                },
                "2x": {
                    "enable": True,
                    "keyword": "pro_2up",
                    "seeder": -2,
                    "leecher": 0
                },
                "50off": {
                    "enable": True,
                    "keyword": "pro_50pctdown",
                    "seeder": -1,
                    "leecher": 5
                },
                "other": {
                    "enable": True,
                    "keyword": "pro_custom",
                    "seeder": -2,
                    "leecher": 0
                },
            },
        },
    },
    "tmpdir": "/tmp",
    "timeout": 60
}
class initialization():

    def __init__(self, conf):
        self.conf = conf

    def start(self):
        self.initlog()
        self.initconfig()
        return self.conf, self.logger

    def initlog(self):
        # logPath is where the log is saved
        logPath = "/tmp/u2download.log"
        # init log
        handler1 = logging.FileHandler(logPath)
        handler2 = logging.StreamHandler()
        fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(levelname)s - %(message)s'
        formatter = logging.Formatter(fmt)
        handler1.setFormatter(formatter)
        handler2.setFormatter(formatter)
        self.logger = logging.getLogger('log')
        self.logger.addHandler(handler1)
        self.logger.addHandler(handler2)
        self.logger.setLevel(logging.DEBUG)

    def initconfig(self):
        logger = self.logger
        # check dir config
        if self.conf["push"]["local"]["enable"]:
            if not self.conf["push"]["local"]["path"][-1] == "/":
                self.conf["push"]["local"]["path"] += "/"
            if not os.path.exists(self.conf["push"]["local"]["path"]):
                os.makedirs(self.conf["push"]["local"]["path"])
                logger.info("mkdir " + self.conf["push"]["local"]["path"])
        if self.conf["push"]["remote"]["enable"]:
            if not self.conf["push"]["remote"]["path"][-1] == "/":
                self.conf["push"]["remote"]["path"] += "/"
        # check site config
        if self.conf["siteconfig"]["webpage"]["enable"] and self.conf["siteconfig"]["rss"]["enable"]:
            logger.error("Only one download method")
            exit()
        # check tmp dir
        if not self.conf["tmpdir"][-1] == "/":
            self.conf["tmpdir"] += "/"
        # init u2ID file
        u2IDPath = os.path.split(os.path.realpath(__file__))[0] + "/u2ID.log"
        if not os.path.exists(u2IDPath):
            os.mknod(u2IDPath)
            logger.info("Create file " + u2IDPath)
        self.conf["u2IDPath"] = u2IDPath
        # add request headers
        if self.conf["siteconfig"]["webpage"]["enable"]:
            self.conf["params"] = {
                'dnt': '1',
                'accept-encoding': 'gzip, deflate',
                'accept-language': 'zh-CN,zh;q=0.8',
                'upgrade-insecure-requests': '1',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
                'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'cache-control': 'max-age=0',
                'authority': 'u2.dmhy.org',
                'cookie': self.conf["siteconfig"]["webpage"]["cookie"]
            }
class getInfo():

    def __init__(self, logger):
        self.logger = logger

    def getInfoFromWebPage(self, url, params):
        page = requests.get(url, headers=params, timeout=10).text
        return page

    def fromWebPage(self, url, params):
        page = self.getInfoFromWebPage(url, params)
        html = etree.HTML(page)
        tr = html.xpath("//table[contains(@class, 'torrents')]/tr")[1:]
        info = []
        for i in tr:
            # get id
            trid = i.xpath("td[position()=2]/table/tr/td[position()=2]/a/@href")[0]
            trid = re.findall(r"\d+", trid)[0]
            # get type
            trtype = str(i.xpath("td[position()=1]/a/text()")[0])
            # get name
            trname = str(i.xpath("td[position()=2]/table/tr[position()=1]/td/a/text()")[0])
            # get magic
            trmagic = i.xpath("td[position()=2]/table/tr[position()=2]/td/img/@class")
            if trmagic:
                trmagic = str(trmagic[0])
            else:
                trmagic = ''
            # get magictime
            trmagictime = i.xpath("td[position()=2]/table/tr[position()=2]/td/b/time/@title")
            if trmagictime:
                trmagictime = trmagictime[0]
                trmagictime = time.mktime(time.strptime(trmagictime, "%Y-%m-%d %H:%M:%S"))
            else:
                trmagictime = None
            # get uploadtime
            truploadtime = i.xpath("td[position()=4]/time/@title")[0]
            truploadtime = time.mktime(time.strptime(truploadtime, "%Y-%m-%d %H:%M:%S"))
            # get size
            trsize = i.xpath("td[position()=5]/text()")
            if 'T' in trsize[1]:
                trsize = float(trsize[0]) * 1024
            elif 'G' in trsize[1]:
                trsize = float(trsize[0])
            elif 'M' in trsize[1]:
                trsize = float(trsize[0]) / 1024
            # get seeder
            trseeder = i.xpath("td[position()=6]/b/a/text()")
            if trseeder:
                trseeder = int(trseeder[0])
            elif i.xpath("td[position()=6]/b/a/font/text()"):
                trseeder = int(i.xpath("td[position()=6]/b/a/font/text()")[0])
            else:
                trseeder = 0
            # get leecher
            trleecher = i.xpath("td[position()=7]/b/a/text()")
            if trleecher:
                trleecher = int(trleecher[0])
            else:
                trleecher = 0
            info.append({"id": trid, "type": trtype, "name": trname, "magic": trmagic, "magictime": trmagictime, "uploadtime": truploadtime, "size": trsize, "seeder": trseeder, "leecher": trleecher})
        return info

    def getInfoFromRSS(self, url):
        rss = requests.get(url, timeout=10).content
        return rss

    def fromRSS(self, url):
        rss = self.getInfoFromRSS(url)
        xml = etree.fromstring(rss)
        info = []
        for i in xml.iterfind("channel/item"):
            rg = re.compile("php.*?(\\d+)", re.IGNORECASE | re.DOTALL).search(i.findtext("link"))
            if rg:
                info.append(rg.group(1))
        return info
class filtertorrent():

    def filtertr(self, infolist, rule):
        idList = []
        if rule["enable"]:
            rule = rule["rules"]
            for i in infolist:
                if rule["name"]["enable"]:
                    if not self.filterNameType(i["name"], rule["name"]["exclude"]):
                        continue
                if rule["type"]["enable"]:
                    if not self.filterNameType(i["type"], rule["type"]["exclude"]):
                        continue
                if rule["size"]["enable"]:
                    if not self.filterSizeTime(i["size"], rule["size"]["limit"]):
                        continue
                if rule["time"]["enable"]:
                    if not self.filterSizeTime(time.time() - i["uploadtime"], rule["time"]["limit"] * 60):
                        continue
                if rule["magic"]["enable"]:
                    if not self.filtermagic(i["magic"], i["seeder"], i["leecher"], rule["magic"]):
                        continue
                idList.append(i["id"])
            return idList
        else:
            for i in infolist:
                idList.append(i["id"])
            return idList

    def filterNameType(self, info, rule):
        for i in rule.split("|"):
            if i in info:
                return False
        return True

    def filterSizeTime(self, info, rule):
        if info > rule:
            return False
        else:
            return True

    def filtermagic(self, magic, seeder, leecher, rule):
        for i in rule:
            if i != "enable":
                tmp = rule[i]
                keyword = tmp["keyword"].split("|")
                for j in keyword:
                    if j in magic:
                        if (lambda x: x if tmp["seeder"] >= 0 else -x)(seeder) >= tmp["seeder"] and (lambda x: x if tmp["leecher"] >= 0 else -x)(leecher) >= tmp["leecher"]:
                            return True
        return False
class download():

    def __init__(self, idList, passkey, tmpdir, u2IDPath):
        self.idList = idList
        self.passkey = passkey
        self.tmpdir = tmpdir
        self.u2IDPath = u2IDPath

    def run(self):
        # checkDuplicate() populates self.downloadID, which dl() and addLog() need
        self.checkDuplicate()
        self.dl()
        self.addLog()
        return self.downloadID

    def dl(self):
        for i in self.downloadID:
            link = "https://u2.dmhy.org/download.php?id=" + i + "&passkey=" + self.passkey + "&http=1"
            tmpfile = self.tmpdir + i + ".torrent"
            with open(tmpfile, "wb") as f:
                f.write(requests.get(link).content)

    def checkDuplicate(self):
        f = open(self.u2IDPath, "r")
        downloadedID = []
        tmpidList = []
        for i in f.readlines():
            downloadedID.append(int(i))
        for i in self.idList:
            tmpidList.append(int(i))
        dup = list(set(tmpidList).intersection(set(downloadedID)))
        self.downloadID = list(set(tmpidList).difference(set(dup)))
        self.downloadID = list(map(lambda x: str(x), self.downloadID))
        f.close()
        return self.downloadID

    def addLog(self):
        f = open(self.u2IDPath, "a")
        for i in self.downloadID:
            f.write(i + "\n")
        f.close()
class push():

    def __init__(self, downloadID, tmpdir, conf):
        self.downloadID = downloadID
        self.tmpdir = tmpdir
        self.conf = conf["push"]

    def push(self):
        if self.conf["local"]["enable"]:
            self.pushlocal()
        if self.conf["remote"]["enable"]:
            self.pushremote()

    def pushlocal(self):
        for i in self.downloadID:
            tmpfile = self.tmpdir + i + ".torrent"
            shutil.copy(tmpfile, self.conf["local"]["path"])
            os.remove(tmpfile)

    def pushremote(self):
        self.createSFTP()
        for i in self.downloadID:
            tmpfile = self.tmpdir + i + ".torrent"
            remotepath = self.conf["remote"]["path"] + i + ".torrent"
            self.sftp.put(tmpfile, remotepath)
            os.remove(tmpfile)
        self.sftp.close()

    def createSFTP(self):
        host = self.conf["remote"]["hostname"]
        port = self.conf["remote"]["port"]
        username = self.conf["remote"]["username"]
        password = self.conf["remote"]["password"]
        ssh = paramiko.SSHClient()
        ssh.load_system_host_keys()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, port=port, username=username, password=password)
        self.sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
class delugePush():

    def __init__(self, conf, passkey):
        host = conf["hostname"]
        port = conf["port"]
        username = conf["username"]
        password = conf["password"]
        self.passkey = passkey
        self.client = DelugeRPCClient(host, port, username, password)

    def dpush(self, downloadID):
        self.client.connect()
        if self.client.connected:
            for i in downloadID:
                link = "https://u2.dmhy.org/download.php?id=" + i + "&passkey=" + self.passkey + "&http=1"
                self.client.call("core.add_torrent_url", link, {"add_paused": False})
            self.client.disconnect()
        else:
            logger.error("Can't connect to the deluge client")
            return False
def run(conf, logger):
    # get info
    getinfo = getInfo(logger)
    if conf["siteconfig"]["rss"]["enable"]:
        url = conf["siteconfig"]["rss"]["url"]
        idList = getinfo.fromRSS(url)
        logger.info("Get idlist success by rss")
    elif conf["siteconfig"]["webpage"]["enable"]:
        url = conf["siteconfig"]["webpage"]["url"]
        params = conf["params"]
        infoList = getinfo.fromWebPage(url, params)
        # filter
        filt = filtertorrent()
        idList = filt.filtertr(infoList, conf["rule"])
        logger.info("Get idlist success by webpage")
    # download and push
    passkey = conf["siteconfig"]["passkey"]
    tmpdir = conf["tmpdir"]
    u2IDPath = conf["u2IDPath"]
    dl = download(idList, passkey, tmpdir, u2IDPath)
    downloadID = dl.checkDuplicate()
    logger.info("Download " + str(downloadID))
    if conf["push"]["deluge"]["enable"]:
        if downloadID:
            dpush = delugePush(conf["push"]["deluge"], passkey)
            dpush.dpush(downloadID)
            dl.addLog()
            logger.info("Push " + str(downloadID) + " by delugeRPCClient")
    elif conf["push"]["remote"]["enable"]:
        downloadID = dl.run()
        if downloadID:
            ph = push(downloadID, tmpdir, conf)
            ph.pushremote()
            logger.info("Push " + str(downloadID) + " by sftp")
    elif conf["push"]["local"]["enable"]:
        downloadID = dl.run()
        if downloadID:
            ph = push(downloadID, tmpdir, conf)
            ph.pushlocal()
            logger.info("Push " + str(downloadID) + " by local")


if __name__ == "__main__":
    init = initialization(config)
    conf, logger = init.start()
    while True:
        try:
            run(conf, logger)
        except Exception as e:
            logger.error(e)
        time.sleep(config["timeout"])
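
The seeder/leecher sign convention in filtermagic is easy to misread, so here is a small worked example; the rule dict mirrors the structure of config["rule"]["rules"]["magic"] and the torrent numbers are made up:

# Sketch: {"seeder": -2} means "at most 2 seeders", {"leecher": 0} means "at least 0 leechers".
rule = {"enable": True,
        "free": {"enable": True, "keyword": "pro_free", "seeder": -2, "leecher": 0}}
filt = filtertorrent()
print(filt.filtermagic("pro_free", 1, 3, rule))   # True: 1 seeder <= 2
print(filt.filtermagic("pro_free", 5, 3, rule))   # False: 5 seeders > 2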
@huihuimoe
https://gist.github.com/littleya/86cd895f97b614ebea376a1008291ccf#file-u2rss-py-L311
This line should be:
if not self.filterSizeTime(i["size"], rule["size"]["limit"]):

psc2350 commented Sep 20, 2018

How is this supposed to be used?
I keep getting this error:
USER_ID: 1000, GROUP_ID: 1000
nss_wrapper location: /usr/lib/libnss_wrapper.so
appbox@vnc:~$ python '/home/appbox/Desktop/u2Auto2.33x.py'
2018-09-20 02:32:33,719 - u2Auto2.33x.py:66 - Start Cap
2018-09-20 02:32:34,251 - u2Auto2.33x.py:73 - Get downloading torrent page success
2018-09-20 02:32:34,253 - u2Auto2.33x.py:199 - 'NoneType' object has no attribute 'children'
