Skip to content

Instantly share code, notes, and snippets.

@akkuman
Last active Nov 20, 2021
Embed
What would you like to do?
[WallpaperAbyss壁纸下载器] #tools #python
# Downloaded wallpapers (the script saves images into the working directory).
*.jpg
*.JPEG
*.png
# Editor local-history plugin artifacts.
.history
# PyInstaller build output.
build
dist
# Python bytecode caches.
**/__pycache__
**/*.pyc
import requests
import argparse
import re
import copy
import time
from urllib.parse import urlparse, parse_qsl, urlencode, urlunparse
# Default browser-like headers sent with every request so the site does not
# reject us as an obvious bot.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
}
# CLI: positional gallery-page URL, optional per-image sleep and proxy.
# NOTE: parse_args() runs at import time, so importing this module without
# CLI arguments will exit — acceptable for a single-file script.
parser = argparse.ArgumentParser(description='WallpaperAbyss下载器')
parser.add_argument('url', type=str, help='要下载的壁纸的页面url')
parser.add_argument('-t', '--timeout', type=int, help='每个壁纸下载的sleep秒数', default=0)
parser.add_argument('-p', '--proxy', type=str, help='代理地址')
args = parser.parse_args()
def url_add_query(url: str, params: dict) -> str:
    """Return *url* with *params* merged into its query string.

    Existing query parameters are kept; any key also present in
    *params* is overwritten by the new value.
    """
    parsed = urlparse(url)
    merged = {**dict(parse_qsl(parsed.query)), **params}
    return urlunparse(parsed._replace(query=urlencode(merged)))
def download(referer, data_id, data_type, data_server, data_user_id, session=None):
    """Fetch the raw bytes of one wallpaper via the alphacoders download API.

    Args:
        referer: Page URL to send as the Referer header (the API checks it).
        data_id: Wallpaper content id scraped from the gallery page.
        data_type: File extension reported by the page (e.g. 'jpg').
        data_server: Image server name reported by the page.
        data_user_id: Uploader id scraped from the page (currently unused by
            the API call; kept for interface compatibility).
        session: Optional requests.Session to reuse connections/proxies.

    Returns:
        The image bytes, or None on any failure (non-200 status or a
        200 response that carries no download link).
    """
    if not session:
        session = requests.Session()
    form_data = {
        'content_id': data_id,
        'content_type': 'wallpaper',
        'file_type': data_type,
        'image_server': data_server,
    }
    get_download_link_headers = copy.deepcopy(headers)
    # Ask the API for a (signed) direct download link.
    print('[*] Getting download link...')
    get_download_link_headers['referer'] = referer
    resp = session.post('https://api.alphacoders.com/content/get-download-link', headers=get_download_link_headers, data=form_data)
    if resp.status_code != 200:
        print(f'[{resp.status_code}] Failed to get download link: {resp.text}')
        return
    # The API can answer 200 with no 'link' (e.g. {"success": false});
    # guard instead of crashing the whole run with a KeyError.
    download_link = resp.json().get('link')
    if not download_link:
        print(f'[!] No download link in API response: {resp.text}')
        return
    print(f'[*] Download link: {download_link}')
    # Fetch the image itself.
    print('[*] Downloading...')
    resp = session.get(download_link, headers=get_download_link_headers)
    if resp.status_code != 200:
        print(f'[{resp.status_code}] Failed to download: {resp.text}')
        return
    return resp.content
def main():
    """Walk every page of the gallery URL and save each wallpaper to CWD.

    Files are written as '<id>.<ext>' in the current directory. Stops when
    the page's 'last_page' constant is reached (or cannot be found).
    """
    session = requests.Session()
    if args.proxy:
        session.proxies = {
            'http': args.proxy,
            'https': args.proxy,
        }
    # Fetch gallery pages one at a time.
    current_page = 1
    while True:
        url = url_add_query(args.url, {'page': current_page})
        resp = session.get(url, headers=headers)
        if resp.status_code != 200:
            print(f'[{resp.status_code}] Failed to get page: {resp.text}')
            return
        # Each wallpaper is rendered as a download button like:
        # <span title="下载壁纸" class="btn btn-primary btn-block download-button" data-id="543961" data-type="jpg" data-server="images8" data-user-id="42440">
        # <i class="el el-download-alt"></i>
        # </span>
        matchs = re.findall(r'data-id="(\d+)" data-type="(\w+)" data-server="(\w+)" data-user-id="(\w+)"', resp.text)
        for data_id, data_type, data_server, data_user_id in matchs:
            print(f'[*] Downloading {data_id}...')
            img_content = download(url, data_id, data_type, data_server, data_user_id, session)
            # download() returns None on failure; skip instead of crashing
            # on f.write(None) and keep going with the remaining images.
            if img_content is None:
                print(f'[!] Skipped {data_id}: download failed')
                continue
            with open(f'{data_id}.{data_type}', 'wb') as f:
                f.write(img_content)
            print(f'[v] Downloaded {data_id}')
            time.sleep(args.timeout)
        # The page embeds its page count as: const last_page = N;
        # If it is missing (site markup changed), stop rather than crash.
        last_page_match = re.search(r'const last_page = (\d+);', resp.text)
        if last_page_match is None or current_page >= int(last_page_match.group(1)):
            break
        current_page += 1
# Run only when executed as a script, not on import.
if __name__ == '__main__':
main()
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller spec file: bundles main.py into a single console executable
# named 'main'. Names like Analysis/PYZ/EXE are injected by PyInstaller
# when it executes this spec — they are not importable here.
block_cipher = None
a = Analysis(['main.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
# Pure-Python modules archive.
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
# One-file executable: runtime_tmpdir=None + console=True means a
# console app that unpacks to a temp dir at startup.
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='main',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None )
# Poetry project manifest for the wallpaper downloader.
[tool.poetry]
name = "pic"
version = "0.1.0"
description = ""
authors = ["Akkuman <akkumans@qq.com>"]
# Use the Aliyun PyPI mirror as the default package source.
[[tool.poetry.source]]
name = "aliyun"
url = "https://mirrors.aliyun.com/pypi/simple/"
default = true
[tool.poetry.dependencies]
python = "^3.8"
requests = "^2.26.0"
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment