Skip to content

Instantly share code, notes, and snippets.

@cipi1965
Created June 19, 2017 19:40
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save cipi1965/30e001779d268c882e5baeb30b9efa73 to your computer and use it in GitHub Desktop.
Easybytez download script in Python
#!/usr/bin/python
from __future__ import print_function
from tqdm import tqdm
from lxml import html
import requests
import sys
import os
from urllib import quote_plus
# It requires a premium account
username=""
password=""
proxies = {'http': 'http://127.0.0.1:80/'}
if len(sys.argv) != 2:
print("Missing argument: easybytez <link or file with list>")
exit(1)
if not sys.argv[1].startswith("http") and not os.path.isfile(sys.argv[1]):
print("File not exists")
exit(1)
if not sys.argv[1].startswith("http"):
links = [line.rstrip('\n') for line in open(sys.argv[1])]
else:
links = [sys.argv[1]]
rsession = requests.Session()
r = rsession.get("http://www.easybytez.com/login2.html", proxies=proxies)
tree = html.fromstring(r.content)
csrf = tree.xpath('//*[@name="rand"]/@value')[0]
payload = {'rand': csrf, 'op': 'login2', 'redirect': 'http://www.easybytez.com/', 'login': username, 'password': password}
login = rsession.post('http://www.easybytez.com/', data=payload, proxies=proxies)
for link in links:
if link.startswith("http"):
r = rsession.get(link, allow_redirects=False, proxies=proxies)
try:
splitted_url = r.headers['Location'].split("/")
except KeyError:
print("Link not found for: "+link)
continue
filename = splitted_url[len(splitted_url)-1]
response = rsession.get(r.headers['Location'], stream=True, proxies=proxies)
if os.path.isfile(filename) and os.path.getsize(filename) == int( response.headers['Content-Length'] ):
print(filename+" already downloaded. Skipping...")
continue
with open(filename, 'wb') as f:
chunkSize=2048
pbar = tqdm( unit="B", total=int( response.headers['Content-Length'] ), desc=filename, unit_scale=True )
for chunk in response.iter_content(chunk_size=chunkSize):
if chunk: # filter out keep-alive new chunks
pbar.update (len(chunk))
f.write(chunk)
# else:
# print(r.text + "\nSkipping "+ link +"...")
@cipi1965
Copy link
Author

cipi1965 commented Jan 8, 2022

@8dodo7 honestly i don't know, i use LinkSnappy now

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment