Download source releases from Samsung OSRC using a terminal
#!/usr/bin/env python3
# Copyright (c) 2017 Simon Shields
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Before using this script, first run `pip install requests beautifulsoup4`
# or use your distro's package manager
import os
import time

import requests
from bs4 import BeautifulSoup

try:  # py3
    from urllib.parse import quote_plus
except ImportError:  # py2
    from urllib import quote_plus

print("Note: by downloading anything from Samsung OSRC using this script, you agree to the terms laid out on this page: http://opensource.samsung.com/reception/receptionSub.do?method=modal")

def bytes_to_human(b):
    units = ['B', 'kB', 'MB', 'GB', 'TB']
    uidx = 0
    while b > 2048:
        b = float(b) / 1024.0
        uidx += 1
    return '%.02f %s' % (b, units[uidx])
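# example: bytes_to_human(3 * 1024 * 1024) == '3.00 MB'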

def seconds_to_human(b):
    sec = b % 60
    b = b // 60
    mts = b % 60
    b = b // 60
    if b > 0:
        return '%dh %02dm %02ds' % (b, mts, sec)
    elif mts > 0:
        return '%02dm %02ds' % (mts, sec)
    else:
        return '%02ds' % sec
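# example: seconds_to_human(3725) == '1h 02m 05s'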

SEARCH_URL = 'http://opensource.samsung.com/reception/receptionSub.do?method=search&searchValue=%s'
CSRF_URL = 'http://opensource.samsung.com/reception/receptionSub.do?method=modal'
DOWNLOAD_URL = 'http://opensource.samsung.com/reception/receptionSub.do?method=downLoad'

# Flow: search OSRC for files matching the query, let the user pick one,
# fetch a CSRF token, then POST to the download endpoint and stream the
# archive to disk.
session = requests.Session()
session.headers.update({'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:55.0) Gecko/20100101 Firefox/55.0'})

query = input('Enter search query: ')
res = session.get(SEARCH_URL % quote_plus(query))
soup = BeautifulSoup(res.text, 'html.parser')
rows = soup.find_all('table', class_='bT')[0].find_all('tr')[1:]  # first row is the table header
i = 0
fmt = '%15s | %14s | %s'
print(('[%2s] ' + fmt) % ('id', 'model', 'version', 'filename'))

post_data = {}
for row in rows:
    cells = row.find_all('td')
    print(('[%02d] ' + fmt) % (i, cells[1].text.strip(), cells[2].text.strip(), cells[3].text.strip()))
    # fields POSTed to the download endpoint for the chosen file;
    # attach_id identifies the file on OSRC
    post_data[i] = {
        'attach_id': row.find_all(id='s_attach_id_%d' % i)[0].get('value'),
        'down_purpose': 'EVU',
        'down_purpose_memo': '',
        'searchValue': query,
        'page': '1',
        'countPerPage': '15',
        'countPerBlock': '15',
    }
    i += 1

if i == 0:
    raise Exception('No results!')
choice = int(input('Which one to download? [0-%d] ' % (i - 1)))
if choice < 0 or choice >= i:
    raise Exception('Invalid choice')

data = post_data[choice]
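# fetch a CSRF token from the modal page; it is sent both as the '_csrf'
# field and embedded in 'returnValue'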
res = session.get(CSRF_URL)
soup = BeautifulSoup(res.text, 'html.parser')
data['_csrf'] = soup.find_all(id='_csrf')[0].get('value')
data['returnValue'] = 'Y,EVU,,' + data['_csrf']
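
# stream the response so large archives are not buffered in memory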
resp = session.post(DOWNLOAD_URL, data=data, stream=True)
print(resp.status_code)
print(resp.headers)
if not resp.headers['Content-Type'].startswith('application/octet-stream'):
    print('Error: non-zip response from OSRC, aborting')
    raise Exception('unexpected Content-Type: %s' % resp.headers['Content-Type'])
dlen = int(resp.headers['Content-Length'])
# crude Content-Disposition parsing: take everything after '=' and drop the
# last character (the closing quote)
filename = resp.headers['Content-Disposition'].split('=')[1][:-1]
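
# avoid clobbering an existing file: append .1, .2, ... until the name is free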
if os.path.exists(filename):
    i = 1
    while os.path.exists(filename + '.' + str(i)):
        i += 1
    filename += '.' + str(i)
print('Downloading %s to %s' % (bytes_to_human(dlen), filename))
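
# download in 512 KiB chunks, printing running progress, speed and ETA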
progress = 0
start = int(time.time()) - 1  # -1 avoids a zero time delta on the first chunk
with open(filename, mode='wb') as f:
    for chunk in resp.iter_content(chunk_size=512 * 1024):
        f.write(chunk)
        progress += len(chunk)
        delta = int(time.time()) - start
        speed = progress / delta
        if speed > 0:
            eta = seconds_to_human((dlen - progress) / speed)
        else:
            eta = 'No ETA'
        print('Downloading %s - %s/%s, %s/s, ETA %s' % (
            filename,
            bytes_to_human(progress),
            bytes_to_human(dlen),
            bytes_to_human(speed),
            eta), end=' \r')
print()  # finish the carriage-return progress line
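
# Usage: run the script, enter a search term (e.g. a device model number),
# pick a result by its id, and the chosen source archive is saved in the
# current directory.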