Skip to content

Instantly share code, notes, and snippets.

@Nua07
Created March 18, 2021 06:46
Show Gist options
  • Save Nua07/f03cbed972df1462003e43bab640f3c9 to your computer and use it in GitHub Desktop.
import os
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import urllib.parse
def path_to_url(host, path):
    """Return an absolute URL for *path*.

    If *path* already starts with "http" it is returned unchanged;
    otherwise it is treated as a host-relative path and prefixed with
    ``https://`` and *host*.

    Bug fix: the original body referenced the module-level ``video_host``
    (defined much later in the script) instead of the ``host`` parameter,
    so the argument was silently ignored.
    """
    if not path.startswith("http"):
        return "https://" + host + path
    return path
# Interactive downloader for linkkf.app:
# search a series by name -> pick a series -> pick an episode -> resolve the
# embedded player page -> download the subtitle track and the video (either an
# HLS .m3u8 segment list or a direct file) into a folder named after the series.
# NOTE(review): indentation reconstructed from the flattened gist; control-flow
# nesting inferred from the statement semantics.

# Search for a series by name and list the matches.
res = requests.get("https://linkkf.app", params={"s": input("Name: ")})
soup = BeautifulSoup(res.text, "html.parser")
ani = []
for idx, item in enumerate(soup.select("div.item1")):
    title = item.find("span").text
    url = item.find("a").get("href")
    ani.append({"title": title, "url": url})
    print("[{}] {}".format(idx, title))
selected_ani = ani[int(input("idx: "))]

# List the episodes of the chosen series.
res = requests.get(selected_ani["url"])
soup = BeautifulSoup(res.text, "html.parser")
episode = []
for idx, item in enumerate(soup.select("article>a.buttonbb")):
    title = item.find("span").text
    url = item.get("href")
    episode.append({"title": title, "url": url})
    print("[{}] {}".format(idx, title))
selected_episode = episode[int(input("idx: "))]

ani_title = selected_ani["title"]
episode_title = selected_episode["title"]

# Resolve the player page: the first <option> of the quality switcher holds the
# embedded player URL. The host checks the Referer header, so it must be sent.
res = requests.get(selected_episode["url"])
soup = BeautifulSoup(res.text, "html.parser")
video_url = soup.select(".switcher>option")[0].get("value")
res = requests.get(video_url, headers={"Referer": selected_episode["url"]})
soup = BeautifulSoup(res.text, "html.parser")
video_host = urllib.parse.urlparse(video_url).netloc
track_url = path_to_url(video_host, soup.find("track").get("src"))
source_url = path_to_url(video_host, soup.find("source").get("src"))

# Create the output directory once (the original checked isdir twice in a row).
if not os.path.isdir(ani_title):
    os.mkdir(ani_title)

print("자막 다운로드")  # "subtitle download"
track_ext = track_url.split(".")[-1]
res = requests.get(track_url, headers={"Referer": video_url})
# Context manager replaces the original open/close pair so the handle is
# released even if the write raises.
with open("{}/{}.{}".format(ani_title, episode_title, track_ext), "wb") as f:
    f.write(res.content)

print("동영상 다운로드")  # "video download"
source_ext = source_url.split(".")[-1]
source_parsed = urllib.parse.urlparse(source_url)
if source_ext == "m3u8":
    # HLS playlist: every non-comment line of the manifest is a segment name.
    res = requests.get(source_url, headers={"Referer": video_url})
    ts = [line for line in res.text.splitlines() if not line.startswith("#")]
    # Base URL of the playlist's directory, reproduced exactly as the original
    # built it (including its leading-slash quirk).
    base = (source_parsed.scheme + "://" + source_parsed.netloc +
            "/" + "/".join(source_parsed.path.split("/")[:-1]) + "/")
    for segment in tqdm(ts):
        res = requests.get(base + segment, headers={"Referer": video_url})
        with open(ani_title + "/" + segment, "wb") as f:
            f.write(res.content)
else:
    # Direct file: stream to disk in chunks with a progress bar.
    video_filename = "{}/{}.{}".format(ani_title, episode_title, source_ext)
    res = requests.get(source_url, headers={"Referer": video_url}, stream=True)
    total_size_in_bytes = int(res.headers.get('content-length', 0))
    block_size = 1024
    progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open(video_filename, 'wb') as file:
        for data in res.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
print("end")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment