Skip to content

Instantly share code, notes, and snippets.


8X7K/ Secret

Created Dec 8, 2021
What would you like to do?
import functools
import json
import os
import sys
import time

import requests
from bs4 import BeautifulSoup

from ja_sentence_segmenter.common.pipeline import make_pipeline
from ja_sentence_segmenter.concatenate.simple_concatenator import concatenate_matching
from ja_sentence_segmenter.normalize.neologd_normalizer import normalize
from ja_sentence_segmenter.split.simple_splitter import split_newline, split_punctuation
# Japanese sentence-segmentation pipeline:
# normalize -> split on newlines -> re-join "の"-ending fragments -> split on punctuation.

# Split on the Japanese sentence-ending punctuation set.
split_punc2 = functools.partial(split_punctuation, punctuations=r"。!?…")
# Keep a fragment ending in the particle "の" attached to the sentence that follows it.
concat_tail_no = functools.partial(
    concatenate_matching,
    remove_former_matched=False,
    former_matching_rule=r"^(?P<result>.+)(の)$",
)
segmenter = make_pipeline(normalize, split_newline, concat_tail_no, split_punc2)
# Annict API access token (the original literal was redacted from this gist —
# the comment read "access token obtainable from <stripped URL>").
# Supply it via the ANNICT_SECRET_TOKEN environment variable instead.
ANNICT_SECRET_TOKEN = os.environ.get("ANNICT_SECRET_TOKEN", "")


# Fetch the synopsis for the anime with the given Annict work id.
def get_anime_arasuji(id):
    """Return the synopsis ("あらすじ") of the Annict work *id* as a list of
    segmented sentences, or None when no synopsis section is found."""
    # Sleep 1 second so we do not hammer the server — the original comment
    # promised this ("迷惑をかけないよう 1 秒 sleep する") but the call was missing.
    time.sleep(1)
    # NOTE(review): the host part of this URL was stripped from the gist;
    # the Annict work page is assumed to be https://annict.com/works/{id} — confirm.
    url = f"https://annict.com/works/{id}"
    r = requests.get(url)
    soup = BeautifulSoup(r.content, 'html.parser')
    # Restored: the original line was truncated to "divs ='div.container')".
    divs = soup.select('div.container')
    for i, div in enumerate(divs):
        # The synopsis body sits in the container immediately after the
        # container whose text is exactly "あらすじ".
        if div.text.strip() == "あらすじ":
            raw_text = divs[i + 1].select_one('div.c-body__content').text
            return list(segmenter(raw_text))
    return None
# Walk the Annict works API page by page, keep only seasonal TV anime,
# and append each found synopsis to annict_data.jsonl (one JSON object per line).
page_num = 1
while True:
    # NOTE(review): the host part of this URL was stripped from the gist;
    # the Annict v1 endpoint is assumed to be
    # https://api.annict.com/v1/works — confirm against the API docs.
    url = (
        "https://api.annict.com/v1/works"
        f"?access_token={ANNICT_SECRET_TOKEN}&per_page=50&sort_id=desc&page={page_num}"
    )
    r = requests.get(url)
    annict_dict = r.json()
    animes = annict_dict["works"]
    # Stop once the API returns an empty page — the original `if not` had
    # lost its `break` statement, which made the loop run forever.
    if not animes:
        break
    for anime in animes:
        # クール物の TV アニメ以外は除外する — skip anything that is not a
        # seasonal TV anime (the original guard had lost its `continue`).
        if anime["media"] != "tv" or "season_name_text" not in anime:
            continue
        arasuji = get_anime_arasuji(anime["id"])
        if arasuji is not None:
            print(f'arasuji found in {anime["title"]}')
            res = {
                "タイトル": anime["title"],
                "あらすじ": arasuji,
                "時期": anime["season_name_text"],
            }
            # Append-mode write: one JSON line per work (JSONL).
            with open("annict_data.jsonl", "a") as f:
                f.write(f"{json.dumps(res, ensure_ascii=False)}\n")
        else:
            print(f'arasuji not found in {anime["title"]}')
    page_num += 1
Copy link

koke2c95 commented Dec 30, 2021

Thanks — interesting idea!

I just discovered tmdb can be a good source
some well-known example:
Story synopsis - API
episode synopsis - API

If you are interested in multilingual work, see the SBERT multilingual model

I didn't find anyone who has built a parallel text dataset or a dump with translations (dev reference)
you can use the discover API to get anime TV/movie ids

and here is a related anime Recommendation Systems project

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment