@leonardomarcao
Created February 28, 2024 19:26
Web scraper that downloads the logos of all car brands listed on carlogos.org.
import os
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from PIL import Image

# Brand listing pages on carlogos.org (eight pages in total).
urls = [
    "https://www.carlogos.org/car-brands/",
    "https://www.carlogos.org/car-brands/page-2.html",
    "https://www.carlogos.org/car-brands/page-3.html",
    "https://www.carlogos.org/car-brands/page-4.html",
    "https://www.carlogos.org/car-brands/page-5.html",
    "https://www.carlogos.org/car-brands/page-6.html",
    "https://www.carlogos.org/car-brands/page-7.html",
    "https://www.carlogos.org/car-brands/page-8.html",
]

os.makedirs("logos", exist_ok=True)


def download_logos(url):
    """Download every brand logo linked from one listing page into logos/."""
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")
    logos = soup.select(".logo-list li img")
    for logo in logos:
        logo_url = logo.get("src")
        if not logo_url:
            continue
        try:
            # Turn the thumbnail's .png path into the matching .html
            # brand detail page and fetch it.
            src = logo_url.replace(".png", ".html").replace("/car-logos/", "")
            page_response = requests.get(urljoin(url, src))
            brand_soup = BeautifulSoup(page_response.content, "html.parser")

            # Take the first <img> on the detail page as the logo image.
            download_link = brand_soup.find("img")
            if not download_link or not download_link.get("src"):
                continue
            logo_url = urljoin(url, download_link["src"])
            filename = os.path.join(
                "logos", os.path.basename(src).split(".html")[0] + ".png"
            )

            image_response = requests.get(logo_url, stream=True)
            if image_response.status_code != 200:
                print(f"Failed to download {logo_url}")
                continue

            # Ensure the raw stream is decompressed before Pillow reads it,
            # then re-encode so every saved file is a valid PNG.
            image_response.raw.decode_content = True
            with Image.open(image_response.raw) as image:
                image.save(filename, "PNG")
            print(f"Downloaded {logo_url}")
        except Exception as e:
            print(f"Failed to download {logo_url}: {e}")


if __name__ == "__main__":
    for url in urls:
        download_logos(url)
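The script depends on requests, beautifulsoup4, and Pillow, and it saves every logo into a logos/ directory next to it. As a quick sanity check after a run, a small sketch like the one below (not part of the original gist; it assumes logos/ sits in the current working directory) lists each saved file with the format and dimensions Pillow reports:

# Hypothetical post-run check: list every downloaded logo and its size.
import os
from PIL import Image

for name in sorted(os.listdir("logos")):
    path = os.path.join("logos", name)
    with Image.open(path) as image:
        # Image.open() only reads the header, so format and size are cheap.
        print(f"{name}: {image.format}, {image.size[0]}x{image.size[1]} px")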