
@genekogan
Created February 22, 2017 11:49
Scraping full-size images from Google Images
from bs4 import BeautifulSoup
import urllib2
import os
import argparse
import sys
import json

# adapted from http://stackoverflow.com/questions/20716842/python-download-images-from-google-image-search

def get_soup(url, header):
    return BeautifulSoup(urllib2.urlopen(urllib2.Request(url, headers=header)), 'html.parser')

def main(args):
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='bananas', type=str, help='search term')
    parser.add_argument('-n', '--num_images', default=10, type=int, help='num images to save')
    parser.add_argument('-d', '--directory', default='/Users/gene/Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    query = args.search
    max_images = args.num_images
    save_directory = args.directory
    query = '+'.join(query.split())
    url = "https://www.google.co.in/search?q=" + query + "&source=lnms&tbm=isch"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
    soup = get_soup(url, header)

    # each rg_meta div carries JSON with the original image URL ("ou") and its file type ("ity")
    ActualImages = []
    for a in soup.find_all("div", {"class": "rg_meta"}):
        meta = json.loads(a.text)
        ActualImages.append((meta["ou"], meta["ity"]))

    for i, (img, Type) in enumerate(ActualImages[0:max_images]):
        try:
            req = urllib2.Request(img, headers=header)
            raw_img = urllib2.urlopen(req).read()
            ext = Type if len(Type) > 0 else "jpg"
            f = open(os.path.join(save_directory, "img_" + str(i) + "." + ext), 'wb')
            f.write(raw_img)
            f.close()
        except Exception as e:
            print "could not load : " + img
            print e

if __name__ == '__main__':
    from sys import argv
    try:
        main(argv)
    except KeyboardInterrupt:
        pass
    sys.exit()
@Kamleshsam

This worked for me 👍 💯

# Resources:
# https://gist.github.com/genekogan/ebd77196e4bf0705db51f86431099e57

print("run imports...")
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib3
import argparse
import urllib.request

print("define program variables and open google images...")
searchterm = 'garage' # will also be the name of the folder
url = "https://www.google.co.in/search?q="+searchterm+"&source=lnms&tbm=isch"
# NEED TO DOWNLOAD CHROMEDRIVER, insert path to chromedriver inside parentheses in following line
browser = webdriver.Chrome('C:/path/to/chromedriver.exe')
browser.get(url)
header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
counter = 0
succounter = 0

print("start scrolling to generate more images on the page...")
# scroll down 500 times, 10000 pixels each, to load more images on the page
for _ in range(500):
    browser.execute_script("window.scrollBy(0,10000)")

print("start scraping ...")
for x in browser.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd tx8vtf")]'):
    counter = counter + 1
    print("Total Count:", counter)
    print("Succsessful Count:", succounter)
    print("URL:", x.get_attribute('src'))

    img = x.get_attribute('src')
    new_filename = "image"+str(counter)+".jpg"

    try:
        path = 'C:/path/to/whatever/folder/you/want/on/your/local/drive/'
        path += new_filename
        urllib.request.urlretrieve(img, path)
        succounter += 1
    except Exception as e:
        print(e)

print(succounter, "pictures successfully downloaded")
browser.close()

Why does this code download only a few images, even if I increase the number in the for loop?

@yeamusic21

yeamusic21 commented Jul 23, 2020

@Kamleshsam - The code is manually scrolling down. For example, do a Google image search on 'dog', then scroll down many times; at some point you can't scroll any further, because Google stops auto-generating results and instead displays a 'show more results' button which you need to click to keep scrolling. You could write code to get around this, but it is not included in the above script. Instead of writing code to click the 'show more results' button (which would take time), one workaround I used was to just run the script multiple times with different, but similar, search terms each time. So say you wanted to scrape images of dogs: you could use dog, labrador dog, husky dog, etc., and run a batch file to run the script for each search phrase, as sketched below.
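
A minimal sketch of that batch idea in Python, assuming one of the script variants in this thread is saved as scraper.py (a hypothetical filename) and accepts the -s/--search and -d/--directory flags shown further down:

# Sketch: run the scraper once per related search phrase.
# 'scraper.py' is a hypothetical name for one of the script variants
# in this thread that accepts -s/--search and -d/--directory.
import subprocess

phrases = ["dog", "labrador dog", "husky dog", "golden retriever dog"]

for phrase in phrases:
    # give each phrase its own folder so the numbered filenames don't collide
    folder = "./images/" + phrase.replace(" ", "_")
    subprocess.run(["python", "scraper.py", "-s", phrase, "-d", folder])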

@kchiran

kchiran commented Sep 7, 2020

I cannot figure out which directory the images are being saved into. Can anybody help me?

@yeamusic21

@kchiran - 'C:/path/to/whatever/folder/you/want/on/your/local/drive/'

@AnnBkrv

AnnBkrv commented Sep 15, 2020

I've turned @yeamusic21's program into a script, got rid of the certificate authentication issues, and changed the XPath bit. So here's the code if anyone needs it; just save it and execute it.
Update: it also clicks the 'show more results' button now.

# Resources:
# https://gist.github.com/genekogan/ebd77196e4bf0705db51f86431099e57

print("run imports...")
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import json
import os
import urllib3
import argparse
import urllib.request


import ssl
ssl._create_default_https_context = ssl._create_unverified_context


def run(query, save_directory):
    print("define program variables and open google images...")
    url = "https://www.google.co.in/search?q="+query+"&source=lnms&tbm=isch"
    # NEED TO DOWNLOAD CHROMEDRIVER, insert path to chromedriver inside parentheses in following line
    browser = webdriver.Chrome('../utilities/chromedriver')
    browser.get(url) # opens the page
    header={'User-Agent':"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
    counter = 0
    succounter = 0

    print("start scrolling to generate more images on the page...")
    # scroll down 500 times, 10000 pixels each, to load more images on the page
    def scroll():
        for _ in range(500):
            browser.execute_script("window.scrollBy(0,10000)")
    scroll()
    moreButton = browser.find_element_by_class_name("mye4qd")
    moreButton.click()
    scroll()

    print("start scraping ...")
    for x in browser.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd")]'):
        counter = counter + 1
        #print("Total Count:", counter)
        #print("Successful Count:", succounter)
        #print("URL:", x.get_attribute('src'))

        img = x.get_attribute('src')
        new_filename = "/image"+str(counter)+".jpg"
        path = save_directory
        path += new_filename

        try:
            urllib.request.urlretrieve(img, path)
            succounter += 1
        except Exception as e:
            print(e)
    print(succounter, "pictures successfully downloaded")
    browser.close()

def main():
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='bananas', type=str, help='search term')
    parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    run(args.search, args.directory)


if __name__ == '__main__':
    main()

@hoangphuc1998

hoangphuc1998 commented Sep 18, 2020

I've turned @yeamusic21's program into a script, got rid of the certificate authentication issues, and changed the XPath bit. So here's the code if anyone needs it; just save it and execute it.

(quoting the script from @AnnBkrv's comment above)

Thank you. This works perfectly, but it only downloads thumbnails, not the original images.

@AnnBkrv

AnnBkrv commented Sep 18, 2020

@hoangphuc1998 the original program also downloaded thumbnails instead of images, I believe

@hoangphuc1998

hoangphuc1998 commented Sep 21, 2020

from selenium import webdriver
import time
import requests
import shutil
import os
import argparse

def save_img(inp,img,i, directory):
    try:
        filename = inp+str(i)+'.jpg'
        response = requests.get(img,stream=True)
        image_path = os.path.join(directory, filename)
        with open(image_path, 'wb') as file:
            shutil.copyfileobj(response.raw, file)
    except Exception:
        pass


def find_urls(inp,url,driver, directory):
    driver.get(url)
    for _ in range(500):
        driver.execute_script("window.scrollBy(0,10000)")
        try:
            driver.find_element_by_css_selector('.mye4qd').click()
        except:
            continue
    for j, imgurl in enumerate(driver.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd")]')):
        try:
            imgurl.click()
            img = driver.find_element_by_xpath('//body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img').get_attribute("src")
            save_img(inp,img,j, directory)
            time.sleep(1.5)
        except:
            pass
            
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='bananas', type=str, help='search term')
    parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    driver = webdriver.Chrome('/path_to_chromedriver')
    directory = args.directory
    inp = args.search
    if not os.path.isdir(directory):
        os.makedirs(directory)
    url = 'https://www.google.com/search?q='+str(inp)+'&source=lnms&tbm=isch&sa=X&ved=2ahUKEwie44_AnqLpAhUhBWMBHUFGD90Q_AUoAXoECBUQAw&biw=1920&bih=947'
    find_urls(inp,url,driver, directory)

This script will download all the images in a larger size
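
Usage note, assuming the script is saved as scraper.py (a hypothetical filename): with the argparse flags defined above, a run would look like

python scraper.py -s "husky dog" -d ../Downloads/husky/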

@AnnBkrv

AnnBkrv commented Sep 22, 2020

@hoangphuc1998 doesn't work for me, it just clicks through the images but doesn't download anything

@akshat-khare

Quick install steps would be:
1. Install conda and activate an environment.
2. pip install selenium
3. Install chromedriver and extract it somewhere.
4. Edit the script and provide the chromedriver path in the arguments of webdriver.Chrome().
5. Change the parameters for search and directory (parser.add_argument('-s')), or pass them in the next step.
6. Execute the script.
7. On a Mac, go to System Preferences > Security & Privacy and allow chromedriver to run.

@AnnBkrv try specifying the right directory using the --directory parameter.

@AnnBkrv

AnnBkrv commented Sep 23, 2020

@akshat-khare I did. I tried to run a portion of this script in a notebook to debug it: the image gets clicked, but then the URL is not retrieved, so the image can't be downloaded.
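
One thing to try there, as a sketch: instead of reading the src right after the click, poll for it with WebDriverWait until a full-size (http) URL appears. The img.n3VNCb selector for the preview-pane image is an assumption about Google's markup at the time and may need updating:

# Sketch: wait for the full-size image URL instead of reading it
# immediately after the click. 'img.n3VNCb' is an assumed selector
# for the preview-pane image and may have changed.
from selenium.webdriver.support.ui import WebDriverWait

def get_fullsize_url(driver, timeout=10):
    def fullsize_loaded(drv):
        for el in drv.find_elements_by_css_selector('img.n3VNCb'):
            src = el.get_attribute('src')
            if src and src.startswith('http'):  # data: URIs are just thumbnails
                return src
        return False
    return WebDriverWait(driver, timeout).until(fullsize_loaded)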

@sebasegovia01

sebasegovia01 commented Sep 24, 2020

(quoting @hoangphuc1998's script above)

Working fine, guys! (It is necessary to install Selenium and chromedriver.exe first.)

@KryeKuzhinieri

(quoting @hoangphuc1998's script above)

Great stuff. Worked just fine. Please add it to a repo or so.

@frank-64

(quoting @hoangphuc1998's script and @KryeKuzhinieri's reply above)

Can anyone explain what happens inside the second for loop of find_urls, please?

I tried to run the code and the save_img function is never called, yet you say it works 'just fine'.
Thanks.

@RSKothari

Hey guys! Does anyone here know how to keep this code running infinitely?

@yeamusic21

You could schedule the job to run every hour or so using Windows Task Scheduler, cron, Airflow, the schedule package, etc.
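
For instance, a sketch with the schedule package (pip install schedule), assuming the run(query, save_directory) function from the script variants above is importable from a module named scraper (a hypothetical name):

# Sketch: re-run the scraper every hour with the schedule package.
import time
import schedule
from scraper import run  # hypothetical module holding run(query, save_directory)

schedule.every().hour.do(run, "bananas", "../Downloads/")

while True:
    schedule.run_pending()
    time.sleep(60)  # wake up once a minute to check for due jobs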

@RSKothari

Yeah, that's an idea. I'm curious, though: will Google return new sets of images? That's a concern. Or perhaps I can have the process write to the same directory and skip any file that has already been written, by checking its URL.
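
A sketch of that dedupe idea: keep a ledger of every image URL already saved and skip it on later runs. The seen_urls.txt filename and the helper names are hypothetical:

# Sketch: skip URLs downloaded on a previous run.
# 'seen_urls.txt' is a hypothetical ledger file, one URL per line.
import os

def load_seen(ledger="seen_urls.txt"):
    if not os.path.exists(ledger):
        return set()
    with open(ledger) as f:
        return set(line.strip() for line in f)

def mark_seen(url, ledger="seen_urls.txt"):
    with open(ledger, "a") as f:
        f.write(url + "\n")

# inside the scraping loop, with seen = load_seen():
# if img not in seen:
#     save_img(inp, img, j, directory)
#     mark_seen(img)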

@AsimuthSchwitters

Hey there,
Would any of you be able to change this code so that, after clicking on the first image of the search, it clicks on one of the similar images suggested by Google and continues doing this with every image?

@resatarikan

# this code is working now..

from selenium import webdriver
import urllib.request
import os
import time

driver = webdriver.Chrome('D:\\webdrivers\\chromedriver.exe')
default_path = "D:\\google_images"


def start(country, job_name):
    images = driver.find_elements_by_css_selector(".rg_i.Q4LuWd")
    _path = os.path.join(default_path, country, job_name)
    try:
        os.makedirs(_path)
    except OSError:
        pass
    os.chdir(_path)
    count = 0
    for image in images:
        count += 1
        if not image.get_attribute('src') == "":
            print(str(count))
            try:
                time.sleep(3)
                image.click()
                time.sleep(5)
                try:
                    # the long selector targets the full-size image in the preview pane
                    big_image = driver.find_element_by_css_selector("#Sva75c > div > div > div.pxAole > div.tvh9oe.BIB1wf > c-wiz > div > div.OUZ5W > div.zjoqD > div > div.v4dQwb > a > img")
                    url = big_image.get_attribute("src")
                    print(url)
                    try:
                        urllib.request.urlretrieve(url, str(count) + "_car.jpg")
                    except:
                        print("download error")
                except:
                    print("could not find the full-size image")
            except:
                print("could not click the thumbnail")

# search URL reconstructed from the variable name in the original post
url = "https://www.google.com/search?q=uk+very+big+cars+on+road&tbm=isch"
driver.get(url)
start("uk_big", "uk_very_big_cars_on_road_01")

@VerbTheNoun95

I was able to use hoangphuc1998's method successfully earlier in the year, but it started getting only the downsized thumbnail images.

Using resatarikan's version gets me some of the full-size images, but it's still inconsistent.

Maybe Google changed how images are loaded, but I noticed that driver.find_element_by_xpath('//body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img').get_attribute("src") will now return data:image/png;base64,{long_random_string}.
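
If all you need are those inlined thumbnails, a data: URI can still be saved directly; it is just the image bytes base64-encoded into the page. A minimal sketch (the helper name is hypothetical):

# Sketch: decode a data:image/...;base64 src and write it to disk.
# Only recovers the embedded thumbnail; full-size images still need an http(s) URL.
import base64

def save_data_uri(src, path_without_ext):
    header, encoded = src.split(',', 1)        # "data:image/jpeg;base64", payload
    ext = header.split('/')[1].split(';')[0]   # e.g. "jpeg" or "png"
    with open(path_without_ext + '.' + ext, 'wb') as f:
        f.write(base64.b64decode(encoded))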

@Gucci44600

Hello, when I run this program (the script which downloads all images in larger size), Google opens and scrolls, but an error terminates the program (see screenshot). I've had this problem several times when trying other code. Could someone please help me?
[screenshot: Capture d’écran 2023-01-04 115440]

@resatarikan

resatarikan commented Jan 4, 2023

Hello, when I run this program (the script which downloads all images in larger size), Google opens and scrolls, but an error terminates the program (see screenshot). I've had this problem several times when trying other code. Could someone please help me?

Hello,
find_element_by_* commands are deprecated.

https://stackoverflow.com/questions/69875125/find-element-by-commands-are-deprecated-in-selenium

You have to use find_elements(By.XPATH, '//......')
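
For example, against the scripts above, a sketch of the same lookups on the Selenium 4 API:

# Sketch: the find_element(s)_by_* helpers were removed in Selenium 4;
# pass a By locator instead.
from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()  # Selenium 4.6+ can locate chromedriver itself
driver.get("https://www.google.com/search?q=bananas&tbm=isch")

# old: driver.find_elements_by_xpath('//img[contains(@class,"rg_i Q4LuWd")]')
thumbnails = driver.find_elements(By.XPATH, '//img[contains(@class,"rg_i Q4LuWd")]')

# old: driver.find_element_by_css_selector('.mye4qd').click()
driver.find_element(By.CSS_SELECTOR, '.mye4qd').click()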

@Gucci44600

Thank you for your reply, but it didn't work: for j, imgurl in enumerate(driver.find_element(By.XPATH,'//img[contains(@Class,"rg_i Q4LuWd")]')):

NameError: name 'By' is not defined

@resatarikan

You have to import Selenium's By module.

@Gucci44600

Thank you. I imported Selenium's By module and the program goes further, but I still have an error (see screenshot).
[screenshot: Image1]

@resatarikan

You have to update the class name in the XPath.

@Gucci44600

Thanks, I will try!

@Gucci44600

Hello, I tried to update the XPath class, but it doesn't change anything. I did some research, but I have trouble understanding. I had to change some things; now there is no more error, but it still doesn't work: Google opens the page, scrolls, and that's it. Could you try it on your side with the link, tell me if it works, and eventually how you did it, please?
Here is the link (https://www.google.com/search?q=french+pedestrian+light&rlz=1C1CHBF_frFR1008FR1008&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiGgui61IX9AhVaQaQEHeS4CkgQ_AUoAXoECAEQAw&biw=1536&bih=714&dpr=1.25).

@Gucci44600

Gucci44600 commented Mar 1, 2023

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
import time
import requests
import shutil
import os
import argparse

def save_img(inp, img, i, directory):
    try:
        filename = inp + str(i) + '.jpg'
        response = requests.get(img, stream=True)
        image_path = os.path.join(directory, filename)
        with open(image_path, 'wb') as file:
            shutil.copyfileobj(response.raw, file)
    except Exception:
        pass

def find_urls(inp, url, driver, directory):
    driver.get(url)
    for _ in range(500):
        driver.execute_script("window.scrollBy(0,10000)")
        try:
            driver.find_element(By.CSS_SELECTOR, '.mye4qd').click()
        except:
            continue
    # collect the thumbnails after scrolling, otherwise only the first page is found
    img_urls = driver.find_elements(By.XPATH, '//img[contains(@class,"rg_i")]')
    for j, imgurl in enumerate(img_urls):
        try:
            img = imgurl.get_attribute('src')
            save_img(inp, img, j, directory)
            time.sleep(1.5)
        except:
            pass

s = Service(r'C:\chromedriver.exe')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Scrape Google images')
    parser.add_argument('-s', '--search', default='banana', type=str, help='search term')
    parser.add_argument('-d', '--directory', default='../Downloads/', type=str, help='save directory')
    args = parser.parse_args()
    driver = webdriver.Chrome(service=s)
    directory = args.directory
    inp = args.search
    if not os.path.isdir(directory):
        os.makedirs(directory)
    url = 'https://www.google.com/search?q=french+pedestrian+light&rlz=1C1CHBF_frFR1008FR1008&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiGgui61IX9AhVaQaQEHeS4CkgQ_AUoAXoECAEQAw&biw=1536&bih=714&dpr=1.25'
    find_urls(inp, url, driver, directory)

@Gucci44600

Sorry, I tried to keep the indentation of the code, but it didn't survive.
