-
-
Save vraghuvaran/880696d24685c669fc6e8d90e74c798d to your computer and use it in GitHub Desktop.
This script fetches all the hyperlinks from a given website.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#########################################
#                                       #
# Fetch every hyperlink (anchor href)   #
# from a target website and print them. #
#                                       #
# Usage:                                #
#   ./url_spidy.py target_address       #
#                                       #
#########################################
import sys
import urllib.request  # urllib2 is Python 2 only; urllib.request is its Python 3 home

from bs4 import BeautifulSoup


def extract_links(html):
    """Return a list with the ``href`` of every ``<a>`` tag in *html*.

    Anchors without an ``href`` attribute yield ``None``, matching the
    original script's output (it printed ``link.get('href')`` unfiltered).
    """
    soup = BeautifulSoup(html, "html.parser")
    return [anchor.get("href") for anchor in soup.find_all("a")]


def main():
    """Fetch the page named on the command line and print each link."""
    if len(sys.argv) != 2:
        # Fail fast with a usage message instead of an IndexError traceback.
        sys.exit("Usage: ./url_spidy.py target_address")
    # ``with`` guarantees the HTTP response is closed even on a parse error.
    with urllib.request.urlopen(sys.argv[1]) as response:
        html = response.read()
    for href in extract_links(html):
        print(href)


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment