#!/usr/bin/env python3
import sys
import time

import requests
import xlsxwriter
"""
API URL: http://wordoid.com/QueryHandler.ashx?_=0.2528023694537982&domainAvailability=0&language=1&maxLength=5&pattern=*pay&quality=4
"""
# ANSI escape for green-highlighted terminal output (currently unused).
set_green = "\x1b[6;30;42m {text} \x1b[0m"


def resolve_available(data):
    """Map the boolean 'available' flag to a human-readable label."""
    return "Available" if data else "Not Available"
def get_qid_key(_a="0.2528023694537982", domainAvailability=0, language=1,
                maxlength=5, pattern="*pay", quality=4):
    """Request a query session id (QID) from wordoid.com."""
    urlstring = ("http://wordoid.com/QueryHandler.ashx?_={analytic}"
                 "&domainAvailability={da}&language={_l}&maxLength={maxl}"
                 "&pattern={pattern}&quality={quality}")
    r = requests.get(urlstring.format(analytic=_a, da=domainAvailability,
                                      _l=language, maxl=maxlength,
                                      pattern=pattern, quality=quality))
    if r.status_code == 200:
        return r.json()['QID']
    return 'NOT_VALID_REQUEST'
user_agent = ("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
              "(KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36")
def scrap_domain(wordlength=10, pattern="*pay"):
    output = open('domain_list.txt', 'w')
    qid_session = get_qid_key(maxlength=wordlength, pattern=pattern)
    if qid_session == 'NOT_VALID_REQUEST':
        print("request for a session id failed\n")
        sys.exit()
    # Request cookies mirror the query parameters; the site's analytics
    # cookies (_ga, _gid, __atuvc, __atuvs) turned out not to be required.
    cookies = dict(
        __language="1",
        __quality='4',
        __maxLength=str(wordlength),
        __domainAvailability='0',
        __pattern=pattern,
        __qid=qid_session,
    )
    headers = {
        "user-agent": user_agent,
        "accept": "application/json",
        "referer": "http://wordoid.com/",
        "x-requested-with": "XMLHttpRequest",
    }
    print_format = "Domain keyword {word}\ndomain info data {info_data}"
    for page in range(1, 10):
        request_url = ("http://wordoid.com/ResultHandler.ashx"
                       "?_=0.5604492497419404&from={page}&qid={qid_session}")
        r = requests.get(request_url.format(page=page, qid_session=qid_session),
                         headers=headers, cookies=cookies)
        print(r.text)
        try:
            length_response = len(r.json()['results'])
        except ValueError as e:
            print(e)
            length_response = 0
        if r.status_code == 200 and length_response >= 1:
            for data in r.json()['results']:
                # Each result carries a keyword and a (possibly null)
                # list of candidate domains for it.
                domain_in_list = data['domains']
                keyword = data['word']
                domains = []
                if domain_in_list is None:
                    continue
                for domain in domain_in_list:
                    string_format = "Domain name {domain_name} => available_status: {status}".format(
                        domain_name=domain['name'],
                        status=resolve_available(domain['available']))
                    if domain['available'] is False:
                        # Record domains flagged as taken to domain_list.txt.
                        output.write(domain['name'] + " | " + resolve_available(domain['available']) + "\n")
                    domains.append(string_format)
                # print(print_format.format(word=keyword, info_data='\n'.join(domains)))
                print('\n'.join(domains))
        else:
            print("Oops, something went wrong with this noob script.")
            print(r.content)
            break
        time.sleep(2)
    output.close()
scrap_domain(wordlength=7)
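# Other patterns should also work, e.g. scrap_domain(wordlength=6, pattern="pay*")
# for wordoids starting with "pay" (the "*" wildcard placement is assumed from
# the pattern=*pay example in the API URL above).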
# workbook = xlsxwriter.Workbook('available_domain_list.xlsx')
# worksheet = workbook.add_worksheet()
# row = 0
# col = 0
# for name, availability in scrapped_domains:
#     worksheet.write(row, col, name)
#     worksheet.write(row, col + 1, availability)
#     row += 1
# workbook.close()
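
# A minimal sketch of the Excel export the commented-out block above aims at.
# It re-reads domain_list.txt (written by scrap_domain) instead of the unused
# scrapped_domains list; the function name and the " | " delimiter split are
# assumptions based on the write format used above.
def export_to_xlsx(txt_path='domain_list.txt',
                   xlsx_path='available_domain_list.xlsx'):
    workbook = xlsxwriter.Workbook(xlsx_path)
    worksheet = workbook.add_worksheet()
    with open(txt_path) as f:
        for row, line in enumerate(f):
            name, availability = [part.strip() for part in line.split('|', 1)]
            worksheet.write(row, 0, name)          # column A: domain name
            worksheet.write(row, 1, availability)  # column B: availability label
    workbook.close()

# export_to_xlsx()  # uncomment to build the spreadsheet after scraping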