This first snippet scrapes LinkedIn job postings through the ScraperAPI proxy and appends them to a CSV file:

import csv
import requests
from bs4 import BeautifulSoup

file = open('linkedin-jobs.csv', 'a')
writer = csv.writer(file)
writer.writerow(['Title', 'Company', 'Location', 'Apply'])

def linkedin_scraper(webpage, page_number):
    next_page = webpage + str(page_number)
    response = requests.get(next_page)
    soup = BeautifulSoup(response.content, 'html.parser')
    jobs = soup.find_all('div', class_='base-card')  # class names assumed from LinkedIn's jobs-guest markup; adjust if the page changes
    for job in jobs:
        title = job.find('h3', class_='base-search-card__title').text.strip()
        company = job.find('h4', class_='base-search-card__subtitle').text.strip()
        location = job.find('span', class_='job-search-card__location').text.strip()
        link = job.find('a', class_='base-card__full-link')['href']
        writer.writerow([title, company, location, link])
    if page_number < 25:  # the jobs-guest endpoint pages results 25 at a time
        linkedin_scraper(webpage, page_number + 25)
    else:
        file.close()
        print('File closed')

# ScraperAPI proxy URL format: http://api.scraperapi.com?api_key={YOUR_API_KEY}&url=
linkedin_scraper('http://api.scraperapi.com?api_key=51e43be283e4db2a5afb62660xxxxxxx&url=https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search?keywords=Product%20Management&location=San%20Francisco%20Bay%20Area&geoId=90000084&trk=public_jobs_jobs-search-bar_search-submit&position=1&pageNum=0&start=', 0)
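The call above wires the two pieces together: ScraperAPI takes the target LinkedIn URL in its url query parameter, and the bare trailing start= is completed by page_number on each recursive call. A minimal sketch of that assembly (the API_KEY placeholder and target variable are illustrative, not part of the original gist; the target is passed unencoded, exactly as the gist does):

API_KEY = 'YOUR_API_KEY'  # placeholder; substitute a real ScraperAPI key
target = ('https://www.linkedin.com/jobs-guest/jobs/api/seeMoreJobPostings/search'
          '?keywords=Product%20Management&location=San%20Francisco%20Bay%20Area'
          '&geoId=90000084&start=')
proxy_url = f'http://api.scraperapi.com?api_key={API_KEY}&url={target}'
# linkedin_scraper(proxy_url, 0) would then fetch start=0, start=25, ...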
A second snippet pulls the JSON feed behind a JavaScript-rendered table (the DataTables demo endpoint) and saves it to CSV:

import csv
import requests

url = 'https://datatables.net/examples/ajax/data/arrays.txt?_=1656247207356'
headers = {
    'accept': 'application/json, text/javascript, */*; q=0.01',
    'cookie': 'PHPSESSID=196d9e692bf75bea701ea53461032689; __utmc=120757021; __utmz=120757021.1655866355.1.1.utmcsr=bing|utmccn=(organic)|utmcmd=organic|utmctr=(not provided); __utma=120757021.1861635672.1655866355.1656246692.1656255144.5'
}

page = requests.get(url, headers=headers)
data = page.json()

# The endpoint returns a JSON object shaped like
# {"data": [[name, position, office, extn, start_date, salary], ...]}
print(len(data))        # number of top-level keys
first_array = data['data'][0]
print(first_array)      # first row of the table
all_arrays = data['data']
print(len(all_arrays))  # total number of rows

file = open('js-table-data.csv', 'w')
writer = csv.writer(file)
writer.writerow(['Name', 'Position', 'Office', 'Extn', 'Start Date', 'Salary'])

for item in data['data']:
    name = item[0]
    position = item[1]
    office = item[2]
    extn = item[3]
    start_date = item[4]
    salary = item[5]
    writer.writerow([name, position, office, extn, start_date, salary])

file.close()
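Since each item is already an ordered row array, the unpack-and-rebuild step can be collapsed; a compact variant (a sketch, assuming the endpoint also responds without the session cookie):

import csv
import requests

rows = requests.get('https://datatables.net/examples/ajax/data/arrays.txt').json()['data']
with open('js-table-data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Name', 'Position', 'Office', 'Extn', 'Start Date', 'Salary'])
    writer.writerows(rows)  # each row array maps directly onto one CSV line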