Skip to content

Instantly share code, notes, and snippets.

Avatar
💫

Dmitriy Zub☀️ dimitryzub

💫
View GitHub Profile
@dimitryzub
dimitryzub / serpapi_google_scholar_author_articles.py
Created May 19, 2021
Scrape Google Scholar Author Articles with SerpApi
View serpapi_google_scholar_author_articles.py
# Fetch articles for one Google Scholar author through SerpApi.
from serpapi import GoogleSearch
import os

# Request parameters for SerpApi's google_scholar_author engine.
# The API key is read from the environment rather than hard-coded.
params = dict(
    api_key=os.getenv("API_KEY"),
    engine="google_scholar_author",
    author_id="9PepYk8AAAAJ",
    hl="en",
)
@dimitryzub
dimitryzub / serpapi_google_scholar_author_results.py
Last active May 19, 2021
Scrape Google Scholar Author Results with SerpApi
View serpapi_google_scholar_author_results.py
# Scrape Google Scholar author results with SerpApi.
from serpapi import GoogleSearch
import os

# Build the request settings step by step; the SerpApi key
# comes from the environment instead of being embedded in code.
params = {}
params["api_key"] = os.getenv("API_KEY")
params["engine"] = "google_scholar_author"
params["author_id"] = "9PepYk8AAAAJ"
params["hl"] = "en"
@dimitryzub
dimitryzub / serpapi_google_scholar_authors_citedby_results.py
Last active May 19, 2021
Scrape Google Scholar Authors CitedBy Results with SerpApi
View serpapi_google_scholar_authors_citedby_results.py
# Scrape an author's "cited by" results with SerpApi.
from serpapi import GoogleSearch
import os, json

# SerpApi request settings for the google_scholar_author engine.
# json is imported for use further down in the (truncated) gist.
params = dict(
    api_key=os.getenv("API_KEY"),
    engine="google_scholar_author",
    author_id="9PepYk8AAAAJ",
    hl="en",
)
@dimitryzub
dimitryzub / serpapi_full_example_scrape_google_scholar_profile_author_results.py
Last active May 30, 2021
Full Example: Scrape Google Scholar Profile and Author Results with SerpApi
View serpapi_full_example_scrape_google_scholar_profile_author_results.py
from serpapi import GoogleSearch
import os
# Entry point: queries SerpApi's google_scholar_profiles engine for profiles
# matching "samsung". NOTE(review): the gist preview is truncated here — the
# rest of the function body is not visible in this extract.
def serpapi_scrape_profile_results_combo():
# NOTE(review): indentation was lost in this page extract; the lines below
# belong inside the function body in the original gist.
params = {
"api_key": os.getenv("API_KEY"),
"engine": "google_scholar_profiles",
"hl": "en",
"mauthors": "samsung"
}
@dimitryzub
dimitryzub / python_scrape_google_scholar_co_authors_results.py
Last active May 20, 2021
Scrape Google Scholar Co-Authors Results with Python
View python_scrape_google_scholar_co_authors_results.py
from bs4 import BeautifulSoup
import requests, lxml, os
# Desktop Chrome/Edge User-Agent header sent with the scraping requests.
headers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
# HTTP proxy taken from the environment.
# NOTE(review): preview truncated — the closing brace of this dict is cut off.
proxies = {
'http': os.getenv('HTTP_PROXY')
@dimitryzub
dimitryzub / serpapi_scrape_author_co_author_results.py
Last active May 29, 2021
Scrape Google Scholar Author Co-Authors Results with SerpApi
View serpapi_scrape_author_co_author_results.py
# Scrape an author's co-author list through SerpApi.
from serpapi import GoogleSearch
import os

# Engine settings for the google_scholar_author endpoint;
# the API key is pulled from the environment at runtime.
params = dict(
    api_key=os.getenv("API_KEY"),
    engine="google_scholar_author",
    author_id="m8dFEawAAAAJ",
    hl="en",
)
@dimitryzub
dimitryzub / python_scrape_google_scholar_author_co_authors_results.py
Created May 20, 2021
Scrape Google Scholar Co-Authors Results with Python
View python_scrape_google_scholar_author_co_authors_results.py
from bs4 import BeautifulSoup
import requests, lxml, os
# Desktop Chrome/Edge User-Agent header sent with the scraping requests.
headers = {
'User-agent':
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
# HTTP proxy taken from the environment.
# NOTE(review): preview truncated — the closing brace of this dict is cut off.
proxies = {
'http': os.getenv('HTTP_PROXY')
@dimitryzub
dimitryzub / hltv_csgo_scrape_match_data.py
Last active Jun 6, 2021
HLTV CS:GO Scrape Match Stats
View hltv_csgo_scrape_match_data.py
# Website was dynamically updated so requests-html was used instead of bs4
from requests_html import HTMLSession
import csv
session = HTMLSession()
# Open the CSV output; newline='' avoids blank rows on Windows per the csv docs.
# NOTE(review): indentation was lost in this extract and the preview is
# truncated — the rest of the with-block is not visible here.
with open('csgo_match_stats.csv', mode='w', newline='', encoding='utf8') as csv_file:
# fieldnames needs to be the same as while doing .append()
fieldnames = ['Left Team', 'Left Team Score', 'Right Team Score', 'Right Team', 'Event Name']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
@dimitryzub
dimitryzub / serpapi_google_maps_local_results.py
Created Jun 9, 2021
Scrape Google Maps Local Results using SerpApi
View serpapi_google_maps_local_results.py
from serpapi import GoogleSearch
import csv
# Request parameters for SerpApi's google_maps engine.
# NOTE(review): preview truncated — the closing brace of this dict is cut off.
params = {
"api_key": "YOUR_API_KEY",
"engine": "google_maps",
"type": "search",
"google_domain": "google.com",
"q": "кофе мариуполь", # query
"ll": "@47.0919234,37.5093148,12z" # @ + latitude + , + longitude + , + zoom
@dimitryzub
dimitryzub / baidu_get_organic_results.py
Created Jun 18, 2021
baidu_scrape_organic_results
View baidu_get_organic_results.py
from bs4 import BeautifulSoup
import requests, lxml, json
# Mobile (Android Edge) User-Agent header.
# NOTE(review): presumably chosen so Baidu serves its mobile layout — confirm.
headers = {
"User-Agent":
"Mozilla/5.0 (Linux; Android 10; HD1913) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.105 Mobile Safari/537.36 EdgA/46.1.2.5140"
}
# NOTE(review): preview truncated — the function body is not visible here.
def get_organic_results():