oltarasenko

  • Erlang Solutions
  • Stockholm
// Creates a new spreadsheet with the given name and returns its active sheet
// (so data can be inserted there).
function createSheet(name) {
  var spreadsheet = SpreadsheetApp.create(name);
  return spreadsheet.getActiveSheet();
}
function main() {
  // Gathering the report.
  // Report builder: the WHERE and DURING clauses can be changed as needed to
  // different values or metrics. For example, you could filter on clicks by
  // adding ' WHERE Clicks > 10 ' to the query below.
  // For more help see https://developers.google.com/adwords/scripts/docs/features/reports
  var report = AdWordsApp.report(
    'SELECT ' +
    'Criteria, CampaignName, AdGroupName, Clicks, Impressions, Cost, ' +
    'Conversions, QualityScore, SearchPredictedCtr, CreativeQualityScore, PostClickQualityScore ' +
    // Assumption: the Keywords Performance report over the last 30 days;
    // adjust the FROM and DURING clauses as needed.
    'FROM KEYWORDS_PERFORMANCE_REPORT ' +
    'DURING LAST_30_DAYS');
}
> response = Crawly.fetch("https://www.bonsai-shop.com/izanpip121-02_26785_9870")
> {:ok, document} = Floki.parse_document(response.body)
> Floki.find(document, ".price.h1") |> Floki.text()
""

The selector comes back empty: the price block is rendered client-side with
JavaScript, so a plain HTTP fetch never sees it. The fix is to make Crawly
fetch pages through a real browser.
# in config.exs
import Config

config :crawly,
  # The important part: we want Crawly to use ChromeDriverFetcher.
  fetcher: {ChromeDriverFetcher, []},
  closespider_timeout: :disabled,
  closespider_itemcount: :disabled,
  concurrent_requests_per_domain: 2,
  middlewares: [
    Crawly.Middlewares.DomainFilter,
    Crawly.Middlewares.UniqueRequest
  ]
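With this in place the spider can be started as usual, and every request is
fetched through the browser (a sketch; Spider refers to the spider module
shown further down):

iex> Crawly.Engine.start_spider(Spider)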
def fetch(request, _client_options) do
  # Start a new browser session, with some extra parameters.
  {:ok, session} =
    Wallaby.start_session(
      capabilities: %{
        javascriptEnabled: true,
        chromeOptions: %{args: ["--headless"]}
      }
    )

  # Visit the page, let Chrome run its JavaScript, and grab the rendered HTML.
  body = session |> Wallaby.Browser.visit(request.url) |> Wallaby.Browser.page_source()
  Wallaby.end_session(session)

  {:ok, %HTTPoison.Response{status_code: 200, body: body, request_url: request.url}}
end
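Note that this starts and tears down a full Chrome session for every request,
which is expensive; that is why the configuration above caps
concurrent_requests_per_domain at 2.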
defmodule ChromeDriverFetcher do
  @behaviour Crawly.Fetchers.Fetcher

  # A skeleton implementation: a fetcher only has to return a response tuple
  # that looks like an ordinary HTTP fetch.
  def fetch(_request, _client_options) do
    {:ok, %HTTPoison.Response{status_code: 200, body: "body", request_url: "url"}}
  end
end
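The contract is small: Crawly hands the fetcher a Crawly.Request plus the
options from the {module, options} config tuple, and expects an
{:ok, %HTTPoison.Response{}} back. A quick sanity check against the skeleton
(a sketch; the URL is just an example):

iex> ChromeDriverFetcher.fetch(%Crawly.Request{url: "https://example.com"}, [])
{:ok, %HTTPoison.Response{status_code: 200, body: "body", request_url: "url"}}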
# in config.exs
import Config

config :crawly,
  closespider_timeout: :disabled,
  closespider_itemcount: :disabled,
  concurrent_requests_per_domain: 2,
  middlewares: [
    Crawly.Middlewares.DomainFilter,
    Crawly.Middlewares.UniqueRequest
  ]
defmodule Spider do
  use Crawly.Spider

  # This is not going to be used, so we're ignoring it.
  @impl Crawly.Spider
  def base_url() do
    :ok
  end

  @impl Crawly.Spider
  def init() do
    # Hypothetical seed URL: a Google search results page (the selectors
    # below target Google's no-JavaScript SERP markup).
    [start_urls: ["https://www.google.com/search?q=crawly"]]
  end
end
search_results =
  Floki.find(document, ".ZINbbc.xpd.O9g5cc.uUPGi")
  |> Floki.filter_out("#st-card")

fun = fn block ->
  %{
    title: Floki.find(block, "h3") |> Floki.text(),
    description: Floki.find(block, ".BNeawe") |> Floki.text(),
    link: Floki.find(block, ".kCrYT a") |> Floki.attribute("href")
  }
end

Enum.map(search_results, fun)
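Inside the spider above, the same extraction can back the parse_item/1
callback (a sketch; Crawly.ParsedItem is the struct spiders return, and the
selectors are the ones used above):

@impl Crawly.Spider
def parse_item(response) do
  {:ok, document} = Floki.parse_document(response.body)

  items =
    document
    |> Floki.find(".ZINbbc.xpd.O9g5cc.uUPGi")
    |> Floki.filter_out("#st-card")
    |> Enum.map(fn block ->
      %{
        title: Floki.find(block, "h3") |> Floki.text(),
        description: Floki.find(block, ".BNeawe") |> Floki.text(),
        link: Floki.find(block, ".kCrYT a") |> Floki.attribute("href")
      }
    end)

  %Crawly.ParsedItem{items: items, requests: []}
end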