Skip to content

Instantly share code, notes, and snippets.

{
"name": "MagicEden Sales Bot v3 - Blueprint Provided by Deux à Trois (https://2a3.dev)",
"flow": [
{
"id": 2,
"module": "http:ActionSendData",
"version": 3,
"parameters": {
"handleErrors": false,
"useNewZLibDeCompress": true
# Find and Parse Sitemaps to Create List of all website's pages
from usp.tree import sitemap_tree_for_homepage
def getPagesFromSitemap(fullDomain):
    """Discover every page URL of a site via its sitemap(s).

    Parameters
    ----------
    fullDomain : str
        Homepage URL of the site (e.g. "https://example.com").

    Returns
    -------
    list[str]
        All page URLs found across the site's sitemap tree.
    """
    listPagesRaw = []
    # ultimate-sitemap-parser fetches robots.txt / sitemap.xml and builds the tree.
    tree = sitemap_tree_for_homepage(fullDomain)
    for page in tree.all_pages():
        listPagesRaw.append(page.url)
    # BUG FIX: the original never returned the collected list, so callers
    # always received None.
    return listPagesRaw
import requests
import pandas as pd
import urllib.request
from slugify import slugify
# NOTE(review): this snippet appears truncated by the page scrape — the body
# of the for-loop below is missing, so the code as shown is incomplete.
df = pd.read_csv("clippings.csv")
# WARNING(review): hardcoded API credential committed in source — rotate this
# key and load it from an environment variable instead.
token = "9HGDNG5-50E40Y9-MRTE5HC-HRWHPPB"
# Iterate rows of clippings.csv as namedtuples; intended per-row work not
# visible in this chunk — presumably a screenshot call per clipping. TODO confirm.
for clipping in df.itertuples():
## Fixed Height Export
import requests
import urllib.request

# SECURITY FIX: the original committed a live screenshotapi.net token here.
# Redacted to the same placeholder the Fullpage Export snippet uses; supply
# the real key via an environment variable, never in source.
token = "your_token_here"
url = "https://www.dailymail.co.uk/news/article-9744405/Melbourne-best-city-working-home-Covid-losing-crown-liveable-city.html"
# Request a 3000px-tall screenshot as JSON, with cookie banners and ads blocked.
result = requests.get(
    f"https://shot.screenshotapi.net/screenshot"
    f"?token={token}&url={url}&height=3000&output=json"
    f"&no_cookie_banners=true&block_ads=true"
)
## Fullpage Export
import requests
import urllib.request

# Screenshot API credential (placeholder) and the article to capture.
token = "your_token_here"
url = "https://www.dailymail.co.uk/news/article-9744405/Melbourne-best-city-working-home-Covid-losing-crown-liveable-city.html"

# Capture the whole page height as JSON, suppressing cookie banners and ads.
endpoint = "https://shot.screenshotapi.net/screenshot"
query = f"?token={token}&url={url}&full_page=1&output=json&no_cookie_banners=true&block_ads=true"
result = requests.get(endpoint + query)
import pandas as pd
import time
import re
from slugify import slugify
from html2image import Html2Image

# One renderer instance is enough — the original imported and instantiated
# Html2Image twice in a row.
hti = Html2Image()

# Clippings to capture; column schema not visible in this chunk — TODO confirm.
df = pd.read_csv("clippings.csv")

# Render the page to a local PNG.
# BUG FIX: the original line ended with a stray trailing "." — a SyntaxError.
hti.screenshot(url="https://martechwithme.com", save_as="screenshot.png")