Need Help Scraping WNBA Team Pages — Selenium Struggles with Dynamic Pages
import json
import time

import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException, TimeoutException

# Load team URLs from JSON file (one "Team, URL" pair per line)
with open('Membership_URLs.json', 'r') as file:  # Replace with the correct relative or full path
    teams = []
    for line in file:
        team_data = line.strip().split(', ')
        if len(team_data) == 2 and team_data[0] not in ['Mystics', 'Valkyries']:  # Exclude Mystics and Valkyries
            teams.append({"team": team_data[0], "url": team_data[1]})
# Toggle headless mode
HEADLESS = False # Change to True to enable headless mode
# Setup Chrome options
options = Options()
if HEADLESS:
    options.add_argument("--headless")  # Enable headless mode
options.add_argument("--start-maximized") # Open browser in maximized mode
options.add_argument("--disable-blink-features=AutomationControlled") # Avoid detection
service = Service('/path/to/chromedriver') # Replace with the correct path to chromedriver
# Initialize driver with increased timeout
driver = webdriver.Chrome(service=service, options=options)
driver.set_page_load_timeout(180) # Increase timeout to 180 seconds
# Create a list to store ticket info
data = []
# Loop through each team's website
for team in teams:
    try:
        driver.get(team['url'])
        time.sleep(5)  # Allow page to load

        # Handle potential cookie popups
        try:
            popup = WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable((By.ID, "onetrust-accept-btn-handler"))
            )
            popup.click()
            time.sleep(2)
        except TimeoutException:
            pass  # No cookie banner appeared

        # Default values in case a field is missing on the page
        deposit = "N/A"
        availability = "N/A"
        image_url = "N/A"
        url = driver.current_url

        # Scrape deposit and availability details
        try:
            deposit_element = driver.find_element(
                By.XPATH, "//*[contains(text(), 'Deposit')] | //*[contains(text(), 'deposit')]"
            )
            deposit = deposit_element.text
        except NoSuchElementException:
            pass

        try:
            availability_element = driver.find_element(
                By.XPATH, "//*[contains(text(), 'Availability')] | //*[contains(text(), 'available')]"
            )
            availability = availability_element.text
        except NoSuchElementException:
            pass

        # Scrape arena seating image: check every <img>, not just the first one
        for image_element in driver.find_elements(By.TAG_NAME, 'img'):
            src = image_element.get_attribute('src') or ''
            if 'seating' in src.lower() or 'pricing' in src.lower():  # Filter relevant images
                image_url = src
                break

        # Append data
        data.append({
            'Team': team['team'],
            'Deposit': deposit,
            'Availability': availability,
            'Image URL': image_url,
            'URL': url
        })
    except Exception as e:
        print(f"Error with {team['team']}: {e}")
        data.append({
            'Team': team['team'],
            'Deposit': 'Error',
            'Availability': 'Error',
            'Image URL': 'Error',
            'URL': team['url']
        })
# Close browser
driver.quit()
# Save data to CSV
df = pd.DataFrame(data)
df.to_csv('wnba_season_ticket_info.csv', index=False)
print("Data exported to 'wnba_season_ticket_info.csv'")