Created
March 13, 2024 09:57
-
-
Save bjornblissing/ee90f045445e61900f0eaa4b21fd9fab to your computer and use it in GitHub Desktop.
Lista dagens luncher i Ebbepark
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python3 | |
import requests | |
from bs4 import BeautifulSoup, SoupStrainer | |
import re | |
# La Luna
def laluna():
    """Fetch and print this week's lunch menu from La Luna (lalunat1.se).

    Follows every "Lunch" link found in the site's navigation, collects
    the <strong> text inside each linked page's ``text_content`` divs,
    normalises the whitespace, and prints one day/section heading per line.
    """
    print("La Luna")
    base_url = 'http://www.lalunat1.se'
    # Timeout so a stalled server cannot hang the script indefinitely.
    page = requests.get(base_url, timeout=10)
    soup = BeautifulSoup(page.content, "html.parser")
    nav = soup.find(id="nav")
    if nav is None:
        # Site layout changed; original code would crash with AttributeError.
        print("Menu navigation not found")
        return
    lunch_text = ""
    for lunch_element in nav.find_all("a", string="Lunch"):
        lunch_page = requests.get(base_url + lunch_element["href"], timeout=10)
        lunch_soup = BeautifulSoup(lunch_page.content, "html.parser")
        for text_content in lunch_soup.find_all("div", class_="text_content"):
            for paragraph in text_content.find_all("p"):
                for strong in paragraph.find_all("strong"):
                    lunch_text += strong.get_text()
    # Clean up the collected text.
    # Replace non-breaking and zero-width spaces with ordinary spaces.
    lunch_text = lunch_text.replace("\u00A0", " ").replace("\u200B", " ")
    # Join lines that were broken mid-sentence.
    lunch_text = re.sub(r'(\S)[ \t]*\n[ \t]*(\S)', r'\1 \2', lunch_text)
    # Collapse runs of spaces/tabs; strip leading spaces and space-after-newline.
    lunch_text = re.sub(r'[ \t]{2,}', ' ', lunch_text)
    lunch_text = re.sub(r'^[ ]+', '', lunch_text)
    lunch_text = re.sub(r'\n[ ]', '\n', lunch_text)
    # Put each weekday / section heading on its own line.
    for heading in ('Måndag', 'Tisdag', 'Onsdag', 'Torsdag', 'Fredag',
                    'Veckans sallad', 'Veckans special', 'Veckans vegetariska'):
        lunch_text = re.sub(heading + r'[\s:]*', heading + '\n', lunch_text)
    print(lunch_text)
# Svinneriet
def svinneriet():
    """Fetch and print today's menu sections from svinneriet.se/meny.

    Prints the meat ("kött"), fish ("blött"), and vegetarian ("växt")
    headings together with the paragraph that follows each one.
    """
    print("Svinneriet")
    url = 'https://www.svinneriet.se/meny'
    # Timeout so a stalled server cannot hang the script indefinitely.
    page = requests.get(url, timeout=10)
    soup = BeautifulSoup(page.content, "html.parser")
    for keyword in ("kött", "blött", "växt"):
        _print_menu_section(soup, keyword)


def _print_menu_section(soup, keyword):
    """Print the <h3> whose text contains *keyword* plus its following <p>."""
    # Guard against text=None: BeautifulSoup calls the string filter with
    # None for tags that have nested markup, and the original lambda would
    # crash calling .lower() on None.
    heading = soup.find(
        "h3", string=lambda text: text is not None and keyword in text.lower()
    )
    if heading is None:
        # Original code crashed with AttributeError when a section was missing.
        print(f"Section '{keyword}' not found")
        return
    print(heading.get_text())
    paragraph = heading.find_next_sibling("p")
    if paragraph is not None:
        print(paragraph.get_text())
if __name__ == "__main__":
    # Print today's lunch offerings from each restaurant in turn.
    for restaurant_menu in (laluna, svinneriet):
        restaurant_menu()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment