Skip to content

Instantly share code, notes, and snippets.

@d3d9

d3d9/efa.py

Created Aug 16, 2020
Embed
What would you like to do?
Code, der jahrelang für https://twitter.com/hstbot lief, zur Ausführung per crontab. Verwendet python-twitter 3.3.1. WTFPL
# Script 1/3: tweets current VRR "additional travel information" items
# (provider HST) that have not been tweeted before.  Ran via crontab.
import requests
import twitter
#import sys
from datetime import datetime
import logging
import xml.etree.ElementTree as ET

logging.basicConfig(filename='efa.log', level=logging.INFO)

tweet_length = 280
hashtags = "#VRR"
tweet_length -= len(hashtags)   # hashtags are appended to every tweet
knownfile = 'known-efa.txt'     # one already-tweeted title per line

# Query parameters for the EFA XML_ADDINFO_REQUEST endpoint, reduced to the
# fields this script actually needs.
payload = {'language': 'de',
           'itdLPxx_transpCompany': 'vrr',
           'filterPublicationStatus': 'current',
           'filterOMC_PlaceID': '5914000:29',
           # 'itdLPxx_selOperator':'HST' — not good because of "00 Sonstige"
           'filterProviderCode': 'HST',
           'AIXMLReduction': ['removeStops', 'removeLines', 'removeValidity', 'removePublication', 'removeCreationTime', 'removeExpirationTime', 'removeSourceSystem']}
# timeout so an unresponsive endpoint cannot hang the cron job forever;
# raise_for_status surfaces HTTP errors before we try to parse the body as XML
r = requests.get('http://openservice-test.vrr.de/static02/XML_ADDINFO_REQUEST', payload, timeout=30)
r.raise_for_status()
root = ET.fromstring(r.content)
#tree = ET.parse(sys.argv[1])
#root = tree.getroot()

with open(knownfile, encoding='utf-8') as file:
    known = file.read().splitlines()

api = twitter.Api(consumer_key='X',
                  consumer_secret='X',
                  access_token_key='X',
                  access_token_secret='X')
#print(api.VerifyCredentials())
ci = 0        # number of items seen in this run
titles = []   # all titles seen (written back as the new known list)
for itdATI in root.iter('itdAdditionalTravelInformation'):
    ci += 1
    infoID = itdATI.attrib['infoID']
    infolinktext = itdATI[0].find('infoLinkText').text
    # previously keyed on infoID instead of the text
    titles.append(infolinktext)
    if infolinktext in known:
        continue  # already tweeted in an earlier run
    # validity period, e.g. "(01.01. - 31.01.)" — taken from the last child
    # of the first sub-element; NOTE(review): index-based access assumed from
    # the feed layout, confirm against a live response
    timespan = "(" + itdATI[0][0][-1].find('value').text + ")"
    #link = itdATI[0].find('infoLinkURL').text  # URL with raw IP is bad on Twitter
    link = "https://efa.vrr.de/vrr/XSLT_ADDINFO_REQUEST?itdLPxx_addInfoDetailView=" + infoID
    # Budget for the message text.  The final tweet is
    #   "#" + text + " " + timespan + " " + link + " " + hashtags
    # so the "#" prefix and the three joining spaces must be subtracted too
    # (they were missing before, which could push the tweet over the limit).
    rest_length = tweet_length - len(timespan) - len(link) - 4
    smstext = ""
    infoText = itdATI[0].find('infoText')
    outputClientText = infoText.find('outputClientText') if infoText is not None else None
    # BUGFIX: read the smsText element we actually checked for — the old code
    # tested find('smsText') but then read the FIRST child, which is not
    # guaranteed to be the smsText element
    if outputClientText is not None and outputClientText.find('smsText') is not None:
        smstext = outputClientText.find('smsText').text or ""
    if len(infolinktext) <= rest_length:
        outputtext = infolinktext            # full title fits
    elif 0 < len(smstext) <= rest_length:
        outputtext = smstext                 # shorter SMS text fits
    else:
        # truncate the title; guard against a negative slice bound
        outputtext = infolinktext[0:max(rest_length - 3, 0)] + "..."
    rest_length -= len(outputtext)
    # hashtag at the start because the place name is there
    tweet = "#" + outputtext + " " + timespan + " " + link + " " + hashtags
    try:
        status = api.PostUpdate(tweet)
    except Exception as e:
        logging.warning(str(datetime.now()) + str(e))
    #print(rest_length, tweet)
    logging.info(str(datetime.now()) + " " + infoID + " " + str(rest_length) + "\n\n" + tweet + "\n\n")
# Persist the titles seen in this run so they are not tweeted again.
# BUGFIX: only write when the fetch actually returned items.  The old code
# overwrote the file with a literal 'n' placeholder when ci == 0, which wiped
# the known list on any empty/failed fetch and caused every item to be
# re-tweeted on the next successful run.
if ci > 0:
    with open(knownfile, 'w', encoding='utf-8') as file:
        file.write('\n'.join(titles) + '\n')
# Script 2/3: tweets new entries of the strassenbahn-hagen.de news RSS feed.
import requests
import twitter
import logging
#import sys
from datetime import datetime
import xml.etree.ElementTree as ET

logging.basicConfig(filename='news.log', level=logging.INFO)

tweet_length = 280
hashtags = "#Hagen #VRR"
tweet_length -= len(hashtags)   # hashtags are appended to every tweet
knownfile = 'known-news.txt'    # one already-tweeted GUID per line

# timeout so an unresponsive server cannot hang the cron job forever
r = requests.get('http://www.strassenbahn-hagen.de/news_rss.xml', timeout=30)
r.raise_for_status()
root = ET.fromstring(r.content)
#tree = ET.parse(sys.argv[1])
#root = tree.getroot()

# BUGFIX: encoding given explicitly, like the other two scripts — without it
# the read depends on the locale and can fail on non-ASCII GUIDs/titles
with open(knownfile, encoding='utf-8') as file:
    known = file.read().splitlines()
nowknown = []  # GUIDs seen in this run (new known list)

api = twitter.Api(consumer_key='X',
                  consumer_secret='X',
                  access_token_key='X',
                  access_token_secret='X')
#print(api.VerifyCredentials())
#print(api.VerifyCredentials())
for item in root.iter('item'):
    # first child of <item> is assumed to be the GUID element in this feed
    # — NOTE(review): item.find('guid') would be safer; confirm feed layout
    guid = item[0].text
    nowknown.append(guid)
    if guid in known:
        continue  # already tweeted
    #else:
    #    with open(knownfile, 'a') as file:
    #        file.write(guid+'\n')
    link = item.find('link').text
    title = item.find('title').text
    # remaining headroom; "- 2" accounts for the two joining spaces
    rest_length = tweet_length - len(title) - len(link) - 2
    if rest_length < 0:
        # BUGFIX: previously the overlong tweet was posted anyway (and
        # rejected by the API); truncate the title so the tweet fits
        title = title[:max(len(title) + rest_length - 3, 0)] + "..."
        rest_length = 0
    tweet = title + " " + link + " " + hashtags
    try:
        status = api.PostUpdate(tweet)
    except Exception as e:
        logging.warning(str(datetime.now()) + str(e))
    logging.info(str(datetime.now()) + " " + guid + " " + str(rest_length) + "\n\n" + tweet + "\n\n")
# new: rewrite the whole known list instead of appending per item.
# BUGFIX: skip the write when the feed returned no items — an empty/failed
# fetch would otherwise wipe the known list and re-tweet the whole feed on
# the next successful run.
if nowknown:
    with open(knownfile, 'w', encoding='utf-8') as file:
        file.write('\n'.join(nowknown) + '\n')
# Script 3/3: tweets new entries of the strassenbahn-hagen.de ticker feed,
# cleaning up the HTML-ish text and splitting long messages into a thread.
import requests
import twitter
#import sys
from datetime import datetime
import logging
from bs4 import BeautifulSoup
import xml.etree.ElementTree as ET

logging.basicConfig(filename='ticker.log', level=logging.INFO)

tweet_length = 280
hashtags = "#Hagen #VRR"
# place names turned into hashtags inside the ticker text
orte = ["MVG", "VER", "Dortmund", "Schwerte", "Iserlohn", "Wiblingwerde", "Breckerfeld", "Ennepetal", "Gevelsberg", "Wetter", "Herdecke", "Kierspe"]
website = "http://www.strassenbahn-hagen.de"
# website + separating space + hashtags are appended to every tweet
tweet_length -= (len(website) + 1 + len(hashtags))
knownfile = 'known-ticker.txt'  # one already-tweeted ticker text per line

# timeout so an unresponsive server cannot hang the cron job forever
r = requests.get('http://www.strassenbahn-hagen.de/ticker_rss.xml', timeout=30)
r.raise_for_status()
root = ET.fromstring(r.content)
#tree = ET.parse(sys.argv[1])
#root = tree.getroot()

with open(knownfile, encoding='utf-8') as file:
    known = file.read().splitlines()

api = twitter.Api(consumer_key='X',
                  consumer_secret='X',
                  access_token_key='X',
                  access_token_secret='X')
#print(api.VerifyCredentials())
#print(api.VerifyCredentials())
ci = 0             # number of items seen in this run
orig_content = []  # raw (whitespace-collapsed) texts, written back as known list
tweets = []        # list of threads; each entry is a list of one or more tweets
for item in root.iter('item'):
    ci += 1
    # last child of <item> holds the ticker text; collapse all whitespace
    content = " ".join(item[-1].text.split())
    orig_content.append(content)
    if content in known:
        continue  # already tweeted
    title = ""
    if item.find('title').text is not None:
        # strip HTML from the title via BeautifulSoup
        title = BeautifulSoup(" ".join(item.find('title').text.split()).strip(), "lxml").text.strip()
    # BUGFIX: the old code called map(str.strip, teile) and discarded the lazy
    # map object, so the parts were never actually stripped
    teile = [teil.strip() for teil in content.split('<br>')]
    ausgabe = ""        # assembled output text
    ausgegeben = False  # anything emitted yet?
    bepunktet = False   # last fragment was period-terminated
    komma = ""          # joiner prepended to the next un-punctuated fragment
    # NOTE(review): nesting below reconstructed from a whitespace-mangled
    # paste — verify against the original gist
    for teil in teile:
        for unterteil in teil.split(". "):  # potential todo: separators other than a period, hm
            # skip "no disruptions" fragments, empties, and bare line labels
            # like "Linie 512:" (starts with l/L, ends with a colon)
            if unterteil.find("keine Einschränkungen") < 0 and unterteil not in ["", " ", "\n"] and not (unterteil[0].lower() == "l" and ":" in [unterteil[-1], unterteil[-2]]):
                if teil.find(". ") > -1:
                    # part contains sentences: make sure each ends with a period
                    if bepunktet == False:
                        ausgabe += komma + unterteil.strip()
                    else:
                        ausgabe += unterteil.strip()
                    if ausgabe[-1] not in [".", "!", ";"]:
                        ausgabe += "."
                    ausgabe += " "
                    ausgegeben = True
                    bepunktet = True
                else:
                    if bepunktet:
                        ausgabe += unterteil.strip()
                        ausgegeben = True
                        bepunktet = False
                    else:
                        ausgabe += komma + unterteil.strip()
                        ausgegeben = True
            if ausgegeben:
                # choose the joiner for the next fragment based on how the
                # output currently ends
                if ausgabe[-1] in [".", ",", ")", "!", ";"]:
                    komma = " "
                else:
                    komma = ", "
    # turn known place names into hashtags
    for ort in orte:
        ausgabe = ausgabe.replace(ort, "#" + ort)
    # todo: cut-off hashtags should not happen
    # todo: no "#Dortmunder Str." etc.
    tweet = title + "\n" + ausgabe.strip()
    if tweet.count(" Linie ") > 3:
        # many line mentions: one line per "Linie" for readability
        tweet = tweet.replace(" Linie ", "\nLinie ")
    tweetsplit = []
    if len(tweet) <= tweet_length:
        tweet += (" " + website + " " + hashtags)
        tweets.append([tweet])
        print(tweet)
    else:
        # split into a thread; each chunk leaves 4 characters of headroom for
        # the leading/trailing ellipses (changed from 3 to 4)
        # (renamed from `r`, which shadowed the HTTP response above)
        chunk_starts = range(0, len(tweet), tweet_length - 4)
        # todo: maximize text per tweet, especially all after the first one!
        for ri in chunk_starts:
            if ri == chunk_starts[-1]:
                tweetsplit.append("…" + tweet[ri:ri + tweet_length - 4] + " " + website)
            elif ri == chunk_starts[0]:
                tweetsplit.append(tweet[ri:ri + tweet_length - 4] + "… " + website + " " + hashtags)
            else:
                tweetsplit.append("…" + tweet[ri:ri + tweet_length - 4] + "… " + website)
            print(tweetsplit[-1])
        tweets.append(tweetsplit)
# Post each thread; multi-part threads are chained via in_reply_to_status_id.
for x in tweets:
    ti = 0      # index within the thread
    lastid = 0  # status id of the previously posted part
    if len(x) == 1:
        try:
            status = api.PostUpdate(x[0])
        except Exception as e:
            # BUGFIX: log the tweet itself — the old code logged `content`,
            # which at this point always held the LAST item parsed by the
            # previous loop, not the one belonging to this tweet
            logging.warning(str(datetime.now()) + " " + str(e) + "\nTweet:\n" + x[0] + "\n")
        logging.info(str(datetime.now()) + "\n\n" + x[0] + "\n\n")
    elif len(x) > 1:
        for tw in x:
            try:
                if ti == 0:
                    status = api.PostUpdate(tw)
                else:
                    # reply to the previous part so the parts form a thread
                    status = api.PostUpdate(tw, in_reply_to_status_id=lastid)
                lastid = status.id
            except Exception as e:
                logging.warning(str(datetime.now()) + " x[" + str(ti) + "] " + str(e) + "\nTweet:\n" + tw + "\n")
            ti += 1
        logging.info(str(datetime.now()) + "\n\n" + "\n".join(x) + "\n\n")
# Persist the ticker texts seen in this run so they are not tweeted again.
# BUGFIX: only write when the fetch actually returned items.  The old code
# overwrote the file with a literal 'n' placeholder when ci == 0, which wiped
# the known list on any empty/failed fetch and caused every item to be
# re-tweeted on the next successful run.
if ci > 0:
    with open(knownfile, 'w', encoding='utf-8') as file:
        file.write('\n'.join(orig_content) + '\n')
Copyright © 2017~2020 Kevin Arutyunyan
This work is free. You can redistribute it and/or modify it under the
terms of the Do What The Fuck You Want To Public License, Version 2,
as published by Sam Hocevar. See http://www.wtfpl.net/ for more details.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.