Django implementation of url_has_allowed_host_and_scheme & other useful functions (works with Flask)
import base64
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
    ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams,
    scheme_chars, urlencode as original_urlencode, uses_params,
)

from werkzeug.datastructures import MultiDict

ETAG_MATCH = re.compile(r'\A((?:W/)?"[^"]*")')

MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))

RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="

def urlencode(query, doseq=False):
    """
    A version of Python's urllib.parse.urlencode() function that can operate on
    MultiDict and non-string values.
    """
    if isinstance(query, MultiDict):
        query = query.lists()
    elif hasattr(query, 'items'):
        query = query.items()
    query_params = []
    for key, value in query:
        if value is None:
            raise TypeError(
                "Cannot encode None for key '%s' in a query string. Did you "
                "mean to pass an empty string or omit the value?" % key
            )
        elif not doseq or isinstance(value, (str, bytes)):
            query_val = value
        else:
            try:
                itr = iter(value)
            except TypeError:
                query_val = value
            else:
                # Consume generators and iterators, when doseq=True, to
                # work around https://bugs.python.org/issue31706.
                query_val = []
                for item in itr:
                    if item is None:
                        raise TypeError(
                            "Cannot encode None for key '%s' in a query "
                            "string. Did you mean to pass an empty string or "
                            "omit the value?" % key
                        )
                    elif not isinstance(item, bytes):
                        item = str(item)
                    query_val.append(item)
        query_params.append((key, query_val))
    return original_urlencode(query_params, doseq)
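
# Illustrative usage sketch (not part of the original gist): urlencode() accepts
# plain dicts as well as werkzeug MultiDicts with repeated keys. The expected
# outputs below are what original_urlencode() produces for these inputs.
#
# >>> urlencode({'next': '/dashboard', 'page': 2})
# 'next=%2Fdashboard&page=2'
# >>> urlencode(MultiDict([('tag', 'a'), ('tag', 'b')]), doseq=True)
# 'tag=a&tag=b'
# >>> urlencode({'q': None})  # None values are rejected explicitly
# TypeError: Cannot encode None for key 'q' in a query string. ...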

def http_date(epoch_seconds=None):
    """
    Format the time to match the RFC1123 date format as specified by HTTP
    RFC7231 section 7.1.1.1.
    `epoch_seconds` is a floating point number expressed in seconds since the
    epoch, in UTC - such as that outputted by time.time(). If set to None, it
    defaults to the current time.
    Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    return formatdate(epoch_seconds, usegmt=True)

def parse_http_date(date):
    """
    Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
    The three formats allowed by the RFC are accepted, even if only the first
    one is still in widespread use.
    Return an integer expressed in seconds since the epoch, in UTC.
    """
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
    # RFC7231 makes it mandatory to support RFC850 dates too. So we roll
    # our own RFC-compliant parsing.
    for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
        m = regex.match(date)
        if m is not None:
            break
    else:
        raise ValueError("%r is not in a valid HTTP date format" % date)
    try:
        tz = datetime.timezone.utc
        year = int(m['year'])
        if year < 100:
            current_year = datetime.datetime.now(tz=tz).year
            current_century = current_year - (current_year % 100)
            if year - (current_year % 100) > 50:
                # A two-digit year that appears to be more than 50 years in
                # the future is interpreted as representing the past.
                year += current_century - 100
            else:
                year += current_century
        month = MONTHS.index(m['mon'].lower()) + 1
        day = int(m['day'])
        hour = int(m['hour'])
        min = int(m['min'])
        sec = int(m['sec'])
        result = datetime.datetime(year, month, day, hour, min, sec, tzinfo=tz)
        return int(result.timestamp())
    except Exception as exc:
        raise ValueError("%r is not a valid date" % date) from exc

def parse_http_date_safe(date):
    """
    Same as parse_http_date, but return None if the input is invalid.
    """
    try:
        return parse_http_date(date)
    except Exception:
        pass
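
# Illustrative usage sketch (not part of the original gist): http_date() and
# parse_http_date() round-trip an RFC1123 timestamp. The epoch value below is
# just an example (1700000000 == 2023-11-14 22:13:20 UTC).
#
# >>> http_date(1700000000)
# 'Tue, 14 Nov 2023 22:13:20 GMT'
# >>> parse_http_date('Tue, 14 Nov 2023 22:13:20 GMT')
# 1700000000
# >>> parse_http_date_safe('not a date') is None
# True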

# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Convert a base 36 string to an int. Raise ValueError if the input won't fit
    into an int.
    """
    # To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
    # is sufficient to base36-encode any 64-bit integer)
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)

def int_to_base36(i):
    """Convert an integer to a base36 string."""
    char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if i < 36:
        return char_set[i]
    b36 = ''
    while i != 0:
        i, n = divmod(i, 36)
        b36 = char_set[n] + b36
    return b36
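
# Illustrative usage sketch (not part of the original gist): base36 round trip,
# e.g. for compact tokens in URLs.
#
# >>> int_to_base36(42)
# '16'
# >>> base36_to_int('16')
# 42
# >>> base36_to_int('z' * 14)  # more than 13 digits is rejected
# ValueError: Base36 input too large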

def urlsafe_base64_encode(s):
    """
    Encode a bytestring to a base64 string for use in URLs. Strip any trailing
    equal signs.
    """
    return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii')

def urlsafe_base64_decode(s):
    """
    Decode a base64 encoded string. Add back any trailing equal signs that
    might have been stripped.
    """
    s = s.encode()
    try:
        return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
    except (LookupError, BinasciiError) as e:
        raise ValueError(e)
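
# Illustrative usage sketch (not part of the original gist): the encoder strips
# '=' padding so the value is URL-safe, and the decoder pads it back before
# decoding.
#
# >>> urlsafe_base64_encode(b'hello')
# 'aGVsbG8'
# >>> urlsafe_base64_decode('aGVsbG8')
# b'hello'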

def parse_etags(etag_str):
    """
    Parse a string of ETags given in an If-None-Match or If-Match header as
    defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
    should be matched.
    """
    if etag_str.strip() == '*':
        return ['*']
    else:
        # Parse each ETag individually, and return any that are valid.
        etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
        return [match[1] for match in etag_matches if match]

def quote_etag(etag_str):
    """
    If the provided string is already a quoted ETag, return it. Otherwise, wrap
    the string in quotes, making it a strong ETag.
    """
    if ETAG_MATCH.match(etag_str):
        return etag_str
    else:
        return '"%s"' % etag_str
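
# Illustrative usage sketch (not part of the original gist): parsing an
# If-None-Match header and quoting a raw ETag value.
#
# >>> parse_etags('W/"abc", "def"')
# ['W/"abc"', '"def"']
# >>> quote_etag('abc')
# '"abc"'
# >>> quote_etag('"abc"')  # already quoted, returned unchanged
# '"abc"'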

def is_same_domain(host, pattern):
    """
    Return ``True`` if the host is either an exact match or a match
    to the wildcard pattern.
    Any pattern beginning with a period matches a domain and all of its
    subdomains. (e.g. ``.example.com`` matches ``example.com`` and
    ``foo.example.com``). Anything else is an exact string match.
    """
    if not pattern:
        return False
    pattern = pattern.lower()
    return (
        pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
        pattern == host
    )
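
# Illustrative usage sketch (not part of the original gist): a leading dot in
# the pattern matches the domain and all of its subdomains; the host is
# expected to already be lowercase.
#
# >>> is_same_domain('foo.example.com', '.example.com')
# True
# >>> is_same_domain('example.com', '.example.com')
# True
# >>> is_same_domain('badexample.com', '.example.com')
# False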

def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
    """
    Return ``True`` if the url uses an allowed host and a safe scheme.
    Always return ``False`` on an empty url.
    If ``require_https`` is ``True``, only 'https' will be considered a valid
    scheme, as opposed to 'http' and 'https' with the default, ``False``.
    Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
    quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
    on the path component of untrusted URLs.
    """
    if url is not None:
        url = url.strip()
    if not url:
        return False
    if allowed_hosts is None:
        allowed_hosts = set()
    elif isinstance(allowed_hosts, str):
        allowed_hosts = {allowed_hosts}
    # Chrome treats \ completely as / in paths but it could be part of some
    # basic auth credentials so we need to check both URLs.
    return (
        _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and
        _url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https)
    )
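
# Illustrative Flask usage sketch (assumption, not part of the original gist):
# validate a ?next= redirect target against the current request host before
# redirecting. The view and the 'index' endpoint are placeholders.
#
# from flask import Flask, redirect, request, url_for
#
# app = Flask(__name__)
#
# @app.route('/login', methods=['GET', 'POST'])
# def login():
#     ...  # authenticate the user here
#     next_url = request.values.get('next', '')
#     if url_has_allowed_host_and_scheme(next_url, allowed_hosts={request.host},
#                                        require_https=request.is_secure):
#         return redirect(next_url)
#     return redirect(url_for('index'))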

# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    splitresult = _urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = splitresult
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    result = ParseResult(scheme, netloc, url, params, query, fragment)
    return _coerce_result(result)

# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>
    Return a 5-tuple: (scheme, netloc, path, query, fragment).
    Note that we don't break the components up in smaller bits
    (e.g. netloc is a single string) and we don't expand % escapes."""
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0:
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i + 1:]
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    v = SplitResult(scheme, netloc, url, query, fragment)
    return _coerce_result(v)

def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
    # Chrome considers any URL with more than two slashes to be absolute, but
    # urlparse is not so flexible. Treat any url with three slashes as unsafe.
    if url.startswith('///'):
        return False
    try:
        url_info = _urlparse(url)
    except ValueError:  # e.g. invalid IPv6 addresses
        return False
    # Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
    # Chrome will still consider example.com to be the hostname, so we must not
    # allow this syntax.
    if not url_info.netloc and url_info.scheme:
        return False
    # Forbid URLs that start with control characters. Some browsers (like
    # Chrome) ignore quite a few control characters at the start of a
    # URL and might consider the URL as scheme relative.
    if unicodedata.category(url[0])[0] == 'C':
        return False
    scheme = url_info.scheme
    # Consider URLs without a scheme (e.g. //example.com/p) to be http.
    if not url_info.scheme and url_info.netloc:
        scheme = 'http'
    valid_schemes = ['https'] if require_https else ['http', 'https']
    return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
            (not scheme or scheme in valid_schemes))

def escape_leading_slashes(url):
    """
    If redirecting to an absolute path (two leading slashes), a slash must be
    escaped to prevent browsers from handling the path as schemaless and
    redirecting to another host.
    """
    if url.startswith('//'):
        url = '/%2F{}'.format(url[2:])
    return url
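
# Illustrative usage sketch (not part of the original gist): a scheme-relative
# redirect target like '//evil.com/...' is rewritten so browsers treat it as a
# local absolute path instead of another host.
#
# >>> escape_leading_slashes('//evil.com/path')
# '/%2Fevil.com/path'
# >>> escape_leading_slashes('/safe/path')
# '/safe/path'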