Added python-site-packages.

This commit is contained in:
pranav-1711 2022-03-04 18:52:19 +05:30
parent e381ec853c
commit 15257808cc
67 changed files with 25523 additions and 0 deletions

View file

@@ -0,0 +1,19 @@
from .encoding import base64_decode as base64_decode
from .encoding import base64_encode as base64_encode
from .encoding import want_bytes as want_bytes
from .exc import BadData as BadData
from .exc import BadHeader as BadHeader
from .exc import BadPayload as BadPayload
from .exc import BadSignature as BadSignature
from .exc import BadTimeSignature as BadTimeSignature
from .exc import SignatureExpired as SignatureExpired
from .serializer import Serializer as Serializer
from .signer import HMACAlgorithm as HMACAlgorithm
from .signer import NoneAlgorithm as NoneAlgorithm
from .signer import Signer as Signer
from .timed import TimedSerializer as TimedSerializer
from .timed import TimestampSigner as TimestampSigner
from .url_safe import URLSafeSerializer as URLSafeSerializer
from .url_safe import URLSafeTimedSerializer as URLSafeTimedSerializer
# itsdangerous release version (PEP 440 string).
__version__ = "2.1.0"

View file

@@ -0,0 +1,16 @@
import json as _json
import typing as _t
class _CompactJSON:
"""Wrapper around json module that strips whitespace."""
@staticmethod
def loads(payload: _t.Union[str, bytes]) -> _t.Any:
return _json.loads(payload)
@staticmethod
def dumps(obj: _t.Any, **kwargs: _t.Any) -> str:
kwargs.setdefault("ensure_ascii", False)
kwargs.setdefault("separators", (",", ":"))
return _json.dumps(obj, **kwargs)

View file

@@ -0,0 +1,54 @@
import base64
import string
import struct
import typing as _t
from .exc import BadData
_t_str_bytes = _t.Union[str, bytes]
def want_bytes(
    s: _t.Union[str, bytes], encoding: str = "utf-8", errors: str = "strict"
) -> bytes:
    """Coerce *s* to ``bytes``: ``str`` input is encoded with *encoding*
    and *errors*; anything else is passed through unchanged.
    """
    if isinstance(s, str):
        return s.encode(encoding, errors)
    return s
def base64_encode(string: _t.Union[str, bytes]) -> bytes:
    """Base64 encode a string of bytes or text. The resulting bytes are
    safe to use in URLs.
    """
    # Encode text to UTF-8 first; non-str input is used as-is.
    data = string.encode("utf-8") if isinstance(string, str) else string
    # Trailing "=" padding carries no information and is dropped.
    return base64.urlsafe_b64encode(data).rstrip(b"=")
def base64_decode(string: _t.Union[str, bytes]) -> bytes:
    """Base64 decode a URL-safe string of bytes or text. The result is
    bytes.
    """
    # Text input is reduced to its ASCII bytes; other bytes cannot be
    # valid base64 anyway.
    data = string.encode("ascii", "ignore") if isinstance(string, str) else string
    # Restore the "=" padding that base64_encode stripped.
    data += b"=" * (-len(data) % 4)
    try:
        return base64.urlsafe_b64decode(data)
    except (TypeError, ValueError) as e:
        raise BadData("Invalid base64-encoded data") from e
# The alphabet used by base64.urlsafe_*
_base64_alphabet = f"{string.ascii_letters}{string.digits}-_=".encode("ascii")
# Big-endian unsigned 64-bit integer layout, used to pack/unpack ints
# (e.g. timestamps) compactly.
_int64_struct = struct.Struct(">Q")
_int_to_bytes = _int64_struct.pack
# cast: unpack of a single ">Q" field always yields a 1-tuple of int.
_bytes_to_int = _t.cast("_t.Callable[[bytes], _t.Tuple[int]]", _int64_struct.unpack)
def int_to_bytes(num: int) -> bytes:
    """Pack *num* as big-endian unsigned 64-bit, dropping leading zero
    bytes (so 0 becomes ``b""``).
    """
    packed = struct.pack(">Q", num)
    return packed.lstrip(b"\x00")
def bytes_to_int(bytestr: bytes) -> int:
    """Inverse of :func:`int_to_bytes`: left-pad to 8 bytes and unpack
    as a big-endian unsigned 64-bit integer.
    """
    (num,) = struct.unpack(">Q", bytestr.rjust(8, b"\x00"))
    return num

View file

@@ -0,0 +1,107 @@
import typing as _t
from datetime import datetime
_t_opt_any = _t.Optional[_t.Any]
_t_opt_exc = _t.Optional[Exception]
class BadData(Exception):
    """Base class for every error ItsDangerous raises when it encounters
    bad data of any sort.

    .. versionadded:: 0.15
    """

    def __init__(self, message: str):
        # Keep the message available as an attribute in addition to the
        # standard Exception args.
        self.message = message
        super().__init__(message)

    def __str__(self) -> str:
        return self.message
class BadSignature(BadData):
    """Raised if a signature does not match.

    The unverified payload, when it could be recovered, is exposed as
    :attr:`payload`.
    """

    def __init__(self, message: str, payload: _t_opt_any = None):
        super().__init__(message)
        #: The payload that failed the signature test. In some
        #: situations you might still want to inspect this, even if
        #: you know it was tampered with.
        #:
        #: .. versionadded:: 0.14
        self.payload: _t_opt_any = payload
class BadTimeSignature(BadSignature):
    """Raised if a time-based signature is invalid. This is a subclass
    of :class:`BadSignature`.
    """

    def __init__(
        self,
        message: str,
        payload: _t_opt_any = None,
        date_signed: _t.Optional[datetime] = None,
    ):
        super().__init__(message, payload)
        #: If the signature expired this exposes the date of when the
        #: signature was created. This can be helpful in order to
        #: tell the user how long a link has been gone stale.
        #:
        #: .. versionchanged:: 2.0
        #:     The datetime value is timezone-aware rather than naive.
        #:
        #: .. versionadded:: 0.14
        self.date_signed = date_signed
class SignatureExpired(BadTimeSignature):
    """Raised if a signature timestamp is older than ``max_age``. This
    is a subclass of :exc:`BadTimeSignature`.

    Adds no behavior of its own; it inherits ``payload`` and
    ``date_signed`` from its parents.
    """
class BadHeader(BadSignature):
    """Raised if a signed header is invalid in some form. This only
    happens for serializers that have a header that goes with the
    signature.

    .. versionadded:: 0.24
    """

    def __init__(
        self,
        message: str,
        payload: _t_opt_any = None,
        header: _t_opt_any = None,
        original_error: _t_opt_exc = None,
    ):
        super().__init__(message, payload)
        #: If the header is actually available but just malformed it
        #: might be stored here.
        self.header: _t_opt_any = header
        #: If available, the error that indicates why the payload was
        #: not valid. This might be ``None``.
        self.original_error: _t_opt_exc = original_error
class BadPayload(BadData):
    """Raised if a payload is invalid. This could happen if the payload
    is loaded despite an invalid signature, or if there is a mismatch
    between the serializer and deserializer. The original exception
    that occurred during loading is stored on as :attr:`original_error`.

    .. versionadded:: 0.15
    """

    def __init__(self, message: str, original_error: _t_opt_exc = None):
        super().__init__(message)
        #: If available, the error that indicates why the payload was
        #: not valid. This might be ``None``.
        self.original_error: _t_opt_exc = original_error

View file

@@ -0,0 +1,295 @@
import json
import typing as _t
from .encoding import want_bytes
from .exc import BadPayload
from .exc import BadSignature
from .signer import _make_keys_list
from .signer import Signer
_t_str_bytes = _t.Union[str, bytes]
_t_opt_str_bytes = _t.Optional[_t_str_bytes]
_t_kwargs = _t.Dict[str, _t.Any]
_t_opt_kwargs = _t.Optional[_t_kwargs]
_t_signer = _t.Type[Signer]
_t_fallbacks = _t.List[_t.Union[_t_kwargs, _t.Tuple[_t_signer, _t_kwargs], _t_signer]]
_t_load_unsafe = _t.Tuple[bool, _t.Any]
_t_secret_key = _t.Union[_t.Iterable[_t_str_bytes], _t_str_bytes]
def is_text_serializer(serializer: _t.Any) -> bool:
"""Checks whether a serializer generates text or binary."""
return isinstance(serializer.dumps({}), str)
class Serializer:
    """A serializer wraps a :class:`~itsdangerous.signer.Signer` to
    enable serializing and securely signing data other than bytes. It
    can unsign to verify that the data hasn't been changed.

    The serializer provides :meth:`dumps` and :meth:`loads`, similar to
    :mod:`json`, and by default uses :mod:`json` internally to serialize
    the data to bytes.

    The secret key should be a random string of ``bytes`` and should not
    be saved to code or version control. Different salts should be used
    to distinguish signing in different contexts. See :doc:`/concepts`
    for information about the security of the secret key and salt.

    :param secret_key: The secret key to sign and verify with. Can be a
        list of keys, oldest to newest, to support key rotation.
    :param salt: Extra key to combine with ``secret_key`` to distinguish
        signatures in different contexts.
    :param serializer: An object that provides ``dumps`` and ``loads``
        methods for serializing data to a string. Defaults to
        :attr:`default_serializer`, which defaults to :mod:`json`.
    :param serializer_kwargs: Keyword arguments to pass when calling
        ``serializer.dumps``.
    :param signer: A ``Signer`` class to instantiate when signing data.
        Defaults to :attr:`default_signer`, which defaults to
        :class:`~itsdangerous.signer.Signer`.
    :param signer_kwargs: Keyword arguments to pass when instantiating
        the ``Signer`` class.
    :param fallback_signers: List of signer parameters to try when
        unsigning with the default signer fails. Each item can be a dict
        of ``signer_kwargs``, a ``Signer`` class, or a tuple of
        ``(signer, signer_kwargs)``. Defaults to
        :attr:`default_fallback_signers`.

    .. versionchanged:: 2.0
        Added support for key rotation by passing a list to
        ``secret_key``.

    .. versionchanged:: 2.0
        Removed the default SHA-512 fallback signer from
        ``default_fallback_signers``.

    .. versionchanged:: 1.1
        Added support for ``fallback_signers`` and configured a default
        SHA-512 fallback. This fallback is for users who used the yanked
        1.0.0 release which defaulted to SHA-512.

    .. versionchanged:: 0.14
        The ``signer`` and ``signer_kwargs`` parameters were added to
        the constructor.
    """

    #: The default serialization module to use to serialize data to a
    #: string internally. The default is :mod:`json`, but can be changed
    #: to any object that provides ``dumps`` and ``loads`` methods.
    default_serializer: _t.Any = json

    #: The default ``Signer`` class to instantiate when signing data.
    #: The default is :class:`itsdangerous.signer.Signer`.
    default_signer: _t_signer = Signer

    #: The default fallback signers to try when unsigning fails.
    default_fallback_signers: _t_fallbacks = []

    def __init__(
        self,
        secret_key: _t_secret_key,
        salt: _t_opt_str_bytes = b"itsdangerous",
        serializer: _t.Any = None,
        serializer_kwargs: _t_opt_kwargs = None,
        signer: _t.Optional[_t_signer] = None,
        signer_kwargs: _t_opt_kwargs = None,
        fallback_signers: _t.Optional[_t_fallbacks] = None,
    ):
        #: The list of secret keys to try for verifying signatures, from
        #: oldest to newest. The newest (last) key is used for signing.
        #:
        #: This allows a key rotation system to keep a list of allowed
        #: keys and remove expired ones.
        self.secret_keys: _t.List[bytes] = _make_keys_list(secret_key)

        if salt is not None:
            salt = want_bytes(salt)
            # if salt is None then the signer's default is used

        self.salt = salt

        if serializer is None:
            serializer = self.default_serializer

        self.serializer: _t.Any = serializer
        # Remember whether the serializer produces str (vs bytes) so
        # dumps/loads know when to decode/encode UTF-8.
        self.is_text_serializer: bool = is_text_serializer(serializer)

        if signer is None:
            signer = self.default_signer

        self.signer: _t_signer = signer
        self.signer_kwargs: _t_kwargs = signer_kwargs or {}

        if fallback_signers is None:
            fallback_signers = list(self.default_fallback_signers or ())

        self.fallback_signers: _t_fallbacks = fallback_signers
        self.serializer_kwargs: _t_kwargs = serializer_kwargs or {}

    @property
    def secret_key(self) -> bytes:
        """The newest (last) entry in the :attr:`secret_keys` list. This
        is for compatibility from before key rotation support was added.
        """
        return self.secret_keys[-1]

    def load_payload(
        self, payload: bytes, serializer: _t.Optional[_t.Any] = None
    ) -> _t.Any:
        """Loads the encoded object. This function raises
        :class:`.BadPayload` if the payload is not valid. The
        ``serializer`` parameter can be used to override the serializer
        stored on the class. The encoded ``payload`` should always be
        bytes.
        """
        if serializer is None:
            serializer = self.serializer
            is_text = self.is_text_serializer
        else:
            is_text = is_text_serializer(serializer)

        try:
            if is_text:
                return serializer.loads(payload.decode("utf-8"))

            return serializer.loads(payload)
        except Exception as e:
            raise BadPayload(
                "Could not load the payload because an exception"
                " occurred on unserializing the data.",
                original_error=e,
            ) from e

    def dump_payload(self, obj: _t.Any) -> bytes:
        """Dumps the encoded object. The return value is always bytes.
        If the internal serializer returns text, the value will be
        encoded as UTF-8.
        """
        return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))

    def make_signer(self, salt: _t_opt_str_bytes = None) -> Signer:
        """Creates a new instance of the signer to be used. The default
        implementation uses the :class:`.Signer` base class.
        """
        if salt is None:
            salt = self.salt

        return self.signer(self.secret_keys, salt=salt, **self.signer_kwargs)

    def iter_unsigners(self, salt: _t_opt_str_bytes = None) -> _t.Iterator[Signer]:
        """Iterates over all signers to be tried for unsigning. Starts
        with the configured signer, then constructs each signer
        specified in ``fallback_signers``.
        """
        if salt is None:
            salt = self.salt

        yield self.make_signer(salt)

        for fallback in self.fallback_signers:
            # Each fallback entry may be a kwargs dict (for the
            # configured signer class), a (class, kwargs) tuple, or a
            # bare signer class (reusing the configured signer kwargs).
            if isinstance(fallback, dict):
                kwargs = fallback
                fallback = self.signer
            elif isinstance(fallback, tuple):
                fallback, kwargs = fallback
            else:
                kwargs = self.signer_kwargs

            for secret_key in self.secret_keys:
                yield fallback(secret_key, salt=salt, **kwargs)

    def dumps(self, obj: _t.Any, salt: _t_opt_str_bytes = None) -> _t_str_bytes:
        """Returns a signed string serialized with the internal
        serializer. The return value can be either a byte or unicode
        string depending on the format of the internal serializer.
        """
        payload = want_bytes(self.dump_payload(obj))
        rv = self.make_signer(salt).sign(payload)

        if self.is_text_serializer:
            return rv.decode("utf-8")

        return rv

    def dump(self, obj: _t.Any, f: _t.IO, salt: _t_opt_str_bytes = None) -> None:
        """Like :meth:`dumps` but dumps into a file. The file handle has
        to be compatible with what the internal serializer expects.
        """
        f.write(self.dumps(obj, salt))

    def loads(
        self, s: _t_str_bytes, salt: _t_opt_str_bytes = None, **kwargs: _t.Any
    ) -> _t.Any:
        """Reverse of :meth:`dumps`. Raises :exc:`.BadSignature` if the
        signature validation fails.
        """
        s = want_bytes(s)
        last_exception = None

        # Try the default signer first, then each fallback; the first
        # one that validates wins. If all fail, re-raise the most
        # recent failure (iter_unsigners always yields at least one).
        for signer in self.iter_unsigners(salt):
            try:
                return self.load_payload(signer.unsign(s))
            except BadSignature as err:
                last_exception = err

        raise _t.cast(BadSignature, last_exception)

    def load(self, f: _t.IO, salt: _t_opt_str_bytes = None) -> _t.Any:
        """Like :meth:`loads` but loads from a file."""
        return self.loads(f.read(), salt)

    def loads_unsafe(
        self, s: _t_str_bytes, salt: _t_opt_str_bytes = None
    ) -> _t_load_unsafe:
        """Like :meth:`loads` but without verifying the signature. This
        is potentially very dangerous to use depending on how your
        serializer works. The return value is ``(signature_valid,
        payload)`` instead of just the payload. The first item will be a
        boolean that indicates if the signature is valid. This function
        never fails.

        Use it for debugging only and if you know that your serializer
        module is not exploitable (for example, do not use it with a
        pickle serializer).

        .. versionadded:: 0.15
        """
        return self._loads_unsafe_impl(s, salt)

    def _loads_unsafe_impl(
        self,
        s: _t_str_bytes,
        salt: _t_opt_str_bytes,
        load_kwargs: _t_opt_kwargs = None,
        load_payload_kwargs: _t_opt_kwargs = None,
    ) -> _t_load_unsafe:
        """Low level helper function to implement :meth:`loads_unsafe`
        in serializer subclasses.
        """
        if load_kwargs is None:
            load_kwargs = {}

        try:
            return True, self.loads(s, salt=salt, **load_kwargs)
        except BadSignature as e:
            # Signature failed: still attempt to decode the payload so
            # the caller can inspect it, flagged as unverified.
            if e.payload is None:
                return False, None

            if load_payload_kwargs is None:
                load_payload_kwargs = {}

            try:
                return (
                    False,
                    self.load_payload(e.payload, **load_payload_kwargs),
                )
            except BadPayload:
                return False, None

    def load_unsafe(self, f: _t.IO, salt: _t_opt_str_bytes = None) -> _t_load_unsafe:
        """Like :meth:`loads_unsafe` but loads from a file.

        .. versionadded:: 0.15
        """
        return self.loads_unsafe(f.read(), salt=salt)

View file

@@ -0,0 +1,257 @@
import hashlib
import hmac
import typing as _t
from .encoding import _base64_alphabet
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import want_bytes
from .exc import BadSignature
_t_str_bytes = _t.Union[str, bytes]
_t_opt_str_bytes = _t.Optional[_t_str_bytes]
_t_secret_key = _t.Union[_t.Iterable[_t_str_bytes], _t_str_bytes]
class SigningAlgorithm:
    """Base class for signing algorithms. Subclasses must implement
    :meth:`get_signature` to produce the raw signature bytes.
    """

    def get_signature(self, key: bytes, value: bytes) -> bytes:
        """Returns the signature for the given key and value."""
        raise NotImplementedError()

    def verify_signature(self, key: bytes, value: bytes, sig: bytes) -> bool:
        """Check ``sig`` against a freshly computed signature using a
        constant-time comparison.
        """
        expected = self.get_signature(key, value)
        return hmac.compare_digest(sig, expected)
class NoneAlgorithm(SigningAlgorithm):
    """Algorithm that performs no signing at all and always produces an
    empty signature.
    """

    def get_signature(self, key: bytes, value: bytes) -> bytes:
        # Nothing is signed; every value maps to the empty signature.
        return b""
class HMACAlgorithm(SigningAlgorithm):
    """Provides signature generation using HMACs."""

    #: The digest method to use with the MAC algorithm. This defaults to
    #: SHA1, but can be changed to any other function in the hashlib
    #: module.
    default_digest_method: _t.Any = staticmethod(hashlib.sha1)

    def __init__(self, digest_method: _t.Any = None):
        # Fall back to the class default when no digest is given.
        self.digest_method: _t.Any = (
            self.default_digest_method if digest_method is None else digest_method
        )

    def get_signature(self, key: bytes, value: bytes) -> bytes:
        return hmac.new(key, msg=value, digestmod=self.digest_method).digest()
def _make_keys_list(secret_key: _t_secret_key) -> _t.List[bytes]:
if isinstance(secret_key, (str, bytes)):
return [want_bytes(secret_key)]
return [want_bytes(s) for s in secret_key]
class Signer:
    """A signer securely signs bytes, then unsigns them to verify that
    the value hasn't been changed.

    The secret key should be a random string of ``bytes`` and should not
    be saved to code or version control. Different salts should be used
    to distinguish signing in different contexts. See :doc:`/concepts`
    for information about the security of the secret key and salt.

    :param secret_key: The secret key to sign and verify with. Can be a
        list of keys, oldest to newest, to support key rotation.
    :param salt: Extra key to combine with ``secret_key`` to distinguish
        signatures in different contexts.
    :param sep: Separator between the signature and value.
    :param key_derivation: How to derive the signing key from the secret
        key and salt. Possible values are ``concat``, ``django-concat``,
        or ``hmac``. Defaults to :attr:`default_key_derivation`, which
        defaults to ``django-concat``.
    :param digest_method: Hash function to use when generating the HMAC
        signature. Defaults to :attr:`default_digest_method`, which
        defaults to :func:`hashlib.sha1`. Note that the security of the
        hash alone doesn't apply when used intermediately in HMAC.
    :param algorithm: A :class:`SigningAlgorithm` instance to use
        instead of building a default :class:`HMACAlgorithm` with the
        ``digest_method``.

    .. versionchanged:: 2.0
        Added support for key rotation by passing a list to
        ``secret_key``.

    .. versionchanged:: 0.18
        ``algorithm`` was added as an argument to the class constructor.

    .. versionchanged:: 0.14
        ``key_derivation`` and ``digest_method`` were added as arguments
        to the class constructor.
    """

    #: The default digest method to use for the signer. The default is
    #: :func:`hashlib.sha1`, but can be changed to any :mod:`hashlib` or
    #: compatible object. Note that the security of the hash alone
    #: doesn't apply when used intermediately in HMAC.
    #:
    #: .. versionadded:: 0.14
    default_digest_method: _t.Any = staticmethod(hashlib.sha1)

    #: The default scheme to use to derive the signing key from the
    #: secret key and salt. The default is ``django-concat``. Possible
    #: values are ``concat``, ``django-concat``, and ``hmac``.
    #:
    #: .. versionadded:: 0.14
    default_key_derivation: str = "django-concat"

    def __init__(
        self,
        secret_key: _t_secret_key,
        salt: _t_opt_str_bytes = b"itsdangerous.Signer",
        sep: _t_str_bytes = b".",
        key_derivation: _t.Optional[str] = None,
        digest_method: _t.Optional[_t.Any] = None,
        algorithm: _t.Optional[SigningAlgorithm] = None,
    ):
        #: The list of secret keys to try for verifying signatures, from
        #: oldest to newest. The newest (last) key is used for signing.
        #:
        #: This allows a key rotation system to keep a list of allowed
        #: keys and remove expired ones.
        self.secret_keys: _t.List[bytes] = _make_keys_list(secret_key)
        self.sep: bytes = want_bytes(sep)

        # The separator must never appear inside a base64-encoded
        # signature, otherwise splitting the signed value back apart
        # would be ambiguous.
        if self.sep in _base64_alphabet:
            raise ValueError(
                "The given separator cannot be used because it may be"
                " contained in the signature itself. ASCII letters,"
                " digits, and '-_=' must not be used."
            )

        if salt is not None:
            salt = want_bytes(salt)
        else:
            salt = b"itsdangerous.Signer"

        self.salt = salt

        if key_derivation is None:
            key_derivation = self.default_key_derivation

        self.key_derivation: str = key_derivation

        if digest_method is None:
            digest_method = self.default_digest_method

        self.digest_method: _t.Any = digest_method

        if algorithm is None:
            algorithm = HMACAlgorithm(self.digest_method)

        self.algorithm: SigningAlgorithm = algorithm

    @property
    def secret_key(self) -> bytes:
        """The newest (last) entry in the :attr:`secret_keys` list. This
        is for compatibility from before key rotation support was added.
        """
        return self.secret_keys[-1]

    def derive_key(self, secret_key: _t_opt_str_bytes = None) -> bytes:
        """This method is called to derive the key. The default key
        derivation choices can be overridden here. Key derivation is not
        intended to be used as a security method to make a complex key
        out of a short password. Instead you should use large random
        secret keys.

        :param secret_key: A specific secret key to derive from.
            Defaults to the last item in :attr:`secret_keys`.

        .. versionchanged:: 2.0
            Added the ``secret_key`` parameter.
        """
        if secret_key is None:
            secret_key = self.secret_keys[-1]
        else:
            secret_key = want_bytes(secret_key)

        if self.key_derivation == "concat":
            # key = H(salt || secret)
            return _t.cast(bytes, self.digest_method(self.salt + secret_key).digest())
        elif self.key_derivation == "django-concat":
            # key = H(salt || "signer" || secret), matching Django's scheme
            return _t.cast(
                bytes, self.digest_method(self.salt + b"signer" + secret_key).digest()
            )
        elif self.key_derivation == "hmac":
            # key = HMAC(secret, salt)
            mac = hmac.new(secret_key, digestmod=self.digest_method)
            mac.update(self.salt)
            return mac.digest()
        elif self.key_derivation == "none":
            # Use the secret key directly; the salt is ignored.
            return secret_key
        else:
            raise TypeError("Unknown key derivation method")

    def get_signature(self, value: _t_str_bytes) -> bytes:
        """Returns the signature for the given value, base64 encoded."""
        value = want_bytes(value)
        key = self.derive_key()
        sig = self.algorithm.get_signature(key, value)
        return base64_encode(sig)

    def sign(self, value: _t_str_bytes) -> bytes:
        """Signs the given string."""
        value = want_bytes(value)
        return value + self.sep + self.get_signature(value)

    def verify_signature(self, value: _t_str_bytes, sig: _t_str_bytes) -> bool:
        """Verifies the signature for the given value."""
        try:
            sig = base64_decode(sig)
        except Exception:
            # Not valid base64, so it cannot possibly match.
            return False

        value = want_bytes(value)

        # Accept a signature produced with any configured key, newest
        # first, to support key rotation.
        for secret_key in reversed(self.secret_keys):
            key = self.derive_key(secret_key)

            if self.algorithm.verify_signature(key, value, sig):
                return True

        return False

    def unsign(self, signed_value: _t_str_bytes) -> bytes:
        """Unsigns the given string, raising :exc:`BadSignature` if the
        separator is missing or the signature does not verify.
        """
        signed_value = want_bytes(signed_value)

        if self.sep not in signed_value:
            raise BadSignature(f"No {self.sep!r} found in value")

        value, sig = signed_value.rsplit(self.sep, 1)

        if self.verify_signature(value, sig):
            return value

        raise BadSignature(f"Signature {sig!r} does not match", payload=value)

    def validate(self, signed_value: _t_str_bytes) -> bool:
        """Only validates the given signed value. Returns ``True`` if
        the signature exists and is valid.
        """
        try:
            self.unsign(signed_value)
            return True
        except BadSignature:
            return False

View file

@@ -0,0 +1,227 @@
import time
import typing
import typing as _t
from datetime import datetime
from datetime import timezone
from .encoding import base64_decode
from .encoding import base64_encode
from .encoding import bytes_to_int
from .encoding import int_to_bytes
from .encoding import want_bytes
from .exc import BadSignature
from .exc import BadTimeSignature
from .exc import SignatureExpired
from .serializer import Serializer
from .signer import Signer
_t_str_bytes = _t.Union[str, bytes]
_t_opt_str_bytes = _t.Optional[_t_str_bytes]
_t_opt_int = _t.Optional[int]
if _t.TYPE_CHECKING:
import typing_extensions as _te
class TimestampSigner(Signer):
    """Works like the regular :class:`.Signer` but also records the time
    of the signing and can be used to expire signatures. The
    :meth:`unsign` method can raise :exc:`.SignatureExpired` if the
    unsigning failed because the signature is expired.
    """

    def get_timestamp(self) -> int:
        """Returns the current timestamp. The function must return an
        integer.
        """
        return int(time.time())

    def timestamp_to_datetime(self, ts: int) -> datetime:
        """Convert the timestamp from :meth:`get_timestamp` into an
        aware :class`datetime.datetime` in UTC.

        .. versionchanged:: 2.0
            The timestamp is returned as a timezone-aware ``datetime``
            in UTC rather than a naive ``datetime`` assumed to be UTC.
        """
        return datetime.fromtimestamp(ts, tz=timezone.utc)

    def sign(self, value: _t_str_bytes) -> bytes:
        """Signs the given string and also attaches time information."""
        value = want_bytes(value)
        # The encoded timestamp is appended before signing so that it is
        # covered by the signature as well.
        timestamp = base64_encode(int_to_bytes(self.get_timestamp()))
        sep = want_bytes(self.sep)
        value = value + sep + timestamp
        return value + sep + self.get_signature(value)

    # Ignore overlapping signatures check, return_timestamp is the only
    # parameter that affects the return type.
    @typing.overload
    def unsign(  # type: ignore
        self,
        signed_value: _t_str_bytes,
        max_age: _t_opt_int = None,
        return_timestamp: "_te.Literal[False]" = False,
    ) -> bytes:
        ...

    @typing.overload
    def unsign(
        self,
        signed_value: _t_str_bytes,
        max_age: _t_opt_int = None,
        return_timestamp: "_te.Literal[True]" = True,
    ) -> _t.Tuple[bytes, datetime]:
        ...

    def unsign(
        self,
        signed_value: _t_str_bytes,
        max_age: _t_opt_int = None,
        return_timestamp: bool = False,
    ) -> _t.Union[_t.Tuple[bytes, datetime], bytes]:
        """Works like the regular :meth:`.Signer.unsign` but can also
        validate the time. See the base docstring of the class for
        the general behavior. If ``return_timestamp`` is ``True`` the
        timestamp of the signature will be returned as an aware
        :class:`datetime.datetime` object in UTC.

        .. versionchanged:: 2.0
            The timestamp is returned as a timezone-aware ``datetime``
            in UTC rather than a naive ``datetime`` assumed to be UTC.
        """
        try:
            result = super().unsign(signed_value)
            sig_error = None
        except BadSignature as e:
            # Defer raising: first try to extract the timestamp so the
            # error can carry date_signed.
            sig_error = e
            result = e.payload or b""

        sep = want_bytes(self.sep)

        # If there is no timestamp in the result there is something
        # seriously wrong. In case there was a signature error, we raise
        # that one directly, otherwise we have a weird situation in
        # which we shouldn't have come except someone uses a time-based
        # serializer on non-timestamp data, so catch that.
        if sep not in result:
            if sig_error:
                raise sig_error

            raise BadTimeSignature("timestamp missing", payload=result)

        value, ts_bytes = result.rsplit(sep, 1)
        ts_int: _t_opt_int = None
        ts_dt: _t.Optional[datetime] = None

        try:
            ts_int = bytes_to_int(base64_decode(ts_bytes))
        except Exception:
            # Leave ts_int as None; handled below as a malformed stamp.
            pass

        # Signature is *not* okay. Raise a proper error now that we have
        # split the value and the timestamp.
        if sig_error is not None:
            if ts_int is not None:
                ts_dt = self.timestamp_to_datetime(ts_int)

            raise BadTimeSignature(str(sig_error), payload=value, date_signed=ts_dt)

        # Signature was okay but the timestamp is actually not there or
        # malformed. Should not happen, but we handle it anyway.
        if ts_int is None:
            raise BadTimeSignature("Malformed timestamp", payload=value)

        # Check timestamp is not older than max_age
        if max_age is not None:
            age = self.get_timestamp() - ts_int

            if age > max_age:
                raise SignatureExpired(
                    f"Signature age {age} > {max_age} seconds",
                    payload=value,
                    date_signed=self.timestamp_to_datetime(ts_int),
                )

            # A negative age means the stamp is from the future; treat
            # it as expired too (clock skew or tampering).
            if age < 0:
                raise SignatureExpired(
                    f"Signature age {age} < 0 seconds",
                    payload=value,
                    date_signed=self.timestamp_to_datetime(ts_int),
                )

        if return_timestamp:
            return value, self.timestamp_to_datetime(ts_int)

        return value

    def validate(self, signed_value: _t_str_bytes, max_age: _t_opt_int = None) -> bool:
        """Only validates the given signed value. Returns ``True`` if
        the signature exists and is valid."""
        try:
            self.unsign(signed_value, max_age=max_age)
            return True
        except BadSignature:
            return False
class TimedSerializer(Serializer):
    """Uses :class:`TimestampSigner` instead of the default
    :class:`.Signer`.
    """

    default_signer: _t.Type[TimestampSigner] = TimestampSigner

    def iter_unsigners(
        self, salt: _t_opt_str_bytes = None
    ) -> _t.Iterator[TimestampSigner]:
        """Same as :meth:`Serializer.iter_unsigners`, narrowed to
        :class:`TimestampSigner` for type checkers.
        """
        return _t.cast("_t.Iterator[TimestampSigner]", super().iter_unsigners(salt))

    # TODO: Signature is incompatible because parameters were added
    # before salt.
    def loads(  # type: ignore
        self,
        s: _t_str_bytes,
        max_age: _t_opt_int = None,
        return_timestamp: bool = False,
        salt: _t_opt_str_bytes = None,
    ) -> _t.Any:
        """Reverse of :meth:`dumps`, raises :exc:`.BadSignature` if the
        signature validation fails. If a ``max_age`` is provided it will
        ensure the signature is not older than that time in seconds. In
        case the signature is outdated, :exc:`.SignatureExpired` is
        raised. All arguments are forwarded to the signer's
        :meth:`~TimestampSigner.unsign` method.
        """
        s = want_bytes(s)
        last_exception = None

        for signer in self.iter_unsigners(salt):
            try:
                base64d, timestamp = signer.unsign(
                    s, max_age=max_age, return_timestamp=True
                )
                payload = self.load_payload(base64d)

                if return_timestamp:
                    return payload, timestamp

                return payload
            except SignatureExpired:
                # The signature was unsigned successfully but was
                # expired. Do not try the next signer.
                raise
            except BadSignature as err:
                last_exception = err

        # iter_unsigners always yields at least one signer, so
        # last_exception is set when we reach this point.
        raise _t.cast(BadSignature, last_exception)

    def loads_unsafe(  # type: ignore
        self,
        s: _t_str_bytes,
        max_age: _t_opt_int = None,
        salt: _t_opt_str_bytes = None,
    ) -> _t.Tuple[bool, _t.Any]:
        """Like :meth:`loads` but without verifying the signature; see
        :meth:`Serializer.loads_unsafe`.
        """
        return self._loads_unsafe_impl(s, salt, load_kwargs={"max_age": max_age})

View file

@@ -0,0 +1,80 @@
import typing as _t
import zlib
from ._json import _CompactJSON
from .encoding import base64_decode
from .encoding import base64_encode
from .exc import BadPayload
from .serializer import Serializer
from .timed import TimedSerializer
class URLSafeSerializerMixin(Serializer):
    """Mixed in with a regular serializer it will attempt to zlib
    compress the string to make it shorter if necessary. It will also
    base64 encode the string so that it can safely be placed in a URL.
    """

    default_serializer = _CompactJSON

    def load_payload(
        self,
        payload: bytes,
        *args: _t.Any,
        serializer: _t.Optional[_t.Any] = None,
        **kwargs: _t.Any,
    ) -> _t.Any:
        """Base64 decode (and zlib decompress, if flagged) the payload,
        then delegate to :meth:`Serializer.load_payload`.

        :param payload: The URL-safe encoded payload bytes.
        :param serializer: Optional serializer override, forwarded to
            the base implementation.
        :raises BadPayload: If base64 decoding or decompression fails.
        """
        decompress = False

        # dump_payload prefixes compressed output with ".".
        if payload.startswith(b"."):
            payload = payload[1:]
            decompress = True

        try:
            json = base64_decode(payload)
        except Exception as e:
            raise BadPayload(
                "Could not base64 decode the payload because of an exception",
                original_error=e,
            ) from e

        if decompress:
            try:
                json = zlib.decompress(json)
            except Exception as e:
                raise BadPayload(
                    "Could not zlib decompress the payload before decoding the payload",
                    original_error=e,
                ) from e

        # Bug fix: the serializer override used to be captured here and
        # silently dropped instead of being forwarded to the base class.
        # Forward it only when given so positional callers (where it
        # travels through *args) keep working.
        if serializer is not None:
            kwargs["serializer"] = serializer

        return super().load_payload(json, *args, **kwargs)

    def dump_payload(self, obj: _t.Any) -> bytes:
        """Serialize ``obj``, zlib compress when that actually saves
        space, and base64 encode the result for URL safety.
        """
        json = super().dump_payload(obj)
        is_compressed = False
        compressed = zlib.compress(json)

        # Only keep the compressed form if it is strictly smaller, even
        # after accounting for the 1-byte "." marker.
        if len(compressed) < (len(json) - 1):
            json = compressed
            is_compressed = True

        base64d = base64_encode(json)

        if is_compressed:
            base64d = b"." + base64d

        return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
    """Works like :class:`.Serializer` but dumps and loads into a URL
    safe string consisting of the upper and lowercase character of the
    alphabet as well as ``'_'``, ``'-'`` and ``'.'``.

    All behavior comes from the combined base classes; this class adds
    nothing of its own.
    """
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
    """Works like :class:`.TimedSerializer` but dumps and loads into a
    URL safe string consisting of the upper and lowercase character of
    the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.

    All behavior comes from the combined base classes; this class adds
    nothing of its own.
    """

View file

@@ -0,0 +1,6 @@
from .serving import run_simple as run_simple
from .test import Client as Client
from .wrappers import Request as Request
from .wrappers import Response as Response
# Werkzeug release version (PEP 440 string).
__version__ = "2.0.3"

View file

@@ -0,0 +1,626 @@
import inspect
import logging
import operator
import re
import string
import sys
import typing
import typing as t
from datetime import date
from datetime import datetime
from datetime import timezone
from itertools import chain
from weakref import WeakKeyDictionary
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
from .wrappers.request import Request # noqa: F401
# Lazily created "werkzeug" logger; initialized on first use by _log().
_logger: t.Optional[logging.Logger] = None
# Cache of _parse_signature() results, keyed weakly by function object.
_signature_cache = WeakKeyDictionary()  # type: ignore
# Ordinal of the Unix epoch, for date arithmetic.
_epoch_ord = date(1970, 1, 1).toordinal()
# Single bytes that may appear unescaped in a cookie value.
_legal_cookie_chars = frozenset(
    c.encode("ascii")
    for c in f"{string.ascii_letters}{string.digits}/=!#$%&'*+-.^_`|~:"
)
# Escape table used by _cookie_quote(): special bytes map to their
# backslash-escaped forms.
_cookie_quoting_map = {b",": b"\\054", b";": b"\\073", b'"': b'\\"', b"\\": b"\\\\"}
for _i in chain(range(32), range(127, 256)):
    # Control bytes and non-ASCII bytes are escaped as \ooo octal sequences.
    _cookie_quoting_map[_i.to_bytes(1, sys.byteorder)] = f"\\{_i:03o}".encode("latin1")
# Matches one backslash-escaped three-digit octal sequence, e.g. b"\\054".
_octal_re = re.compile(rb"\\[0-3][0-7][0-7]")
# Matches any single backslash-escaped byte, e.g. b'\\"'.
_quote_re = re.compile(rb"[\\].")
# Character class of bytes allowed in an unquoted cookie token.
_legal_cookie_chars_re = rb"[\w\d!#%&\'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\=]"
# Parses one "key=value;" pair; the value may be double-quoted with escapes.
_cookie_re = re.compile(
    rb"""
    (?P<key>[^=;]+)
    (?:\s*=\s*
        (?P<val>
            "(?:[^\\"]|\\.)*" |
            (?:.*?)
        )
    )?
    \s*;
    """,
    flags=re.VERBOSE,
)
class _Missing:
    """Sentinel type marking "no value" where ``None`` is a valid value."""

    def __repr__(self) -> str:
        return "no value"

    def __reduce__(self) -> str:
        # Pickling resolves back to the module-level singleton below.
        return "_missing"


_missing = _Missing()
@typing.overload
def _make_encode_wrapper(reference: str) -> t.Callable[[str], str]:
    ...


@typing.overload
def _make_encode_wrapper(reference: bytes) -> t.Callable[[str], bytes]:
    ...


def _make_encode_wrapper(reference: t.AnyStr) -> t.Callable[[str], t.AnyStr]:
    """Return a callable coercing ``str`` input to the type of *reference*.

    If *reference* is ``bytes``, the callable encodes its argument as
    latin-1; otherwise it returns the argument unchanged.
    """
    if not isinstance(reference, str):
        return operator.methodcaller("encode", "latin1")

    return lambda value: value
def _check_str_tuple(value: t.Tuple[t.AnyStr, ...]) -> None:
    """Ensure tuple items are all strings or all bytes.

    The first item decides the expected type; an empty tuple is accepted.
    Raises ``TypeError`` on a mix.
    """
    if not value:
        return

    expected = str if isinstance(value[0], str) else bytes

    for item in value:
        if not isinstance(item, expected):
            raise TypeError(f"Cannot mix str and bytes arguments (got {value!r})")
# Default charset used when coercing between str and bytes.
_default_encoding = sys.getdefaultencoding()


def _to_bytes(
    x: t.Union[str, bytes], charset: str = _default_encoding, errors: str = "strict"
) -> bytes:
    """Coerce *x* to ``bytes``. ``None`` and ``bytes`` pass through unchanged."""
    if x is None:
        return x

    if isinstance(x, bytes):
        return x

    if isinstance(x, (bytearray, memoryview)):
        return bytes(x)

    if isinstance(x, str):
        return x.encode(charset, errors)

    raise TypeError("Expected bytes")


@typing.overload
def _to_str(  # type: ignore
    x: None,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> None:
    ...


@typing.overload
def _to_str(
    x: t.Any,
    charset: t.Optional[str] = ...,
    errors: str = ...,
    allow_none_charset: bool = ...,
) -> str:
    ...


def _to_str(
    x: t.Optional[t.Any],
    charset: t.Optional[str] = _default_encoding,
    errors: str = "strict",
    allow_none_charset: bool = False,
) -> t.Optional[t.Union[str, bytes]]:
    """Coerce *x* to ``str``.

    ``None`` and ``str`` pass through; non-bytes objects go through
    ``str()``. Bytes are decoded with *charset*, except when *charset*
    is ``None`` and *allow_none_charset* is true, in which case the raw
    bytes are returned unchanged.
    """
    if x is None or isinstance(x, str):
        return x

    if not isinstance(x, (bytes, bytearray)):
        return str(x)

    if charset is None and allow_none_charset:
        return x

    return x.decode(charset, errors)  # type: ignore
def _wsgi_decoding_dance(
    s: str, charset: str = "utf-8", errors: str = "replace"
) -> str:
    """Reinterpret a WSGI latin-1 "native string" as *charset* text."""
    raw = s.encode("latin1")
    return raw.decode(charset, errors)
def _wsgi_encoding_dance(
    s: str, charset: str = "utf-8", errors: str = "replace"
) -> str:
    """Encode text as *charset* and reinterpret it as a WSGI latin-1 string.

    Bytes input is decoded as latin-1 directly.
    """
    encoded = s if isinstance(s, bytes) else s.encode(charset)
    return encoded.decode("latin1", errors)
def _get_environ(obj: t.Union["WSGIEnvironment", "Request"]) -> "WSGIEnvironment":
    """Return the WSGI environ dict from a request-like object or a dict."""
    env = obj.environ if hasattr(obj, "environ") else obj
    assert isinstance(
        env, dict
    ), f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
    return env
def _has_level_handler(logger: logging.Logger) -> bool:
    """Check if there is a handler in the logging chain that will handle
    the given logger's effective level.

    Walks up the logger hierarchy, stopping where propagation is
    disabled or the root is reached.
    """
    level = logger.getEffectiveLevel()
    node: t.Optional[logging.Logger] = logger

    while node is not None:
        for handler in node.handlers:
            if handler.level <= level:
                return True

        if not node.propagate:
            return False

        node = node.parent  # type: ignore

    return False
class _ColorStreamHandler(logging.StreamHandler):
    """On Windows, wrap stream with Colorama for ANSI style support."""

    def __init__(self) -> None:
        try:
            import colorama
        except ImportError:
            # Colorama is optional; StreamHandler then defaults to stderr.
            stream = None
        else:
            stream = colorama.AnsiToWin32(sys.stderr)

        super().__init__(stream)
def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
    """Log a message to the 'werkzeug' logger.

    The logger is created the first time it is needed. If there is no
    level set, it is set to :data:`logging.INFO`. If there is no handler
    for the logger's effective level, a :class:`logging.StreamHandler`
    is added.
    """
    global _logger

    if _logger is None:
        _logger = logging.getLogger("werkzeug")

        if _logger.level == logging.NOTSET:
            _logger.setLevel(logging.INFO)

        if not _has_level_handler(_logger):
            _logger.addHandler(_ColorStreamHandler())

    # ``type`` names the logger method to call, e.g. "info" or "warning".
    log_method = getattr(_logger, type)
    log_method(message.rstrip(), *args, **kwargs)
def _parse_signature(func):  # type: ignore
    """Return a signature object for the function.

    The returned ``parse(args, kwargs)`` callable matches positional and
    keyword arguments against the function's declared parameters and
    reports missing and extra arguments.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1 along with ``utils.bind`` and
        ``validate_arguments``.
    """
    # if we have a cached validator for this function, return it
    parse = _signature_cache.get(func)
    if parse is not None:
        return parse
    # inspect the function signature and collect all the information
    tup = inspect.getfullargspec(func)
    positional, vararg_var, kwarg_var, defaults = tup[:4]
    defaults = defaults or ()
    arg_count = len(positional)
    # Each entry is (name, has_default, default). Negative indexing into
    # ``defaults`` works because defaults align with the *last* positionals.
    arguments = []
    for idx, name in enumerate(positional):
        if isinstance(name, list):
            raise TypeError(
                "cannot parse functions that unpack tuples in the function signature"
            )
        try:
            default = defaults[idx - arg_count]
        except IndexError:
            param = (name, False, None)
        else:
            param = (name, True, default)
        arguments.append(param)
    arguments = tuple(arguments)

    def parse(args, kwargs):  # type: ignore
        # Returns (new_args, remaining_kwargs, missing, extra,
        # extra_positional, arguments, vararg_var, kwarg_var).
        new_args = []
        missing = []
        extra = {}
        # consume as many arguments as positional as possible
        for idx, (name, has_default, default) in enumerate(arguments):
            try:
                new_args.append(args[idx])
            except IndexError:
                try:
                    new_args.append(kwargs.pop(name))
                except KeyError:
                    if has_default:
                        new_args.append(default)
                    else:
                        missing.append(name)
            else:
                # Same parameter supplied both positionally and by keyword.
                if name in kwargs:
                    extra[name] = kwargs.pop(name)
        # handle extra arguments
        extra_positional = args[arg_count:]
        if vararg_var is not None:
            new_args.extend(extra_positional)
            extra_positional = ()
        if kwargs and kwarg_var is None:
            extra.update(kwargs)
            kwargs = {}
        return (
            new_args,
            kwargs,
            missing,
            extra,
            extra_positional,
            arguments,
            vararg_var,
            kwarg_var,
        )

    _signature_cache[func] = parse
    return parse
@typing.overload
def _dt_as_utc(dt: None) -> None:
    ...


@typing.overload
def _dt_as_utc(dt: datetime) -> datetime:
    ...


def _dt_as_utc(dt: t.Optional[datetime]) -> t.Optional[datetime]:
    """Return *dt* as an aware UTC datetime; ``None`` passes through.

    Naive datetimes are tagged as UTC without shifting the wall clock;
    aware datetimes in another zone are converted.
    """
    if dt is None:
        return dt

    if dt.tzinfo is None:
        return dt.replace(tzinfo=timezone.utc)

    if dt.tzinfo != timezone.utc:
        return dt.astimezone(timezone.utc)

    return dt
# Value type produced/consumed by a _DictAccessorProperty.
_TAccessorValue = t.TypeVar("_TAccessorValue")


class _DictAccessorProperty(t.Generic[_TAccessorValue]):
    """Baseclass for `environ_property` and `header_property`.

    A descriptor that proxies attribute access to a key in the mapping
    returned by :meth:`lookup`, with optional load/dump converters.
    """

    # Class-level default; may be overridden per instance via ``read_only``.
    read_only = False

    def __init__(
        self,
        name: str,
        default: t.Optional[_TAccessorValue] = None,
        load_func: t.Optional[t.Callable[[str], _TAccessorValue]] = None,
        dump_func: t.Optional[t.Callable[[_TAccessorValue], str]] = None,
        read_only: t.Optional[bool] = None,
        doc: t.Optional[str] = None,
    ) -> None:
        # name: the key looked up in the mapping returned by lookup().
        # load_func/dump_func: optional converters applied on get/set.
        self.name = name
        self.default = default
        self.load_func = load_func
        self.dump_func = dump_func
        if read_only is not None:
            self.read_only = read_only
        self.__doc__ = doc

    def lookup(self, instance: t.Any) -> t.MutableMapping[str, t.Any]:
        # Subclasses return the backing mapping this property reads/writes.
        raise NotImplementedError

    @typing.overload
    def __get__(
        self, instance: None, owner: type
    ) -> "_DictAccessorProperty[_TAccessorValue]":
        ...

    @typing.overload
    def __get__(self, instance: t.Any, owner: type) -> _TAccessorValue:
        ...

    def __get__(
        self, instance: t.Optional[t.Any], owner: type
    ) -> t.Union[_TAccessorValue, "_DictAccessorProperty[_TAccessorValue]"]:
        # Accessed on the class itself: return the descriptor.
        if instance is None:
            return self
        storage = self.lookup(instance)
        if self.name not in storage:
            return self.default  # type: ignore
        value = storage[self.name]
        if self.load_func is not None:
            try:
                return self.load_func(value)
            except (ValueError, TypeError):
                # A malformed stored value falls back to the default.
                return self.default  # type: ignore
        return value  # type: ignore

    def __set__(self, instance: t.Any, value: _TAccessorValue) -> None:
        if self.read_only:
            raise AttributeError("read only property")
        if self.dump_func is not None:
            self.lookup(instance)[self.name] = self.dump_func(value)
        else:
            self.lookup(instance)[self.name] = value

    def __delete__(self, instance: t.Any) -> None:
        if self.read_only:
            raise AttributeError("read only property")
        # Deleting a missing key is a no-op.
        self.lookup(instance).pop(self.name, None)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} {self.name}>"
def _cookie_quote(b: bytes) -> bytes:
    """Escape cookie-unsafe bytes in *b*.

    If any byte needed escaping, the result is wrapped in double quotes.
    Uses the module-level ``_legal_cookie_chars`` / ``_cookie_quoting_map``
    tables.
    """
    out = bytearray()
    needs_quotes = False

    for byte_val in b:
        ch = byte_val.to_bytes(1, sys.byteorder)

        if ch not in _legal_cookie_chars:
            needs_quotes = True
            # Unmapped illegal bytes pass through unchanged.
            ch = _cookie_quoting_map.get(ch, ch)

        out.extend(ch)

    if needs_quotes:
        return bytes(b'"' + out + b'"')

    return bytes(out)
def _cookie_unquote(b: bytes) -> bytes:
    """Undo cookie-value quoting: strip surrounding double quotes and
    resolve ``\\ooo`` octal and single-byte backslash escapes.
    """
    # Too short to be quoted at all.
    if len(b) < 2:
        return b
    # Not wrapped in double quotes: nothing was escaped.
    if b[:1] != b'"' or b[-1:] != b'"':
        return b
    b = b[1:-1]
    i = 0
    n = len(b)
    rv = bytearray()
    _push = rv.extend
    while 0 <= i < n:
        # Locate the next octal escape (\ooo) and simple escape (\x).
        o_match = _octal_re.search(b, i)
        q_match = _quote_re.search(b, i)
        # No more escapes: copy the remainder verbatim.
        if not o_match and not q_match:
            rv.extend(b[i:])
            break
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        # Handle whichever escape occurs first.
        if q_match and (not o_match or k < j):
            # Simple escape: copy up to it, then the escaped byte itself.
            _push(b[i:k])
            _push(b[k + 1 : k + 2])
            i = k + 2
        else:
            # Octal escape: copy up to it, then decode the three digits.
            _push(b[i:j])
            rv.append(int(b[j + 1 : j + 4], 8))
            i = j + 4
    return bytes(rv)
def _cookie_parse_impl(b: bytes) -> t.Iterator[t.Tuple[bytes, bytes]]:
    """Lowlevel cookie parsing facility that operates on bytes."""
    pos = 0
    end = len(b)
    # Append a ";" terminator once so the regex always finds a match end.
    data = b + b";"

    while pos < end:
        match = _cookie_re.search(data, pos)

        if match is None:
            break

        key = match.group("key").strip()
        value = match.group("val") or b""
        pos = match.end(0)

        yield key, _cookie_unquote(value)
def _encode_idna(domain: str) -> bytes:
    """Encode a domain to bytes, applying IDNA per label when needed.

    Bytes input must already be ASCII (verified, raising otherwise).
    """
    if isinstance(domain, bytes):
        # If we're given bytes, make sure they fit into ASCII.
        domain.decode("ascii")
        return domain

    try:
        # Fast path: the domain is already ASCII-only.
        return domain.encode("ascii")
    except UnicodeError:
        pass

    # Otherwise encode each label separately.
    return b".".join(label.encode("idna") for label in domain.split("."))
def _decode_idna(domain: t.Union[str, bytes]) -> str:
    """Decode an IDNA-encoded domain to text, label by label."""
    if isinstance(domain, str):
        try:
            domain = domain.encode("ascii")
        except UnicodeError:
            # Non-ASCII text is already a decoded idna domain.
            return domain  # type: ignore

    def _decode_label(label: bytes) -> str:
        # The idna codec has no error handling, so fall back to a
        # lenient ASCII decode when a label is not valid IDNA.
        try:
            return label.decode("idna")
        except UnicodeError:
            return label.decode("ascii", "ignore")

    return ".".join(_decode_label(label) for label in domain.split(b"."))
@typing.overload
def _make_cookie_domain(domain: None) -> None:
    ...


@typing.overload
def _make_cookie_domain(domain: str) -> bytes:
    ...


def _make_cookie_domain(domain: t.Optional[str]) -> t.Optional[bytes]:
    """Normalize a cookie ``domain`` value to IDNA bytes without a port.

    Raises ``ValueError`` for dotless (local) domains, which browsers
    reject.
    """
    if domain is None:
        return None

    encoded = _encode_idna(domain)

    # Strip an accidental port suffix such as "example.com:8080".
    if b":" in encoded:
        encoded = encoded.split(b":", 1)[0]

    if b"." in encoded:
        return encoded

    raise ValueError(
        "Setting 'domain' for a cookie on a server running locally (ex: "
        "localhost) is not supported by complying browsers. You should "
        "have something like: '127.0.0.1 localhost dev.localhost' on "
        "your hosts file and then point your server to run on "
        "'dev.localhost' and also set 'domain' for 'dev.localhost'"
    )
def _easteregg(app: t.Optional["WSGIApplication"] = None) -> "WSGIApplication":
    """Like the name says. But who knows how it works?

    Wraps *app* (if given) and serves a hidden page when the query
    string is the magic value; otherwise it just adds an
    ``X-Powered-By`` header.
    """

    def bzzzzzzz(gyver: bytes) -> str:
        # The payload below is zlib-compressed, base64-encoded ASCII art.
        import base64
        import zlib

        return zlib.decompress(base64.b64decode(gyver)).decode("ascii")

    # Pad every line to 77 columns for display inside <pre>.
    gyver = "\n".join(
        [
            x + (77 - len(x)) * " "
            for x in bzzzzzzz(
                b"""
eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
4QIxzK4bth1PV7+uHn6WXZ5C4ka/+prFzx3zWLMHAVZb8RRUxtFXI5DTQ2n3Hi2sNI+HK43AOWSY
jmEzE4naFp58PdzhPMdslLVWHTGUVpSxImw+pS/D+JhzLfdS1j7PzUMxij+mc2U0I9zcbZ/HcZxc
q1QjvvcThMYFnp93agEx392ZdLJWXbi/Ca4Oivl4h/Y1ErEqP+lrg7Xa4qnUKu5UE9UUA4xeqLJ5
jWlPKJvR2yhRI7xFPdzPuc6adXu6ovwXwRPXXnZHxlPtkSkqWHilsOrGrvcVWXgGP3daXomCj317
8P2UOw/NnA0OOikZyFf3zZ76eN9QXNwYdD8f8/LdBRFg0BO3bB+Pe/+G8er8tDJv83XTkj7WeMBJ
v/rnAfdO51d6sFglfi8U7zbnr0u9tyJHhFZNXYfH8Iafv2Oa+DT6l8u9UYlajV/hcEgk1x8E8L/r
XJXl2SK+GJCxtnyhVKv6GFCEB1OO3f9YWAIEbwcRWv/6RPpsEzOkXURMN37J0PoCSYeBnJQd9Giu
LxYQJNlYPSo/iTQwgaihbART7Fcyem2tTSCcwNCs85MOOpJtXhXDe0E7zgZJkcxWTar/zEjdIVCk
iXy87FW6j5aGZhttDBoAZ3vnmlkx4q4mMmCdLtnHkBXFMCReqthSGkQ+MDXLLCpXwBs0t+sIhsDI
tjBB8MwqYQpLygZ56rRHHpw+OAVyGgaGRHWy2QfXez+ZQQTTBkmRXdV/A9LwH6XGZpEAZU8rs4pE
1R4FQ3Uwt8RKEtRc0/CrANUoes3EzM6WYcFyskGZ6UTHJWenBDS7h163Eo2bpzqxNE9aVgEM2CqI
GAJe9Yra4P5qKmta27VjzYdR04Vc7KHeY4vs61C0nbywFmcSXYjzBHdiEjraS7PGG2jHHTpJUMxN
Jlxr3pUuFvlBWLJGE3GcA1/1xxLcHmlO+LAXbhrXah1tD6Ze+uqFGdZa5FM+3eHcKNaEarutAQ0A
QMAZHV+ve6LxAwWnXbbSXEG2DmCX5ijeLCKj5lhVFBrMm+ryOttCAeFpUdZyQLAQkA06RLs56rzG
8MID55vqr/g64Qr/wqwlE0TVxgoiZhHrbY2h1iuuyUVg1nlkpDrQ7Vm1xIkI5XRKLedN9EjzVchu
jQhXcVkjVdgP2O99QShpdvXWoSwkp5uMwyjt3jiWCqWGSiaaPAzohjPanXVLbM3x0dNskJsaCEyz
DTKIs+7WKJD4ZcJGfMhLFBf6hlbnNkLEePF8Cx2o2kwmYF4+MzAxa6i+6xIQkswOqGO+3x9NaZX8
MrZRaFZpLeVTYI9F/djY6DDVVs340nZGmwrDqTCiiqD5luj3OzwpmQCiQhdRYowUYEA3i1WWGwL4
GCtSoO4XbIPFeKGU13XPkDf5IdimLpAvi2kVDVQbzOOa4KAXMFlpi/hV8F6IDe0Y2reg3PuNKT3i
RYhZqtkQZqSB2Qm0SGtjAw7RDwaM1roESC8HWiPxkoOy0lLTRFG39kvbLZbU9gFKFRvixDZBJmpi
Xyq3RE5lW00EJjaqwp/v3EByMSpVZYsEIJ4APaHmVtpGSieV5CALOtNUAzTBiw81GLgC0quyzf6c
NlWknzJeCsJ5fup2R4d8CYGN77mu5vnO1UqbfElZ9E6cR6zbHjgsr9ly18fXjZoPeDjPuzlWbFwS
pdvPkhntFvkc13qb9094LL5NrA3NIq3r9eNnop9DizWOqCEbyRBFJTHn6Tt3CG1o8a4HevYh0XiJ
sR0AVVHuGuMOIfbuQ/OKBkGRC6NJ4u7sbPX8bG/n5sNIOQ6/Y/BX3IwRlTSabtZpYLB85lYtkkgm
p1qXK3Du2mnr5INXmT/78KI12n11EFBkJHHp0wJyLe9MvPNUGYsf+170maayRoy2lURGHAIapSpQ
krEDuNoJCHNlZYhKpvw4mspVWxqo415n8cD62N9+EfHrAvqQnINStetek7RY2Urv8nxsnGaZfRr/
nhXbJ6m/yl1LzYqscDZA9QHLNbdaSTTr+kFg3bC0iYbX/eQy0Bv3h4B50/SGYzKAXkCeOLI3bcAt
mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
7f2zLkGNv8b191cD/3vs9Q833z8t"""
            ).splitlines()
        ]
    )

    def easteregged(
        environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        def injecting_start_response(
            status: str, headers: t.List[t.Tuple[str, str]], exc_info: t.Any = None
        ) -> t.Callable[[bytes], t.Any]:
            # Advertise Werkzeug on every response.
            headers.append(("X-Powered-By", "Werkzeug"))
            return start_response(status, headers, exc_info)

        # Normal requests are delegated to the wrapped app; only the
        # magic query string reveals the hidden page.
        if app is not None and environ.get("QUERY_STRING") != "macgybarchakku":
            return app(environ, injecting_start_response)

        injecting_start_response("200 OK", [("Content-Type", "text/html")])
        return [
            f"""\
<!DOCTYPE html>
<html>
<head>
<title>About Werkzeug</title>
<style type="text/css">
  body {{ font: 15px Georgia, serif; text-align: center; }}
  a {{ color: #333; text-decoration: none; }}
  h1 {{ font-size: 30px; margin: 20px 0 10px 0; }}
  p {{ margin: 0 0 30px 0; }}
  pre {{ font: 11px 'Consolas', 'Monaco', monospace; line-height: 0.95; }}
</style>
</head>
<body>
<h1><a href="http://werkzeug.pocoo.org/">Werkzeug</a></h1>
<p>the Swiss Army knife of Python web development.</p>
<pre>{gyver}\n\n\n</pre>
</body>
</html>""".encode(
                "latin1"
            )
        ]

    return easteregged

View file

@ -0,0 +1,430 @@
import fnmatch
import os
import subprocess
import sys
import threading
import time
import typing as t
from itertools import chain
from pathlib import PurePath
from ._internal import _log
# The various system prefixes where imports are found. Base values are
# different when running in a virtualenv. The stat reloader won't scan
# these directories, it would be too inefficient.
prefix = {sys.prefix, sys.base_prefix, sys.exec_prefix, sys.base_exec_prefix}

if hasattr(sys, "real_prefix"):
    # virtualenv < 20
    prefix.add(sys.real_prefix)  # type: ignore

# Frozen as a tuple for str.startswith() checks; the temp set is dropped.
_ignore_prefixes = tuple(prefix)
del prefix
def _iter_module_paths() -> t.Iterator[str]:
    """Find the filesystem paths associated with imported modules."""
    # Iterate a snapshot in case the app mutates sys.modules meanwhile.
    for module in list(sys.modules.values()):
        path = getattr(module, "__file__", None)

        if path is None:
            continue

        while not os.path.isfile(path):
            # Zip import: walk up until the archive file itself is found.
            parent = os.path.dirname(path)

            if parent == path:  # skip if it was all directories somehow
                break

            path = parent
        else:
            yield path
def _remove_by_pattern(paths: t.Set[str], exclude_patterns: t.Set[str]) -> None:
    """Remove every path matching one of *exclude_patterns*, in place."""
    for pattern in exclude_patterns:
        matches = fnmatch.filter(paths, pattern)
        paths.difference_update(matches)
def _find_stat_paths(
    extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
    """Find paths for the stat reloader to watch. Returns imported
    module files, Python files under non-system paths. Extra files and
    Python files under extra directories can also be scanned.

    System paths have to be excluded for efficiency. Non-system paths,
    such as a project root or ``sys.path.insert``, should be the paths
    of interest to the user anyway.
    """
    paths = set()
    for path in chain(list(sys.path), extra_files):
        path = os.path.abspath(path)
        if os.path.isfile(path):
            # zip file on sys.path, or extra file
            paths.add(path)
        # os.walk() yields nothing for a file path, so only directories
        # are scanned below.
        for root, dirs, files in os.walk(path):
            # Ignore system prefixes for efficiency. Don't scan
            # __pycache__, it will have a py or pyc module at the import
            # path. As an optimization, ignore .git and .hg since
            # nothing interesting will be there.
            if root.startswith(_ignore_prefixes) or os.path.basename(root) in {
                "__pycache__",
                ".git",
                ".hg",
            }:
                # Prune the whole subtree by clearing dirs in place.
                dirs.clear()
                continue
            for name in files:
                if name.endswith((".py", ".pyc")):
                    paths.add(os.path.join(root, name))
    paths.update(_iter_module_paths())
    _remove_by_pattern(paths, exclude_patterns)
    return paths
def _find_watchdog_paths(
    extra_files: t.Set[str], exclude_patterns: t.Set[str]
) -> t.Iterable[str]:
    """Find paths for the stat reloader to watch. Looks at the same
    sources as the stat reloader, but watches everything under
    directories instead of individual files.
    """
    dirs = set()

    for candidate in chain(list(sys.path), extra_files):
        candidate = os.path.abspath(candidate)

        # Watch the containing directory rather than a lone file.
        if os.path.isfile(candidate):
            candidate = os.path.dirname(candidate)

        dirs.add(candidate)

    for module_file in _iter_module_paths():
        dirs.add(os.path.dirname(module_file))

    _remove_by_pattern(dirs, exclude_patterns)
    return _find_common_roots(dirs)
def _find_common_roots(paths: t.Iterable[str]) -> t.Iterable[str]:
    """Collapse *paths* to the minimal set of common ancestor paths."""
    # Build a trie of path components. Longer paths are inserted first so
    # a shorter (ancestor) path can prune everything beneath it.
    trie: t.Dict[str, dict] = {}

    for parts in sorted((PurePath(p).parts for p in paths), key=len, reverse=True):
        node = trie

        for part in parts:
            node = node.setdefault(part, {})

        # This path subsumes anything previously inserted below it.
        node.clear()

    roots = set()

    def _collect(node: t.Mapping[str, dict], prefix: t.Tuple[str, ...]) -> None:
        for part, child in node.items():
            _collect(child, prefix + (part,))

        if not node:
            roots.add(os.path.join(*prefix))

    _collect(trie, ())
    return roots
def _get_args_for_reloading() -> t.List[str]:
    """Determine how the script was executed, and return the args needed
    to execute it again in a new process.

    Handles "python app.py", "python -m pkg", Windows ``.exe`` entry
    points, and debugger-mangled argv.
    """
    rv = [sys.executable]
    py_script = sys.argv[0]
    args = sys.argv[1:]
    # Need to look at main module to determine how it was executed.
    __main__ = sys.modules["__main__"]

    # The value of __package__ indicates how Python was called. It may
    # not exist if a setuptools script is installed as an egg. It may be
    # set incorrectly for entry points created with pip on Windows.
    if getattr(__main__, "__package__", None) is None or (
        os.name == "nt"
        and __main__.__package__ == ""
        and not os.path.exists(py_script)
        and os.path.exists(f"{py_script}.exe")
    ):
        # Executed a file, like "python app.py".
        py_script = os.path.abspath(py_script)

        if os.name == "nt":
            # Windows entry points have ".exe" extension and should be
            # called directly.
            if not os.path.exists(py_script) and os.path.exists(f"{py_script}.exe"):
                py_script += ".exe"

            if (
                os.path.splitext(sys.executable)[1] == ".exe"
                and os.path.splitext(py_script)[1] == ".exe"
            ):
                # The .exe launcher embeds the interpreter; don't prepend it.
                rv.pop(0)

        rv.append(py_script)
    else:
        # Executed a module, like "python -m werkzeug.serving".
        if sys.argv[0] == "-m":
            # Flask works around previous behavior by putting
            # "-m flask" in sys.argv.
            # TODO remove this once Flask no longer misbehaves
            args = sys.argv
        else:
            if os.path.isfile(py_script):
                # Rewritten by Python from "-m script" to "/path/to/script.py".
                py_module = t.cast(str, __main__.__package__)
                name = os.path.splitext(os.path.basename(py_script))[0]

                if name != "__main__":
                    py_module += f".{name}"
            else:
                # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
                py_module = py_script

            rv.extend(("-m", py_module.lstrip(".")))

    rv.extend(args)
    return rv
class ReloaderLoop:
    """Base class for reloader backends.

    The child process runs the app and exits with code 3 when a change
    is detected (see :meth:`trigger_reload`); the parent keeps
    respawning it via :meth:`restart_with_reloader` until it exits with
    any other code.
    """

    # Human-readable backend name, set by subclasses.
    name = ""

    def __init__(
        self,
        extra_files: t.Optional[t.Iterable[str]] = None,
        exclude_patterns: t.Optional[t.Iterable[str]] = None,
        interval: t.Union[int, float] = 1,
    ) -> None:
        # extra_files: additional files to watch, normalized to absolute paths.
        # exclude_patterns: fnmatch patterns of paths to ignore.
        # interval: seconds to sleep between watch steps.
        self.extra_files: t.Set[str] = {os.path.abspath(x) for x in extra_files or ()}
        self.exclude_patterns: t.Set[str] = set(exclude_patterns or ())
        self.interval = interval

    def __enter__(self) -> "ReloaderLoop":
        """Do any setup, then run one step of the watch to populate the
        initial filesystem state.
        """
        self.run_step()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        """Clean up any resources associated with the reloader."""
        pass

    def run(self) -> None:
        """Continually run the watch step, sleeping for the configured
        interval after each step.
        """
        while True:
            self.run_step()
            time.sleep(self.interval)

    def run_step(self) -> None:
        """Run one step for watching the filesystem. Called once to set
        up initial state, then repeatedly to update it.
        """
        pass

    def restart_with_reloader(self) -> int:
        """Spawn a new Python interpreter with the same arguments as the
        current one, but running the reloader thread.
        """
        while True:
            _log("info", f" * Restarting with {self.name}")
            args = _get_args_for_reloading()
            new_environ = os.environ.copy()
            # The child detects this variable and runs the app instead of
            # respawning again (see run_with_reloader).
            new_environ["WERKZEUG_RUN_MAIN"] = "true"
            exit_code = subprocess.call(args, env=new_environ, close_fds=False)

            # Exit code 3 means "file changed, restart"; anything else is
            # a real exit and is returned to the caller.
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename: str) -> None:
        # Exit the child with code 3 so the parent loop restarts it.
        self.log_reload(filename)
        sys.exit(3)

    def log_reload(self, filename: str) -> None:
        filename = os.path.abspath(filename)
        _log("info", f" * Detected change in {filename!r}, reloading")
class StatReloaderLoop(ReloaderLoop):
    """Reloader backend that polls file modification times each interval."""

    name = "stat"

    def __enter__(self) -> ReloaderLoop:
        # Maps watched file name -> last observed mtime.
        self.mtimes: t.Dict[str, float] = {}
        return super().__enter__()

    def run_step(self) -> None:
        # Fix: the iterable was wrapped in a redundant single-argument
        # chain(); iterating _find_stat_paths() directly is equivalent.
        for name in _find_stat_paths(self.extra_files, self.exclude_patterns):
            try:
                mtime = os.stat(name).st_mtime
            except OSError:
                # File may disappear between discovery and stat; skip it.
                continue

            old_time = self.mtimes.get(name)

            if old_time is None:
                # First sighting: record a baseline, don't trigger a reload.
                self.mtimes[name] = mtime
                continue

            if mtime > old_time:
                self.trigger_reload(name)
class WatchdogReloaderLoop(ReloaderLoop):
    """Reloader backend using the third-party ``watchdog`` package,
    which delivers filesystem events instead of polling mtimes.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        from watchdog.observers import Observer
        from watchdog.events import PatternMatchingEventHandler

        super().__init__(*args, **kwargs)
        # Bind as a local so the nested handler class can call it.
        trigger_reload = self.trigger_reload

        class EventHandler(PatternMatchingEventHandler):  # type: ignore
            def on_any_event(self, event):  # type: ignore
                trigger_reload(event.src_path)

        # Derive a short backend name, e.g. "inotify" from "InotifyObserver".
        reloader_name = Observer.__name__.lower()

        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]

        self.name = f"watchdog ({reloader_name})"
        self.observer = Observer()
        # Extra patterns can be non-Python files, match them in addition
        # to all Python files in default and extra directories. Ignore
        # __pycache__ since a change there will always have a change to
        # the source file (or initial pyc file) as well. Ignore Git and
        # Mercurial internal changes.
        extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
        self.event_handler = EventHandler(
            patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
            ignore_patterns=[
                "*/__pycache__/*",
                "*/.git/*",
                "*/.hg/*",
                *self.exclude_patterns,
            ],
        )
        self.should_reload = False

    def trigger_reload(self, filename: str) -> None:
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def __enter__(self) -> ReloaderLoop:
        # Maps watched path -> watchdog watch handle (or None on error).
        self.watches: t.Dict[str, t.Any] = {}
        self.observer.start()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):  # type: ignore
        self.observer.stop()
        self.observer.join()

    def run(self) -> None:
        # Poll the flag set by trigger_reload(); exit code 3 asks the
        # parent process to restart us.
        while not self.should_reload:
            self.run_step()
            time.sleep(self.interval)

        sys.exit(3)

    def run_step(self) -> None:
        to_delete = set(self.watches)

        for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
            if path not in self.watches:
                try:
                    self.watches[path] = self.observer.schedule(
                        self.event_handler, path, recursive=True
                    )
                except OSError:
                    # Clear this path from list of watches. We don't want
                    # the same error message showing again in the next
                    # iteration.
                    self.watches[path] = None

            to_delete.discard(path)

        # Unschedule watches for paths that have disappeared.
        for path in to_delete:
            watch = self.watches.pop(path, None)

            if watch is not None:
                self.observer.unschedule(watch)
# Registry of available reloader backends by user-facing name.
reloader_loops: t.Dict[str, t.Type[ReloaderLoop]] = {
    "stat": StatReloaderLoop,
    "watchdog": WatchdogReloaderLoop,
}

try:
    # "auto" prefers watchdog when importable, falling back to stat polling.
    __import__("watchdog.observers")
except ImportError:
    reloader_loops["auto"] = reloader_loops["stat"]
else:
    reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on() -> None:
    """Ensure that echo mode is enabled. Some tools such as PDB disable
    it which causes usability issues after a reload."""
    stdin = sys.stdin

    # tcgetattr will fail if stdin isn't a tty
    if stdin is None or not stdin.isatty():
        return

    try:
        import termios
    except ImportError:
        # Not a POSIX platform; there is no terminal mode to restore.
        return

    attributes = termios.tcgetattr(stdin)

    if not attributes[3] & termios.ECHO:
        attributes[3] |= termios.ECHO
        termios.tcsetattr(stdin, termios.TCSANOW, attributes)
def run_with_reloader(
    main_func: t.Callable[[], None],
    extra_files: t.Optional[t.Iterable[str]] = None,
    exclude_patterns: t.Optional[t.Iterable[str]] = None,
    interval: t.Union[int, float] = 1,
    reloader_type: str = "auto",
) -> None:
    """Run the given function in an independent Python interpreter.

    :param main_func: entry point to run in the reloaded child process.
    :param extra_files: additional files to watch for changes.
    :param exclude_patterns: fnmatch patterns of paths to ignore.
    :param interval: seconds between watch steps.
    :param reloader_type: key into ``reloader_loops`` ("auto", "stat",
        "watchdog").
    """
    import signal

    # Exit gracefully on SIGTERM so the process can be stopped cleanly.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    reloader = reloader_loops[reloader_type](
        extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
    )

    try:
        if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            # Child process: run the app in a daemon thread while this
            # thread runs the watch loop.
            ensure_echo_on()
            t = threading.Thread(target=main_func, args=())
            t.daemon = True

            # Enter the reloader to set up initial state, then start
            # the app thread and reloader update loop.
            with reloader:
                t.start()
                reloader.run()
        else:
            # Parent process: keep respawning the child until it exits
            # with a code other than 3 (the reload signal).
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,912 @@
from datetime import datetime
from os import PathLike
from typing import Any
from typing import Callable
from typing import Collection
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Hashable
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Mapping
from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from _typeshed.wsgi import WSGIEnvironment
from typing_extensions import Literal
from typing_extensions import SupportsIndex
# Generic type variables shared by the container stubs below.
K = TypeVar("K")  # key type
V = TypeVar("V")  # value type
T = TypeVar("T")  # converted / alternate value type
D = TypeVar("D")  # default value type
_CD = TypeVar("_CD", bound="CallbackDict")
# Helper called from the immutable mixins' mutating methods; it never
# returns normally (NoReturn).
def is_immutable(self: object) -> NoReturn: ...

# Flattens a mapping (possibly with iterable values) or an iterable of
# pairs into (key, value) tuples.
def iter_multi_items(
    mapping: Union[Mapping[K, Union[V, Iterable[V]]], Iterable[Tuple[K, V]]]
) -> Iterator[Tuple[K, V]]: ...
# Mixin making a list subclass immutable: every mutating method is typed
# NoReturn, and the list becomes hashable (with a cached hash).
class ImmutableListMixin(List[V]):
    _hash_cache: Optional[int]
    def __hash__(self) -> int: ...  # type: ignore
    def __delitem__(self, key: Union[SupportsIndex, slice]) -> NoReturn: ...
    # Fix: this stub imports names from ``typing`` directly and never binds
    # ``t``, so the original ``t.Any`` here was an undefined name.
    def __iadd__(self, other: Any) -> NoReturn: ...  # type: ignore
    def __imul__(self, other: SupportsIndex) -> NoReturn: ...
    def __setitem__(  # type: ignore
        self, key: Union[int, slice], value: V
    ) -> NoReturn: ...
    def append(self, value: V) -> NoReturn: ...
    def remove(self, value: V) -> NoReturn: ...
    def extend(self, values: Iterable[V]) -> NoReturn: ...
    def insert(self, pos: SupportsIndex, value: V) -> NoReturn: ...
    def pop(self, index: SupportsIndex = -1) -> NoReturn: ...
    def reverse(self) -> NoReturn: ...
    def sort(
        self, key: Optional[Callable[[V], Any]] = None, reverse: bool = False
    ) -> NoReturn: ...
# Concrete immutable list; behavior comes entirely from ImmutableListMixin.
class ImmutableList(ImmutableListMixin[V]): ...
# Mixin making a dict subclass immutable and hashable: all mutating
# methods are typed NoReturn.
class ImmutableDictMixin(Dict[K, V]):
    _hash_cache: Optional[int]
    @classmethod
    def fromkeys(  # type: ignore
        cls, keys: Iterable[K], value: Optional[V] = None
    ) -> ImmutableDictMixin[K, V]: ...
    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def __hash__(self) -> int: ...  # type: ignore
    def setdefault(self, key: K, default: Optional[V] = None) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: V) -> NoReturn: ...
    def pop(self, key: K, default: Optional[V] = None) -> NoReturn: ...  # type: ignore
    def popitem(self) -> NoReturn: ...
    def __setitem__(self, key: K, value: V) -> NoReturn: ...
    def __delitem__(self, key: K) -> NoReturn: ...
    def clear(self) -> NoReturn: ...
# Extends ImmutableDictMixin with the MultiDict-specific mutators, also
# typed NoReturn.
class ImmutableMultiDictMixin(ImmutableDictMixin[K, V]):
    def _iter_hashitems(self) -> Iterable[Hashable]: ...
    def add(self, key: K, value: V) -> NoReturn: ...
    def popitemlist(self) -> NoReturn: ...
    def poplist(self, key: K) -> NoReturn: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> NoReturn: ...
    def setlistdefault(
        self, key: K, default_list: Optional[Iterable[V]] = None
    ) -> NoReturn: ...
# Decorator factory used by UpdateDictMixin to wrap a dict method so the
# on_update callback fires after mutation.
def _calls_update(name: str) -> Callable[[UpdateDictMixin[K, V]], Any]: ...
# Dict mixin that invokes ``on_update`` (when set) after every mutation.
class UpdateDictMixin(Dict[K, V]):
    on_update: Optional[Callable[[UpdateDictMixin[K, V]], None]]
    def setdefault(self, key: K, default: Optional[V] = None) -> V: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: Union[V, T] = ...) -> Union[V, T]: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def clear(self) -> None: ...
    def popitem(self) -> Tuple[K, V]: ...
    def update(
        self, *args: Union[Mapping[K, V], Iterable[Tuple[K, V]]], **kwargs: V
    ) -> None: ...
# Dict whose ``get`` accepts a ``type`` converter applied to the value;
# conversion failure yields the default.
class TypeConversionDict(Dict[K, V]):
    @overload
    def get(self, key: K, default: None = ..., type: None = ...) -> Optional[V]: ...
    @overload
    def get(self, key: K, default: D, type: None = ...) -> Union[D, V]: ...
    @overload
    def get(self, key: K, default: D, type: Callable[[V], T]) -> Union[D, T]: ...
    @overload
    def get(self, key: K, type: Callable[[V], T]) -> Optional[T]: ...
# Immutable variant of TypeConversionDict; ``copy`` returns a mutable one.
class ImmutableTypeConversionDict(ImmutableDictMixin[K, V], TypeConversionDict[K, V]):
    def copy(self) -> TypeConversionDict[K, V]: ...
    def __copy__(self) -> ImmutableTypeConversionDict: ...
# Stub for the dict subclass that can store multiple values per key;
# the *list-suffixed methods expose all values for a key.
class MultiDict(TypeConversionDict[K, V]):
    def __init__(
        self,
        mapping: Optional[
            Union[Mapping[K, Union[Iterable[V], V]], Iterable[Tuple[K, V]]]
        ] = None,
    ) -> None: ...
    def __getitem__(self, item: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> List[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> List[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    def setdefault(self, key: K, default: Optional[V] = None) -> V: ...
    def setlistdefault(
        self, key: K, default_list: Optional[Iterable[V]] = None
    ) -> List[V]: ...
    def items(self, multi: bool = False) -> Iterator[Tuple[K, V]]: ...  # type: ignore
    def lists(self) -> Iterator[Tuple[K, List[V]]]: ...
    def values(self) -> Iterator[V]: ...  # type: ignore
    def listvalues(self) -> Iterator[List[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    def deepcopy(self, memo: Any = None) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> Dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> Dict[K, List[V]]: ...
    def update(  # type: ignore
        self, mapping: Union[Mapping[K, Union[Iterable[V], V]], Iterable[Tuple[K, V]]]
    ) -> None: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: Union[V, T] = ...) -> Union[V, T]: ...
    def popitem(self) -> Tuple[K, V]: ...
    def poplist(self, key: K) -> List[V]: ...
    def popitemlist(self) -> Tuple[K, List[V]]: ...
    def __copy__(self) -> MultiDict[K, V]: ...
    def __deepcopy__(self, memo: Any) -> MultiDict[K, V]: ...
class _omd_bucket(Generic[K, V]):
    """Type stub: doubly linked node (``prev``/``next``) holding one pair for OrderedMultiDict."""
    prev: Optional[_omd_bucket]
    next: Optional[_omd_bucket]
    key: K
    value: V
    def __init__(self, omd: OrderedMultiDict, key: K, value: V) -> None: ...
    def unlink(self, omd: OrderedMultiDict) -> None: ...
class OrderedMultiDict(MultiDict[K, V]):
    """Type stub: MultiDict that remembers insertion order via a linked list of buckets."""
    _first_bucket: Optional[_omd_bucket]
    _last_bucket: Optional[_omd_bucket]
    def __init__(self, mapping: Optional[Mapping[K, V]] = None) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(self, key: K) -> V: ...
    def __setitem__(self, key: K, value: V) -> None: ...
    def __delitem__(self, key: K) -> None: ...
    def keys(self) -> Iterator[K]: ...  # type: ignore
    def __iter__(self) -> Iterator[K]: ...
    def values(self) -> Iterator[V]: ...  # type: ignore
    def items(self, multi: bool = False) -> Iterator[Tuple[K, V]]: ...  # type: ignore
    def lists(self) -> Iterator[Tuple[K, List[V]]]: ...
    def listvalues(self) -> Iterator[List[V]]: ...
    def add(self, key: K, value: V) -> None: ...
    @overload
    def getlist(self, key: K) -> List[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> List[T]: ...
    def setlist(self, key: K, new_list: Iterable[V]) -> None: ...
    def setlistdefault(
        self, key: K, default_list: Optional[Iterable[V]] = None
    ) -> List[V]: ...
    def update(  # type: ignore
        self, mapping: Union[Mapping[K, V], Iterable[Tuple[K, V]]]
    ) -> None: ...
    def poplist(self, key: K) -> List[V]: ...
    @overload
    def pop(self, key: K) -> V: ...
    @overload
    def pop(self, key: K, default: Union[V, T] = ...) -> Union[V, T]: ...
    def popitem(self) -> Tuple[K, V]: ...
    def popitemlist(self) -> Tuple[K, List[V]]: ...
# Stub for the helper that serializes a value plus keyword options into a header string.
def _options_header_vkw(
    value: str, kw: Mapping[str, Optional[Union[str, int]]]
) -> str: ...
# Stub for the helper that coerces a header value (str or int) to str.
def _unicodify_header_value(value: Union[str, int]) -> str: ...

# Header values may be given as strings or integers throughout the Headers API.
HV = Union[str, int]
class Headers(Dict[str, str]):
    """Type stub: ordered collection of HTTP headers backed by a list of pairs."""
    # Backing storage: ordered (name, value) tuples.
    _list: List[Tuple[str, str]]
    def __init__(
        self,
        defaults: Optional[
            Union[Mapping[str, Union[HV, Iterable[HV]]], Iterable[Tuple[str, HV]]]
        ] = None,
    ) -> None: ...
    @overload
    def __getitem__(self, key: str) -> str: ...
    @overload
    def __getitem__(self, key: int) -> Tuple[str, str]: ...
    @overload
    def __getitem__(self, key: slice) -> Headers: ...
    @overload
    def __getitem__(self, key: str, _get_mode: Literal[True] = ...) -> str: ...
    def __eq__(self, other: object) -> bool: ...
    @overload  # type: ignore
    def get(self, key: str, default: str) -> str: ...
    @overload
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]: ...
    @overload
    def get(
        self, key: str, default: Optional[T] = None, type: Callable[[str], T] = ...
    ) -> Optional[T]: ...
    @overload
    def getlist(self, key: str) -> List[str]: ...
    @overload
    def getlist(self, key: str, type: Callable[[str], T]) -> List[T]: ...
    def get_all(self, name: str) -> List[str]: ...
    def items(  # type: ignore
        self, lower: bool = False
    ) -> Iterator[Tuple[str, str]]: ...
    def keys(self, lower: bool = False) -> Iterator[str]: ...  # type: ignore
    def values(self) -> Iterator[str]: ...  # type: ignore
    def extend(
        self,
        *args: Union[Mapping[str, Union[HV, Iterable[HV]]], Iterable[Tuple[str, HV]]],
        **kwargs: Union[HV, Iterable[HV]],
    ) -> None: ...
    @overload
    def __delitem__(self, key: Union[str, int, slice]) -> None: ...
    @overload
    def __delitem__(self, key: str, _index_operation: Literal[False]) -> None: ...
    def remove(self, key: str) -> None: ...
    @overload  # type: ignore
    def pop(self, key: str, default: Optional[str] = None) -> str: ...
    @overload
    def pop(
        self, key: Optional[int] = None, default: Optional[Tuple[str, str]] = None
    ) -> Tuple[str, str]: ...
    def popitem(self) -> Tuple[str, str]: ...
    def __contains__(self, key: str) -> bool: ...  # type: ignore
    def has_key(self, key: str) -> bool: ...
    def __iter__(self) -> Iterator[Tuple[str, str]]: ...  # type: ignore
    def add(self, _key: str, _value: HV, **kw: HV) -> None: ...
    def _validate_value(self, value: str) -> None: ...
    def add_header(self, _key: str, _value: HV, **_kw: HV) -> None: ...
    def clear(self) -> None: ...
    def set(self, _key: str, _value: HV, **kw: HV) -> None: ...
    def setlist(self, key: str, values: Iterable[HV]) -> None: ...
    def setdefault(self, key: str, default: HV) -> str: ...  # type: ignore
    def setlistdefault(self, key: str, default: Iterable[HV]) -> None: ...
    @overload
    def __setitem__(self, key: str, value: HV) -> None: ...
    @overload
    def __setitem__(self, key: int, value: Tuple[str, HV]) -> None: ...
    @overload
    def __setitem__(self, key: slice, value: Iterable[Tuple[str, HV]]) -> None: ...
    def update(
        self,
        *args: Union[Mapping[str, HV], Iterable[Tuple[str, HV]]],
        **kwargs: Union[HV, Iterable[HV]],
    ) -> None: ...
    def to_wsgi_list(self) -> List[Tuple[str, str]]: ...
    def copy(self) -> Headers: ...
    def __copy__(self) -> Headers: ...
class ImmutableHeadersMixin(Headers):
    """Type stub: mixin where every mutating Headers method raises (returns NoReturn)."""
    def __delitem__(self, key: Any, _index_operation: bool = True) -> NoReturn: ...
    def __setitem__(self, key: Any, value: Any) -> NoReturn: ...
    def set(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def setlist(self, key: Any, values: Any) -> NoReturn: ...
    def add(self, _key: Any, _value: Any, **kw: Any) -> NoReturn: ...
    def add_header(self, _key: Any, _value: Any, **_kw: Any) -> NoReturn: ...
    def remove(self, key: Any) -> NoReturn: ...
    def extend(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def update(self, *args: Any, **kwargs: Any) -> NoReturn: ...
    def insert(self, pos: Any, value: Any) -> NoReturn: ...
    def pop(self, key: Any = None, default: Any = ...) -> NoReturn: ...
    def popitem(self) -> NoReturn: ...
    def setdefault(self, key: Any, default: Any) -> NoReturn: ...  # type: ignore
    def setlistdefault(self, key: Any, default: Any) -> NoReturn: ...
class EnvironHeaders(ImmutableHeadersMixin, Headers):
    """Type stub: read-only Headers view over a WSGI environ dict."""
    environ: WSGIEnvironment
    def __init__(self, environ: WSGIEnvironment) -> None: ...
    def __eq__(self, other: object) -> bool: ...
    def __getitem__(  # type: ignore
        self, key: str, _get_mode: Literal[False] = False
    ) -> str: ...
    def __iter__(self) -> Iterator[Tuple[str, str]]: ...  # type: ignore
    def copy(self) -> NoReturn: ...
class CombinedMultiDict(ImmutableMultiDictMixin[K, V], MultiDict[K, V]):  # type: ignore
    """Type stub: read-only view combining several MultiDicts (``dicts``) into one."""
    dicts: List[MultiDict[K, V]]
    def __init__(self, dicts: Optional[Iterable[MultiDict[K, V]]]) -> None: ...
    @classmethod
    def fromkeys(cls, keys: Any, value: Any = None) -> NoReturn: ...
    def __getitem__(self, key: K) -> V: ...
    @overload  # type: ignore
    def get(self, key: K) -> Optional[V]: ...
    @overload
    def get(self, key: K, default: Union[V, T] = ...) -> Union[V, T]: ...
    @overload
    def get(
        self, key: K, default: Optional[T] = None, type: Callable[[V], T] = ...
    ) -> Optional[T]: ...
    @overload
    def getlist(self, key: K) -> List[V]: ...
    @overload
    def getlist(self, key: K, type: Callable[[V], T] = ...) -> List[T]: ...
    def _keys_impl(self) -> Set[K]: ...
    def keys(self) -> Set[K]: ...  # type: ignore
    def __iter__(self) -> Set[K]: ...  # type: ignore
    def items(self, multi: bool = False) -> Iterator[Tuple[K, V]]: ...  # type: ignore
    def values(self) -> Iterator[V]: ...  # type: ignore
    def lists(self) -> Iterator[Tuple[K, List[V]]]: ...
    def listvalues(self) -> Iterator[List[V]]: ...
    def copy(self) -> MultiDict[K, V]: ...
    @overload
    def to_dict(self) -> Dict[K, V]: ...
    @overload
    def to_dict(self, flat: Literal[False]) -> Dict[K, List[V]]: ...
    def __contains__(self, key: K) -> bool: ...  # type: ignore
    def has_key(self, key: K) -> bool: ...
class FileMultiDict(MultiDict[str, "FileStorage"]):
    """Type stub: MultiDict of uploaded files with a convenience ``add_file`` method."""
    def add_file(
        self,
        name: str,
        file: Union[FileStorage, str, IO[bytes]],
        filename: Optional[str] = None,
        content_type: Optional[str] = None,
    ) -> None: ...
class ImmutableDict(ImmutableDictMixin[K, V], Dict[K, V]):
    """Type stub: immutable dict; ``copy`` yields a plain mutable dict."""
    def copy(self) -> Dict[K, V]: ...
    def __copy__(self) -> ImmutableDict[K, V]: ...
class ImmutableMultiDict(  # type: ignore
    ImmutableMultiDictMixin[K, V], MultiDict[K, V]
):
    """Type stub: immutable MultiDict; ``copy`` yields a mutable MultiDict."""
    def copy(self) -> MultiDict[K, V]: ...
    def __copy__(self) -> ImmutableMultiDict[K, V]: ...
class ImmutableOrderedMultiDict(  # type: ignore
    ImmutableMultiDictMixin[K, V], OrderedMultiDict[K, V]
):
    """Type stub: immutable OrderedMultiDict; ``copy`` yields a mutable OrderedMultiDict."""
    def _iter_hashitems(self) -> Iterator[Tuple[int, Tuple[K, V]]]: ...
    def copy(self) -> OrderedMultiDict[K, V]: ...
    def __copy__(self) -> ImmutableOrderedMultiDict[K, V]: ...
class Accept(ImmutableList[Tuple[str, int]]):
    """Type stub: immutable list of ``(value, quality)`` pairs from an Accept header."""
    provided: bool
    def __init__(
        self, values: Optional[Union[Accept, Iterable[Tuple[str, float]]]] = None
    ) -> None: ...
    def _specificity(self, value: str) -> Tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    @overload  # type: ignore
    def __getitem__(self, key: str) -> int: ...
    @overload
    def __getitem__(self, key: int) -> Tuple[str, int]: ...
    @overload
    def __getitem__(self, key: slice) -> Iterable[Tuple[str, int]]: ...
    def quality(self, key: str) -> int: ...
    def __contains__(self, value: str) -> bool: ...  # type: ignore
    def index(self, key: str) -> int: ...  # type: ignore
    def find(self, key: str) -> int: ...
    def values(self) -> Iterator[str]: ...
    def to_header(self) -> str: ...
    def _best_single_match(self, match: str) -> Optional[Tuple[str, int]]: ...
    def best_match(
        self, matches: Iterable[str], default: Optional[str] = None
    ) -> Optional[str]: ...
    @property
    def best(self) -> str: ...
# Stub for the MIME-normalization helper used by MIMEAccept matching.
def _normalize_mime(value: str) -> List[str]: ...
class MIMEAccept(Accept):
    """Type stub: Accept specialization for MIME types with convenience flags."""
    def _specificity(self, value: str) -> Tuple[bool, ...]: ...
    def _value_matches(self, value: str, item: str) -> bool: ...
    @property
    def accept_html(self) -> bool: ...
    @property
    def accept_xhtml(self) -> bool: ...
    @property
    def accept_json(self) -> bool: ...
# Stub for the language-tag normalization helper used by LanguageAccept matching.
def _normalize_lang(value: str) -> List[str]: ...
class LanguageAccept(Accept):
    """Type stub: Accept specialization for language tags."""
    def _value_matches(self, value: str, item: str) -> bool: ...
    def best_match(
        self, matches: Iterable[str], default: Optional[str] = None
    ) -> Optional[str]: ...
class CharsetAccept(Accept):
    """Type stub: Accept specialization for character sets."""
    def _value_matches(self, value: str, item: str) -> bool: ...
# Value types a Cache-Control attribute may take (constrained TypeVar).
_CPT = TypeVar("_CPT", str, int, bool)
_OptCPT = Optional[_CPT]

# Stub for the module-level property factory used by the Cache-Control classes.
def cache_property(key: str, empty: _OptCPT, type: Type[_CPT]) -> property: ...
class _CacheControl(UpdateDictMixin[str, _OptCPT], Dict[str, _OptCPT]):
    """Type stub: shared base exposing common Cache-Control directives as properties."""
    provided: bool
    def __init__(
        self,
        values: Union[Mapping[str, _OptCPT], Iterable[Tuple[str, _OptCPT]]] = (),
        on_update: Optional[Callable[[_CacheControl], None]] = None,
    ) -> None: ...
    @property
    def no_cache(self) -> Optional[bool]: ...
    @no_cache.setter
    def no_cache(self, value: Optional[bool]) -> None: ...
    @no_cache.deleter
    def no_cache(self) -> None: ...
    @property
    def no_store(self) -> Optional[bool]: ...
    @no_store.setter
    def no_store(self, value: Optional[bool]) -> None: ...
    @no_store.deleter
    def no_store(self) -> None: ...
    @property
    def max_age(self) -> Optional[int]: ...
    @max_age.setter
    def max_age(self, value: Optional[int]) -> None: ...
    @max_age.deleter
    def max_age(self) -> None: ...
    @property
    def no_transform(self) -> Optional[bool]: ...
    @no_transform.setter
    def no_transform(self, value: Optional[bool]) -> None: ...
    @no_transform.deleter
    def no_transform(self) -> None: ...
    def _get_cache_value(self, key: str, empty: Optional[T], type: Type[T]) -> T: ...
    def _set_cache_value(self, key: str, value: Optional[T], type: Type[T]) -> None: ...
    def _del_cache_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...
    @staticmethod
    def cache_property(key: str, empty: _OptCPT, type: Type[_CPT]) -> property: ...
class RequestCacheControl(ImmutableDictMixin[str, _OptCPT], _CacheControl):
    """Type stub: immutable Cache-Control with request-only directives."""
    @property
    def max_stale(self) -> Optional[int]: ...
    @max_stale.setter
    def max_stale(self, value: Optional[int]) -> None: ...
    @max_stale.deleter
    def max_stale(self) -> None: ...
    @property
    def min_fresh(self) -> Optional[int]: ...
    @min_fresh.setter
    def min_fresh(self, value: Optional[int]) -> None: ...
    @min_fresh.deleter
    def min_fresh(self) -> None: ...
    @property
    def only_if_cached(self) -> Optional[bool]: ...
    @only_if_cached.setter
    def only_if_cached(self, value: Optional[bool]) -> None: ...
    @only_if_cached.deleter
    def only_if_cached(self) -> None: ...
class ResponseCacheControl(_CacheControl):
    """Type stub: mutable Cache-Control with response-only directives."""
    @property
    def public(self) -> Optional[bool]: ...
    @public.setter
    def public(self, value: Optional[bool]) -> None: ...
    @public.deleter
    def public(self) -> None: ...
    @property
    def private(self) -> Optional[bool]: ...
    @private.setter
    def private(self, value: Optional[bool]) -> None: ...
    @private.deleter
    def private(self) -> None: ...
    @property
    def must_revalidate(self) -> Optional[bool]: ...
    @must_revalidate.setter
    def must_revalidate(self, value: Optional[bool]) -> None: ...
    @must_revalidate.deleter
    def must_revalidate(self) -> None: ...
    @property
    def proxy_revalidate(self) -> Optional[bool]: ...
    @proxy_revalidate.setter
    def proxy_revalidate(self, value: Optional[bool]) -> None: ...
    @proxy_revalidate.deleter
    def proxy_revalidate(self) -> None: ...
    @property
    def s_maxage(self) -> Optional[int]: ...
    @s_maxage.setter
    def s_maxage(self, value: Optional[int]) -> None: ...
    @s_maxage.deleter
    def s_maxage(self) -> None: ...
    @property
    def immutable(self) -> Optional[bool]: ...
    @immutable.setter
    def immutable(self, value: Optional[bool]) -> None: ...
    @immutable.deleter
    def immutable(self) -> None: ...
# Stub for the property factory used by ContentSecurityPolicy directives.
def csp_property(key: str) -> property: ...
class ContentSecurityPolicy(UpdateDictMixin[str, str], Dict[str, str]):
    """Type stub: dict of CSP directives, each exposed as a get/set/delete property."""
    @property
    def base_uri(self) -> Optional[str]: ...
    @base_uri.setter
    def base_uri(self, value: Optional[str]) -> None: ...
    @base_uri.deleter
    def base_uri(self) -> None: ...
    @property
    def child_src(self) -> Optional[str]: ...
    @child_src.setter
    def child_src(self, value: Optional[str]) -> None: ...
    @child_src.deleter
    def child_src(self) -> None: ...
    @property
    def connect_src(self) -> Optional[str]: ...
    @connect_src.setter
    def connect_src(self, value: Optional[str]) -> None: ...
    @connect_src.deleter
    def connect_src(self) -> None: ...
    @property
    def default_src(self) -> Optional[str]: ...
    @default_src.setter
    def default_src(self, value: Optional[str]) -> None: ...
    @default_src.deleter
    def default_src(self) -> None: ...
    @property
    def font_src(self) -> Optional[str]: ...
    @font_src.setter
    def font_src(self, value: Optional[str]) -> None: ...
    @font_src.deleter
    def font_src(self) -> None: ...
    @property
    def form_action(self) -> Optional[str]: ...
    @form_action.setter
    def form_action(self, value: Optional[str]) -> None: ...
    @form_action.deleter
    def form_action(self) -> None: ...
    @property
    def frame_ancestors(self) -> Optional[str]: ...
    @frame_ancestors.setter
    def frame_ancestors(self, value: Optional[str]) -> None: ...
    @frame_ancestors.deleter
    def frame_ancestors(self) -> None: ...
    @property
    def frame_src(self) -> Optional[str]: ...
    @frame_src.setter
    def frame_src(self, value: Optional[str]) -> None: ...
    @frame_src.deleter
    def frame_src(self) -> None: ...
    @property
    def img_src(self) -> Optional[str]: ...
    @img_src.setter
    def img_src(self, value: Optional[str]) -> None: ...
    @img_src.deleter
    def img_src(self) -> None: ...
    @property
    def manifest_src(self) -> Optional[str]: ...
    @manifest_src.setter
    def manifest_src(self, value: Optional[str]) -> None: ...
    @manifest_src.deleter
    def manifest_src(self) -> None: ...
    @property
    def media_src(self) -> Optional[str]: ...
    @media_src.setter
    def media_src(self, value: Optional[str]) -> None: ...
    @media_src.deleter
    def media_src(self) -> None: ...
    @property
    def navigate_to(self) -> Optional[str]: ...
    @navigate_to.setter
    def navigate_to(self, value: Optional[str]) -> None: ...
    @navigate_to.deleter
    def navigate_to(self) -> None: ...
    @property
    def object_src(self) -> Optional[str]: ...
    @object_src.setter
    def object_src(self, value: Optional[str]) -> None: ...
    @object_src.deleter
    def object_src(self) -> None: ...
    @property
    def prefetch_src(self) -> Optional[str]: ...
    @prefetch_src.setter
    def prefetch_src(self, value: Optional[str]) -> None: ...
    @prefetch_src.deleter
    def prefetch_src(self) -> None: ...
    @property
    def plugin_types(self) -> Optional[str]: ...
    @plugin_types.setter
    def plugin_types(self, value: Optional[str]) -> None: ...
    @plugin_types.deleter
    def plugin_types(self) -> None: ...
    @property
    def report_to(self) -> Optional[str]: ...
    @report_to.setter
    def report_to(self, value: Optional[str]) -> None: ...
    @report_to.deleter
    def report_to(self) -> None: ...
    @property
    def report_uri(self) -> Optional[str]: ...
    @report_uri.setter
    def report_uri(self, value: Optional[str]) -> None: ...
    @report_uri.deleter
    def report_uri(self) -> None: ...
    @property
    def sandbox(self) -> Optional[str]: ...
    @sandbox.setter
    def sandbox(self, value: Optional[str]) -> None: ...
    @sandbox.deleter
    def sandbox(self) -> None: ...
    @property
    def script_src(self) -> Optional[str]: ...
    @script_src.setter
    def script_src(self, value: Optional[str]) -> None: ...
    @script_src.deleter
    def script_src(self) -> None: ...
    @property
    def script_src_attr(self) -> Optional[str]: ...
    @script_src_attr.setter
    def script_src_attr(self, value: Optional[str]) -> None: ...
    @script_src_attr.deleter
    def script_src_attr(self) -> None: ...
    @property
    def script_src_elem(self) -> Optional[str]: ...
    @script_src_elem.setter
    def script_src_elem(self, value: Optional[str]) -> None: ...
    @script_src_elem.deleter
    def script_src_elem(self) -> None: ...
    @property
    def style_src(self) -> Optional[str]: ...
    @style_src.setter
    def style_src(self, value: Optional[str]) -> None: ...
    @style_src.deleter
    def style_src(self) -> None: ...
    @property
    def style_src_attr(self) -> Optional[str]: ...
    @style_src_attr.setter
    def style_src_attr(self, value: Optional[str]) -> None: ...
    @style_src_attr.deleter
    def style_src_attr(self) -> None: ...
    @property
    def style_src_elem(self) -> Optional[str]: ...
    @style_src_elem.setter
    def style_src_elem(self, value: Optional[str]) -> None: ...
    @style_src_elem.deleter
    def style_src_elem(self) -> None: ...
    @property
    def worker_src(self) -> Optional[str]: ...
    @worker_src.setter
    def worker_src(self, value: Optional[str]) -> None: ...
    @worker_src.deleter
    def worker_src(self) -> None: ...
    provided: bool
    def __init__(
        self,
        values: Union[Mapping[str, str], Iterable[Tuple[str, str]]] = (),
        on_update: Optional[Callable[[ContentSecurityPolicy], None]] = None,
    ) -> None: ...
    def _get_value(self, key: str) -> Optional[str]: ...
    def _set_value(self, key: str, value: str) -> None: ...
    def _del_value(self, key: str) -> None: ...
    def to_header(self) -> str: ...
class CallbackDict(UpdateDictMixin[K, V], Dict[K, V]):
    """Type stub: dict that invokes ``on_update`` whenever it is modified."""
    def __init__(
        self,
        initial: Optional[Union[Mapping[K, V], Iterable[Tuple[K, V]]]] = None,
        on_update: Optional[Callable[[_CD], None]] = None,
    ) -> None: ...
class HeaderSet(Set[str]):
    """Type stub: ordered set of header tokens with an ``on_update`` callback."""
    _headers: List[str]
    _set: Set[str]
    on_update: Optional[Callable[[HeaderSet], None]]
    def __init__(
        self,
        headers: Optional[Iterable[str]] = None,
        on_update: Optional[Callable[[HeaderSet], None]] = None,
    ) -> None: ...
    def add(self, header: str) -> None: ...
    def remove(self, header: str) -> None: ...
    def update(self, iterable: Iterable[str]) -> None: ...  # type: ignore
    def discard(self, header: str) -> None: ...
    def find(self, header: str) -> int: ...
    def index(self, header: str) -> int: ...
    def clear(self) -> None: ...
    def as_set(self, preserve_casing: bool = False) -> Set[str]: ...
    def to_header(self) -> str: ...
    def __getitem__(self, idx: int) -> str: ...
    def __delitem__(self, idx: int) -> None: ...
    def __setitem__(self, idx: int, value: str) -> None: ...
    def __contains__(self, header: str) -> bool: ...  # type: ignore
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
class ETags(Collection[str]):
    """Type stub: collection of strong/weak entity tags, optionally the ``*`` wildcard."""
    _strong: FrozenSet[str]
    _weak: FrozenSet[str]
    star_tag: bool
    def __init__(
        self,
        strong_etags: Optional[Iterable[str]] = None,
        weak_etags: Optional[Iterable[str]] = None,
        star_tag: bool = False,
    ) -> None: ...
    def as_set(self, include_weak: bool = False) -> Set[str]: ...
    def is_weak(self, etag: str) -> bool: ...
    def is_strong(self, etag: str) -> bool: ...
    def contains_weak(self, etag: str) -> bool: ...
    def contains(self, etag: str) -> bool: ...
    def contains_raw(self, etag: str) -> bool: ...
    def to_header(self) -> str: ...
    def __call__(
        self,
        etag: Optional[str] = None,
        data: Optional[bytes] = None,
        include_weak: bool = False,
    ) -> bool: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterator[str]: ...
    def __contains__(self, item: str) -> bool: ...  # type: ignore
class IfRange:
    """Type stub: parsed If-Range header holding either an etag or a date."""
    etag: Optional[str]
    date: Optional[datetime]
    def __init__(
        self, etag: Optional[str] = None, date: Optional[datetime] = None
    ) -> None: ...
    def to_header(self) -> str: ...
class Range:
    """Type stub: parsed Range header (``units`` plus a list of (start, stop) pairs)."""
    units: str
    ranges: List[Tuple[int, Optional[int]]]
    def __init__(self, units: str, ranges: List[Tuple[int, Optional[int]]]) -> None: ...
    def range_for_length(self, length: Optional[int]) -> Optional[Tuple[int, int]]: ...
    def make_content_range(self, length: Optional[int]) -> Optional[ContentRange]: ...
    def to_header(self) -> str: ...
    def to_content_range_header(self, length: Optional[int]) -> Optional[str]: ...
# Stub for the property factory used by ContentRange fields.
def _callback_property(name: str) -> property: ...
class ContentRange:
    """Type stub: mutable Content-Range header with an ``on_update`` callback."""
    on_update: Optional[Callable[[ContentRange], None]]
    def __init__(
        self,
        units: Optional[str],
        start: Optional[int],
        stop: Optional[int],
        length: Optional[int] = None,
        on_update: Optional[Callable[[ContentRange], None]] = None,
    ) -> None: ...
    @property
    def units(self) -> Optional[str]: ...
    @units.setter
    def units(self, value: Optional[str]) -> None: ...
    @property
    def start(self) -> Optional[int]: ...
    @start.setter
    def start(self, value: Optional[int]) -> None: ...
    @property
    def stop(self) -> Optional[int]: ...
    @stop.setter
    def stop(self, value: Optional[int]) -> None: ...
    @property
    def length(self) -> Optional[int]: ...
    @length.setter
    def length(self, value: Optional[int]) -> None: ...
    def set(
        self,
        start: Optional[int],
        stop: Optional[int],
        length: Optional[int] = None,
        units: Optional[str] = "bytes",
    ) -> None: ...
    def unset(self) -> None: ...
    def to_header(self) -> str: ...
class Authorization(ImmutableDictMixin[str, str], Dict[str, str]):
    """Type stub: immutable dict of Authorization header fields with typed accessors."""
    type: str
    def __init__(
        self,
        auth_type: str,
        data: Optional[Union[Mapping[str, str], Iterable[Tuple[str, str]]]] = None,
    ) -> None: ...
    @property
    def username(self) -> Optional[str]: ...
    @property
    def password(self) -> Optional[str]: ...
    @property
    def realm(self) -> Optional[str]: ...
    @property
    def nonce(self) -> Optional[str]: ...
    @property
    def uri(self) -> Optional[str]: ...
    @property
    def nc(self) -> Optional[str]: ...
    @property
    def cnonce(self) -> Optional[str]: ...
    @property
    def response(self) -> Optional[str]: ...
    @property
    def opaque(self) -> Optional[str]: ...
    @property
    def qop(self) -> Optional[str]: ...
    def to_header(self) -> str: ...
# Stubs for the property factories used by WWWAuthenticate.
def auth_property(name: str, doc: Optional[str] = None) -> property: ...
def _set_property(name: str, doc: Optional[str] = None) -> property: ...
class WWWAuthenticate(UpdateDictMixin[str, str], Dict[str, str]):
    """Type stub: mutable WWW-Authenticate header with Basic/Digest helpers."""
    _require_quoting: FrozenSet[str]
    def __init__(
        self,
        auth_type: Optional[str] = None,
        values: Optional[Union[Mapping[str, str], Iterable[Tuple[str, str]]]] = None,
        on_update: Optional[Callable[[WWWAuthenticate], None]] = None,
    ) -> None: ...
    def set_basic(self, realm: str = ...) -> None: ...
    def set_digest(
        self,
        realm: str,
        nonce: str,
        qop: Iterable[str] = ("auth",),
        opaque: Optional[str] = None,
        algorithm: Optional[str] = None,
        stale: bool = False,
    ) -> None: ...
    def to_header(self) -> str: ...
    @property
    def type(self) -> Optional[str]: ...
    @type.setter
    def type(self, value: Optional[str]) -> None: ...
    @property
    def realm(self) -> Optional[str]: ...
    @realm.setter
    def realm(self, value: Optional[str]) -> None: ...
    @property
    def domain(self) -> HeaderSet: ...
    @property
    def nonce(self) -> Optional[str]: ...
    @nonce.setter
    def nonce(self, value: Optional[str]) -> None: ...
    @property
    def opaque(self) -> Optional[str]: ...
    @opaque.setter
    def opaque(self, value: Optional[str]) -> None: ...
    @property
    def algorithm(self) -> Optional[str]: ...
    @algorithm.setter
    def algorithm(self, value: Optional[str]) -> None: ...
    @property
    def qop(self) -> HeaderSet: ...
    @property
    def stale(self) -> Optional[bool]: ...
    @stale.setter
    def stale(self, value: Optional[bool]) -> None: ...
    @staticmethod
    def auth_property(name: str, doc: Optional[str] = None) -> property: ...
class FileStorage:
    """Type stub: wrapper around an uploaded file stream plus its metadata headers."""
    name: Optional[str]
    stream: IO[bytes]
    filename: Optional[str]
    headers: Headers
    _parsed_content_type: Tuple[str, Dict[str, str]]
    def __init__(
        self,
        stream: Optional[IO[bytes]] = None,
        filename: Optional[str] = None,
        name: Optional[str] = None,
        content_type: Optional[str] = None,
        content_length: Optional[int] = None,
        headers: Optional[Headers] = None,
    ) -> None: ...
    def _parse_content_type(self) -> None: ...
    @property
    def content_type(self) -> str: ...
    @property
    def content_length(self) -> int: ...
    @property
    def mimetype(self) -> str: ...
    @property
    def mimetype_params(self) -> Dict[str, str]: ...
    def save(
        self, dst: Union[str, PathLike, IO[bytes]], buffer_size: int = ...
    ) -> None: ...
    def close(self) -> None: ...
    def __bool__(self) -> bool: ...
    def __getattr__(self, name: str) -> Any: ...
    def __iter__(self) -> Iterator[bytes]: ...
    def __repr__(self) -> str: ...

View file

@ -0,0 +1,500 @@
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import typing as t
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers.request import Request
from ..wrappers.response import Response
from .console import Console
from .tbtools import Frame
from .tbtools import get_current_traceback
from .tbtools import render_console_html
from .tbtools import Traceback
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin: str) -> str:
    """Return a short, stable hash of the debugger PIN for cookie comparison."""
    salted = f"{pin} added salt".encode("utf-8", "replace")
    digest = hashlib.sha1(salted).hexdigest()
    return digest[:12]
# Cached machine identifier; computed once by get_machine_id() and reused.
_machine_id: t.Optional[t.Union[str, bytes]] = None
def get_machine_id() -> t.Optional[t.Union[str, bytes]]:
    """Return a memoized, semi-stable identifier for this machine, or ``None``.

    Tries, in order: Linux machine-id files (plus cgroup info to
    disambiguate containers), the macOS ``ioreg`` serial number, and the
    Windows registry ``MachineGuid``. The result feeds into the debugger
    PIN/cookie derivation, so stability across restarts matters more than
    uniqueness guarantees.
    """
    global _machine_id

    # Fast path: already computed during this process's lifetime.
    if _machine_id is not None:
        return _machine_id

    def _generate() -> t.Optional[t.Union[str, bytes]]:
        linux = b""

        # machine-id is stable across boots, boot_id is not.
        for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
            try:
                with open(filename, "rb") as f:
                    value = f.readline().strip()
            except OSError:
                continue

            if value:
                linux += value
                break

        # Containers share the same machine id, add some cgroup
        # information. This is used outside containers too but should be
        # relatively stable across boots.
        try:
            with open("/proc/self/cgroup", "rb") as f:
                linux += f.readline().strip().rpartition(b"/")[2]
        except OSError:
            pass

        if linux:
            return linux

        # On OS X, use ioreg to get the computer's serial number.
        try:
            # subprocess may not be available, e.g. Google App Engine
            # https://github.com/pallets/werkzeug/issues/925
            from subprocess import Popen, PIPE

            dump = Popen(
                ["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"], stdout=PIPE
            ).communicate()[0]
            match = re.search(b'"serial-number" = <([^>]+)', dump)

            if match is not None:
                return match.group(1)
        except (OSError, ImportError):
            pass

        # On Windows, use winreg to get the machine guid.
        if sys.platform == "win32":
            import winreg

            try:
                with winreg.OpenKey(
                    winreg.HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Microsoft\\Cryptography",
                    0,
                    winreg.KEY_READ | winreg.KEY_WOW64_64KEY,
                ) as rk:
                    guid: t.Union[str, bytes]
                    guid_type: int
                    guid, guid_type = winreg.QueryValueEx(rk, "MachineGuid")

                    # REG_SZ values come back as str; encode for a bytes result.
                    if guid_type == winreg.REG_SZ:
                        return guid.encode("utf-8")

                    return guid
            except OSError:
                pass

        # No identifier could be determined on this platform.
        return None

    _machine_id = _generate()
    return _machine_id
class _ConsoleFrame:
    """Adapter that lets the standalone console reuse the per-frame console code."""

    def __init__(self, namespace: t.Dict[str, t.Any]):
        # A fixed frame id of 0 identifies the standalone console.
        self.id = 0
        self.console = Console(namespace)
def get_pin_and_cookie_name(
    app: "WSGIApplication",
) -> t.Union[t.Tuple[str, str], t.Tuple[None, None]]:
    """Given an application object this returns a semi-stable 9 digit pin
    code and a random key. The hope is that this is stable between
    restarts to not make debugging particularly frustrating. If the pin
    was forcefully disabled this returns `None`.

    Second item in the resulting tuple is the cookie name for remembering.
    """
    pin = os.environ.get("WERKZEUG_DEBUG_PIN")
    rv = None
    num = None

    # Pin was explicitly disabled
    if pin == "off":
        return None, None

    # Pin was provided explicitly
    if pin is not None and pin.replace("-", "").isdigit():
        # If there are separators in the pin, return it directly
        if "-" in pin:
            rv = pin
        else:
            num = pin

    # Fall back to the class's module when the app has no __module__ itself.
    modname = getattr(app, "__module__", t.cast(object, app).__class__.__module__)
    username: t.Optional[str]

    try:
        # getuser imports the pwd module, which does not exist in Google
        # App Engine. It may also raise a KeyError if the UID does not
        # have a username, such as in Docker.
        username = getpass.getuser()
    except (ImportError, KeyError):
        username = None

    mod = sys.modules.get(modname)

    # This information only exists to make the cookie unique on the
    # computer, not as a security feature.
    probably_public_bits = [
        username,
        modname,
        getattr(app, "__name__", type(app).__name__),
        getattr(mod, "__file__", None),
    ]

    # This information is here to make it harder for an attacker to
    # guess the cookie name. They are unlikely to be contained anywhere
    # within the unauthenticated debug page.
    private_bits = [str(uuid.getnode()), get_machine_id()]

    # Hash all bits in a fixed order; skipping falsy ones keeps the
    # result stable when e.g. no username is available.
    h = hashlib.sha1()
    for bit in chain(probably_public_bits, private_bits):
        if not bit:
            continue
        if isinstance(bit, str):
            bit = bit.encode("utf-8")
        h.update(bit)
    h.update(b"cookiesalt")

    cookie_name = f"__wzd{h.hexdigest()[:20]}"

    # If we need to generate a pin we salt it a bit more so that we don't
    # end up with the same value and generate out 9 digits
    if num is None:
        h.update(b"pinsalt")
        num = f"{int(h.hexdigest(), 16):09d}"[:9]

    # Format the pincode in groups of digits for easier remembering if
    # we don't have a result yet.
    if rv is None:
        for group_size in 5, 4, 3:
            if len(num) % group_size == 0:
                rv = "-".join(
                    num[x : x + group_size].rjust(group_size, "0")
                    for x in range(0, len(num), group_size)
                )
                break
        else:
            # No group size divides evenly; use the digits as-is.
            rv = num

    return rv, cookie_name
class DebuggedApplication:
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in ths
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
_pin: str
_pin_cookie: str
    def __init__(
        self,
        app: "WSGIApplication",
        evalex: bool = False,
        request_key: str = "werkzeug.request",
        console_path: str = "/console",
        console_init_func: t.Optional[t.Callable[[], t.Dict[str, t.Any]]] = None,
        show_hidden_frames: bool = False,
        pin_security: bool = True,
        pin_logging: bool = True,
    ) -> None:
        # Normalize any falsy console_init_func to None.
        if not console_init_func:
            console_init_func = None
        self.app = app
        self.evalex = evalex
        # Frames and tracebacks captured from failed requests, keyed by id,
        # so the interactive debugger can look them up later.
        self.frames: t.Dict[int, t.Union[Frame, _ConsoleFrame]] = {}
        self.tracebacks: t.Dict[int, Traceback] = {}
        self.request_key = request_key
        self.console_path = console_path
        self.console_init_func = console_init_func
        self.show_hidden_frames = show_hidden_frames
        # Per-process secret mixed into debugger exchanges.
        self.secret = gen_salt(20)
        self._failed_pin_auth = 0
        self.pin_logging = pin_logging
        if pin_security:
            # Print out the pin for the debugger on standard out.
            if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
                _log("warning", " * Debugger is active!")

                if self.pin is None:
                    _log("warning", " * Debugger PIN disabled. DEBUGGER UNSECURED!")
                else:
                    _log("info", " * Debugger PIN: %s", self.pin)
        else:
            # PIN security disabled: the ``pin`` property setter stores None.
            self.pin = None
@property
def pin(self) -> t.Optional[str]:
if not hasattr(self, "_pin"):
pin_cookie = get_pin_and_cookie_name(self.app)
self._pin, self._pin_cookie = pin_cookie # type: ignore
return self._pin
    @pin.setter
    def pin(self, value: str) -> None:
        # Explicitly setting the pin bypasses lazy generation in the getter.
        self._pin = value
@property
def pin_cookie_name(self) -> str:
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
pin_cookie = get_pin_and_cookie_name(self.app)
self._pin, self._pin_cookie = pin_cookie # type: ignore
return self._pin_cookie
    def debug_application(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterator[bytes]:
        """Run the application and conserve the traceback frames.

        On an unhandled exception the interactive traceback page is
        rendered instead (if the response headers were not already sent).
        """
        app_iter = None
        try:
            app_iter = self.app(environ, start_response)
            yield from app_iter
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore
        except Exception:
            # Close the iterable even on error so resources are released.
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore
            # skip=1 hides this middleware frame from the rendered traceback.
            traceback = get_current_traceback(
                skip=1,
                show_hidden_frames=self.show_hidden_frames,
                ignore_system_exceptions=True,
            )
            # Keep the frames alive so the console can later evaluate
            # expressions inside them.
            for frame in traceback.frames:
                self.frames[frame.id] = frame
            self.tracebacks[traceback.id] = traceback
            try:
                start_response(
                    "500 INTERNAL SERVER ERROR",
                    [
                        ("Content-Type", "text/html; charset=utf-8"),
                        # Disable Chrome's XSS protection, the debug
                        # output can cause false-positives.
                        ("X-XSS-Protection", "0"),
                    ],
                )
            except Exception:
                # if we end up here there has been output but an error
                # occurred.  in that situation we can do nothing fancy any
                # more, better log something into the error log and fall
                # back gracefully.
                environ["wsgi.errors"].write(
                    "Debugging middleware caught exception in streamed "
                    "response at a point where response headers were already "
                    "sent.\n"
                )
            else:
                # Headers went out fine; render the interactive traceback.
                is_trusted = bool(self.check_pin_trust(environ))
                yield traceback.render_full(
                    evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
                ).encode("utf-8", "replace")
            traceback.log(environ["wsgi.errors"])
def execute_command(
self, request: Request, command: str, frame: t.Union[Frame, _ConsoleFrame]
) -> Response:
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request: Request) -> Response:
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def get_resource(self, request: Request, filename: str) -> Response:
"""Return a static resource from the shared folder."""
filename = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, filename)
except OSError:
data = None
if data is not None:
mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
return Response(data, mimetype=mimetype)
return Response("Not Found", status=404)
def check_pin_trust(self, environ: "WSGIEnvironment") -> t.Optional[bool]:
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self) -> None:
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
self._failed_pin_auth += 1
    def pin_auth(self, request: Request) -> Response:
        """Authenticates with the pin.

        Returns a JSON response carrying ``auth`` (success) and
        ``exhausted`` (too many failures) flags, and sets the trust cookie
        on success.
        """
        exhausted = False
        auth = False
        trust = self.check_pin_trust(request.environ)
        pin = t.cast(str, self.pin)
        # If the trust return value is `None` it means that the cookie is
        # set but the stored pin hash value is bad. This means that the
        # pin was changed. In this case we count a bad auth and unset the
        # cookie. This way it becomes harder to guess the cookie name
        # instead of the pin as we still count up failures.
        bad_cookie = False
        if trust is None:
            self._fail_pin_auth()
            bad_cookie = True
        # If we're trusted, we're authenticated.
        elif trust:
            auth = True
        # If we failed too many times, then we're locked out.
        elif self._failed_pin_auth > 10:
            exhausted = True
        # Otherwise go through pin based authentication
        else:
            entered_pin = request.args["pin"]
            # Dashes and surrounding whitespace are ignored, so
            # "123-456" matches "123456".
            if entered_pin.strip().replace("-", "") == pin.replace("-", ""):
                self._failed_pin_auth = 0
                auth = True
            else:
                self._fail_pin_auth()
        rv = Response(
            json.dumps({"auth": auth, "exhausted": exhausted}),
            mimetype="application/json",
        )
        if auth:
            # Remember the successful auth in a "<timestamp>|<hash>" cookie.
            rv.set_cookie(
                self.pin_cookie_name,
                f"{int(time.time())}|{hash_pin(pin)}",
                httponly=True,
                samesite="Strict",
                secure=request.is_secure,
            )
        elif bad_cookie:
            rv.delete_cookie(self.pin_cookie_name)
        return rv
def log_pin_request(self) -> Response:
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info", " * To enable the debugger you need to enter the security pin:"
)
_log("info", " * Debugger pin code: %s", self.pin)
return Response("")
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        """Dispatch the requests."""
        # important: don't ever access a function here that reads the incoming
        # form data!  Otherwise the application won't have access to that data
        # any more!
        request = Request(environ)
        # Default: run the wrapped app with traceback capturing.
        response = self.debug_application
        if request.args.get("__debugger__") == "yes":
            cmd = request.args.get("cmd")
            arg = request.args.get("f")
            secret = request.args.get("s")
            frame = self.frames.get(request.args.get("frm", type=int))  # type: ignore
            if cmd == "resource" and arg:
                # Static assets (css/js/images) need no secret.
                response = self.get_resource(request, arg)  # type: ignore
            elif cmd == "pinauth" and secret == self.secret:
                response = self.pin_auth(request)  # type: ignore
            elif cmd == "printpin" and secret == self.secret:
                response = self.log_pin_request()  # type: ignore
            elif (
                # Code execution requires evalex, a matching secret, a live
                # frame, and a pin-trusted session.
                self.evalex
                and cmd is not None
                and frame is not None
                and self.secret == secret
                and self.check_pin_trust(environ)
            ):
                response = self.execute_command(request, cmd, frame)  # type: ignore
            elif (
                self.evalex
                and self.console_path is not None
                and request.path == self.console_path
            ):
                response = self.display_console(request)  # type: ignore
        return response(environ, start_response)

View file

@ -0,0 +1,214 @@
import code
import sys
import typing as t
from html import escape
from types import CodeType
from ..local import Local
from .repr import debug_repr
from .repr import dump
from .repr import helper
if t.TYPE_CHECKING:
import codeop # noqa: F401
_local = Local()
class HTMLStringO:
    """A ``StringIO``-like text sink that HTML-escapes everything written
    through the file API.  Chunks accumulate in a list until they are
    drained with :meth:`reset` or :meth:`readline`.
    """

    def __init__(self) -> None:
        self._buffer: t.List[str] = []

    def isatty(self) -> bool:
        return False

    def close(self) -> None:
        pass

    def flush(self) -> None:
        pass

    def seek(self, n: int, mode: int = 0) -> None:
        pass

    def readline(self) -> str:
        # Drain the oldest chunk; an empty string signals EOF.
        if not self._buffer:
            return ""
        return self._buffer.pop(0)

    def reset(self) -> str:
        """Return everything buffered so far and clear the buffer."""
        val = "".join(self._buffer)
        self._buffer.clear()
        return val

    def _write(self, x: str) -> None:
        # Raw, unescaped append (used by the display hook for HTML output);
        # stray bytes are decoded leniently.
        if isinstance(x, bytes):
            x = x.decode("utf-8", "replace")
        self._buffer.append(x)

    def write(self, x: str) -> None:
        self._write(escape(x))

    def writelines(self, x: t.Iterable[str]) -> None:
        self._write(escape("".join(x)))
class ThreadedStream:
    """Thread-local wrapper for sys.stdout for the interactive console."""

    @staticmethod
    def push() -> None:
        """Install the wrapper (once) and give this thread a fresh buffer."""
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = t.cast(t.TextIO, ThreadedStream())
        _local.stream = HTMLStringO()

    @staticmethod
    def fetch() -> str:
        """Return and clear everything this thread printed since push()."""
        try:
            stream = _local.stream
        except AttributeError:
            # push() was never called in this thread; nothing was captured.
            return ""
        return stream.reset()  # type: ignore

    @staticmethod
    def displayhook(obj: object) -> None:
        """``sys.displayhook`` replacement that renders reprs as HTML."""
        try:
            stream = _local.stream
        except AttributeError:
            # Not inside the debugger console; defer to the original hook.
            return _displayhook(obj)  # type: ignore
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            # Mirror the interactive interpreter's "_" convenience name.
            _local._current_ipy.locals["_"] = obj
            stream._write(debug_repr(obj))

    def __setattr__(self, name: str, value: t.Any) -> None:
        raise AttributeError(f"read only attribute {name}")

    def __dir__(self) -> t.List[str]:
        return dir(sys.__stdout__)

    def __getattribute__(self, name: str) -> t.Any:
        # Proxy every attribute access to the per-thread buffer, falling
        # back to the real stdout for threads that never pushed one.
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self) -> str:
        return repr(sys.__stdout__)


# add the threaded stream as display hook
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader:
def __init__(self) -> None:
self._storage: t.Dict[int, str] = {}
def register(self, code: CodeType, source: str) -> None:
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code: CodeType) -> t.Optional[str]:
try:
return self._storage[id(code)]
except KeyError:
return None
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter behind the web console: renders results as HTML and
    registers entered source with a loader so tracebacks can show it."""

    locals: t.Dict[str, t.Any]

    def __init__(self, globals: t.Dict[str, t.Any], locals: t.Dict[str, t.Any]) -> None:
        self.loader = _ConsoleLoader()
        # Merge globals into locals and expose the console helpers.
        locals = {
            **globals,
            **locals,
            "dump": dump,
            "help": helper,
            "__loader__": self.loader,
        }
        super().__init__(locals)
        original_compile = self.compile

        # Wrap compile so every compiled unit is registered with the loader.
        def compile(source: str, filename: str, symbol: str) -> t.Optional[CodeType]:
            code = original_compile(source, filename, symbol)
            if code is not None:
                self.loader.register(code, source)
            return code

        self.compile = compile  # type: ignore[assignment]
        self.more = False  # are we waiting for a continuation line?
        self.buffer: t.List[str] = []  # pending lines of an incomplete block

    def runsource(self, source: str, **kwargs: t.Any) -> str:  # type: ignore
        """Run one line of input; return the echoed prompt + output HTML."""
        source = f"{source.rstrip()}\n"
        # Capture everything printed during execution.
        ThreadedStream.push()
        prompt = "... " if self.more else ">>> "
        try:
            source_to_eval = "".join(self.buffer + [source])
            if super().runsource(source_to_eval, "<debugger>", "single"):
                # Input is syntactically incomplete; keep buffering.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return prompt + escape(source) + output

    def runcode(self, code: CodeType) -> None:
        try:
            exec(code, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self) -> None:
        from .tbtools import get_current_traceback

        tb = get_current_traceback(skip=1)
        sys.stdout._write(tb.render_summary())  # type: ignore

    def showsyntaxerror(self, filename: t.Optional[str] = None) -> None:
        from .tbtools import get_current_traceback

        # skip=4 hides the interpreter machinery frames from the summary.
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())  # type: ignore

    def write(self, data: str) -> None:
        sys.stdout.write(data)
class Console:
    """An interactive console bound to a namespace."""

    def __init__(
        self,
        globals: t.Optional[t.Dict[str, t.Any]] = None,
        locals: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> None:
        self._ipy = _InteractiveConsole(
            {} if globals is None else globals,
            {} if locals is None else locals,
        )

    def eval(self, code: str) -> str:
        """Run *code* in the console and return the rendered HTML output."""
        _local._current_ipy = self._ipy
        old_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource() swaps in a capturing stdout; always restore it.
            sys.stdout = old_stdout

View file

@ -0,0 +1,284 @@
"""Object representations for debugging purposes. Unlike the default
repr, these expose more information and produce HTML instead of ASCII.
Together with the CSS and JavaScript of the debugger this gives a
colorful and more compact output.
"""
import codecs
import re
import sys
import typing as t
from collections import deque
from html import escape
from traceback import format_exception_only
missing = object()
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
RegexType = type(_paragraph_re)
HELP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
<pre class=help>%(text)s</pre>
</div>\
"""
OBJECT_DUMP_HTML = """\
<div class=box>
<h3>%(title)s</h3>
%(repr)s
<table>%(items)s</table>
</div>\
"""
def debug_repr(obj: object) -> str:
    """Return an HTML debug representation of *obj*."""
    generator = DebugReprGenerator()
    return generator.repr(obj)
def dump(obj: object = missing) -> None:
    """Print the object details to stdout._write (for the interactive
    console of the web debugger).

    Called without an argument it dumps the caller's local variables
    instead.
    """
    gen = DebugReprGenerator()
    if obj is missing:
        # sys._getframe(1) is the calling frame; dump its locals.
        rv = gen.dump_locals(sys._getframe(1).f_locals)
    else:
        rv = gen.dump_object(obj)
    # The patched stdout (HTMLStringO) exposes _write for raw HTML output.
    sys.stdout._write(rv)  # type: ignore
class _Helper:
"""Displays an HTML version of the normal help, for the interactive
debugger only because it requires a patched sys.stdout.
"""
def __repr__(self) -> str:
return "Type help(object) for help about object."
def __call__(self, topic: t.Optional[t.Any] = None) -> None:
if topic is None:
sys.stdout._write(f"<span class=help>{self!r}</span>") # type: ignore
return
import pydoc
pydoc.help(topic)
rv = sys.stdout.reset() # type: ignore
if isinstance(rv, bytes):
rv = rv.decode("utf-8", "ignore")
paragraphs = _paragraph_re.split(rv)
if len(paragraphs) > 1:
title = paragraphs[0]
text = "\n\n".join(paragraphs[1:])
else:
title = "Help"
text = paragraphs[0]
sys.stdout._write(HELP_HTML % {"title": title, "text": text}) # type: ignore
helper = _Helper()
def _add_subclass_info(
inner: str, obj: object, base: t.Union[t.Type, t.Tuple[t.Type, ...]]
) -> str:
if isinstance(base, tuple):
for base in base:
if type(obj) is base:
return inner
elif type(obj) is base:
return inner
module = ""
if obj.__class__.__module__ not in ("__builtin__", "exceptions"):
module = f'<span class="module">{obj.__class__.__module__}.</span>'
return f"{module}{type(obj).__name__}({inner})"
def _sequence_repr_maker(
    left: str, right: str, base: t.Type, limit: int = 8
) -> t.Callable[["DebugReprGenerator", t.Iterable, bool], str]:
    """Build a repr method for a sequence type delimited by *left*/*right*.

    Items beyond *limit* are wrapped in an "extended" span that the
    debugger UI collapses by default.
    """

    def proxy(self: "DebugReprGenerator", obj: t.Iterable, recursive: bool) -> str:
        if recursive:
            # Self-referential container: don't descend again.
            return _add_subclass_info(f"{left}...{right}", obj, base)
        parts = [left]
        extended = False
        for idx, item in enumerate(obj):
            if idx:
                parts.append(", ")
            if idx == limit:
                # Start of the collapsible tail.
                parts.append('<span class="extended">')
                extended = True
            parts.append(self.repr(item))
        if extended:
            parts.append("</span>")
        parts.append(right)
        return _add_subclass_info("".join(parts), obj, base)

    return proxy
class DebugReprGenerator:
    """Dispatches objects to type-specific HTML repr renderers, tracking a
    stack of in-progress objects to cut off self-referential cycles."""

    def __init__(self) -> None:
        # Objects currently being rendered, used for cycle detection.
        self._stack: t.List[t.Any] = []

    list_repr = _sequence_repr_maker("[", "]", list)
    tuple_repr = _sequence_repr_maker("(", ")", tuple)
    set_repr = _sequence_repr_maker("set([", "])", set)
    frozenset_repr = _sequence_repr_maker("frozenset([", "])", frozenset)
    deque_repr = _sequence_repr_maker(
        '<span class="module">collections.</span>deque([', "])", deque
    )

    def regex_repr(self, obj: t.Pattern) -> str:
        pattern = repr(obj.pattern)
        # Undo repr's escaping so the pattern reads like the source regex.
        pattern = codecs.decode(pattern, "unicode-escape", "ignore")  # type: ignore
        pattern = f"r{pattern}"
        return f're.compile(<span class="string regex">{pattern}</span>)'

    def string_repr(self, obj: t.Union[str, bytes], limit: int = 70) -> str:
        buf = ['<span class="string">']
        r = repr(obj)
        # shorten the repr when the hidden part would be at least 3 chars
        if len(r) - limit > 2:
            buf.extend(
                (
                    escape(r[:limit]),
                    '<span class="extended">',
                    escape(r[limit:]),
                    "</span>",
                )
            )
        else:
            buf.append(escape(r))
        buf.append("</span>")
        out = "".join(buf)
        # if the repr looks like a standard string, add subclass info if needed
        if r[0] in "'\"" or (r[0] == "b" and r[1] in "'\""):
            return _add_subclass_info(out, obj, (bytes, str))
        # otherwise, assume the repr distinguishes the subclass already
        return out

    def dict_repr(
        self,
        d: t.Union[t.Dict[int, None], t.Dict[str, int], t.Dict[t.Union[str, int], int]],
        recursive: bool,
        limit: int = 5,
    ) -> str:
        if recursive:
            # Self-referential dict: don't descend again.
            return _add_subclass_info("{...}", d, dict)
        buf = ["{"]
        have_extended_section = False
        for idx, (key, value) in enumerate(d.items()):
            if idx:
                buf.append(", ")
            if idx == limit - 1:
                # Remaining pairs go into the collapsible section.
                buf.append('<span class="extended">')
                have_extended_section = True
            buf.append(
                f'<span class="pair"><span class="key">{self.repr(key)}</span>:'
                f' <span class="value">{self.repr(value)}</span></span>'
            )
        if have_extended_section:
            buf.append("</span>")
        buf.append("}")
        return _add_subclass_info("".join(buf), d, dict)

    def object_repr(
        self, obj: t.Optional[t.Union[t.Type[dict], t.Callable, t.Type[list]]]
    ) -> str:
        # Fallback: plain repr(), escaped for HTML.
        r = repr(obj)
        return f'<span class="object">{escape(r)}</span>'

    def dispatch_repr(self, obj: t.Any, recursive: bool) -> str:
        # Order matters: e.g. bool is an int, str/bytes before generic repr.
        if obj is helper:
            return f'<span class="help">{helper!r}</span>'
        if isinstance(obj, (int, float, complex)):
            return f'<span class="number">{obj!r}</span>'
        if isinstance(obj, str) or isinstance(obj, bytes):
            return self.string_repr(obj)
        if isinstance(obj, RegexType):
            return self.regex_repr(obj)
        if isinstance(obj, list):
            return self.list_repr(obj, recursive)
        if isinstance(obj, tuple):
            return self.tuple_repr(obj, recursive)
        if isinstance(obj, set):
            return self.set_repr(obj, recursive)
        if isinstance(obj, frozenset):
            return self.frozenset_repr(obj, recursive)
        if isinstance(obj, dict):
            return self.dict_repr(obj, recursive)
        if isinstance(obj, deque):
            return self.deque_repr(obj, recursive)
        return self.object_repr(obj)

    def fallback_repr(self) -> str:
        # Rendering the repr itself raised; show the exception instead.
        try:
            info = "".join(format_exception_only(*sys.exc_info()[:2]))
        except Exception:
            info = "?"
        return (
            '<span class="brokenrepr">'
            f"&lt;broken repr ({escape(info.strip())})&gt;</span>"
        )

    def repr(self, obj: object) -> str:
        # An object already on the stack means we re-entered it: a cycle.
        recursive = False
        for item in self._stack:
            if item is obj:
                recursive = True
                break
        self._stack.append(obj)
        try:
            try:
                return self.dispatch_repr(obj, recursive)
            except Exception:
                return self.fallback_repr()
        finally:
            self._stack.pop()

    def dump_object(self, obj: object) -> str:
        repr = None
        items: t.Optional[t.List[t.Tuple[str, str]]] = None
        if isinstance(obj, dict):
            title = "Contents of"
            items = []
            for key, value in obj.items():
                if not isinstance(key, str):
                    # Non-string keys: fall back to attribute dumping below.
                    items = None
                    break
                items.append((key, self.repr(value)))
        if items is None:
            items = []
            repr = self.repr(obj)
            for key in dir(obj):
                try:
                    items.append((key, self.repr(getattr(obj, key))))
                except Exception:
                    # Skip attributes whose access raises.
                    pass
            title = "Details for"
        title += f" {object.__repr__(obj)[1:-1]}"
        return self.render_object_dump(items, title, repr)

    def dump_locals(self, d: t.Dict[str, t.Any]) -> str:
        items = [(key, self.repr(value)) for key, value in d.items()]
        return self.render_object_dump(items, "Local variables in frame")

    def render_object_dump(
        self, items: t.List[t.Tuple[str, str]], title: str, repr: t.Optional[str] = None
    ) -> str:
        html_items = []
        for key, value in items:
            # value is already HTML; only the key needs escaping.
            html_items.append(f"<tr><th>{escape(key)}<td><pre class=repr>{value}</pre>")
        if not html_items:
            html_items.append("<tr><td><em>Nothing</em>")
        return OBJECT_DUMP_HTML % {
            "title": escape(title),
            "repr": f"<pre class=repr>{repr if repr else ''}</pre>",
            "items": "\n".join(html_items),
        }

View file

@ -0,0 +1,96 @@
-------------------------------
UBUNTU FONT LICENCE Version 1.0
-------------------------------
PREAMBLE
This licence allows the licensed fonts to be used, studied, modified and
redistributed freely. The fonts, including any derivative works, can be
bundled, embedded, and redistributed provided the terms of this licence
are met. The fonts and derivatives, however, cannot be released under
any other licence. The requirement for fonts to remain under this
licence does not require any document created using the fonts or their
derivatives to be published under this licence, as long as the primary
purpose of the document is not to be a vehicle for the distribution of
the fonts.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright
Holder(s) under this licence and clearly marked as such. This may
include source files, build scripts and documentation.
"Original Version" refers to the collection of Font Software components
as received under this licence.
"Modified Version" refers to any derivative made by adding to, deleting,
or substituting -- in part or in whole -- any of the components of the
Original Version, by changing formats or by porting the Font Software to
a new environment.
"Copyright Holder(s)" refers to all individuals and companies who have a
copyright ownership of the Font Software.
"Substantially Changed" refers to Modified Versions which can be easily
identified as dissimilar to the Font Software by users of the Font
Software comparing the Original Version with the Modified Version.
To "Propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification and with or without charging
a redistribution fee), making available to the public, and in some
countries other activities as well.
PERMISSION & CONDITIONS
This licence does not grant any rights under trademark law and all such
rights are reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of the Font Software, to propagate the Font Software, subject to
the below conditions:
1) Each copy of the Font Software must contain the above copyright
notice and this licence. These can be included either as stand-alone
text files, human-readable headers or in the appropriate machine-
readable metadata fields within text or binary files as long as those
fields can be easily viewed by the user.
2) The font name complies with the following:
(a) The Original Version must retain its name, unmodified.
(b) Modified Versions which are Substantially Changed must be renamed to
avoid use of the name of the Original Version or similar names entirely.
(c) Modified Versions which are not Substantially Changed must be
renamed to both (i) retain the name of the Original Version and (ii) add
additional naming elements to distinguish the Modified Version from the
Original Version. The name of such Modified Versions must be the name of
the Original Version, with "derivative X" where X represents the name of
the new work, appended to that name.
3) The name(s) of the Copyright Holder(s) and any contributor to the
Font Software shall not be used to promote, endorse or advertise any
Modified Version, except (i) as required by this licence, (ii) to
acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
their explicit written permission.
4) The Font Software, modified or unmodified, in part or in whole, must
be distributed entirely under this licence, and must not be distributed
under any other licence. The requirement for fonts to remain under this
licence does not affect any document created using the Font Software,
except any version of the Font Software extracted from a document
created using the Font Software may only be distributed under this
licence.
TERMINATION
This licence becomes null and void if any of the above conditions are
not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
DEALINGS IN THE FONT SOFTWARE.

View file

@ -0,0 +1,6 @@
Silk icon set 1.3 by Mark James <mjames@gmail.com>
http://www.famfamfam.com/lab/icons/silk/
License: [CC-BY-2.5](https://creativecommons.org/licenses/by/2.5/)
or [CC-BY-3.0](https://creativecommons.org/licenses/by/3.0/)

Binary file not shown.

After

Width:  |  Height:  |  Size: 507 B

View file

@ -0,0 +1,359 @@
docReady(() => {
  // Without a trusted session the user must authenticate with the PIN first.
  if (!EVALEX_TRUSTED) {
    initPinBox();
  }
  // if we are in console mode, show the console.
  if (CONSOLE_MODE && EVALEX) {
    createInteractiveConsole();
  }
  const frames = document.querySelectorAll("div.traceback div.frame");
  if (EVALEX) {
    addConsoleIconToFrames(frames);
  }
  // Clicking an exception detail scrolls back to the traceback.
  addEventListenersToElements(document.querySelectorAll("div.detail"), "click", () =>
    document.querySelector("div.traceback").scrollIntoView(false)
  );
  addToggleFrameTraceback(frames);
  addToggleTraceTypesOnClick(document.querySelectorAll("h2.traceback"));
  addInfoPrompt(document.querySelectorAll("span.nojavascript"));
  wrapPlainTraceback();
});
function addToggleFrameTraceback(frames) {
  // Clicking a frame expands or collapses its source context.
  for (const frame of frames) {
    frame.addEventListener("click", () => {
      const pre = frame.getElementsByTagName("pre")[0];
      pre.parentElement.classList.toggle("expanded");
    });
  }
}
function wrapPlainTraceback() {
  // Replace the plain-text <textarea> with a <pre> holding the same text.
  const textarea = document.querySelector("div.plain textarea");
  const pre = document.createElement("pre");
  pre.appendChild(document.createTextNode(textarea.textContent));
  textarea.replaceWith(pre);
}
function initPinBox() {
  // Intercept the PIN form submit and authenticate via fetch instead.
  document.querySelector(".pin-prompt form").addEventListener(
    "submit",
    function (event) {
      event.preventDefault();
      const pin = encodeURIComponent(this.pin.value);
      const encodedSecret = encodeURIComponent(SECRET);
      const btn = this.btn;
      // Prevent double submits while the request is in flight.
      btn.disabled = true;
      fetch(
        `${document.location.pathname}?__debugger__=yes&cmd=pinauth&pin=${pin}&s=${encodedSecret}`
      )
        .then((res) => res.json())
        .then(({auth, exhausted}) => {
          if (auth) {
            // The server set the trust cookie; hide the prompt.
            EVALEX_TRUSTED = true;
            fadeOut(document.getElementsByClassName("pin-prompt")[0]);
          } else {
            alert(
              `Error: ${
                exhausted
                  ? "too many attempts. Restart server to retry."
                  : "incorrect pin"
              }`
            );
          }
        })
        .catch((err) => {
          alert("Error: Could not verify PIN. Network error?");
          console.error(err);
        })
        .finally(() => (btn.disabled = false));
    },
    false
  );
}
function promptForPin() {
  if (EVALEX_TRUSTED) {
    return;
  }
  // Ask the server to print the PIN to its console, then show the prompt.
  const s = encodeURIComponent(SECRET);
  fetch(`${document.location.pathname}?__debugger__=yes&cmd=printpin&s=${s}`);
  const pinPrompt = document.getElementsByClassName("pin-prompt")[0];
  fadeIn(pinPrompt);
  document.querySelector('.pin-prompt input[name="pin"]').focus();
}
/**
 * Helper function for shell initialization.  Builds the console DOM for a
 * frame on first open and only toggles visibility afterwards.
 */
function openShell(consoleNode, target, frameID) {
  promptForPin();
  if (consoleNode) {
    // Console already built for this frame: just show/hide it.
    slideToggle(consoleNode);
    return consoleNode;
  }
  let historyPos = 0;
  const history = [""];
  const consoleElement = createConsole();
  const output = createConsoleOutput();
  const form = createConsoleInputForm();
  const command = createConsoleInput();
  target.parentNode.appendChild(consoleElement);
  consoleElement.append(output);
  consoleElement.append(form);
  form.append(command);
  command.focus();
  slideToggle(consoleElement);
  form.addEventListener("submit", (e) => {
    handleConsoleSubmit(e, command, frameID).then((consoleOutput) => {
      output.append(consoleOutput);
      command.focus();
      consoleElement.scrollTo(0, consoleElement.scrollHeight);
      // Insert the new command before the trailing "" sentinel entry.
      const old = history.pop();
      history.push(command.value);
      if (typeof old !== "undefined") {
        history.push(old);
      }
      historyPos = history.length - 1;
      command.value = "";
    });
  });
  command.addEventListener("keydown", (e) => {
    if (e.key === "l" && e.ctrlKey) {
      // Ctrl+L clears the screen like a terminal.
      output.innerText = "--- screen cleared ---";
    } else if (e.key === "ArrowUp" || e.key === "ArrowDown") {
      // Handle up arrow and down arrow.
      if (e.key === "ArrowUp" && historyPos > 0) {
        e.preventDefault();
        historyPos--;
      } else if (e.key === "ArrowDown" && historyPos < history.length - 1) {
        historyPos++;
      }
      command.value = history[historyPos];
    }
    return false;
  });
  return consoleElement;
}
function addEventListenersToElements(elements, event, listener) {
  // Attach the same listener for `event` to every element in the collection.
  for (const el of elements) {
    el.addEventListener(event, listener);
  }
}
/**
 * Replace the "JavaScript required" notices with usage instructions.
 */
function addInfoPrompt(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].innerHTML =
      "<p>To switch between the interactive traceback and the plaintext " +
      'one, you can click on the "Traceback" headline. From the text ' +
      "traceback you can also create a paste of it. " +
      (!EVALEX
        ? ""
        : "For code execution mouse-over the frame you want to debug and " +
          "click on the console icon on the right side." +
          "<p>You can execute arbitrary Python code in the stack frames and " +
          "there are some extra helpers available for introspection:" +
          "<ul><li><code>dump()</code> shows all variables in the frame" +
          "<li><code>dump(obj)</code> dumps all that's known about the object</ul>");
    elements[i].classList.remove("nojavascript");
  }
}
function addConsoleIconToFrames(frames) {
  for (let i = 0; i < frames.length; i++) {
    // One console per frame, shared by all of that frame's source blocks;
    // consoleNode is captured by the click closures below.
    let consoleNode = null;
    const target = frames[i];
    // Frame element ids look like "frame-<id>"; strip the prefix.
    const frameID = frames[i].id.substring(6);
    for (let j = 0; j < target.getElementsByTagName("pre").length; j++) {
      const img = createIconForConsole();
      img.addEventListener("click", (e) => {
        // Don't let the click also toggle the frame expansion.
        e.stopPropagation();
        consoleNode = openShell(consoleNode, target, frameID);
        return false;
      });
      target.getElementsByTagName("pre")[j].append(img);
    }
  }
}
function slideToggle(target) {
  // "active" controls the CSS that shows or hides the console.
  const isActive = target.classList.contains("active");
  target.classList.toggle("active", !isActive);
}
/**
 * toggle traceback types on click.
 */
function addToggleTraceTypesOnClick(elements) {
  for (let i = 0; i < elements.length; i++) {
    elements[i].addEventListener("click", () => {
      document.querySelector("div.traceback").classList.toggle("hidden");
      document.querySelector("div.plain").classList.toggle("hidden");
    });
    elements[i].style.cursor = "pointer";
    // Start with the plain traceback hidden.  NOTE(review): this toggles
    // once per matched element; with more than one h2.traceback the
    // toggles would cancel out — appears to assume a single heading.
    document.querySelector("div.plain").classList.toggle("hidden");
  }
}
function createConsole() {
  // A <pre> so console output keeps its whitespace; starts expanded.
  const consoleNode = document.createElement("pre");
  consoleNode.classList.add("console", "active");
  return consoleNode;
}
function createConsoleOutput() {
  // Seed the scrollback with a ready marker.
  const node = document.createElement("div");
  node.innerHTML = "[console ready]";
  node.classList.add("output");
  return node;
}
function createConsoleInputForm() {
  // The ">>> " prompt; the text input is appended by the caller.
  const promptForm = document.createElement("form");
  promptForm.innerHTML = "&gt;&gt;&gt; ";
  return promptForm;
}
function createConsoleInput() {
  // Disable every browser text-assist feature for the code input.
  const input = document.createElement("input");
  input.type = "text";
  for (const [attr, value] of [
    ["autocomplete", "off"],
    ["spellcheck", false],
    ["autocapitalize", "off"],
    ["autocorrect", "off"],
  ]) {
    input.setAttribute(attr, value);
  }
  return input;
}
function createIconForConsole() {
  // The icon is served by the debugger itself via the resource command.
  const icon = document.createElement("img");
  icon.setAttribute("src", "?__debugger__=yes&cmd=resource&f=console.png");
  icon.setAttribute("title", "Open an interactive python shell in this frame");
  return icon;
}
function createExpansionButtonForConsole() {
  // An anchor styled as the expander toggle for long output spans.
  const button = document.createElement("a");
  button.setAttribute("href", "#");
  button.setAttribute("class", "toggle");
  button.innerHTML = "&nbsp;&nbsp;";
  return button;
}
function createInteractiveConsole() {
  // Empty the console container, then open a shell against frame 0.
  const target = document.querySelector("div.console div.inner");
  let child;
  while ((child = target.firstChild) !== null) {
    target.removeChild(child);
  }
  openShell(null, target, 0);
}
function handleConsoleSubmit(e, command, frameID) {
  // Prevent page from refreshing.
  e.preventDefault();
  return new Promise((resolve) => {
    // Get input command.
    const cmd = command.value;
    // Setup GET request.
    const urlPath = "";
    const params = {
      __debugger__: "yes",
      cmd: cmd,
      frm: frameID,
      s: SECRET,
    };
    const paramString = Object.keys(params)
      .map((key) => {
        return "&" + encodeURIComponent(key) + "=" + encodeURIComponent(params[key]);
      })
      .join("");
    fetch(urlPath + "?" + paramString)
      .then((res) => {
        return res.text();
      })
      .then((data) => {
        // The server responds with an HTML fragment of the result.
        const tmp = document.createElement("div");
        tmp.innerHTML = data;
        resolve(tmp);
        // Handle expandable span for long list outputs.
        // Example to test: list(range(13))
        let wrapperAdded = false;
        const wrapperSpan = document.createElement("span");
        const expansionButton = createExpansionButtonForConsole();
        tmp.querySelectorAll("span.extended").forEach((spanToWrap) => {
          const parentDiv = spanToWrap.parentNode;
          if (!wrapperAdded) {
            parentDiv.insertBefore(wrapperSpan, spanToWrap);
            wrapperAdded = true;
          }
          // Move the long tail into the wrapper and hide it by default.
          parentDiv.removeChild(spanToWrap);
          wrapperSpan.append(spanToWrap);
          spanToWrap.hidden = true;
          expansionButton.addEventListener("click", () => {
            spanToWrap.hidden = !spanToWrap.hidden;
            expansionButton.classList.toggle("open");
            return false;
          });
        });
        // Add expansion button at end of wrapper.
        if (wrapperAdded) {
          wrapperSpan.append(expansionButton);
        }
      })
      .catch((err) => {
        console.error(err);
      });
    return false;
  });
}
/**
 * Fade `element` to invisible in ~0.1 opacity steps, one animation frame
 * per step, then remove it from layout. Mirrors fadeIn below.
 */
function fadeOut(element) {
  element.style.opacity = 1;
  (function fade() {
    // style.opacity is a string; parse explicitly instead of relying on
    // implicit coercion via `-=` (and never assign an invalid negative
    // opacity on the final step). Matches fadeIn's parseFloat approach.
    const val = parseFloat(element.style.opacity) - 0.1;
    if (val < 0) {
      element.style.display = "none";
    } else {
      element.style.opacity = val;
      requestAnimationFrame(fade);
    }
  })();
}
/**
 * Make `element` visible (using `display`, defaulting to "block") and ramp
 * its opacity from 0 toward 1, one 0.1 step per animation frame.
 */
function fadeIn(element, display) {
  element.style.opacity = 0;
  element.style.display = display || "block";
  (function step() {
    const next = parseFloat(element.style.opacity) + 0.1;
    if (next > 1) {
      return;
    }
    element.style.opacity = next;
    requestAnimationFrame(step);
  })();
}
/**
 * Run `fn` once the DOM is parsed. If the document is already ready, `fn`
 * still fires asynchronously (via setTimeout) for consistent ordering.
 */
function docReady(fn) {
  const state = document.readyState;
  if (state === "complete" || state === "interactive") {
    setTimeout(fn, 1);
  } else {
    document.addEventListener("DOMContentLoaded", fn);
  }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 200 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 818 B

View file

@ -0,0 +1,163 @@
/* Stylesheet for the Werkzeug interactive debugger. All referenced assets
   (font, icons) are served by the debugger itself through the
   ?__debugger__=yes&cmd=resource endpoint, so no external requests occur. */
@font-face {
  font-family: 'Ubuntu';
  font-style: normal;
  font-weight: normal;
  src: local('Ubuntu'), local('Ubuntu-Regular'),
       url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
}
/* Base typography and page layout. */
body, input { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
              'Verdana', sans-serif; color: #000; text-align: center;
              margin: 1em; padding: 0; font-size: 15px; }
h1, h2, h3 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
             'Geneva', 'Verdana', sans-serif; font-weight: normal; }
input { background-color: #fff; margin: 0; text-align: left;
        outline: none !important; }
input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
textarea { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
           monospace; font-size: 14px; }
/* Main debugger page: header, detail box, traceback panels. */
div.debugger { text-align: left; padding: 12px; margin: auto;
               background-color: white; }
h1 { font-size: 36px; margin: 0 0 0.3em 0; }
div.detail { cursor: pointer; }
div.detail p { margin: 0 0 8px 13px; font-size: 14px; white-space: pre-wrap;
               font-family: monospace; }
div.explanation { margin: 20px 13px; font-size: 15px; color: #555; }
div.footer { font-size: 13px; text-align: right; margin: 30px 0;
             color: #86989B; }
h2 { font-size: 16px; margin: 1.3em 0 0.0 0; padding: 9px;
     background-color: #11557C; color: white; }
h2 em, h3 em { font-style: normal; color: #A5D6D9; font-weight: normal; }
div.traceback, div.plain { border: 1px solid #ddd; margin: 0 0 1em 0; padding: 10px; }
div.plain p { margin: 0; }
div.plain textarea,
div.plain pre { margin: 10px 0 0 0; padding: 4px;
                background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.plain textarea { width: 99%; height: 300px; }
div.traceback h3 { font-size: 1em; margin: 0 0 0.8em 0; }
div.traceback ul { list-style: none; margin: 0; padding: 0 0 0 1em; }
div.traceback h4 { font-size: 13px; font-weight: normal; margin: 0.7em 0 0.1em 0; }
div.traceback pre { margin: 0; padding: 5px 0 3px 15px;
                    background-color: #E8EFF0; border: 1px solid #D3E7E9; }
div.traceback .library .current { background: white; color: #555; }
div.traceback .expanded .current { background: #E8EFF0; color: black; }
div.traceback pre:hover { background-color: #DDECEE; color: black; cursor: pointer; }
div.traceback div.source.expanded pre + pre { border-top: none; }
/* Context lines (before/after the failing line) are collapsed by default
   and shown when the frame's source block gets the "expanded" class. */
div.traceback span.ws { display: none; }
div.traceback pre.before, div.traceback pre.after { display: none; background: white; }
div.traceback div.source.expanded pre.before,
div.traceback div.source.expanded pre.after {
  display: block;
}
div.traceback div.source.expanded span.ws {
  display: inline;
}
div.traceback blockquote { margin: 1em 0 0 0; padding: 0; white-space: pre-line; }
div.traceback img { float: right; padding: 2px; margin: -3px 2px 0 0; display: none; }
div.traceback img:hover { background-color: #ddd; cursor: pointer;
                          border-color: #BFDDE0; }
div.traceback pre:hover img { display: block; }
div.traceback cite.filename { font-style: normal; color: #3B666B; }
/* Interactive console and its syntax-highlight classes. */
pre.console { border: 1px solid #ccc; background: white!important;
              color: black; padding: 5px!important;
              margin: 3px 0 0 0!important; cursor: default!important;
              max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
                    width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
                    'Bitstream Vera Sans Mono', monospace; font-size: 14px;
                    border: none!important; }
span.string { color: #30799B; }
span.number { color: #9C1A1C; }
span.help { color: #3A7734; }
span.object { color: #485F6E; }
span.extended { opacity: 0.5; }
span.extended:hover { opacity: 1; }
a.toggle { text-decoration: none; background-repeat: no-repeat;
           background-position: center center;
           background-image: url(?__debugger__=yes&cmd=resource&f=more.png); }
a.toggle:hover { background-color: #444; }
a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
                      border: 1px solid #11557C; padding: 10px;
                      font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
                      'Verdana', sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
                               background: #11557C; color: white; }
pre.console div.traceback pre:hover { cursor: default; background: #E8EFF0; }
pre.console div.traceback pre.syntaxerror { background: inherit; border: none;
                                            margin: 20px -10px -10px -10px;
                                            padding: 10px; border-top: 1px solid #BFDDE0;
                                            background: #E8EFF0; }
pre.console div.noframe-traceback pre.syntaxerror { margin-top: -10px; border: none; }
pre.console div.box pre.repr { padding: 0; margin: 0; background-color: white; border: none; }
pre.console div.box table { margin-top: 6px; }
pre.console div.box pre { border: none; }
pre.console div.box pre.help { background-color: white; }
pre.console div.box pre.help:hover { cursor: default; }
pre.console table tr { vertical-align: top; }
div.console { border: 1px solid #ccc; padding: 4px; background-color: #fafafa; }
/* Wrap long lines in code output across old and new engines alike. */
div.traceback pre, div.console pre {
    white-space: pre-wrap;      /* css-3 should we be so lucky... */
    white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
    white-space: -pre-wrap;     /* Opera 4-6 ?? */
    white-space: -o-pre-wrap;   /* Opera 7 ?? */
    word-wrap: break-word;      /* Internet Explorer 5.5+ */
    _white-space: pre;          /* IE only hack to re-specify in
                                   addition to word-wrap  */
}
/* Full-page overlay asking for the console PIN. */
div.pin-prompt {
    position: absolute;
    display: none;
    top: 0;
    bottom: 0;
    left: 0;
    right: 0;
    background: rgba(255, 255, 255, 0.8);
}
div.pin-prompt .inner {
    background: #eee;
    padding: 10px 50px;
    width: 350px;
    margin: 10% auto 0 auto;
    border: 1px solid #ccc;
    border-radius: 2px;
}
/* Divider between chained exceptions (__cause__ / __context__). */
div.exc-divider {
    margin: 0.7em 0 0 -1em;
    padding: 0.5em;
    background: #11557C;
    color: #ddd;
    border: 1px solid #ddd;
}
.console.active {
    max-height: 0!important;
    display: none;
}
.hidden {
    display: none;
}

Binary file not shown.

View file

@ -0,0 +1,600 @@
import codecs
import inspect
import os
import re
import sys
import sysconfig
import traceback
import typing as t
from html import escape
from tokenize import TokenError
from types import CodeType
from types import TracebackType
from .._internal import _to_str
from ..filesystem import get_filesystem_encoding
from ..utils import cached_property
from .console import Console
_coding_re = re.compile(rb"coding[:=]\s*([-\w.]+)")
_line_re = re.compile(rb"^(.*?)$", re.MULTILINE)
_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
HEADER = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css"
type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does
not accidentally trigger a request to /favicon.ico which might
change the application's state. -->
<link rel="shortcut icon"
href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
<script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
</script>
</head>
<body style="background-color: #fff">
<div class="debugger">
"""
FOOTER = """\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
<div class="pin-prompt">
<div class="inner">
<h3>Console Locked</h3>
<p>
The console is locked and needs to be unlocked by entering the PIN.
You can find the PIN printed out on the standard output of your
shell that runs the server.
<form>
<p>PIN:
<input type=text name=pin size=14>
<input type=submit name=btn value="Confirm Pin">
</form>
</div>
</div>
</body>
</html>
"""
PAGE_HTML = (
HEADER
+ """\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<p>
This is the Copy/Paste friendly version of the traceback.
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
"""
+ FOOTER
+ """
<!--
%(plaintext_cs)s
-->
"""
)
CONSOLE_HTML = (
HEADER
+ """\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
"""
+ FOOTER
)
SUMMARY_HTML = """\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
"""
FRAME_HTML = """\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<div class="source %(library)s">%(lines)s</div>
</div>
"""
SOURCE_LINE_HTML = """\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
"""
def render_console_html(secret: str, evalex_trusted: bool = True) -> str:
    """Render the standalone interactive console page.

    The console page always has code evaluation enabled; ``evalex_trusted``
    tells the frontend whether a PIN prompt is required first. ``secret``
    is embedded for the frontend to authenticate its requests.
    """
    values = dict(
        evalex="true",
        evalex_trusted="true" if evalex_trusted else "false",
        console="true",
        title="Console",
        secret=secret,
        traceback_id=-1,
    )
    return CONSOLE_HTML % values
def get_current_traceback(
    ignore_system_exceptions: bool = False,
    show_hidden_frames: bool = False,
    skip: int = 0,
) -> "Traceback":
    """Get the current exception info as a `Traceback` object.

    Must be called from inside an ``except`` block. If
    ``ignore_system_exceptions`` is true, system-level exceptions
    (``SystemExit``, ``KeyboardInterrupt``, ``GeneratorExit``) are
    re-raised instead of being wrapped. ``skip`` drops that many frames
    from the top of the traceback; unless ``show_hidden_frames`` is true,
    frames marked with ``__traceback_hide__`` are filtered out.
    """
    info = t.cast(
        t.Tuple[t.Type[BaseException], BaseException, TracebackType], sys.exc_info()
    )
    exc_type, exc_value, tb = info
    if ignore_system_exceptions and exc_type in {
        SystemExit,
        KeyboardInterrupt,
        GeneratorExit,
    }:
        # Re-raise the active exception rather than rendering it.
        raise
    # Drop up to ``skip`` leading frames, but never walk past the end.
    for _ in range(skip):
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    tb = Traceback(exc_type, exc_value, tb)
    if not show_hidden_frames:
        tb.filter_hidden_frames()
    return tb
class Line:
    """One source line in the traceback view, plus its rendering flags."""

    __slots__ = ("lineno", "code", "in_frame", "current")

    def __init__(self, lineno: int, code: str) -> None:
        self.lineno = lineno
        self.code = code
        # True when the line belongs to the executing frame's code block.
        self.in_frame = False
        # True when this is the line the traceback points at.
        self.current = False

    @property
    def classes(self) -> t.List[str]:
        """CSS classes reflecting this line's flags."""
        names = ["line"]
        names.extend(
            name
            for flag, name in ((self.in_frame, "in-frame"), (self.current, "current"))
            if flag
        )
        return names

    def render(self) -> str:
        """Render the line as an HTML table row."""
        values = {
            "classes": " ".join(self.classes),
            "lineno": self.lineno,
            "code": escape(self.code),
        }
        return SOURCE_LINE_HTML % values
class Traceback:
    """Wraps a traceback.

    Walks the chain of ``__cause__``/``__context__`` exceptions and wraps
    each one in a :class:`Group`, ordered oldest first.
    """
    def __init__(
        self,
        exc_type: t.Type[BaseException],
        exc_value: BaseException,
        tb: TracebackType,
    ) -> None:
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.tb = tb
        # Qualify the exception name with its module unless it is a builtin.
        exception_type = exc_type.__name__
        if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
            exception_type = f"{exc_type.__module__}.{exception_type}"
        self.exception_type = exception_type
        self.groups = []
        # ``memo`` guards against cycles in the __cause__/__context__ chain.
        memo = set()
        while True:
            self.groups.append(Group(exc_type, exc_value, tb))
            memo.add(id(exc_value))
            # Prefer the explicit cause (``raise ... from ...``) over the
            # implicit context.
            exc_value = exc_value.__cause__ or exc_value.__context__  # type: ignore
            if exc_value is None or id(exc_value) in memo:
                break
            exc_type = type(exc_value)
            tb = exc_value.__traceback__  # type: ignore
        # Oldest exception first, matching Python's own traceback order.
        self.groups.reverse()
        self.frames = [frame for group in self.groups for frame in group.frames]
    def filter_hidden_frames(self) -> None:
        """Remove the frames according to the paste spec."""
        for group in self.groups:
            group.filter_hidden_frames()
        # Rebuild the flattened frame list in place after filtering.
        self.frames[:] = [frame for group in self.groups for frame in group.frames]
    @property
    def is_syntax_error(self) -> bool:
        """Is it a syntax error?"""
        return isinstance(self.exc_value, SyntaxError)
    @property
    def exception(self) -> str:
        """String representation of the final exception."""
        return self.groups[-1].exception
    def log(self, logfile: t.Optional[t.IO[str]] = None) -> None:
        """Log the ASCII traceback into a file object.

        Defaults to ``sys.stderr`` when no file is given.
        """
        if logfile is None:
            logfile = sys.stderr
        tb = f"{self.plaintext.rstrip()}\n"
        logfile.write(tb)
    def render_summary(self, include_title: bool = True) -> str:
        """Render the traceback for the interactive console."""
        title = ""
        classes = ["traceback"]
        if not self.frames:
            classes.append("noframe-traceback")
            frames = []
        else:
            # Only mark library frames when the traceback mixes library and
            # application frames; otherwise the distinction adds nothing.
            library_frames = sum(frame.is_library for frame in self.frames)
            mark_lib = 0 < library_frames < len(self.frames)
            frames = [group.render(mark_lib=mark_lib) for group in self.groups]
        if include_title:
            if self.is_syntax_error:
                title = "Syntax Error"
            else:
                title = "Traceback <em>(most recent call last)</em>:"
        if self.is_syntax_error:
            description = f"<pre class=syntaxerror>{escape(self.exception)}</pre>"
        else:
            description = f"<blockquote>{escape(self.exception)}</blockquote>"
        return SUMMARY_HTML % {
            "classes": " ".join(classes),
            "title": f"<h3>{title if title else ''}</h3>",
            "frames": "\n".join(frames),
            "description": description,
        }
    def render_full(
        self,
        evalex: bool = False,
        secret: t.Optional[str] = None,
        evalex_trusted: bool = True,
    ) -> str:
        """Render the Full HTML page with the traceback info.

        ``secret`` is embedded into the page for the frontend JavaScript;
        the flags control whether code evaluation is offered.
        """
        exc = escape(self.exception)
        return PAGE_HTML % {
            "evalex": "true" if evalex else "false",
            "evalex_trusted": "true" if evalex_trusted else "false",
            "console": "false",
            "title": exc,
            "exception": exc,
            "exception_type": escape(self.exception_type),
            "summary": self.render_summary(include_title=False),
            "plaintext": escape(self.plaintext),
            # Collapse runs of dashes so the plaintext cannot terminate the
            # surrounding HTML comment early.
            "plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
            "traceback_id": self.id,
            "secret": secret,
        }
    @cached_property
    def plaintext(self) -> str:
        # Plain-text rendering of all groups, computed once.
        return "\n".join([group.render_text() for group in self.groups])
    @property
    def id(self) -> int:
        # The object's identity doubles as the traceback id used by the
        # frontend to address this traceback.
        return id(self)
class Group:
    """A group of frames for an exception in a traceback. If the
    exception has a ``__cause__`` or ``__context__``, there are multiple
    exception groups.
    """
    def __init__(
        self,
        exc_type: t.Type[BaseException],
        exc_value: BaseException,
        tb: TracebackType,
    ) -> None:
        self.exc_type = exc_type
        self.exc_value = exc_value
        # Chaining banner shown before this group, if it follows another
        # exception in the chain.
        self.info = None
        if exc_value.__cause__ is not None:
            self.info = (
                "The above exception was the direct cause of the following exception"
            )
        elif exc_value.__context__ is not None:
            self.info = (
                "During handling of the above exception, another exception occurred"
            )
        self.frames = []
        while tb is not None:
            self.frames.append(Frame(exc_type, exc_value, tb))
            tb = tb.tb_next  # type: ignore
    def filter_hidden_frames(self) -> None:
        """Remove frames marked hidden via the paste ``__traceback_hide__``
        spec. Marker values: ``before``/``before_and_this`` drop everything
        collected so far, ``after``/``after_and_this`` hide everything that
        follows, ``reset``/``reset_and_this`` stop hiding, and any other
        truthy value hides just that frame.
        """
        # An exception may not have a traceback to filter frames, such
        # as one re-raised from ProcessPoolExecutor.
        if not self.frames:
            return
        new_frames: t.List[Frame] = []
        hidden = False
        for frame in self.frames:
            hide = frame.hide
            if hide in ("before", "before_and_this"):
                new_frames = []
                hidden = False
                if hide == "before_and_this":
                    continue
            elif hide in ("reset", "reset_and_this"):
                hidden = False
                if hide == "reset_and_this":
                    continue
            elif hide in ("after", "after_and_this"):
                hidden = True
                if hide == "after_and_this":
                    continue
            elif hide or hidden:
                continue
            new_frames.append(frame)
        # if we only have one frame and that frame is from the codeop
        # module, remove it.
        if len(new_frames) == 1 and self.frames[0].module == "codeop":
            del self.frames[:]
        # if the last frame is missing something went terrible wrong :(
        elif self.frames[-1] in new_frames:
            self.frames[:] = new_frames
    @property
    def exception(self) -> str:
        """String representation of the exception."""
        buf = traceback.format_exception_only(self.exc_type, self.exc_value)
        rv = "".join(buf).strip()
        return _to_str(rv, "utf-8", "replace")
    def render(self, mark_lib: bool = True) -> str:
        # HTML rendering: an optional chaining divider followed by one <li>
        # per frame.
        out = []
        if self.info is not None:
            out.append(f'<li><div class="exc-divider">{self.info}:</div>')
        for frame in self.frames:
            title = f' title="{escape(frame.info)}"' if frame.info else ""
            out.append(f"<li{title}>{frame.render(mark_lib=mark_lib)}")
        return "\n".join(out)
    def render_text(self) -> str:
        # Plain-text rendering mirroring Python's standard traceback output.
        out = []
        if self.info is not None:
            out.append(f"\n{self.info}:\n")
        out.append("Traceback (most recent call last):")
        for frame in self.frames:
            out.append(frame.render_text())
        out.append(self.exception)
        return "\n".join(out)
class Frame:
    """A single frame in a traceback."""
    def __init__(
        self,
        exc_type: t.Type[BaseException],
        exc_value: BaseException,
        tb: TracebackType,
    ) -> None:
        self.lineno = tb.tb_lineno
        self.function_name = tb.tb_frame.f_code.co_name
        self.locals = tb.tb_frame.f_locals
        self.globals = tb.tb_frame.f_globals
        fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
        # Map compiled file names (.pyc/.pyo) back to the .py source file.
        if fn[-4:] in (".pyo", ".pyc"):
            fn = fn[:-1]
        # if it's a file on the file system resolve the real filename.
        if os.path.isfile(fn):
            fn = os.path.realpath(fn)
        self.filename = _to_str(fn, get_filesystem_encoding())
        self.module = self.globals.get("__name__", self.locals.get("__name__"))
        self.loader = self.globals.get("__loader__", self.locals.get("__loader__"))
        self.code = tb.tb_frame.f_code
        # support for paste's traceback extensions
        self.hide = self.locals.get("__traceback_hide__", False)
        info = self.locals.get("__traceback_info__")
        if info is not None:
            info = _to_str(info, "utf-8", "replace")
        self.info = info
    def render(self, mark_lib: bool = True) -> str:
        """Render a single frame in a traceback."""
        return FRAME_HTML % {
            "id": self.id,
            "filename": escape(self.filename),
            "lineno": self.lineno,
            "function_name": escape(self.function_name),
            "lines": self.render_line_context(),
            "library": "library" if mark_lib and self.is_library else "",
        }
    @cached_property
    def is_library(self) -> bool:
        # A frame is "library" if its file lives under any interpreter
        # install path (stdlib, site-packages, ...).
        return any(
            self.filename.startswith(os.path.realpath(path))
            for path in sysconfig.get_paths().values()
        )
    def render_text(self) -> str:
        # Plain-text frame line matching Python's standard traceback format.
        return (
            f'  File "{self.filename}", line {self.lineno}, in {self.function_name}\n'
            f"    {self.current_line.strip()}"
        )
    def render_line_context(self) -> str:
        """Render the current line plus surrounding context as HTML."""
        before, current, after = self.get_context_lines()
        rv = []
        def render_line(line: str, cls: str) -> None:
            # Preserve leading whitespace in a span so expanded context
            # keeps its indentation.
            line = line.expandtabs().rstrip()
            stripped_line = line.strip()
            prefix = len(line) - len(stripped_line)
            rv.append(
                f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
                f"{escape(stripped_line) if stripped_line else ' '}</pre>"
            )
        for line in before:
            render_line(line, "before")
        render_line(current, "current")
        for line in after:
            render_line(line, "after")
        return "\n".join(rv)
    def get_annotated_lines(self) -> t.List[Line]:
        """Helper function that returns lines with extra information."""
        lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
        # find function definition and mark lines
        if hasattr(self.code, "co_firstlineno"):
            # Walk backwards from the code object's first line to the
            # def/lambda/decorator that starts the enclosing block.
            lineno = self.code.co_firstlineno - 1
            while lineno > 0:
                if _funcdef_re.match(lines[lineno].code):
                    break
                lineno -= 1
            try:
                offset = len(inspect.getblock([f"{x.code}\n" for x in lines[lineno:]]))
            except TokenError:
                offset = 0
            for line in lines[lineno : lineno + offset]:
                line.in_frame = True
        # mark current line
        try:
            lines[self.lineno - 1].current = True
        except IndexError:
            pass
        return lines
    def eval(self, code: t.Union[str, CodeType], mode: str = "single") -> t.Any:
        """Evaluate code in the context of the frame.

        Used by the interactive debugger console; executes arbitrary code
        with the frame's globals and locals.
        """
        if isinstance(code, str):
            code = compile(code, "<interactive>", mode)
        return eval(code, self.globals, self.locals)
    @cached_property
    def sourcelines(self) -> t.List[str]:
        """The sourcecode of the file as list of strings."""
        # get sourcecode from loader or file
        source = None
        if self.loader is not None:
            try:
                if hasattr(self.loader, "get_source"):
                    source = self.loader.get_source(self.module)
                elif hasattr(self.loader, "get_source_by_code"):
                    source = self.loader.get_source_by_code(self.code)
            except Exception:
                # we munch the exception so that we don't cause troubles
                # if the loader is broken.
                pass
        if source is None:
            try:
                with open(self.filename, mode="rb") as f:
                    source = f.read()
            except OSError:
                # No source available; render an empty context instead.
                return []
        # already str?  return right away
        if isinstance(source, str):
            return source.splitlines()
        # Detect the encoding: UTF-8 BOM first, then a PEP 263 coding
        # cookie in the first two lines, falling back to UTF-8.
        charset = "utf-8"
        if source.startswith(codecs.BOM_UTF8):
            source = source[3:]
        else:
            for idx, match in enumerate(_line_re.finditer(source)):
                coding_match = _coding_re.search(match.group())
                if coding_match is not None:
                    charset = coding_match.group(1).decode("utf-8")
                    break
                if idx > 1:
                    break
        # on broken cookies we fall back to utf-8 too
        charset = _to_str(charset)
        try:
            codecs.lookup(charset)
        except LookupError:
            charset = "utf-8"
        return source.decode(charset, "replace").splitlines()
    def get_context_lines(
        self, context: int = 5
    ) -> t.Tuple[t.List[str], str, t.List[str]]:
        # Returns (lines before, current line, lines after); ``context``
        # bounds each side.
        before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
        past = self.sourcelines[self.lineno : self.lineno + context]
        return (before, self.current_line, past)
    @property
    def current_line(self) -> str:
        try:
            return self.sourcelines[self.lineno - 1]
        except IndexError:
            # Source may be shorter than the traceback claims (edited file).
            return ""
    @cached_property
    def console(self) -> Console:
        # Interactive console bound to this frame's namespace.
        return Console(self.globals, self.locals)
    @property
    def id(self) -> int:
        # The object's identity doubles as the frame id used by the frontend.
        return id(self)

View file

@ -0,0 +1,944 @@
"""Implements a number of Python exceptions which can be raised from within
a view to trigger a standard HTTP non-200 response.
Usage Example
-------------
.. code-block:: python
from werkzeug.wrappers.request import Request
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@Request.application
def application(request):
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. However, they are not Werkzeug response objects. You
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you may have to pass an environ (WSGI) or scope
(ASGI) to ``get_response()`` because some errors fetch additional
information relating to the request.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error:
.. code-block:: python
@Request.application
def application(request):
try:
return view(request)
except NotFound as e:
return not_found(request)
except HTTPException as e:
return e
"""
import sys
import typing as t
import warnings
from datetime import datetime
from html import escape
from ._internal import _get_environ
if t.TYPE_CHECKING:
import typing_extensions as te
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
from .datastructures import WWWAuthenticate
from .sansio.response import Response
from .wrappers.request import Request as WSGIRequest # noqa: F401
from .wrappers.response import Response as WSGIResponse # noqa: F401
class HTTPException(Exception):
    """The base class for all HTTP exceptions. This exception can be called as a WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """
    #: HTTP status code; ``None`` on the base class, set by subclasses.
    code: t.Optional[int] = None
    #: Default body text; subclasses override, instances may pass their own.
    description: t.Optional[str] = None
    def __init__(
        self,
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
    ) -> None:
        super().__init__()
        if description is not None:
            self.description = description
        # An explicit response takes precedence in get_response().
        self.response = response
    @classmethod
    def wrap(
        cls, exception: t.Type[BaseException], name: t.Optional[str] = None
    ) -> t.Type["HTTPException"]:
        """Create an exception that is a subclass of the calling HTTP
        exception and the ``exception`` argument.

        The first argument to the class will be passed to the
        wrapped ``exception``, the rest to the HTTP exception. If
        ``e.args`` is not empty and ``e.show_exception`` is ``True``,
        the wrapped exception message is added to the HTTP error
        description.

        .. deprecated:: 2.0
            Will be removed in Werkzeug 2.1. Create a subclass manually
            instead.

        .. versionchanged:: 0.15.5
            The ``show_exception`` attribute controls whether the
            description includes the wrapped exception message.

        .. versionchanged:: 0.15.0
            The description includes the wrapped exception message.
        """
        warnings.warn(
            "'HTTPException.wrap' is deprecated and will be removed in"
            " Werkzeug 2.1. Create a subclass manually instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        # Dynamically built subclass of both the HTTP exception and the
        # wrapped exception type.
        class newcls(cls, exception):  # type: ignore
            _description = cls.description
            show_exception = False
            def __init__(
                self, arg: t.Optional[t.Any] = None, *args: t.Any, **kwargs: t.Any
            ) -> None:
                super().__init__(*args, **kwargs)
                # The first positional argument feeds the wrapped exception.
                if arg is None:
                    exception.__init__(self)
                else:
                    exception.__init__(self, arg)
            @property
            def description(self) -> str:
                # Optionally append the wrapped exception's message.
                if self.show_exception:
                    return (
                        f"{self._description}\n"
                        f"{exception.__name__}: {exception.__str__(self)}"
                    )
                return self._description  # type: ignore
            @description.setter
            def description(self, value: str) -> None:
                self._description = value
        # Make the generated class appear to come from the caller's module.
        newcls.__module__ = sys._getframe(1).f_globals["__name__"]
        name = name or cls.__name__ + exception.__name__
        newcls.__name__ = newcls.__qualname__ = name
        return newcls
    @property
    def name(self) -> str:
        """The status name."""
        from .http import HTTP_STATUS_CODES
        return HTTP_STATUS_CODES.get(self.code, "Unknown Error")  # type: ignore
    def get_description(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> str:
        """Get the description."""
        if self.description is None:
            description = ""
        elif not isinstance(self.description, str):
            description = str(self.description)
        else:
            description = self.description
        # Escape for HTML, preserving line breaks.
        description = escape(description).replace("\n", "<br>")
        return f"<p>{description}</p>"
    def get_body(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> str:
        """Get the HTML body."""
        return (
            '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            f"<title>{self.code} {escape(self.name)}</title>\n"
            f"<h1>{escape(self.name)}</h1>\n"
            # NOTE(review): ``scope`` is accepted but not forwarded to
            # get_description here — confirm whether that is intentional.
            f"{self.get_description(environ)}\n"
        )
    def get_headers(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> t.List[t.Tuple[str, str]]:
        """Get a list of headers."""
        return [("Content-Type", "text/html; charset=utf-8")]
    def get_response(
        self,
        environ: t.Optional[t.Union["WSGIEnvironment", "WSGIRequest"]] = None,
        scope: t.Optional[dict] = None,
    ) -> "Response":
        """Get a response object.  If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request.  This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        from .wrappers.response import Response as WSGIResponse  # noqa: F811
        if self.response is not None:
            return self.response
        if environ is not None:
            # Accepts either a WSGI environ or a Request object.
            environ = _get_environ(environ)
        headers = self.get_headers(environ, scope)
        return WSGIResponse(self.get_body(environ, scope), self.code, headers)
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = t.cast("WSGIResponse", self.get_response(environ))
        return response(environ, start_response)
    def __str__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"{code} {self.name}: {self.description}"
    def __repr__(self) -> str:
        code = self.code if self.code is not None else "???"
        return f"<{type(self).__name__} '{code}: {self.name}'>"
class BadRequest(HTTPException):
    """*400* `Bad Request`

    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        "The browser (or proxy) sent a request that this server could "
        "not understand."
    )
class BadRequestKeyError(BadRequest, KeyError):
    """An exception that is used to signal both a :exc:`KeyError` and a
    :exc:`BadRequest`. Used by many of the datastructures.
    """
    _description = BadRequest.description
    #: Show the KeyError along with the HTTP error message in the
    #: response. This should be disabled in production, but can be
    #: useful in a debug mode.
    show_exception = False
    def __init__(self, arg: t.Optional[str] = None, *args: t.Any, **kwargs: t.Any):
        super().__init__(*args, **kwargs)
        # The first positional argument is the missing key, fed to KeyError.
        if arg is None:
            KeyError.__init__(self)
        else:
            KeyError.__init__(self, arg)
    @property  # type: ignore
    def description(self) -> str:  # type: ignore
        # Optionally append the KeyError message to the HTTP description.
        if self.show_exception:
            return (
                f"{self._description}\n"
                f"{KeyError.__name__}: {KeyError.__str__(self)}"
            )
        return self._description
    @description.setter
    def description(self, value: str) -> None:
        self._description = value
class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.

    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent this might or might not be raised if a client
    is gone.

    .. versionadded:: 0.8
    """
class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.

    .. versionadded:: 0.9
    """
class BadHost(BadRequest):
    """Raised if the submitted host is badly formatted.

    .. versionadded:: 0.11.2
    """
class Unauthorized(HTTPException):
    """*401* ``Unauthorized``

    Raise if the user is not authorized to access a resource.

    The ``www_authenticate`` argument should be used to set the
    ``WWW-Authenticate`` header. This is used for HTTP basic auth and
    other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate`
    to create correctly formatted values. Strictly speaking a 401
    response is invalid if it doesn't provide at least one value for
    this header, although real clients typically don't care.

    :param description: Override the default message used for the body
        of the response.
    :param www-authenticate: A single value, or list of values, for the
        WWW-Authenticate header(s).

    .. versionchanged:: 2.0
        Serialize multiple ``www_authenticate`` items into multiple
        ``WWW-Authenticate`` headers, rather than joining them
        into a single value, for better interoperability.

    .. versionchanged:: 0.15.3
        If the ``www_authenticate`` argument is not set, the
        ``WWW-Authenticate`` header is not set.

    .. versionchanged:: 0.15.3
        The ``response`` argument was restored.

    .. versionchanged:: 0.15.1
        ``description`` was moved back as the first argument, restoring
         its previous position.

    .. versionchanged:: 0.15.0
        ``www_authenticate`` was added as the first argument, ahead of
        ``description``.
    """
    code = 401
    description = (
        "The server could not verify that you are authorized to access"
        " the URL requested. You either supplied the wrong credentials"
        " (e.g. a bad password), or your browser doesn't understand"
        " how to supply the credentials required."
    )
    def __init__(
        self,
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
        www_authenticate: t.Optional[
            t.Union["WWWAuthenticate", t.Iterable["WWWAuthenticate"]]
        ] = None,
    ) -> None:
        super().__init__(description, response)
        from .datastructures import WWWAuthenticate
        # Normalize a single value to a one-element tuple.
        if isinstance(www_authenticate, WWWAuthenticate):
            www_authenticate = (www_authenticate,)
        self.www_authenticate = www_authenticate
    def get_headers(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> t.List[t.Tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        # One WWW-Authenticate header per value, for interoperability.
        if self.www_authenticate:
            headers.extend(("WWW-Authenticate", str(x)) for x in self.www_authenticate)
        return headers
class Forbidden(HTTPException):
    """*403* `Forbidden`

    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """

    code = 403
    description = (
        "You don't have the permission to access the requested"
        " resource. It is either read-protected or not readable by the"
        " server."
    )


class NotFound(HTTPException):
    """*404* `Not Found`

    Raise if a resource does not exist and never existed.
    """

    code = 404
    description = (
        "The requested URL was not found on the server. If you entered"
        " the URL manually please check your spelling and try again."
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`

    Raise when the resource does not handle the HTTP method used, for
    example ``POST`` on a view-only resource.  Especially useful for
    REST.  The first argument should be the list of methods the resource
    does allow; strictly speaking the response is invalid without a
    valid ``Allow`` header, which is built from that list.
    """

    code = 405
    description = "The method is not allowed for the requested URL."

    def __init__(
        self,
        valid_methods: t.Optional[t.Iterable[str]] = None,
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
    ) -> None:
        """Takes an optional list of valid http methods
        starting with werkzeug 0.3 the list will be mandatory."""
        super().__init__(description=description, response=response)
        self.valid_methods = valid_methods

    def get_headers(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> t.List[t.Tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        methods = self.valid_methods

        if methods:
            # RFC 7231 requires the Allow header on a 405 response.
            headers.append(("Allow", ", ".join(methods)))

        return headers
class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`

    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """

    code = 406
    description = (
        "The resource identified by the request is only capable of"
        " generating response entities which have content"
        " characteristics not acceptable according to the accept"
        " headers sent in the request."
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`

    Raise to signalize a timeout.
    """

    code = 408
    description = (
        "The server closed the network connection because the browser"
        " didn't finish the request within the specified time."
    )


class Conflict(HTTPException):
    """*409* `Conflict`

    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.

    .. versionadded:: 0.7
    """

    code = 409
    description = (
        "A conflict happened while processing the request. The"
        " resource might have been modified while the request was being"
        " processed."
    )
class Gone(HTTPException):
    """*410* `Gone`

    Raise if a resource existed previously and went away without new location.
    """

    code = 410
    description = (
        "The requested URL is no longer available on this server and"
        " there is no forwarding address. If you followed a link from a"
        " foreign page, please contact the author of this page."
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`

    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """

    code = 411
    description = (
        "A request with this method requires a valid <code>Content-"
        "Length</code> header."
    )
class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`

    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """

    code = 412
    description = (
        "The precondition on the request for the URL failed positive evaluation."
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`

    The status code one should return if the data submitted exceeded a given
    limit.
    """

    code = 413
    description = "The data value transmitted exceeds the capacity limit."


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`

    Like *413* but for too long URLs.
    """

    code = 414
    description = (
        "The length of the requested URL exceeds the capacity limit for"
        " this server. The request cannot be processed."
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`

    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """

    code = 415
    description = (
        "The server does not support the media type transmitted in the request."
    )
class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`

    The client asked for an invalid part of the file.

    .. versionadded:: 0.7
    """

    code = 416
    description = "The server cannot provide the requested range."

    def __init__(
        self,
        length: t.Optional[int] = None,
        units: str = "bytes",
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
    ) -> None:
        """Takes an optional `Content-Range` header value based on ``length``
        parameter.
        """
        super().__init__(description=description, response=response)
        self.length = length
        self.units = units

    def get_headers(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> t.List[t.Tuple[str, str]]:
        headers = super().get_headers(environ, scope)

        if self.length is None:
            return headers

        # e.g. "Content-Range: bytes */1234" — the total size without a
        # satisfiable range, per RFC 7233.
        headers.append(("Content-Range", f"{self.units} */{self.length}"))
        return headers
class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`

    The server cannot meet the requirements of the Expect request-header.

    .. versionadded:: 0.7
    """

    code = 417
    description = "The server could not meet the requirements of the Expect header"


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`

    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.

    .. versionadded:: 0.7
    """

    code = 418
    description = "This server is a teapot, not a coffee machine"


class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`

    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """

    code = 422
    description = (
        "The request was well-formed but was unable to be followed due"
        " to semantic errors."
    )


class Locked(HTTPException):
    """*423* `Locked`

    Used if the resource that is being accessed is locked.
    """

    code = 423
    description = "The resource that is being accessed is locked."


class FailedDependency(HTTPException):
    """*424* `Failed Dependency`

    Used if the method could not be performed on the resource
    because the requested action depended on another action and that action failed.
    """

    code = 424
    description = (
        "The method could not be performed on the resource because the"
        " requested action depended on another action and that action"
        " failed."
    )


class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`

    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """

    code = 428
    description = (
        "This request is required to be conditional; try using"
        ' "If-Match" or "If-Unmodified-Since".'
    )
class _RetryAfter(HTTPException):
    """Shared base for errors that may carry a ``Retry-After`` header.

    ``retry_after`` may be an :class:`int` number of seconds or a
    :class:`~datetime.datetime` after which the client should retry; it
    is serialized into the ``Retry-After`` response header.
    """

    def __init__(
        self,
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
        retry_after: t.Optional[t.Union[datetime, int]] = None,
    ) -> None:
        super().__init__(description, response)
        self.retry_after = retry_after

    def get_headers(
        self,
        environ: t.Optional["WSGIEnvironment"] = None,
        scope: t.Optional[dict] = None,
    ) -> t.List[t.Tuple[str, str]]:
        headers = super().get_headers(environ, scope)
        retry = self.retry_after

        if not retry:
            return headers

        if isinstance(retry, datetime):
            # An absolute time is serialized as an HTTP date.
            from .http import http_date

            value = http_date(retry)
        else:
            # A delay in seconds is sent as a plain integer string.
            value = str(retry)

        headers.append(("Retry-After", value))
        return headers
class TooManyRequests(_RetryAfter):
    """*429* `Too Many Requests`

    The server is limiting the rate at which this user receives
    responses, and this request exceeds that rate. (The server may use
    any convenient method to identify users and their request rates).
    The server may include a "Retry-After" header to indicate how long
    the user should wait before retrying.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 429
    description = "This user has exceeded an allotted request count. Try again later."


class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`

    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """

    code = 431
    description = "One or more header fields exceeds the maximum size."


class UnavailableForLegalReasons(HTTPException):
    """*451* `Unavailable For Legal Reasons`

    This status code indicates that the server is denying access to the
    resource as a consequence of a legal demand.
    """

    code = 451
    description = "Unavailable for legal reasons."
class InternalServerError(HTTPException):
    """*500* `Internal Server Error`

    Raise if an internal server error occurred.  This is a good fallback
    when an unknown error occurred in the dispatcher.

    .. versionchanged:: 1.0.0
        Added the :attr:`original_exception` attribute.
    """

    code = 500
    description = (
        "The server encountered an internal error and was unable to"
        " complete your request. Either the server is overloaded or"
        " there is an error in the application."
    )

    def __init__(
        self,
        description: t.Optional[str] = None,
        response: t.Optional["Response"] = None,
        original_exception: t.Optional[BaseException] = None,
    ) -> None:
        #: The original exception that caused this 500 error. Can be
        #: used by frameworks to provide context when handling
        #: unexpected errors.
        self.original_exception = original_exception

        super().__init__(description=description, response=response)
# NOTE: this class intentionally shadows the ``NotImplemented`` builtin;
# the name is part of the public API and cannot be changed.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`

    Raise if the application does not support the action requested by the
    browser.
    """

    code = 501
    description = "The server does not support the action requested by the browser."


class BadGateway(HTTPException):
    """*502* `Bad Gateway`

    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """

    code = 502
    description = (
        "The proxy server received an invalid response from an upstream server."
    )


class ServiceUnavailable(_RetryAfter):
    """*503* `Service Unavailable`

    Status code you should return if a service is temporarily
    unavailable.

    :param retry_after: If given, set the ``Retry-After`` header to this
        value. May be an :class:`int` number of seconds or a
        :class:`~datetime.datetime`.

    .. versionchanged:: 1.0
        Added ``retry_after`` parameter.
    """

    code = 503
    description = (
        "The server is temporarily unable to service your request due"
        " to maintenance downtime or capacity problems. Please try"
        " again later."
    )


class GatewayTimeout(HTTPException):
    """*504* `Gateway Timeout`

    Status code you should return if a connection to an upstream server
    times out.
    """

    code = 504
    description = "The connection to an upstream server timed out."


class HTTPVersionNotSupported(HTTPException):
    """*505* `HTTP Version Not Supported`

    The server does not support the HTTP protocol version used in the request.
    """

    code = 505
    description = (
        "The server does not support the HTTP protocol version used in the request."
    )
#: Mapping of status code -> most general HTTPException subclass for it,
#: populated once at import time by :func:`_find_exceptions` below.
default_exceptions: t.Dict[int, t.Type[HTTPException]] = {}


def _find_exceptions() -> None:
    """Collect every ``HTTPException`` subclass defined in this module
    into :data:`default_exceptions`, keyed by status code.  When several
    classes share a code, the base-most (least derived) one wins.
    """
    for value in globals().values():
        try:
            if not issubclass(value, HTTPException):
                continue
        except TypeError:
            # Non-class module members cannot be subclasses.
            continue

        if value.code is None:
            continue

        existing = default_exceptions.get(value.code, None)

        if existing is not None and issubclass(value, existing):
            # A more general class is already registered for this code.
            continue

        default_exceptions[value.code] = value


_find_exceptions()
del _find_exceptions
class Aborter:
    """Callable that raises HTTP exceptions by status code.

    Given a dict of ``code -> exception`` items it can be used as a
    callable that raises exceptions: an integer argument is looked up in
    the mapping, while a WSGI application is wrapped in a proxy
    exception and raised.  Remaining arguments are forwarded to the
    exception constructor.
    """

    def __init__(
        self,
        mapping: t.Optional[t.Dict[int, t.Type[HTTPException]]] = None,
        extra: t.Optional[t.Dict[int, t.Type[HTTPException]]] = None,
    ) -> None:
        base = default_exceptions if mapping is None else mapping
        # Copy so later updates never mutate the caller's dict (or the
        # module-level default table).
        self.mapping = dict(base)

        if extra is not None:
            self.mapping.update(extra)

    def __call__(
        self, code: t.Union[int, "Response"], *args: t.Any, **kwargs: t.Any
    ) -> "te.NoReturn":
        from .sansio.response import Response

        if isinstance(code, Response):
            # A full response is raised as-is inside a proxy exception.
            raise HTTPException(response=code)

        if code not in self.mapping:
            raise LookupError(f"no exception for {code!r}")

        raise self.mapping[code](*args, **kwargs)
def abort(
    status: t.Union[int, "Response"], *args: t.Any, **kwargs: t.Any
) -> "te.NoReturn":
    """Raise an :py:exc:`HTTPException` for the given status code or WSGI
    application.

    An integer status is looked up in the table of known exceptions and
    the matching exception is raised.  A WSGI application is wrapped in
    a proxy WSGI exception which is then raised::

        abort(404)  # 404 Not Found
        abort(Response('Hello World'))
    """
    _aborter(status, *args, **kwargs)


_aborter: Aborter = Aborter()

View file

@ -0,0 +1,55 @@
import codecs
import sys
import typing as t
import warnings
# We do not trust traditional unixes.
#: True on platforms (Linux, *BSD) where the filesystem encoding is
#: historically unreliable and may need the UTF-8 fallback below.
has_likely_buggy_unicode_filesystem = (
    sys.platform.startswith("linux") or "bsd" in sys.platform
)
def _is_ascii_encoding(encoding: t.Optional[str]) -> bool:
"""Given an encoding this figures out if the encoding is actually ASCII (which
is something we don't actually want in most cases). This is necessary
because ASCII comes under many names such as ANSI_X3.4-1968.
"""
if encoding is None:
return False
try:
return codecs.lookup(encoding).name == "ascii"
except LookupError:
return False
class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
    """The warning used by Werkzeug to signal a broken filesystem. Will only be
    used once per runtime."""


# Module-level latch so the warning above fires at most once per process.
_warned_about_filesystem_encoding = False
def get_filesystem_encoding() -> str:
    """Return the filesystem encoding that should be used.

    Note that this is different from the Python understanding of the
    filesystem encoding, which might be deeply flawed.  Do not use this
    value against Python's string APIs because it might be different.
    See :ref:`filesystem-encoding` for the exact behavior.

    The concept of a filesystem encoding in general is not something you
    should rely on; reconsider if you need this for anything but wrapper
    code.
    """
    global _warned_about_filesystem_encoding

    rv = sys.getfilesystemencoding()

    # NOTE(review): parentheses make the original operator precedence
    # explicit — a missing encoding only counts as broken on the suspect
    # unixes, while an ASCII encoding triggers the fallback on every
    # platform.
    if (has_likely_buggy_unicode_filesystem and not rv) or _is_ascii_encoding(rv):
        if not _warned_about_filesystem_encoding:
            warnings.warn(
                "Detected a misconfigured UNIX filesystem: Will use"
                f" UTF-8 as filesystem encoding instead of {rv!r}",
                BrokenFilesystemWarning,
            )
            _warned_about_filesystem_encoding = True

        return "utf-8"

    return rv

View file

@ -0,0 +1,495 @@
import typing as t
import warnings
from functools import update_wrapper
from io import BytesIO
from itertools import chain
from typing import Union
from . import exceptions
from ._internal import _to_str
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
from .http import parse_options_header
from .sansio.multipart import Data
from .sansio.multipart import Epilogue
from .sansio.multipart import Field
from .sansio.multipart import File
from .sansio.multipart import MultipartDecoder
from .sansio.multipart import NeedData
from .urls import url_decode_stream
from .wsgi import _make_chunk_iter
from .wsgi import get_content_length
from .wsgi import get_input_stream
# there are some platforms where SpooledTemporaryFile is not available.
# In that case we need to provide a fallback.
try:
from tempfile import SpooledTemporaryFile
except ImportError:
from tempfile import TemporaryFile
SpooledTemporaryFile = None # type: ignore
if t.TYPE_CHECKING:
import typing as te
from _typeshed.wsgi import WSGIEnvironment
t_parse_result = t.Tuple[t.IO[bytes], MultiDict, MultiDict]
class TStreamFactory(te.Protocol):
def __call__(
self,
total_content_length: t.Optional[int],
content_type: t.Optional[str],
filename: t.Optional[str],
content_length: t.Optional[int] = None,
) -> t.IO[bytes]:
...
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def _exhaust(stream: t.IO[bytes]) -> None:
bts = stream.read(64 * 1024)
while bts:
bts = stream.read(64 * 1024)
def default_stream_factory(
    total_content_length: t.Optional[int],
    content_type: t.Optional[str],
    filename: t.Optional[str],
    content_length: t.Optional[int] = None,
) -> t.IO[bytes]:
    """Default factory for the streams form-data parts are written to.

    Prefers :class:`~tempfile.SpooledTemporaryFile`, which keeps small
    payloads in memory and spills to disk past ``max_size``.  On
    platforms without it, payloads of unknown or large size go to a
    plain temporary file and small ones to :class:`~io.BytesIO`.
    """
    max_size = 1024 * 500

    if SpooledTemporaryFile is not None:
        return t.cast(t.IO[bytes], SpooledTemporaryFile(max_size=max_size, mode="rb+"))

    if total_content_length is None or total_content_length > max_size:
        return t.cast(t.IO[bytes], TemporaryFile("rb+"))

    return BytesIO()
def parse_form_data(
    environ: "WSGIEnvironment",
    stream_factory: t.Optional["TStreamFactory"] = None,
    charset: str = "utf-8",
    errors: str = "replace",
    max_form_memory_size: t.Optional[int] = None,
    max_content_length: t.Optional[int] = None,
    cls: t.Optional[t.Type[MultiDict]] = None,
    silent: bool = True,
) -> "t_parse_result":
    """Parse the form data in *environ* and return it as a
    ``(stream, form, files)`` tuple.  Only call this for ``POST``,
    ``PUT`` or ``PATCH`` requests.

    For ``multipart/form-data`` bodies the files multidict is filled
    with `FileStorage` objects.  For an unknown mimetype the input
    stream is wrapped and returned as the first element, otherwise the
    returned stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`;
    see :doc:`/request_data` for details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read
        and writeable file descriptor.  Works the same as
        :meth:`Response._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes accepted
        for in-memory stored form data; exceeding it raises
        :exc:`~exceptions.RequestEntityTooLarge`.
    :param max_content_length: if the transmitted data is longer than
        this value, :exc:`~exceptions.RequestEntityTooLarge` is raised.
    :param cls: an optional dict class to use, defaulting to
        :class:`MultiDict`.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(
        stream_factory,
        charset,
        errors,
        max_form_memory_size,
        max_content_length,
        cls,
        silent,
    )
    return parser.parse_from_environ(environ)
def exhaust_stream(f: F) -> F:
    """Decorator ensuring the wrapped parse method drains its stream
    before returning, even when parsing raises."""

    def wrapper(self, stream, *args, **kwargs):  # type: ignore
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            # Prefer the stream's own fast-path exhaust() if it has one.
            exhaust = getattr(stream, "exhaust", None)

            if exhaust is not None:
                exhaust()
            else:
                while stream.read(1024 * 64):
                    pass

    return update_wrapper(t.cast(F, wrapper), f)
class FormDataParser:
    """This class implements parsing of form data for Werkzeug. By itself
    it can parse multipart and url encoded form data. It can be subclassed
    and extended but for most mimetypes it is a better idea to use the
    untouched stream and expose it as separate attributes on a request
    object.

    .. versionadded:: 0.8

    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor. This callable works
                           the same as :meth:`Response._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data. If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                               is longer than this value an
                               :exc:`~exceptions.RequestEntityTooLarge`
                               exception is raised.
    :param cls: an optional dict class to use. If this is not specified
                       or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    """

    def __init__(
        self,
        stream_factory: t.Optional["TStreamFactory"] = None,
        charset: str = "utf-8",
        errors: str = "replace",
        max_form_memory_size: t.Optional[int] = None,
        max_content_length: t.Optional[int] = None,
        cls: t.Optional[t.Type[MultiDict]] = None,
        silent: bool = True,
    ) -> None:
        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size
        self.max_content_length = max_content_length

        if cls is None:
            cls = MultiDict

        self.cls = cls
        self.silent = silent

    def get_parse_func(
        self, mimetype: str, options: t.Dict[str, str]
    ) -> t.Optional[
        t.Callable[
            ["FormDataParser", t.IO[bytes], str, t.Optional[int], t.Dict[str, str]],
            "t_parse_result",
        ]
    ]:
        """Return the parse function registered for *mimetype*, or
        ``None`` if the mimetype is not handled.  *options* is accepted
        for subclasses that dispatch on mimetype parameters; this base
        implementation ignores it.
        """
        return self.parse_functions.get(mimetype)

    def parse_from_environ(self, environ: "WSGIEnvironment") -> "t_parse_result":
        """Parses the information from the environment as form data.

        :param environ: the WSGI environment to be used for parsing.
        :return: A tuple in the form ``(stream, form, files)``.
        """
        content_type = environ.get("CONTENT_TYPE", "")
        content_length = get_content_length(environ)
        mimetype, options = parse_options_header(content_type)
        return self.parse(get_input_stream(environ), mimetype, content_length, options)

    def parse(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Optional[t.Dict[str, str]] = None,
    ) -> "t_parse_result":
        """Parses the information from the given stream, mimetype,
        content length and mimetype parameters.

        :param stream: an input stream
        :param mimetype: the mimetype of the data
        :param content_length: the content length of the incoming data
        :param options: optional mimetype parameters (used for
                        the multipart boundary for instance)
        :return: A tuple in the form ``(stream, form, files)``.
        """
        if (
            self.max_content_length is not None
            and content_length is not None
            and content_length > self.max_content_length
        ):
            # if the input stream is not exhausted, firefox reports Connection Reset
            _exhaust(stream)
            raise exceptions.RequestEntityTooLarge()

        if options is None:
            options = {}

        parse_func = self.get_parse_func(mimetype, options)

        if parse_func is not None:
            try:
                return parse_func(self, stream, mimetype, content_length, options)
            except ValueError:
                # With silent=True (the default) a malformed body is
                # treated as empty form data instead of erroring out.
                if not self.silent:
                    raise

        # Unknown mimetype or silenced parse error: hand back the raw
        # stream with empty form/files multidicts.
        return stream, self.cls(), self.cls()

    @exhaust_stream
    def _parse_multipart(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Dict[str, str],
    ) -> "t_parse_result":
        # Parse a multipart/form-data body; the boundary comes from the
        # Content-Type parameters and is required.
        parser = MultiPartParser(
            self.stream_factory,
            self.charset,
            self.errors,
            max_form_memory_size=self.max_form_memory_size,
            cls=self.cls,
        )
        boundary = options.get("boundary", "").encode("ascii")

        if not boundary:
            raise ValueError("Missing boundary")

        form, files = parser.parse(stream, boundary, content_length)
        return stream, form, files

    @exhaust_stream
    def _parse_urlencoded(
        self,
        stream: t.IO[bytes],
        mimetype: str,
        content_length: t.Optional[int],
        options: t.Dict[str, str],
    ) -> "t_parse_result":
        # Parse an application/x-www-form-urlencoded body.
        if (
            self.max_form_memory_size is not None
            and content_length is not None
            and content_length > self.max_form_memory_size
        ):
            # if the input stream is not exhausted, firefox reports Connection Reset
            _exhaust(stream)
            raise exceptions.RequestEntityTooLarge()

        form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls)
        return stream, form, self.cls()

    #: mapping of mimetypes to parsing functions
    parse_functions: t.Dict[
        str,
        t.Callable[
            ["FormDataParser", t.IO[bytes], str, t.Optional[int], t.Dict[str, str]],
            "t_parse_result",
        ],
    ] = {
        "multipart/form-data": _parse_multipart,
        "application/x-www-form-urlencoded": _parse_urlencoded,
        "application/x-url-encoded": _parse_urlencoded,
    }
def _line_parse(line: str) -> t.Tuple[str, bool]:
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] == "\r\n":
return line[:-2], True
elif line[-1:] in {"\r", "\n"}:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable: t.Iterable[bytes]) -> Headers:
    """Parse multipart headers from an iterable that yields lines
    (including the trailing newline symbol), stopping at the blank line
    that terminates the header block so the iterable can be consumed
    further.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1.

    :param iterable: iterable of strings that are newline terminated
    """
    warnings.warn(
        "'parse_multipart_headers' is deprecated and will be removed in"
        " Werkzeug 2.1.",
        DeprecationWarning,
        stacklevel=2,
    )
    result: t.List[t.Tuple[str, str]] = []

    for b_line in iterable:
        line, terminated = _line_parse(_to_str(b_line))

        if not terminated:
            raise ValueError("unexpected end of line in multipart header")

        if not line:
            # Blank line marks the end of the header block.
            break

        if line[0] in " \t" and result:
            # Continuation line: fold into the previous header's value.
            key, value = result[-1]
            result[-1] = (key, f"{value}\n {line[1:]}")
        else:
            parts = line.split(":", 1)

            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))

    # The list is handed to Headers directly; it was never shared, so no
    # copy is needed.
    return Headers(result)
class MultiPartParser:
    """Parses ``multipart/form-data`` bodies into ``(form, files)``
    multidicts, driven by the event stream of :class:`MultipartDecoder`.
    """

    def __init__(
        self,
        stream_factory: t.Optional["TStreamFactory"] = None,
        charset: str = "utf-8",
        errors: str = "replace",
        max_form_memory_size: t.Optional[int] = None,
        cls: t.Optional[t.Type[MultiDict]] = None,
        buffer_size: int = 64 * 1024,
    ) -> None:
        self.charset = charset
        self.errors = errors
        self.max_form_memory_size = max_form_memory_size

        if stream_factory is None:
            stream_factory = default_stream_factory

        self.stream_factory = stream_factory

        if cls is None:
            cls = MultiDict

        self.cls = cls

        self.buffer_size = buffer_size

    def fail(self, message: str) -> "te.NoReturn":
        """Abort parsing with a :exc:`ValueError` carrying *message*."""
        raise ValueError(message)

    def get_part_charset(self, headers: Headers) -> str:
        """Return the charset for the current part, preferring the
        part's own ``Content-Type`` charset parameter over the parser
        default."""
        # Figure out input charset for current part
        content_type = headers.get("content-type")

        if content_type:
            mimetype, ct_params = parse_options_header(content_type)
            return ct_params.get("charset", self.charset)

        return self.charset

    def start_file_streaming(
        self, event: File, total_content_length: t.Optional[int]
    ) -> t.IO[bytes]:
        """Create the writable stream a file part will be spooled into,
        via :attr:`stream_factory`."""
        content_type = event.headers.get("content-type")

        try:
            content_length = int(event.headers["content-length"])
        except (KeyError, ValueError):
            # Parts usually have no Content-Length header; treat a
            # missing or malformed value as 0.
            content_length = 0

        container = self.stream_factory(
            total_content_length=total_content_length,
            filename=event.filename,
            content_type=content_type,
            content_length=content_length,
        )
        return container

    def parse(
        self, stream: t.IO[bytes], boundary: bytes, content_length: t.Optional[int]
    ) -> t.Tuple[MultiDict, MultiDict]:
        """Consume *stream* and return ``(form_fields, files)`` as
        instances of :attr:`cls`.

        Field parts are buffered in memory as byte chunks and decoded
        with the part charset; file parts are written to a stream from
        :meth:`start_file_streaming`.
        """
        container: t.Union[t.IO[bytes], t.List[bytes]]
        _write: t.Callable[[bytes], t.Any]

        # Trailing None acts as a sentinel telling the decoder the input
        # is complete.
        iterator = chain(
            _make_chunk_iter(
                stream,
                limit=content_length,
                buffer_size=self.buffer_size,
            ),
            [None],
        )

        parser = MultipartDecoder(boundary, self.max_form_memory_size)

        fields = []
        files = []

        current_part: Union[Field, File]
        for data in iterator:
            parser.receive_data(data)
            event = parser.next_event()
            while not isinstance(event, (Epilogue, NeedData)):
                if isinstance(event, Field):
                    # Start of a form field: accumulate chunks in a list.
                    current_part = event
                    container = []
                    _write = container.append
                elif isinstance(event, File):
                    # Start of an uploaded file: spool to a stream.
                    current_part = event
                    container = self.start_file_streaming(event, content_length)
                    _write = container.write
                elif isinstance(event, Data):
                    _write(event.data)
                    if not event.more_data:
                        # Part finished: finalize into fields or files.
                        if isinstance(current_part, Field):
                            value = b"".join(container).decode(
                                self.get_part_charset(current_part.headers), self.errors
                            )
                            fields.append((current_part.name, value))
                        else:
                            container = t.cast(t.IO[bytes], container)
                            # Rewind so the consumer reads from the start.
                            container.seek(0)
                            files.append(
                                (
                                    current_part.name,
                                    FileStorage(
                                        container,
                                        current_part.filename,
                                        current_part.name,
                                        headers=current_part.headers,
                                    ),
                                )
                            )

                event = parser.next_event()

        return self.cls(fields), self.cls(files)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,690 @@
import copy
import math
import operator
import sys
import typing as t
import warnings
from functools import partial
from functools import update_wrapper
from .wsgi import ClosingIterator
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# Context identity: the current greenlet when greenlet is available,
# otherwise the current thread.
try:
    from greenlet import getcurrent as _get_ident
except ImportError:
    from threading import get_ident as _get_ident


def get_ident() -> int:
    """Return the identity of the current context (greenlet or thread).

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1.  Use
        ``greenlet.getcurrent`` or ``threading.get_ident`` directly.
    """
    warnings.warn(
        "'get_ident' is deprecated and will be removed in Werkzeug"
        " 2.1. Use 'greenlet.getcurrent' or 'threading.get_ident' for"
        " previous behavior.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _get_ident()  # type: ignore
class _CannotUseContextVar(Exception):
    """Internal signal: the stdlib ``ContextVar`` is unusable in this
    environment, fall back to the ident-keyed fake below."""

    pass
# Select the context-local storage backend: the stdlib ContextVar when
# it is safe to use, otherwise a fake keyed on greenlet/thread idents.
try:
    from contextvars import ContextVar

    if "gevent" in sys.modules or "eventlet" in sys.modules:
        # Both use greenlet, so first check it has patched
        # ContextVars, Greenlet <0.4.17 does not.
        import greenlet

        greenlet_patched = getattr(greenlet, "GREENLET_USE_CONTEXT_VARS", False)

        if not greenlet_patched:
            # If Gevent is used, check it has patched ContextVars,
            # <20.5 does not.
            try:
                from gevent.monkey import is_object_patched
            except ImportError:
                # Gevent isn't used, but Greenlet is and hasn't patched
                raise _CannotUseContextVar() from None
            else:
                if is_object_patched("threading", "local") and not is_object_patched(
                    "contextvars", "ContextVar"
                ):
                    raise _CannotUseContextVar()

    def __release_local__(storage: t.Any) -> None:
        # Can remove when support for non-stdlib ContextVars is
        # removed, see "Fake" version below.
        storage.set({})

except (ImportError, _CannotUseContextVar):

    class ContextVar:  # type: ignore
        """A fake ContextVar based on the previous greenlet/threading
        ident function. Used on Python 3.6, eventlet, and old versions
        of gevent.
        """

        def __init__(self, _name: str) -> None:
            # Maps context ident -> that context's storage dict.
            self.storage: t.Dict[int, t.Dict[str, t.Any]] = {}

        def get(self, default: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
            return self.storage.get(_get_ident(), default)

        def set(self, value: t.Dict[str, t.Any]) -> None:
            self.storage[_get_ident()] = value

    def __release_local__(storage: t.Any) -> None:
        # Special version to ensure that the storage is cleaned up on
        # release.
        storage.storage.pop(_get_ident(), None)
def release_local(local: t.Union["Local", "LocalStack"]) -> None:
    """Release the data held by *local* for the current context.

    This makes it possible to use locals without a manager::

        >>> loc = Local()
        >>> loc.foo = 42
        >>> release_local(loc)
        >>> hasattr(loc, 'foo')
        False

    Works on both :class:`Local` and :class:`LocalStack` objects. Data
    reachable only through a proxy cannot be released this way — keep a
    reference to the underlying local object instead.

    .. versionadded:: 0.6.1
    """
    local.__release_local__()
class Local:
    """A namespace whose attribute values are isolated per context
    (thread, greenlet, or async task), backed by a ``ContextVar``.
    """

    __slots__ = ("_storage",)

    def __init__(self) -> None:
        # __setattr__ is overridden below, so write the slot directly.
        object.__setattr__(self, "_storage", ContextVar("local_storage"))

    @property
    def __storage__(self) -> t.Dict[str, t.Any]:
        warnings.warn(
            "'__storage__' is deprecated and will be removed in Werkzeug 2.1.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self._storage.get({})  # type: ignore

    @property
    def __ident_func__(self) -> t.Callable[[], int]:
        warnings.warn(
            "'__ident_func__' is deprecated and will be removed in"
            " Werkzeug 2.1. It should not be used in Python 3.7+.",
            DeprecationWarning,
            stacklevel=2,
        )
        return _get_ident  # type: ignore

    @__ident_func__.setter
    def __ident_func__(self, func: t.Callable[[], int]) -> None:
        warnings.warn(
            "'__ident_func__' is deprecated and will be removed in"
            " Werkzeug 2.1. Setting it no longer has any effect.",
            DeprecationWarning,
            stacklevel=2,
        )

    def __iter__(self) -> t.Iterator[t.Tuple[int, t.Any]]:
        # Yields (name, value) pairs for the current context only.
        return iter(self._storage.get({}).items())

    def __call__(self, proxy: str) -> "LocalProxy":
        """Return a :class:`LocalProxy` bound to attribute *proxy*."""
        return LocalProxy(self, proxy)

    def __release_local__(self) -> None:
        __release_local__(self._storage)

    def __getattr__(self, name: str) -> t.Any:
        data = self._storage.get({})
        if name in data:
            return data[name]
        raise AttributeError(name)

    def __setattr__(self, name: str, value: t.Any) -> None:
        # Copy-on-write: other contexts keep seeing their own dict.
        data = dict(self._storage.get({}))
        data[name] = value
        self._storage.set(data)

    def __delattr__(self, name: str) -> None:
        data = dict(self._storage.get({}))
        if name not in data:
            raise AttributeError(name)
        del data[name]
        self._storage.set(data)
class LocalStack:
    """A context-local stack built on top of :class:`Local`.

    ``push``/``pop`` manage a per-context list; when the stack empties
    the local is released, so the context is no longer bound. Calling
    the stack itself returns a proxy that resolves to the topmost item.

    .. versionadded:: 0.6.1
    """

    def __init__(self) -> None:
        self._local = Local()

    def __release_local__(self) -> None:
        self._local.__release_local__()

    @property
    def __ident_func__(self) -> t.Callable[[], int]:
        return self._local.__ident_func__

    @__ident_func__.setter
    def __ident_func__(self, value: t.Callable[[], int]) -> None:
        object.__setattr__(self._local, "__ident_func__", value)

    def __call__(self) -> "LocalProxy":
        def _lookup() -> t.Any:
            top = self.top
            if top is None:
                raise RuntimeError("object unbound")
            return top
        return LocalProxy(_lookup)

    def push(self, obj: t.Any) -> t.List[t.Any]:
        """Push *obj* onto the stack and return the new stack list."""
        # Work on a copy so other contexts never see the mutation.
        new_stack = list(getattr(self._local, "stack", ()))
        new_stack.append(obj)
        self._local.stack = new_stack
        return new_stack

    def pop(self) -> t.Any:
        """Remove and return the topmost item, or ``None`` if the stack
        was already empty.
        """
        stack = getattr(self._local, "stack", None)
        if stack is None:
            return None
        if len(stack) == 1:
            # Last item: unbind the local entirely for this context.
            release_local(self._local)
            return stack[-1]
        return stack.pop()

    @property
    def top(self) -> t.Any:
        """The topmost item, or ``None`` when the stack is empty."""
        try:
            return self._local.stack[-1]
        except (AttributeError, IndexError):
            return None
class LocalManager:
    """Manages the cleanup of one or more :class:`Local` and
    :class:`LocalStack` objects.

    Locals cannot release themselves; register them with a manager
    (at construction or by appending to ``manager.locals``) and each
    cleanup releases all data the locals hold for the current context.

    :param locals: ``None``, a single local, or an iterable of locals.
    :param ident_func: Deprecated, ignored.

    .. versionchanged:: 2.0
        ``ident_func`` is deprecated and will be removed in Werkzeug
        2.1.
    .. versionchanged:: 0.6.1
        The :func:`release_local` function can be used instead of a
        manager.
    .. versionchanged:: 0.7
        The ``ident_func`` parameter was added.
    """
    def __init__(
        self,
        locals: t.Optional[t.Iterable[t.Union["Local", "LocalStack"]]] = None,
        ident_func: None = None,
    ) -> None:
        # Accept None, a single Local, or any iterable of locals.
        if locals is None:
            self.locals = []
        elif isinstance(locals, Local):
            self.locals = [locals]
        else:
            self.locals = list(locals)
        if ident_func is not None:
            warnings.warn(
                "'ident_func' is deprecated and will be removed in"
                " Werkzeug 2.1. Setting it no longer has any effect.",
                DeprecationWarning,
                stacklevel=2,
            )
    @property
    def ident_func(self) -> t.Callable[[], int]:
        """Deprecated accessor for the internal ident function."""
        warnings.warn(
            "'ident_func' is deprecated and will be removed in Werkzeug 2.1.",
            DeprecationWarning,
            stacklevel=2,
        )
        return _get_ident # type: ignore
    @ident_func.setter
    def ident_func(self, func: t.Callable[[], int]) -> None:
        # Message previously read "removedin"; fixed to "removed in".
        warnings.warn(
            "'ident_func' is deprecated and will be removed in"
            " Werkzeug 2.1. Setting it no longer has any effect.",
            DeprecationWarning,
            stacklevel=2,
        )
    def get_ident(self) -> int:
        """Return the context identifier the local objects use internally for
        this context. You cannot override this method to change the behavior
        but use it to link other context local objects (such as SQLAlchemy's
        scoped sessions) to the Werkzeug locals.
        .. deprecated:: 2.0
            Will be removed in Werkzeug 2.1.
        .. versionchanged:: 0.7
            You can pass a different ident function to the local manager that
            will then be propagated to all the locals passed to the
            constructor.
        """
        warnings.warn(
            "'get_ident' is deprecated and will be removed in Werkzeug 2.1.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.ident_func()
    def cleanup(self) -> None:
        """Manually clean up the data in the locals for this context. Call
        this at the end of the request or use `make_middleware()`.
        """
        for local in self.locals:
            release_local(local)
    def make_middleware(self, app: "WSGIApplication") -> "WSGIApplication":
        """Wrap a WSGI application so that cleaning up happens after
        request end.
        """
        def application(
            environ: "WSGIEnvironment", start_response: "StartResponse"
        ) -> t.Iterable[bytes]:
            # ClosingIterator calls self.cleanup once the response
            # iterable is exhausted/closed by the server.
            return ClosingIterator(app(environ, start_response), self.cleanup)
        return application
    def middleware(self, func: "WSGIApplication") -> "WSGIApplication":
        """Like `make_middleware` but for decorating functions.
        Example usage::
            @manager.middleware
            def application(environ, start_response):
                ...
        The difference to `make_middleware` is that the function passed
        will have all the arguments copied from the inner application
        (name, docstring, module).
        """
        return update_wrapper(self.make_middleware(func), func)
    def __repr__(self) -> str:
        return f"<{type(self).__name__} storages: {len(self.locals)}>"
class _ProxyLookup:
    """Descriptor that handles proxied attribute lookup for
    :class:`LocalProxy`.
    :param f: The built-in function this attribute is accessed through.
        Instead of looking up the special method, the function call
        is redone on the object.
    :param fallback: Return this function if the proxy is unbound
        instead of raising a :exc:`RuntimeError`.
    :param is_attr: This proxied name is an attribute, not a function.
        Call the fallback immediately to get the value.
    :param class_value: Value to return when accessed from the
        ``LocalProxy`` class directly. Used for ``__doc__`` so building
        docs still works.
    """
    __slots__ = ("bind_f", "fallback", "is_attr", "class_value", "name")
    def __init__(
        self,
        f: t.Optional[t.Callable] = None,
        fallback: t.Optional[t.Callable] = None,
        class_value: t.Optional[t.Any] = None,
        is_attr: bool = False,
    ) -> None:
        bind_f: t.Optional[t.Callable[["LocalProxy", t.Any], t.Callable]]
        if hasattr(f, "__get__"):
            # A Python function, can be turned into a bound method.
            def bind_f(instance: "LocalProxy", obj: t.Any) -> t.Callable:
                return f.__get__(obj, type(obj)) # type: ignore
        elif f is not None:
            # A C function, use partial to bind the first argument.
            def bind_f(instance: "LocalProxy", obj: t.Any) -> t.Callable:
                return partial(f, obj) # type: ignore
        else:
            # Use getattr, which will produce a bound method.
            bind_f = None
        self.bind_f = bind_f
        self.fallback = fallback
        self.class_value = class_value
        self.is_attr = is_attr
    def __set_name__(self, owner: "LocalProxy", name: str) -> None:
        # Record the attribute name this descriptor is assigned to, so
        # lookups without a bind function can use plain getattr below.
        self.name = name
    def __get__(self, instance: "LocalProxy", owner: t.Optional[type] = None) -> t.Any:
        if instance is None:
            # Accessed on the class, not an instance.
            if self.class_value is not None:
                return self.class_value
            return self
        try:
            obj = instance._get_current_object()
        except RuntimeError:
            # The proxy is unbound; use the fallback if one was given.
            if self.fallback is None:
                raise
            fallback = self.fallback.__get__(instance, owner) # type: ignore
            if self.is_attr:
                # __class__ and __doc__ are attributes, not methods.
                # Call the fallback to get the value.
                return fallback()
            return fallback
        if self.bind_f is not None:
            return self.bind_f(instance, obj)
        return getattr(obj, self.name)
    def __repr__(self) -> str:
        return f"proxy {self.name}"
    def __call__(self, instance: "LocalProxy", *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Support calling unbound methods from the class. For example,
        this happens with ``copy.copy``, which does
        ``type(x).__copy__(x)``. ``type(x)`` can't be proxied, so it
        returns the proxy type and descriptor.
        """
        return self.__get__(instance, type(instance))(*args, **kwargs)
class _ProxyIOp(_ProxyLookup):
    """Look up an augmented assignment method on a proxied object. The
    method is wrapped to return the proxy instead of the object.
    """
    __slots__ = ()
    def __init__(
        self, f: t.Optional[t.Callable] = None, fallback: t.Optional[t.Callable] = None
    ) -> None:
        super().__init__(f, fallback)
        # Override the parent's bind_f: in-place operators must return
        # the proxy itself so `p += x` keeps `p` being a proxy.
        def bind_f(instance: "LocalProxy", obj: t.Any) -> t.Callable:
            def i_op(self: t.Any, other: t.Any) -> "LocalProxy":
                f(self, other) # type: ignore
                return instance
            return i_op.__get__(obj, type(obj)) # type: ignore
        self.bind_f = bind_f
def _l_to_r_op(op: F) -> F:
    """Turn a binary l-op into its matching r-op by swapping operands."""

    def r_op(obj: t.Any, other: t.Any) -> t.Any:
        # obj is the proxied (right-hand) object, other is the
        # left-hand operand of the original expression.
        return op(other, obj)

    return t.cast(F, r_op)
class LocalProxy:
    """A proxy to the object bound to a :class:`Local`. All operations
    on the proxy are forwarded to the bound object. If no object is
    bound, a :exc:`RuntimeError` is raised.
    .. code-block:: python
        from werkzeug.local import Local
        l = Local()
        # a proxy to whatever l.user is set to
        user = l("user")
        from werkzeug.local import LocalStack
        _request_stack = LocalStack()
        # a proxy to _request_stack.top
        request = _request_stack()
        # a proxy to the session attribute of the request proxy
        session = LocalProxy(lambda: request.session)
    ``__repr__`` and ``__class__`` are forwarded, so ``repr(x)`` and
    ``isinstance(x, cls)`` will look like the proxied object. Use
    ``issubclass(type(x), LocalProxy)`` to check if an object is a
    proxy.
    .. code-block:: python
        repr(user) # <User admin>
        isinstance(user, User) # True
        issubclass(type(user), LocalProxy) # True
    :param local: The :class:`Local` or callable that provides the
        proxied object.
    :param name: The attribute name to look up on a :class:`Local`. Not
        used if a callable is given.
    .. versionchanged:: 2.0
        Updated proxied attributes and methods to reflect the current
        data model.
    .. versionchanged:: 0.6.1
        The class can be instantiated with a callable.
    """
    __slots__ = ("__local", "__name", "__wrapped__")
    def __init__(
        self,
        local: t.Union["Local", t.Callable[[], t.Any]],
        name: t.Optional[str] = None,
    ) -> None:
        # object.__setattr__ bypasses the proxied __setattr__ below;
        # the mangled names correspond to the "__local"/"__name" slots.
        object.__setattr__(self, "_LocalProxy__local", local)
        object.__setattr__(self, "_LocalProxy__name", name)
        if callable(local) and not hasattr(local, "__release_local__"):
            # "local" is a callable that is not an instance of Local or
            # LocalManager: mark it as a wrapped function.
            object.__setattr__(self, "__wrapped__", local)
    def _get_current_object(self) -> t.Any:
        """Return the current object. This is useful if you want the real
        object behind the proxy at a time for performance reasons or because
        you want to pass the object into a different context.
        """
        if not hasattr(self.__local, "__release_local__"): # type: ignore
            # A plain callable was given; call it to resolve the object.
            return self.__local() # type: ignore
        try:
            return getattr(self.__local, self.__name) # type: ignore
        except AttributeError:
            name = self.__name # type: ignore
            raise RuntimeError(f"no object bound to {name}") from None
    # Each assignment below forwards one special method to the current
    # object through a _ProxyLookup/_ProxyIOp descriptor. Names listed
    # only in comments are intentionally not proxied.
    __doc__ = _ProxyLookup( # type: ignore
        class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
    )
    # __del__ should only delete the proxy
    __repr__ = _ProxyLookup( # type: ignore
        repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
    )
    __str__ = _ProxyLookup(str) # type: ignore
    __bytes__ = _ProxyLookup(bytes)
    __format__ = _ProxyLookup() # type: ignore
    __lt__ = _ProxyLookup(operator.lt)
    __le__ = _ProxyLookup(operator.le)
    __eq__ = _ProxyLookup(operator.eq) # type: ignore
    __ne__ = _ProxyLookup(operator.ne) # type: ignore
    __gt__ = _ProxyLookup(operator.gt)
    __ge__ = _ProxyLookup(operator.ge)
    __hash__ = _ProxyLookup(hash) # type: ignore
    __bool__ = _ProxyLookup(bool, fallback=lambda self: False)
    __getattr__ = _ProxyLookup(getattr)
    # __getattribute__ triggered through __getattr__
    __setattr__ = _ProxyLookup(setattr) # type: ignore
    __delattr__ = _ProxyLookup(delattr) # type: ignore
    __dir__ = _ProxyLookup(dir, fallback=lambda self: []) # type: ignore
    # __get__ (proxying descriptor not supported)
    # __set__ (descriptor)
    # __delete__ (descriptor)
    # __set_name__ (descriptor)
    # __objclass__ (descriptor)
    # __slots__ used by proxy itself
    # __dict__ (__getattr__)
    # __weakref__ (__getattr__)
    # __init_subclass__ (proxying metaclass not supported)
    # __prepare__ (metaclass)
    __class__ = _ProxyLookup(
        fallback=lambda self: type(self), is_attr=True
    ) # type: ignore
    __instancecheck__ = _ProxyLookup(lambda self, other: isinstance(other, self))
    __subclasscheck__ = _ProxyLookup(lambda self, other: issubclass(other, self))
    # __class_getitem__ triggered through __getitem__
    __call__ = _ProxyLookup(lambda self, *args, **kwargs: self(*args, **kwargs))
    __len__ = _ProxyLookup(len)
    __length_hint__ = _ProxyLookup(operator.length_hint)
    __getitem__ = _ProxyLookup(operator.getitem)
    __setitem__ = _ProxyLookup(operator.setitem)
    __delitem__ = _ProxyLookup(operator.delitem)
    # __missing__ triggered through __getitem__
    __iter__ = _ProxyLookup(iter)
    __next__ = _ProxyLookup(next)
    __reversed__ = _ProxyLookup(reversed)
    __contains__ = _ProxyLookup(operator.contains)
    __add__ = _ProxyLookup(operator.add)
    __sub__ = _ProxyLookup(operator.sub)
    __mul__ = _ProxyLookup(operator.mul)
    __matmul__ = _ProxyLookup(operator.matmul)
    __truediv__ = _ProxyLookup(operator.truediv)
    __floordiv__ = _ProxyLookup(operator.floordiv)
    __mod__ = _ProxyLookup(operator.mod)
    __divmod__ = _ProxyLookup(divmod)
    __pow__ = _ProxyLookup(pow)
    __lshift__ = _ProxyLookup(operator.lshift)
    __rshift__ = _ProxyLookup(operator.rshift)
    __and__ = _ProxyLookup(operator.and_)
    __xor__ = _ProxyLookup(operator.xor)
    __or__ = _ProxyLookup(operator.or_)
    # Reflected operators: the proxy is the right-hand operand.
    __radd__ = _ProxyLookup(_l_to_r_op(operator.add))
    __rsub__ = _ProxyLookup(_l_to_r_op(operator.sub))
    __rmul__ = _ProxyLookup(_l_to_r_op(operator.mul))
    __rmatmul__ = _ProxyLookup(_l_to_r_op(operator.matmul))
    __rtruediv__ = _ProxyLookup(_l_to_r_op(operator.truediv))
    __rfloordiv__ = _ProxyLookup(_l_to_r_op(operator.floordiv))
    __rmod__ = _ProxyLookup(_l_to_r_op(operator.mod))
    __rdivmod__ = _ProxyLookup(_l_to_r_op(divmod))
    __rpow__ = _ProxyLookup(_l_to_r_op(pow))
    __rlshift__ = _ProxyLookup(_l_to_r_op(operator.lshift))
    __rrshift__ = _ProxyLookup(_l_to_r_op(operator.rshift))
    __rand__ = _ProxyLookup(_l_to_r_op(operator.and_))
    __rxor__ = _ProxyLookup(_l_to_r_op(operator.xor))
    __ror__ = _ProxyLookup(_l_to_r_op(operator.or_))
    # In-place operators return the proxy itself (see _ProxyIOp).
    __iadd__ = _ProxyIOp(operator.iadd)
    __isub__ = _ProxyIOp(operator.isub)
    __imul__ = _ProxyIOp(operator.imul)
    __imatmul__ = _ProxyIOp(operator.imatmul)
    __itruediv__ = _ProxyIOp(operator.itruediv)
    __ifloordiv__ = _ProxyIOp(operator.ifloordiv)
    __imod__ = _ProxyIOp(operator.imod)
    __ipow__ = _ProxyIOp(operator.ipow)
    __ilshift__ = _ProxyIOp(operator.ilshift)
    __irshift__ = _ProxyIOp(operator.irshift)
    __iand__ = _ProxyIOp(operator.iand)
    __ixor__ = _ProxyIOp(operator.ixor)
    __ior__ = _ProxyIOp(operator.ior)
    __neg__ = _ProxyLookup(operator.neg)
    __pos__ = _ProxyLookup(operator.pos)
    __abs__ = _ProxyLookup(abs)
    __invert__ = _ProxyLookup(operator.invert)
    __complex__ = _ProxyLookup(complex)
    __int__ = _ProxyLookup(int)
    __float__ = _ProxyLookup(float)
    __index__ = _ProxyLookup(operator.index)
    __round__ = _ProxyLookup(round)
    __trunc__ = _ProxyLookup(math.trunc)
    __floor__ = _ProxyLookup(math.floor)
    __ceil__ = _ProxyLookup(math.ceil)
    __enter__ = _ProxyLookup()
    __exit__ = _ProxyLookup()
    __await__ = _ProxyLookup()
    __aiter__ = _ProxyLookup()
    __anext__ = _ProxyLookup()
    __aenter__ = _ProxyLookup()
    __aexit__ = _ProxyLookup()
    __copy__ = _ProxyLookup(copy.copy)
    __deepcopy__ = _ProxyLookup(copy.deepcopy)
    # __getnewargs_ex__ (pickle through proxy not supported)
    # __getnewargs__ (pickle)
    # __getstate__ (pickle)
    # __setstate__ (pickle)
    # __reduce__ (pickle)
    # __reduce_ex__ (pickle)

View file

@ -0,0 +1,22 @@
"""
Middleware
==========
A WSGI middleware is a WSGI application that wraps another application
in order to observe or change its behavior. Werkzeug provides some
middleware for common use cases.
.. toctree::
:maxdepth: 1
proxy_fix
shared_data
dispatcher
http_proxy
lint
profiler
The :doc:`interactive debugger </debug>` is also a middleware that can
be applied manually, although it is typically used automatically with
the :doc:`development server </serving>`.
"""

View file

@ -0,0 +1,78 @@
"""
Application Dispatcher
======================
This middleware creates a single WSGI application that dispatches to
multiple other WSGI applications mounted at different URL paths.
A common example is writing a Single Page Application, where you have a
backend API and a frontend written in JavaScript that does the routing
in the browser rather than requesting different pages from the server.
The frontend is a single HTML and JS file that should be served for any
path besides "/api".
This example dispatches to an API app under "/api", an admin app
under "/admin", and an app that serves frontend files for all other
requests::
app = DispatcherMiddleware(serve_frontend, {
'/api': api_app,
'/admin': admin_app,
})
In production, you might instead handle this at the HTTP server level,
serving files or proxying to application servers based on location. The
API and admin apps would each be deployed with a separate WSGI server,
and the static files would be served directly by the HTTP server.
.. autoclass:: DispatcherMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class DispatcherMiddleware:
    """Combine multiple applications as a single WSGI application.

    Requests are dispatched to an application based on the path it is
    mounted under.

    :param app: The WSGI application to dispatch to if the request
        doesn't match a mounted path.
    :param mounts: Maps path prefixes to applications for dispatching.
    """

    def __init__(
        self,
        app: "WSGIApplication",
        mounts: t.Optional[t.Dict[str, "WSGIApplication"]] = None,
    ) -> None:
        self.app = app
        self.mounts = mounts or {}

    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        # Strip path segments from the right until the remaining prefix
        # matches a mount; everything stripped becomes the dispatched
        # application's PATH_INFO.
        script = environ.get("PATH_INFO", "")
        path_info = ""
        app = None
        while "/" in script:
            if script in self.mounts:
                app = self.mounts[script]
                break
            script, tail = script.rsplit("/", 1)
            path_info = f"/{tail}{path_info}"
        if app is None:
            # No mounted prefix matched; fall back to the default app.
            app = self.mounts.get(script, self.app)
        environ["SCRIPT_NAME"] = environ.get("SCRIPT_NAME", "") + script
        environ["PATH_INFO"] = path_info
        return app(environ, start_response)

View file

@ -0,0 +1,230 @@
"""
Basic HTTP Proxy
================
.. autoclass:: ProxyMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
from http import client
from ..datastructures import EnvironHeaders
from ..http import is_hop_by_hop_header
from ..urls import url_parse
from ..urls import url_quote
from ..wsgi import get_input_stream
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProxyMiddleware:
    """Proxy requests under a path to an external server, routing other
    requests to the app.
    This middleware can only proxy HTTP requests, as HTTP is the only
    protocol handled by the WSGI server. Other protocols, such as
    WebSocket requests, cannot be proxied at this layer. This should
    only be used for development, in production a real proxy server
    should be used.
    The middleware takes a dict mapping a path prefix to a dict
    describing the host to be proxied to::
        app = ProxyMiddleware(app, {
            "/static/": {
                "target": "http://127.0.0.1:5001/",
            }
        })
    Each host has the following options:
    ``target``:
        The target URL to dispatch to. This is required.
    ``remove_prefix``:
        Whether to remove the prefix from the URL before dispatching it
        to the target. The default is ``False``.
    ``host``:
        ``"<auto>"`` (default):
            The host header is automatically rewritten to the URL of the
            target.
        ``None``:
            The host header is unmodified from the client request.
        Any other value:
            The host header is overwritten with the value.
    ``headers``:
        A dictionary of headers to be sent with the request to the
        target. The default is ``{}``.
    ``ssl_context``:
        A :class:`ssl.SSLContext` defining how to verify requests if the
        target is HTTPS. The default is ``None``.
    In the example above, everything under ``"/static/"`` is proxied to
    the server on port 5001. The host header is rewritten to the target,
    and the ``"/static/"`` prefix is removed from the URLs.
    :param app: The WSGI application to wrap.
    :param targets: Proxy target configurations. See description above.
    :param chunk_size: Size of chunks to read from input stream and
        write to target.
    :param timeout: Seconds before an operation to a target fails.
    .. versionadded:: 0.14
    """
    def __init__(
        self,
        app: "WSGIApplication",
        targets: t.Mapping[str, t.Dict[str, t.Any]],
        chunk_size: int = 2 << 13,
        timeout: int = 10,
    ) -> None:
        # Fill in per-target option defaults so later code can assume
        # every option key is present.
        def _set_defaults(opts: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
            opts.setdefault("remove_prefix", False)
            opts.setdefault("host", "<auto>")
            opts.setdefault("headers", {})
            opts.setdefault("ssl_context", None)
            return opts
        self.app = app
        # Normalize every prefix to the form "/prefix/" for matching.
        self.targets = {
            f"/{k.strip('/')}/": _set_defaults(v) for k, v in targets.items()
        }
        self.chunk_size = chunk_size
        self.timeout = timeout
    def proxy_to(
        self, opts: t.Dict[str, t.Any], path: str, prefix: str
    ) -> "WSGIApplication":
        """Build a WSGI app that forwards the request to the target
        configured in *opts*, for the matched *prefix* of *path*.
        """
        target = url_parse(opts["target"])
        host = t.cast(str, target.ascii_host)
        def application(
            environ: "WSGIEnvironment", start_response: "StartResponse"
        ) -> t.Iterable[bytes]:
            # Drop hop-by-hop headers; Host and Content-Length are
            # re-created below according to the target options.
            headers = list(EnvironHeaders(environ).items())
            headers[:] = [
                (k, v)
                for k, v in headers
                if not is_hop_by_hop_header(k)
                and k.lower() not in ("content-length", "host")
            ]
            headers.append(("Connection", "close"))
            if opts["host"] == "<auto>":
                headers.append(("Host", host))
            elif opts["host"] is None:
                headers.append(("Host", environ["HTTP_HOST"]))
            else:
                headers.append(("Host", opts["host"]))
            headers.extend(opts["headers"].items())
            remote_path = path
            if opts["remove_prefix"]:
                remote_path = remote_path[len(prefix) :].lstrip("/")
                remote_path = f"{target.path.rstrip('/')}/{remote_path}"
            content_length = environ.get("CONTENT_LENGTH")
            chunked = False
            if content_length not in ("", None):
                headers.append(("Content-Length", content_length)) # type: ignore
            elif content_length is not None:
                # CONTENT_LENGTH is "" (present but unknown): stream
                # the body using chunked transfer encoding instead.
                headers.append(("Transfer-Encoding", "chunked"))
                chunked = True
            try:
                if target.scheme == "http":
                    con = client.HTTPConnection(
                        host, target.port or 80, timeout=self.timeout
                    )
                elif target.scheme == "https":
                    con = client.HTTPSConnection(
                        host,
                        target.port or 443,
                        timeout=self.timeout,
                        context=opts["ssl_context"],
                    )
                else:
                    raise RuntimeError(
                        "Target scheme must be 'http' or 'https', got"
                        f" {target.scheme!r}."
                    )
                con.connect()
                remote_url = url_quote(remote_path)
                querystring = environ["QUERY_STRING"]
                if querystring:
                    remote_url = f"{remote_url}?{querystring}"
                con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True)
                for k, v in headers:
                    if k.lower() == "connection":
                        v = "close"
                    con.putheader(k, v)
                con.endheaders()
                # Stream the request body to the target in chunks.
                stream = get_input_stream(environ)
                while True:
                    data = stream.read(self.chunk_size)
                    if not data:
                        break
                    if chunked:
                        con.send(b"%x\r\n%s\r\n" % (len(data), data))
                    else:
                        con.send(data)
                resp = con.getresponse()
            except OSError:
                from ..exceptions import BadGateway
                return BadGateway()(environ, start_response)
            start_response(
                f"{resp.status} {resp.reason}",
                [
                    (k.title(), v)
                    for k, v in resp.getheaders()
                    if not is_hop_by_hop_header(k)
                ],
            )
            # Lazily stream the target's response body back to the
            # client; stop silently on connection errors.
            def read() -> t.Iterator[bytes]:
                while True:
                    try:
                        data = resp.read(self.chunk_size)
                    except OSError:
                        break
                    if not data:
                        break
                    yield data
            return read()
        return application
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        # First matching prefix wins; everything else goes to the app.
        path = environ["PATH_INFO"]
        app = self.app
        for prefix, opts in self.targets.items():
            if path.startswith(prefix):
                app = self.proxy_to(opts, path, prefix)
                break
        return app(environ, start_response)

View file

@ -0,0 +1,420 @@
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
from types import TracebackType
from urllib.parse import urlparse
from warnings import warn
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class WSGIWarning(Warning):
    """Warning category for violations of the WSGI spec (:pep:`3333`)."""
class HTTPWarning(Warning):
    """Warning category for questionable HTTP response behavior."""
def check_type(context: str, obj: object, need: t.Type = str) -> None:
    """Emit a :class:`WSGIWarning` unless *obj* is exactly of type *need*.

    An exact ``type(...) is`` comparison is used deliberately: WSGI
    requires the exact builtin types, not subclasses.
    """
    actual = type(obj)
    if actual is not need:
        warn(
            f"{context!r} requires {need.__name__!r}, got {actual.__name__!r}.",
            WSGIWarning,
            stacklevel=3,
        )
class InputStream:
    """Wraps ``wsgi.input``, warning about usage patterns the WSGI spec
    does not guarantee to be safe.
    """
    def __init__(self, stream: t.IO[bytes]) -> None:
        self._stream = stream
    def read(self, *args: t.Any) -> bytes:
        # Unbounded read() may never return on a conforming server.
        if len(args) == 0:
            warn(
                "WSGI does not guarantee an EOF marker on the input stream, thus making"
                " calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
                " return from this call.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) != 1:
            warn(
                "Too many parameters passed to 'wsgi.input.read()'.",
                WSGIWarning,
                stacklevel=2,
            )
        return self._stream.read(*args)
    def readline(self, *args: t.Any) -> bytes:
        if len(args) == 0:
            warn(
                "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
                " 'wsgi.input.read()' instead.",
                WSGIWarning,
                stacklevel=2,
            )
        elif len(args) == 1:
            warn(
                "'wsgi.input.readline()' was called with a size hint. WSGI does not"
                " support this, although it's available on all major servers.",
                WSGIWarning,
                stacklevel=2,
            )
        else:
            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
        return self._stream.readline(*args)
    def __iter__(self) -> t.Iterator[bytes]:
        try:
            return iter(self._stream)
        except TypeError:
            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
            return iter(())
    def close(self) -> None:
        # The application must not close the server-owned input stream.
        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class ErrorStream:
    """Wraps ``wsgi.errors``, type-checking writes and warning if the
    application closes the server-owned stream.
    """
    def __init__(self, stream: t.IO[str]) -> None:
        self._stream = stream
    def write(self, s: str) -> None:
        check_type("wsgi.error.write()", s, str)
        self._stream.write(s)
    def flush(self) -> None:
        self._stream.flush()
    def writelines(self, seq: t.Iterable[str]) -> None:
        # Route through write() so each line is type-checked.
        for line in seq:
            self.write(line)
    def close(self) -> None:
        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
        self._stream.close()
class GuardedWrite:
    """Wrap the WSGI ``write`` callable, verifying each chunk is bytes
    and recording its length for later response validation.
    """

    def __init__(self, write: t.Callable[[bytes], None], chunks: t.List[int]) -> None:
        # The chunks list is shared with the caller so it can total the
        # bytes written through this callable.
        self._wrapped = write
        self._sizes = chunks

    def __call__(self, s: bytes) -> None:
        check_type("write()", s, bytes)
        self._wrapped(s)
        self._sizes.append(len(s))
class GuardedIterator:
    """Wraps the application's response iterable, checking each chunk
    and validating the response against the recorded status and headers
    when closed.
    """
    def __init__(
        self,
        iterator: t.Iterable[bytes],
        headers_set: t.Tuple[int, Headers],
        chunks: t.List[int],
    ) -> None:
        self._iterator = iterator
        self._next = iter(iterator).__next__
        # True once close() has been called.
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks
    def __iter__(self) -> "GuardedIterator":
        return self
    def __next__(self) -> bytes:
        if self.closed:
            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(
                "The application returned before it started the response.",
                WSGIWarning,
                stacklevel=2,
            )
        check_type("application iterator items", rv, bytes)
        self.chunks.append(len(rv))
        return rv
    def close(self) -> None:
        self.closed = True
        if hasattr(self._iterator, "close"):
            self._iterator.close() # type: ignore
        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get("content-length", type=int)
            if status_code == 304:
                # 304 must not carry entity headers (except the allowed
                # ones) or a body.
                for key, _value in headers:
                    key = key.lower()
                    if key not in ("expires", "content-location") and is_entity_header(
                        key
                    ):
                        warn(
                            f"Entity header {key!r} found in 304 response.", HTTPWarning
                        )
                if bytes_sent:
                    warn("304 responses must not have a body.", HTTPWarning)
            elif 100 <= status_code < 200 or status_code == 204:
                # 1xx and 204 responses must be empty.
                if content_length != 0:
                    warn(
                        f"{status_code} responses must have an empty content length.",
                        HTTPWarning,
                    )
                if bytes_sent:
                    warn(f"{status_code} responses must not have a body.", HTTPWarning)
            elif content_length is not None and content_length != bytes_sent:
                warn(
                    "Content-Length and the number of bytes sent to the"
                    " client do not match.",
                    WSGIWarning,
                )
    def __del__(self) -> None:
        # Closing via garbage collection is too late; warn if possible
        # (warn can itself fail during interpreter shutdown).
        if not self.closed:
            try:
                warn(
                    "Iterator was garbage collected before it was closed.", WSGIWarning
                )
            except Exception:
                pass
class LintMiddleware:
    """Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:
    - invalid status codes
    - non-bytes sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators
    Error information is emitted using the :mod:`warnings` module.
    :param app: The WSGI application to wrap.
    .. code-block:: python
        from werkzeug.middleware.lint import LintMiddleware
        app = LintMiddleware(app)
    """
    def __init__(self, app: "WSGIApplication") -> None:
        # The wrapped application every checked request is forwarded to.
        self.app = app
    def check_environ(self, environ: "WSGIEnvironment") -> None:
        """Warn about environ problems: wrong container type, missing
        required keys, an unexpected ``wsgi.version``, and
        ``SCRIPT_NAME``/``PATH_INFO`` values not starting with a slash.
        """
        if type(environ) is not dict:
            warn(
                "WSGI environment is not a standard Python dict.",
                WSGIWarning,
                stacklevel=4,
            )
        # Keys that PEP 3333 requires every environ to contain.
        for key in (
            "REQUEST_METHOD",
            "SERVER_NAME",
            "SERVER_PORT",
            "wsgi.version",
            "wsgi.input",
            "wsgi.errors",
            "wsgi.multithread",
            "wsgi.multiprocess",
            "wsgi.run_once",
        ):
            if key not in environ:
                warn(
                    f"Required environment key {key!r} not found",
                    WSGIWarning,
                    stacklevel=3,
                )
        # NOTE(review): this raises KeyError when "wsgi.version" is
        # absent, even though its absence was only warned about above.
        if environ["wsgi.version"] != (1, 0):
            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
        script_name = environ.get("SCRIPT_NAME", "")
        path_info = environ.get("PATH_INFO", "")
        if script_name and script_name[0] != "/":
            warn(
                f"'SCRIPT_NAME' does not start with a slash: {script_name!r}",
                WSGIWarning,
                stacklevel=3,
            )
        if path_info and path_info[0] != "/":
            warn(
                f"'PATH_INFO' does not start with a slash: {path_info!r}",
                WSGIWarning,
                stacklevel=3,
            )
    def check_start_response(
        self,
        status: str,
        headers: t.List[t.Tuple[str, str]],
        exc_info: t.Optional[
            t.Tuple[t.Type[BaseException], BaseException, TracebackType]
        ],
    ) -> t.Tuple[int, Headers]:
        """Validate the arguments passed to ``start_response`` and
        return the parsed integer status code together with the headers
        wrapped in a :class:`Headers` instance.
        """
        check_type("status", status, str)
        status_code_str = status.split(None, 1)[0]
        if len(status_code_str) != 3 or not status_code_str.isdigit():
            warn("Status code must be three digits.", WSGIWarning, stacklevel=3)
        if len(status) < 4 or status[3] != " ":
            warn(
                f"Invalid value for status {status!r}. Valid status strings are three"
                " digits, a space and a status explanation.",
                WSGIWarning,
                stacklevel=3,
            )
        status_code = int(status_code_str)
        if status_code < 100:
            warn("Status code < 100 detected.", WSGIWarning, stacklevel=3)
        if type(headers) is not list:
            warn("Header list is not a list.", WSGIWarning, stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn("Header items must be 2-item tuples.", WSGIWarning, stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(
                    "Header keys and values must be strings.", WSGIWarning, stacklevel=3
                )
            # A "Status" header conflicts with how CGI reports the status.
            if name.lower() == "status":
                warn(
                    "The status header is not supported due to"
                    " conflicts with the CGI spec.",
                    WSGIWarning,
                    stacklevel=3,
                )
        if exc_info is not None and not isinstance(exc_info, tuple):
            warn("Invalid value for exc_info.", WSGIWarning, stacklevel=3)
        headers = Headers(headers)
        self.check_headers(headers)
        return status_code, headers
    def check_headers(self, headers: Headers) -> None:
        """Warn about HTTP-level header problems: lower-case weak-etag
        markers, unquoted etags, and relative ``Location`` URLs.
        """
        etag = headers.get("etag")
        if etag is not None:
            if etag.startswith(("W/", "w/")):
                if etag.startswith("w/"):
                    warn(
                        "Weak etag indicator should be upper case.",
                        HTTPWarning,
                        stacklevel=4,
                    )
                # Strip the weak marker before checking the quoting.
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn("Unquoted etag emitted.", HTTPWarning, stacklevel=4)
        location = headers.get("location")
        if location is not None:
            # An absolute URL has a netloc component after parsing.
            if not urlparse(location).netloc:
                warn(
                    "Absolute URLs required for location header.",
                    HTTPWarning,
                    stacklevel=4,
                )
    def check_iterator(self, app_iter: t.Iterable[bytes]) -> None:
        """Warn when the application returned a bare bytestring, which
        iterates one byte at a time."""
        if isinstance(app_iter, bytes):
            warn(
                "The application returned a bytestring. The response will send one"
                " character at a time to the client, which will kill performance."
                " Return a list or iterable instead.",
                WSGIWarning,
                stacklevel=3,
            )
    def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Iterable[bytes]:
        """Call the wrapped application with a checked environ, wrapped
        streams, a validating ``start_response``, and a guarded response
        iterator.
        """
        if len(args) != 2:
            warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)
        if kwargs:
            warn(
                "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
            )
        environ: "WSGIEnvironment" = args[0]
        start_response: "StartResponse" = args[1]
        self.check_environ(environ)
        # Wrap the streams so reads/writes can be validated as they happen.
        environ["wsgi.input"] = InputStream(environ["wsgi.input"])
        environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])
        # Hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length.
        environ["wsgi.file_wrapper"] = FileWrapper
        # headers_set is filled in by checking_start_response below and
        # later read by GuardedIterator; chunks collects written sizes.
        headers_set: t.List[t.Any] = []
        chunks: t.List[int] = []
        def checking_start_response(
            *args: t.Any, **kwargs: t.Any
        ) -> t.Callable[[bytes], None]:
            # Validate the call signature, then delegate to the real
            # start_response, wrapping its write() in GuardedWrite.
            if len(args) not in {2, 3}:
                warn(
                    f"Invalid number of arguments: {len(args)}, expected 2 or 3.",
                    WSGIWarning,
                    stacklevel=2,
                )
            if kwargs:
                warn("'start_response' does not take keyword arguments.", WSGIWarning)
            status: str = args[0]
            headers: t.List[t.Tuple[str, str]] = args[1]
            exc_info: t.Optional[
                t.Tuple[t.Type[BaseException], BaseException, TracebackType]
            ] = (args[2] if len(args) == 3 else None)
            headers_set[:] = self.check_start_response(status, headers, exc_info)
            return GuardedWrite(start_response(status, headers, exc_info), chunks)
        app_iter = self.app(environ, t.cast("StartResponse", checking_start_response))
        self.check_iterator(app_iter)
        return GuardedIterator(
            app_iter, t.cast(t.Tuple[int, Headers], headers_set), chunks
        )

View file

@ -0,0 +1,139 @@
"""
Application Profiler
====================
This module provides a middleware that profiles each request with the
:mod:`cProfile` module. This can help identify bottlenecks in your code
that may be slowing down your application.
.. autoclass:: ProfilerMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import os.path
import sys
import time
import typing as t
from pstats import Stats
try:
from cProfile import Profile
except ImportError:
from profile import Profile # type: ignore
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProfilerMiddleware:
    """Wrap a WSGI application and profile the execution of each
    request. Responses are buffered so that timings are more exact.
    If ``stream`` is given, :class:`pstats.Stats` are written to it
    after each request. If ``profile_dir`` is given, :mod:`cProfile`
    data files are saved to that directory, one file per request.
    The filename can be customized by passing ``filename_format``. If
    it is a string, it will be formatted using :meth:`str.format` with
    the following fields available:
    - ``{method}`` - The request method; GET, POST, etc.
    - ``{path}`` - The request path or 'root' should one not exist.
    - ``{elapsed}`` - The elapsed time of the request.
    - ``{time}`` - The time of the request.
    If it is a callable, it will be called with the WSGI ``environ``
    dict and should return a filename.
    :param app: The WSGI application to wrap.
    :param stream: Write stats to this stream. Disable with ``None``.
    :param sort_by: A tuple of columns to sort stats by. See
        :meth:`pstats.Stats.sort_stats`.
    :param restrictions: A tuple of restrictions to filter stats by. See
        :meth:`pstats.Stats.print_stats`.
    :param profile_dir: Save profile data files to this directory.
    :param filename_format: Format string for profile data file names,
        or a callable returning a name. See explanation above.
    .. code-block:: python
        from werkzeug.middleware.profiler import ProfilerMiddleware
        app = ProfilerMiddleware(app)
    .. versionchanged:: 0.15
        Stats are written even if ``profile_dir`` is given, and can be
        disable by passing ``stream=None``.
    .. versionadded:: 0.15
        Added ``filename_format``.
    .. versionadded:: 0.9
        Added ``restrictions`` and ``profile_dir``.
    """
    def __init__(
        self,
        app: "WSGIApplication",
        stream: t.IO[str] = sys.stdout,
        sort_by: t.Iterable[str] = ("time", "calls"),
        restrictions: t.Iterable[t.Union[str, int, float]] = (),
        profile_dir: t.Optional[str] = None,
        # Annotation widened: the docstring and __call__ explicitly
        # support a callable taking the environ and returning a name.
        filename_format: t.Union[
            str, t.Callable[["WSGIEnvironment"], str]
        ] = "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof",
    ) -> None:
        self._app = app
        self._stream = stream
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
        self._filename_format = filename_format
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        """Profile one request and return the buffered response body."""
        # Buffer the whole response so the profile measures only the
        # application, not client I/O.
        response_body: t.List[bytes] = []
        def catching_start_response(status, headers, exc_info=None):  # type: ignore
            start_response(status, headers, exc_info)
            # The returned WSGI write() callable appends into the buffer.
            return response_body.append
        def runapp() -> None:
            # Run the app and drain its iterator so all work is profiled.
            app_iter = self._app(
                environ, t.cast("StartResponse", catching_start_response)
            )
            response_body.extend(app_iter)
            if hasattr(app_iter, "close"):
                app_iter.close()  # type: ignore
        profile = Profile()
        start = time.time()
        profile.runcall(runapp)
        body = b"".join(response_body)
        elapsed = time.time() - start
        if self._profile_dir is not None:
            if callable(self._filename_format):
                filename = self._filename_format(environ)
            else:
                # "root" stands in for the empty path of a request to "/".
                filename = self._filename_format.format(
                    method=environ["REQUEST_METHOD"],
                    path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root",
                    elapsed=elapsed * 1000.0,
                    time=time.time(),
                )
            filename = os.path.join(self._profile_dir, filename)
            profile.dump_stats(filename)
        if self._stream is not None:
            # Print a delimited stats report for this request.
            stats = Stats(profile, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            print("-" * 80, file=self._stream)
            path_info = environ.get("PATH_INFO", "")
            print(f"PATH: {path_info!r}", file=self._stream)
            stats.print_stats(*self._restrictions)
            print(f"{'-' * 80}\n", file=self._stream)
        return [body]

View file

@ -0,0 +1,187 @@
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be used if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
from ..http import parse_list_header
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProxyFix:
    """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
    front of the application may set.
    - ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
    - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
    - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
      ``SERVER_PORT``.
    - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
    - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
    You must tell the middleware how many proxies set each header so it
    knows what values to trust. It is a security issue to trust values
    that came from the client rather than a proxy.
    The original values of the headers are stored in the WSGI
    environ as ``werkzeug.proxy_fix.orig``, a dict.
    :param app: The WSGI application to wrap.
    :param x_for: Number of values to trust for ``X-Forwarded-For``.
    :param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
    :param x_host: Number of values to trust for ``X-Forwarded-Host``.
    :param x_port: Number of values to trust for ``X-Forwarded-Port``.
    :param x_prefix: Number of values to trust for
        ``X-Forwarded-Prefix``.
    .. code-block:: python
        from werkzeug.middleware.proxy_fix import ProxyFix
        # App is behind one proxy that sets the -For and -Host headers.
        app = ProxyFix(app, x_for=1, x_host=1)
    .. versionchanged:: 1.0
        Deprecated code has been removed:
        *   The ``num_proxies`` argument and attribute.
        *   The ``get_remote_addr`` method.
        *   The environ keys ``orig_remote_addr``,
            ``orig_wsgi_url_scheme``, and ``orig_http_host``.
    .. versionchanged:: 0.15
        All headers support multiple values. The ``num_proxies``
        argument is deprecated. Each header is configured with a
        separate number of trusted proxies.
    .. versionchanged:: 0.15
        Original WSGI environ values are stored in the
        ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
        ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
        and will be removed in 1.0.
    .. versionchanged:: 0.15
        Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
    .. versionchanged:: 0.15
        ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
        ``SERVER_NAME`` and ``SERVER_PORT``.
    """
    def __init__(
        self,
        app: "WSGIApplication",
        x_for: int = 1,
        x_proto: int = 1,
        x_host: int = 0,
        x_port: int = 0,
        x_prefix: int = 0,
    ) -> None:
        self.app = app
        # Each count is the number of trusted values for that header;
        # 0 disables using the header entirely.
        self.x_for = x_for
        self.x_proto = x_proto
        self.x_host = x_host
        self.x_port = x_port
        self.x_prefix = x_prefix
    def _get_real_value(self, trusted: int, value: t.Optional[str]) -> t.Optional[str]:
        """Get the real value from a list header based on the configured
        number of trusted proxies.
        :param trusted: Number of values to trust in the header.
        :param value: Comma separated list header value to parse.
        :return: The real value, or ``None`` if there are fewer values
            than the number of trusted proxies.
        .. versionchanged:: 1.0
            Renamed from ``_get_trusted_comma``.
        .. versionadded:: 0.15
        """
        if not (trusted and value):
            return None
        values = parse_list_header(value)
        if len(values) >= trusted:
            # Count from the end: the last ``trusted`` values were added
            # by proxies; anything earlier came from the client.
            return values[-trusted]
        return None
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        """Modify the WSGI environ based on the various ``Forwarded``
        headers before calling the wrapped application. Store the
        original environ values in ``werkzeug.proxy_fix.orig_{key}``.
        """
        environ_get = environ.get
        orig_remote_addr = environ_get("REMOTE_ADDR")
        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
        orig_http_host = environ_get("HTTP_HOST")
        # Preserve all overwritable values so applications can inspect
        # what the proxy-facing request actually looked like.
        environ.update(
            {
                "werkzeug.proxy_fix.orig": {
                    "REMOTE_ADDR": orig_remote_addr,
                    "wsgi.url_scheme": orig_wsgi_url_scheme,
                    "HTTP_HOST": orig_http_host,
                    "SERVER_NAME": environ_get("SERVER_NAME"),
                    "SERVER_PORT": environ_get("SERVER_PORT"),
                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
                }
            }
        )
        x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
        if x_for:
            environ["REMOTE_ADDR"] = x_for
        x_proto = self._get_real_value(
            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
        )
        if x_proto:
            environ["wsgi.url_scheme"] = x_proto
        # Host is applied before port so a trusted X-Forwarded-Port can
        # then override the port taken from the forwarded host below.
        x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST"))
        if x_host:
            environ["HTTP_HOST"] = environ["SERVER_NAME"] = x_host
            # "]" to check for IPv6 address without port
            if ":" in x_host and not x_host.endswith("]"):
                environ["SERVER_NAME"], environ["SERVER_PORT"] = x_host.rsplit(":", 1)
        x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT"))
        if x_port:
            host = environ.get("HTTP_HOST")
            if host:
                # "]" to check for IPv6 address without port
                if ":" in host and not host.endswith("]"):
                    # Drop the existing port before appending the new one.
                    host = host.rsplit(":", 1)[0]
                environ["HTTP_HOST"] = f"{host}:{x_port}"
            environ["SERVER_PORT"] = x_port
        x_prefix = self._get_real_value(
            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
        )
        if x_prefix:
            environ["SCRIPT_NAME"] = x_prefix
        return self.app(environ, start_response)

View file

@ -0,0 +1,320 @@
"""
Serve Shared Static Files
=========================
.. autoclass:: SharedDataMiddleware
:members: is_allowed
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import mimetypes
import os
import pkgutil
import posixpath
import typing as t
from datetime import datetime
from datetime import timezone
from io import BytesIO
from time import time
from zlib import adler32
from ..filesystem import get_filesystem_encoding
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
from ..utils import get_content_type
from ..wsgi import get_path_info
from ..wsgi import wrap_file
_TOpener = t.Callable[[], t.Tuple[t.IO[bytes], datetime, int]]
_TLoader = t.Callable[[t.Optional[str]], t.Tuple[t.Optional[str], t.Optional[_TOpener]]]
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class SharedDataMiddleware:
    """A WSGI middleware which provides static content for development
    environments or simple server setups. Its usage is quite simple::
        import os
        from werkzeug.middleware.shared_data import SharedDataMiddleware
        app = SharedDataMiddleware(app, {
            '/shared': os.path.join(os.path.dirname(__file__), 'shared')
        })
    The contents of the folder ``./shared`` will now be available on
    ``http://example.com/shared/``. This is pretty useful during development
    because a standalone media server is not required. Files can also be
    mounted on the root folder and still continue to use the application because
    the shared data middleware forwards all unhandled requests to the
    application, even if the requests are below one of the shared folders.
    If `pkg_resources` is available you can also tell the middleware to serve
    files from package data::
        app = SharedDataMiddleware(app, {
            '/static': ('myapplication', 'static')
        })
    This will then serve the ``static`` folder in the `myapplication`
    Python package.
    The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
    rules for files that are not accessible from the web. If `cache` is set to
    `False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
    encoding on the file system happens to match the encoding of the URI it may
    work but this could also be by accident. We strongly suggest using ASCII
    only file names for static files.
    The middleware will guess the mimetype using the Python `mimetype`
    module.  If it's unable to figure out the charset it will fall back
    to `fallback_mimetype`.
    :param app: the application to wrap.  If you don't want to wrap an
                application you can pass it :exc:`NotFound`.
    :param exports: a list or dict of exported files and folders.
    :param disallow: a list of :func:`~fnmatch.fnmatch` rules.
    :param cache: enable or disable caching headers.
    :param cache_timeout: the cache timeout in seconds for the headers.
    :param fallback_mimetype: The fallback mimetype for unknown files.
    .. versionchanged:: 1.0
        The default ``fallback_mimetype`` is
        ``application/octet-stream``. If a filename looks like a text
        mimetype, the ``utf-8`` charset is added to it.
    .. versionadded:: 0.6
        Added ``fallback_mimetype``.
    .. versionchanged:: 0.5
        Added ``cache_timeout``.
    """
    def __init__(
        self,
        app: "WSGIApplication",
        exports: t.Union[
            t.Dict[str, t.Union[str, t.Tuple[str, str]]],
            t.Iterable[t.Tuple[str, t.Union[str, t.Tuple[str, str]]]],
        ],
        disallow: None = None,
        cache: bool = True,
        cache_timeout: int = 60 * 60 * 12,
        fallback_mimetype: str = "application/octet-stream",
    ) -> None:
        self.app = app
        # List of (url_prefix, loader) pairs checked in order per request.
        self.exports: t.List[t.Tuple[str, _TLoader]] = []
        self.cache = cache
        self.cache_timeout = cache_timeout
        if isinstance(exports, dict):
            exports = exports.items()
        for key, value in exports:
            # A tuple export means (package, path): serve package data.
            if isinstance(value, tuple):
                loader = self.get_package_loader(*value)
            elif isinstance(value, str):
                # A string export is a single file or a whole directory.
                if os.path.isfile(value):
                    loader = self.get_file_loader(value)
                else:
                    loader = self.get_directory_loader(value)
            else:
                raise TypeError(f"unknown def {value!r}")
            self.exports.append((key, loader))
        if disallow is not None:
            from fnmatch import fnmatch
            # Shadow the is_allowed method with a fnmatch-based check.
            self.is_allowed = lambda x: not fnmatch(x, disallow)
        self.fallback_mimetype = fallback_mimetype
    def is_allowed(self, filename: str) -> bool:
        """Subclasses can override this method to disallow the access to
        certain files.  However by providing `disallow` in the constructor
        this method is overwritten.
        """
        return True
    def _opener(self, filename: str) -> _TOpener:
        # Lazily open the file; nothing is touched until it is served.
        # Returns (open binary file, mtime as aware UTC datetime, size).
        return lambda: (
            open(filename, "rb"),
            datetime.fromtimestamp(os.path.getmtime(filename), tz=timezone.utc),
            int(os.path.getsize(filename)),
        )
    def get_file_loader(self, filename: str) -> _TLoader:
        """Return a loader that always serves the single given file."""
        return lambda x: (os.path.basename(filename), self._opener(filename))
    def get_package_loader(self, package: str, package_path: str) -> _TLoader:
        """Return a loader serving files from ``package_path`` inside
        the given Python package, preferring the import system's
        resource reader when the package's loader provides one.
        """
        # Fallback mtime for in-memory resources that have no file.
        load_time = datetime.now(timezone.utc)
        provider = pkgutil.get_loader(package)
        if hasattr(provider, "get_resource_reader"):
            # Python 3
            reader = provider.get_resource_reader(package)  # type: ignore
            def loader(
                path: t.Optional[str],
            ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
                if path is None:
                    return None, None
                # safe_join returns None for paths escaping the base.
                path = safe_join(package_path, path)
                if path is None:
                    return None, None
                basename = posixpath.basename(path)
                try:
                    resource = reader.open_resource(path)
                except OSError:
                    return None, None
                if isinstance(resource, BytesIO):
                    # Zipped package data: no file on disk, use load_time.
                    return (
                        basename,
                        lambda: (resource, load_time, len(resource.getvalue())),
                    )
                return (
                    basename,
                    lambda: (
                        resource,
                        datetime.fromtimestamp(
                            os.path.getmtime(resource.name), tz=timezone.utc
                        ),
                        os.path.getsize(resource.name),
                    ),
                )
        else:
            # Python 3.6
            package_filename = provider.get_filename(package)  # type: ignore
            is_filesystem = os.path.exists(package_filename)
            root = os.path.join(os.path.dirname(package_filename), package_path)
            def loader(
                path: t.Optional[str],
            ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
                if path is None:
                    return None, None
                path = safe_join(root, path)
                if path is None:
                    return None, None
                basename = posixpath.basename(path)
                if is_filesystem:
                    if not os.path.isfile(path):
                        return None, None
                    return basename, self._opener(path)
                try:
                    data = provider.get_data(path)  # type: ignore
                except OSError:
                    return None, None
                return basename, lambda: (BytesIO(data), load_time, len(data))
        return loader
    def get_directory_loader(self, directory: str) -> _TLoader:
        """Return a loader that serves files from the given directory."""
        def loader(
            path: t.Optional[str],
        ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
            if path is not None:
                path = safe_join(directory, path)
                if path is None:
                    return None, None
            else:
                # No sub-path: the export itself points at the directory.
                path = directory
            if os.path.isfile(path):
                return os.path.basename(path), self._opener(path)
            return None, None
        return loader
    def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
        """Build an etag string from mtime, size, and a checksum of the
        file name."""
        if not isinstance(real_filename, bytes):
            real_filename = real_filename.encode(  # type: ignore
                get_filesystem_encoding()
            )
        timestamp = mtime.timestamp()
        checksum = adler32(real_filename) & 0xFFFFFFFF  # type: ignore
        return f"wzsdm-{timestamp}-{file_size}-{checksum}"
    def __call__(
        self, environ: "WSGIEnvironment", start_response: "StartResponse"
    ) -> t.Iterable[bytes]:
        """Serve a matching exported file, or forward the request to the
        wrapped application when no export matches or access is denied.
        """
        path = get_path_info(environ)
        file_loader = None
        for search_path, loader in self.exports:
            # Exact match first (e.g. a single-file export).
            if search_path == path:
                real_filename, file_loader = loader(None)
                if file_loader is not None:
                    break
            if not search_path.endswith("/"):
                search_path += "/"
            # Prefix match: pass the remainder of the path to the loader.
            if path.startswith(search_path):
                real_filename, file_loader = loader(path[len(search_path) :])
                if file_loader is not None:
                    break
        if file_loader is None or not self.is_allowed(real_filename):  # type: ignore
            return self.app(environ, start_response)
        guessed_type = mimetypes.guess_type(real_filename)  # type: ignore
        mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8")
        f, mtime, file_size = file_loader()
        headers = [("Date", http_date())]
        if self.cache:
            timeout = self.cache_timeout
            etag = self.generate_etag(mtime, file_size, real_filename)  # type: ignore
            headers += [
                ("Etag", f'"{etag}"'),
                ("Cache-Control", f"max-age={timeout}, public"),
            ]
            # Answer 304 without a body when the client's cache is current.
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                start_response("304 Not Modified", headers)
                return []
            headers.append(("Expires", http_date(time() + timeout)))
        else:
            headers.append(("Cache-Control", "public"))
        headers.extend(
            (
                ("Content-Type", mime_type),
                ("Content-Length", str(file_size)),
                ("Last-Modified", http_date(mtime)),
            )
        )
        start_response("200 OK", headers)
        # wrap_file uses wsgi.file_wrapper for efficient file streaming.
        return wrap_file(environ, f)

View file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,260 @@
import re
from dataclasses import dataclass
from enum import auto
from enum import Enum
from typing import cast
from typing import List
from typing import Optional
from typing import Tuple
from .._internal import _to_bytes
from .._internal import _to_str
from ..datastructures import Headers
from ..exceptions import RequestEntityTooLarge
from ..http import parse_options_header
class Event:
    """Base class for all events produced/consumed by the multipart
    decoder and encoder."""
    pass
@dataclass(frozen=True)
class Preamble(Event):
    """Bytes that appeared before the first multipart boundary."""
    data: bytes
@dataclass(frozen=True)
class Field(Event):
    """Start of a form field part (Content-Disposition without a
    ``filename`` parameter)."""
    name: str
    headers: Headers
@dataclass(frozen=True)
class File(Event):
    """Start of an uploaded file part (Content-Disposition with a
    ``filename`` parameter)."""
    name: str
    filename: str
    headers: Headers
@dataclass(frozen=True)
class Data(Event):
    """A chunk of part body data. ``more_data`` is True while further
    chunks for the same part are still expected."""
    data: bytes
    more_data: bool
@dataclass(frozen=True)
class Epilogue(Event):
    """Bytes that appeared after the closing multipart boundary."""
    data: bytes
class NeedData(Event):
    """Sentinel event: the decoder needs more input before it can make
    progress. Use the shared ``NEED_DATA`` instance."""
    pass
NEED_DATA = NeedData()
class State(Enum):
    """Position of the decoder/encoder within the multipart message."""
    PREAMBLE = auto()
    PART = auto()
    DATA = auto()
    EPILOGUE = auto()
    COMPLETE = auto()
# Multipart line breaks MUST be CRLF (\r\n) by RFC-7578, except that
# many implementations break this and either use CR or LF alone.
LINE_BREAK = b"(?:\r\n|\n|\r)"
BLANK_LINE_RE = re.compile(b"(?:\r\n\r\n|\r\r|\n\n)", re.MULTILINE)
LINE_BREAK_RE = re.compile(LINE_BREAK, re.MULTILINE)
# Header values can be continued via a space or tab after the linebreak, as
# per RFC2231
HEADER_CONTINUATION_RE = re.compile(b"%s[ \t]" % LINE_BREAK, re.MULTILINE)
class MultipartDecoder:
    """Decodes a multipart message as bytes into Python events.
    The part data is returned as available to allow the caller to save
    the data from memory to disk, if desired.
    """
    def __init__(
        self,
        boundary: bytes,
        max_form_memory_size: Optional[int] = None,
    ) -> None:
        # Raw received bytes not yet turned into events.
        self.buffer = bytearray()
        # Set once receive_data(None) signals the end of input.
        self.complete = False
        # Optional cap on buffered bytes; exceeding it raises
        # RequestEntityTooLarge in receive_data.
        self.max_form_memory_size = max_form_memory_size
        self.state = State.PREAMBLE
        self.boundary = boundary
        # Note in the below \h i.e. horizontal whitespace is used
        # as [^\S\n\r] as \h isn't supported in python.
        # The preamble must end with a boundary where the boundary is
        # prefixed by a line break, RFC2046. Except that many
        # implementations including Werkzeug's tests omit the line
        # break prefix. In addition the first boundary could be the
        # epilogue boundary (for empty form-data) hence the matching
        # group to understand if it is an epilogue boundary.
        self.preamble_re = re.compile(
            rb"%s?--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
        # A boundary must include a line break prefix and suffix, and
        # may include trailing whitespace. In addition the boundary
        # could be the epilogue boundary hence the matching group to
        # understand if it is an epilogue boundary.
        self.boundary_re = re.compile(
            rb"%s--%s(--[^\S\n\r]*%s?|[^\S\n\r]*%s)"
            % (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
            re.MULTILINE,
        )
    def last_newline(self) -> int:
        """Return the index of the earlier of the last ``\\n`` and the
        last ``\\r`` in the buffer, falling back to ``len(self.buffer)``
        for whichever is absent. Used so a possible partial boundary
        (which starts with a line break) stays in the buffer while the
        bytes before it can be emitted as data.
        """
        try:
            last_nl = self.buffer.rindex(b"\n")
        except ValueError:
            last_nl = len(self.buffer)
        try:
            last_cr = self.buffer.rindex(b"\r")
        except ValueError:
            last_cr = len(self.buffer)
        return min(last_nl, last_cr)
    def receive_data(self, data: Optional[bytes]) -> None:
        """Feed more raw bytes to the decoder; ``None`` marks the end of
        input. Raises :exc:`RequestEntityTooLarge` when buffering would
        exceed ``max_form_memory_size``.
        """
        if data is None:
            self.complete = True
        elif (
            self.max_form_memory_size is not None
            and len(self.buffer) + len(data) > self.max_form_memory_size
        ):
            raise RequestEntityTooLarge()
        else:
            self.buffer.extend(data)
    def next_event(self) -> Event:
        """Return the next parse event, or ``NEED_DATA`` when the buffer
        does not yet contain enough bytes to make progress. Raises
        :exc:`ValueError` when input ended mid-message.
        """
        event: Event = NEED_DATA
        if self.state == State.PREAMBLE:
            # Everything before the first boundary is the preamble.
            match = self.preamble_re.search(self.buffer)
            if match is not None:
                # "--" in the matched group marks the closing boundary.
                if match.group(1).startswith(b"--"):
                    self.state = State.EPILOGUE
                else:
                    self.state = State.PART
                data = bytes(self.buffer[: match.start()])
                del self.buffer[: match.end()]
                event = Preamble(data=data)
        elif self.state == State.PART:
            # Part headers end at the first blank line.
            match = BLANK_LINE_RE.search(self.buffer)
            if match is not None:
                headers = self._parse_headers(self.buffer[: match.start()])
                del self.buffer[: match.end()]
                if "content-disposition" not in headers:
                    raise ValueError("Missing Content-Disposition header")
                disposition, extra = parse_options_header(
                    headers["content-disposition"]
                )
                name = cast(str, extra.get("name"))
                filename = extra.get("filename")
                # A filename parameter distinguishes a File from a Field.
                if filename is not None:
                    event = File(
                        filename=filename,
                        headers=headers,
                        name=name,
                    )
                else:
                    event = Field(
                        headers=headers,
                        name=name,
                    )
                self.state = State.DATA
        elif self.state == State.DATA:
            if self.buffer.find(b"--" + self.boundary) == -1:
                # No complete boundary in the buffer, but there may be
                # a partial boundary at the end. As the boundary
                # starts with either a nl or cr find the earliest and
                # return up to that as data.
                data_length = del_index = self.last_newline()
                more_data = True
            else:
                match = self.boundary_re.search(self.buffer)
                if match is not None:
                    if match.group(1).startswith(b"--"):
                        self.state = State.EPILOGUE
                    else:
                        self.state = State.PART
                    data_length = match.start()
                    del_index = match.end()
                else:
                    # Boundary bytes present but without the required
                    # line-break framing yet; keep waiting.
                    data_length = del_index = self.last_newline()
                more_data = match is None
            data = bytes(self.buffer[:data_length])
            del self.buffer[:del_index]
            # Skip empty intermediate Data events, but always emit the
            # final (more_data=False) one.
            if data or not more_data:
                event = Data(data=data, more_data=more_data)
        elif self.state == State.EPILOGUE and self.complete:
            # Whatever remains after the closing boundary is the epilogue.
            event = Epilogue(data=bytes(self.buffer))
            del self.buffer[:]
            self.state = State.COMPLETE
        if self.complete and isinstance(event, NeedData):
            raise ValueError(f"Invalid form-data cannot parse beyond {self.state}")
        return event
    def _parse_headers(self, data: bytes) -> Headers:
        """Parse the raw header bytes of one part into a Headers object."""
        headers: List[Tuple[str, str]] = []
        # Merge the continued headers into one line
        data = HEADER_CONTINUATION_RE.sub(b" ", data)
        # Now there is one header per line
        for line in data.splitlines():
            if line.strip() != b"":
                name, value = _to_str(line).strip().split(":", 1)
                headers.append((name.strip(), value.strip()))
        return Headers(headers)
class MultipartEncoder:
    """Encodes multipart events back into the corresponding bytes,
    tracking the message state so events arriving out of order are
    rejected with a :exc:`ValueError`."""
    def __init__(self, boundary: bytes) -> None:
        self.boundary = boundary
        self.state = State.PREAMBLE
    def send_event(self, event: Event) -> bytes:
        # Preamble bytes may only appear first.
        if isinstance(event, Preamble) and self.state == State.PREAMBLE:
            self.state = State.PART
            return event.data
        # A new part may start after the preamble or a previous part.
        part_states = {State.PREAMBLE, State.PART, State.DATA}
        if isinstance(event, (Field, File)) and self.state in part_states:
            self.state = State.DATA
            pieces = [b"\r\n--" + self.boundary + b"\r\n"]
            disposition = b'Content-Disposition: form-data; name="%s"' % _to_bytes(
                event.name
            )
            if isinstance(event, File):
                disposition += b'; filename="%s"' % _to_bytes(event.filename)
            pieces.append(disposition + b"\r\n")
            # Emit the remaining headers; the disposition was written above.
            for header_name, header_value in cast(Field, event).headers:
                if header_name.lower() != "content-disposition":
                    pieces.append(_to_bytes(f"{header_name}: {header_value}\r\n"))
            # Blank line separates the part headers from the body.
            pieces.append(b"\r\n")
            return b"".join(pieces)
        if isinstance(event, Data) and self.state == State.DATA:
            return event.data
        # The closing boundary plus any trailing epilogue bytes.
        if isinstance(event, Epilogue):
            self.state = State.COMPLETE
            return b"\r\n--" + self.boundary + b"--\r\n" + event.data
        raise ValueError(f"Cannot generate {event} in state: {self.state}")

View file

@ -0,0 +1,548 @@
import typing as t
from datetime import datetime
from .._internal import _to_str
from ..datastructures import Accept
from ..datastructures import Authorization
from ..datastructures import CharsetAccept
from ..datastructures import ETags
from ..datastructures import Headers
from ..datastructures import HeaderSet
from ..datastructures import IfRange
from ..datastructures import ImmutableList
from ..datastructures import ImmutableMultiDict
from ..datastructures import LanguageAccept
from ..datastructures import MIMEAccept
from ..datastructures import MultiDict
from ..datastructures import Range
from ..datastructures import RequestCacheControl
from ..http import parse_accept_header
from ..http import parse_authorization_header
from ..http import parse_cache_control_header
from ..http import parse_cookie
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
from ..http import parse_list_header
from ..http import parse_options_header
from ..http import parse_range_header
from ..http import parse_set_header
from ..urls import url_decode
from ..user_agent import UserAgent
from ..useragents import _UserAgent as _DeprecatedUserAgent
from ..utils import cached_property
from ..utils import header_property
from .utils import get_current_url
from .utils import get_host
class Request:
"""Represents the non-IO parts of a HTTP request, including the
method, URL info, and headers.
This class is not meant for general use. It should only be used when
implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
provides a WSGI implementation at :cls:`werkzeug.wrappers.Request`.
:param method: The method the request was made with, such as
``GET``.
:param scheme: The URL scheme of the protocol the request used, such
as ``https`` or ``wss``.
:param server: The address of the server. ``(host, port)``,
``(path, None)`` for unix sockets, or ``None`` if not known.
:param root_path: The prefix that the application is mounted under.
This is prepended to generated URLs, but is not part of route
matching.
:param path: The path part of the URL after ``root_path``.
:param query_string: The part of the URL after the "?".
:param headers: The headers received with the request.
:param remote_addr: The address of the client sending the request.
.. versionadded:: 2.0
"""
#: The charset used to decode most data in the request.
charset = "utf-8"
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = "replace"
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: The type to be used for dict values from the incoming WSGI
#: environment. (For example for :attr:`cookies`.) By default an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used.
#:
#: .. versionchanged:: 1.0.0
#: Changed to ``ImmutableMultiDict`` to support multiple values.
#:
#: .. versionadded:: 0.6
dict_storage_class: t.Type[MultiDict] = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class: t.Type[t.List] = ImmutableList
user_agent_class: t.Type[UserAgent] = _DeprecatedUserAgent
"""The class used and returned by the :attr:`user_agent` property to
parse the header. Defaults to
:class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
extension can provide a subclass that uses a parser to provide other
data.
.. versionadded:: 2.0
"""
#: Valid host names when handling requests. By default all hosts are
#: trusted, which means that whatever the client says the host is
#: will be accepted.
#:
#: Because ``Host`` and ``X-Forwarded-Host`` headers can be set to
#: any value by a malicious client, it is recommended to either set
#: this property or implement similar validation in the proxy (if
#: the application is being run behind one).
#:
#: .. versionadded:: 0.9
trusted_hosts: t.Optional[t.List[str]] = None
def __init__(
    self,
    method: str,
    scheme: str,
    server: t.Optional[t.Tuple[str, t.Optional[int]]],
    root_path: str,
    path: str,
    query_string: bytes,
    headers: Headers,
    remote_addr: t.Optional[str],
) -> None:
    """Store already-parsed request values, normalizing as needed.

    :param method: the HTTP method; stored uppercased.
    :param scheme: the URL scheme, such as ``https`` or ``wss``.
    :param server: ``(host, port)``, ``(path, None)`` for unix
        sockets, or ``None`` if not known.
    :param root_path: the prefix the app is mounted under; any
        trailing slash is stripped.
    :param path: the URL path; normalized to start with one ``/``.
    :param query_string: the raw query string as bytes.
    :param headers: the request headers.
    :param remote_addr: the address of the client, if known.
    """
    #: The method the request was made with, such as ``GET``.
    self.method = method.upper()
    #: The URL scheme of the protocol the request used, such as
    #: ``https`` or ``wss``.
    self.scheme = scheme
    #: The address of the server. ``(host, port)``, ``(path, None)``
    #: for unix sockets, or ``None`` if not known.
    self.server = server
    #: The prefix that the application is mounted under, without a
    #: trailing slash. :attr:`path` comes after this.
    self.root_path = root_path.rstrip("/")
    #: The path part of the URL after :attr:`root_path`. This is the
    #: path used for routing within the application.
    self.path = "/" + path.lstrip("/")
    #: The part of the URL after the "?". This is the raw value, use
    #: :attr:`args` for the parsed values.
    self.query_string = query_string
    #: The headers received with the request.
    self.headers = headers
    #: The address of the client sending the request.
    self.remote_addr = remote_addr
def __repr__(self) -> str:
    """Show the request URL and method, tolerating invalid URLs."""
    try:
        target = self.url
    except Exception as exc:
        # ``url`` may raise (e.g. host validation fails); keep the
        # repr usable for debugging instead of propagating.
        target = f"(invalid URL: {exc})"
    name = type(self).__name__
    return f"<{name} {target!r} [{self.method}]>"
@property
def url_charset(self) -> str:
    """The charset that is assumed for URLs. Defaults to the value
    of :attr:`charset`.

    .. versionadded:: 0.6
    """
    # Simply mirrors ``charset``; subclasses may override to decode
    # URLs with a different encoding than the body.
    return self.charset
@cached_property
def args(self) -> "MultiDict[str, str]":
    """The parsed URL parameters (the part in the URL after the question
    mark).

    By default an
    :class:`~werkzeug.datastructures.ImmutableMultiDict`
    is returned from this function. This can be changed by setting
    :attr:`parameter_storage_class` to a different type. This might
    be necessary if the order of the form data is important.
    """
    # Decode the raw bytes using the URL charset; the result type is
    # controlled by ``parameter_storage_class``.
    return url_decode(
        self.query_string,
        self.url_charset,
        errors=self.encoding_errors,
        cls=self.parameter_storage_class,
    )
@cached_property
def access_route(self) -> t.List[str]:
    """List of IP addresses from the client to the last proxy,
    taken from ``X-Forwarded-For`` when present, otherwise the
    remote address alone.
    """
    forwarded = self.headers.get("X-Forwarded-For")
    if forwarded is not None:
        addresses = parse_list_header(forwarded)
        return self.list_storage_class(addresses)
    if self.remote_addr is not None:
        return self.list_storage_class([self.remote_addr])
    return self.list_storage_class()
@cached_property
def full_path(self) -> str:
    """Requested path joined with the query string by ``?``."""
    query = _to_str(self.query_string, self.url_charset)
    return f"{self.path}?{query}"
@property
def is_secure(self) -> bool:
    """``True`` if the request was made over a secure protocol
    (HTTPS or WSS).
    """
    return self.scheme == "https" or self.scheme == "wss"
@cached_property
def url(self) -> str:
    """The full request URL with the scheme, host, root path, path,
    and query string."""
    return get_current_url(
        self.scheme, self.host, self.root_path, self.path, self.query_string
    )

@cached_property
def base_url(self) -> str:
    """Like :attr:`url` but without the query string."""
    return get_current_url(self.scheme, self.host, self.root_path, self.path)

@cached_property
def root_url(self) -> str:
    """The request URL scheme, host, and root path. This is the root
    that the application is accessed from.
    """
    return get_current_url(self.scheme, self.host, self.root_path)

@cached_property
def host_url(self) -> str:
    """The request URL scheme and host only."""
    return get_current_url(self.scheme, self.host)

@cached_property
def host(self) -> str:
    """The host name the request was made to, including the port if
    it's non-standard. Validated with :attr:`trusted_hosts`.
    """
    # ``get_host`` prefers the Host header and falls back to the
    # server address; it raises if the host is not trusted.
    return get_host(
        self.scheme, self.headers.get("host"), self.server, self.trusted_hosts
    )
@cached_property
def cookies(self) -> "ImmutableMultiDict[str, str]":
    """A :class:`dict` with the contents of all cookies transmitted with
    the request."""
    # Multiple Cookie headers are joined so they parse as one header.
    wsgi_combined_cookie = ";".join(self.headers.getlist("Cookie"))
    return parse_cookie(  # type: ignore
        wsgi_combined_cookie,
        self.charset,
        self.encoding_errors,
        cls=self.dict_storage_class,
    )
# Common Descriptors
content_type = header_property[str](
"Content-Type",
doc="""The Content-Type entity-header field indicates the media
type of the entity-body sent to the recipient or, in the case of
the HEAD method, the media type that would have been sent had
the request been a GET.""",
read_only=True,
)
@cached_property
def content_length(self) -> t.Optional[int]:
    """The Content-Length entity-header field indicates the size of
    the entity-body in bytes or, in the case of the HEAD method, the
    size of the entity-body that would have been sent had the
    request been a GET. ``None`` if chunked, absent, or invalid.
    """
    # A chunked body has no meaningful Content-Length.
    if self.headers.get("Transfer-Encoding", "") == "chunked":
        return None
    value = self.headers.get("Content-Length")
    if value is None:
        return None
    try:
        length = int(value)
    except (ValueError, TypeError):
        # Malformed header values are ignored rather than raised.
        return None
    return max(0, length)
content_encoding = header_property[str](
"Content-Encoding",
doc="""The Content-Encoding entity-header field is used as a
modifier to the media-type. When present, its value indicates
what additional content codings have been applied to the
entity-body, and thus what decoding mechanisms must be applied
in order to obtain the media-type referenced by the Content-Type
header field.
.. versionadded:: 0.9""",
read_only=True,
)
content_md5 = header_property[str](
"Content-MD5",
doc="""The Content-MD5 entity-header field, as defined in
RFC 1864, is an MD5 digest of the entity-body for the purpose of
providing an end-to-end message integrity check (MIC) of the
entity-body. (Note: a MIC is good for detecting accidental
modification of the entity-body in transit, but is not proof
against malicious attacks.)
.. versionadded:: 0.9""",
read_only=True,
)
referrer = header_property[str](
"Referer",
doc="""The Referer[sic] request-header field allows the client
to specify, for the server's benefit, the address (URI) of the
resource from which the Request-URI was obtained (the
"referrer", although the header field is misspelled).""",
read_only=True,
)
date = header_property(
"Date",
None,
parse_date,
doc="""The Date general-header field represents the date and
time at which the message was originated, having the same
semantics as orig-date in RFC 822.
.. versionchanged:: 2.0
The datetime object is timezone-aware.
""",
read_only=True,
)
max_forwards = header_property(
"Max-Forwards",
None,
int,
doc="""The Max-Forwards request-header field provides a
mechanism with the TRACE and OPTIONS methods to limit the number
of proxies or gateways that can forward the request to the next
inbound server.""",
read_only=True,
)
def _parse_content_type(self) -> None:
    """Parse and cache the ``Content-Type`` header on first use."""
    if hasattr(self, "_parsed_content_type"):
        # Already parsed; the cached tuple is reused.
        return
    raw = self.headers.get("Content-Type", "")
    self._parsed_content_type = parse_options_header(raw)
@property
def mimetype(self) -> str:
    """The content type without parameters, lowercased. For example
    if the content type is ``text/HTML; charset=utf-8`` the mimetype
    would be ``'text/html'``.
    """
    self._parse_content_type()
    parsed = self._parsed_content_type
    return parsed[0].lower()
@property
def mimetype_params(self) -> t.Dict[str, str]:
    """The mimetype parameters as dict. For example if the content
    type is ``text/html; charset=utf-8`` the params would be
    ``{'charset': 'utf-8'}``.
    """
    # Parse lazily and return the options part of the cached tuple.
    self._parse_content_type()
    return self._parsed_content_type[1]
@cached_property
def pragma(self) -> HeaderSet:
    """The Pragma general-header field is used to include
    implementation-specific directives that might apply to any recipient
    along the request/response chain. All pragma directives specify
    optional behavior from the viewpoint of the protocol; however, some
    systems MAY require that behavior be consistent with the directives.
    """
    return parse_set_header(self.headers.get("Pragma", ""))

# Accept

@cached_property
def accept_mimetypes(self) -> MIMEAccept:
    """List of mimetypes this client supports as
    :class:`~werkzeug.datastructures.MIMEAccept` object.
    """
    return parse_accept_header(self.headers.get("Accept"), MIMEAccept)

@cached_property
def accept_charsets(self) -> CharsetAccept:
    """List of charsets this client supports as
    :class:`~werkzeug.datastructures.CharsetAccept` object.
    """
    return parse_accept_header(self.headers.get("Accept-Charset"), CharsetAccept)

@cached_property
def accept_encodings(self) -> Accept:
    """List of encodings this client accepts. Encodings in a HTTP term
    are compression encodings such as gzip. For charsets have a look at
    :attr:`accept_charset`.
    """
    return parse_accept_header(self.headers.get("Accept-Encoding"))

@cached_property
def accept_languages(self) -> LanguageAccept:
    """List of languages this client accepts as
    :class:`~werkzeug.datastructures.LanguageAccept` object.

    .. versionchanged:: 0.5
        In previous versions this was a regular
        :class:`~werkzeug.datastructures.Accept` object.
    """
    return parse_accept_header(self.headers.get("Accept-Language"), LanguageAccept)
# ETag

@cached_property
def cache_control(self) -> RequestCacheControl:
    """A :class:`~werkzeug.datastructures.RequestCacheControl` object
    for the incoming cache control headers.
    """
    cache_control = self.headers.get("Cache-Control")
    return parse_cache_control_header(cache_control, None, RequestCacheControl)

@cached_property
def if_match(self) -> ETags:
    """An object containing all the etags in the `If-Match` header.

    :rtype: :class:`~werkzeug.datastructures.ETags`
    """
    return parse_etags(self.headers.get("If-Match"))

@cached_property
def if_none_match(self) -> ETags:
    """An object containing all the etags in the `If-None-Match` header.

    :rtype: :class:`~werkzeug.datastructures.ETags`
    """
    return parse_etags(self.headers.get("If-None-Match"))

@cached_property
def if_modified_since(self) -> t.Optional[datetime]:
    """The parsed `If-Modified-Since` header as a datetime object.

    .. versionchanged:: 2.0
        The datetime object is timezone-aware.
    """
    return parse_date(self.headers.get("If-Modified-Since"))

@cached_property
def if_unmodified_since(self) -> t.Optional[datetime]:
    """The parsed `If-Unmodified-Since` header as a datetime object.

    .. versionchanged:: 2.0
        The datetime object is timezone-aware.
    """
    return parse_date(self.headers.get("If-Unmodified-Since"))

@cached_property
def if_range(self) -> IfRange:
    """The parsed ``If-Range`` header.

    .. versionchanged:: 2.0
        ``IfRange.date`` is timezone-aware.

    .. versionadded:: 0.7
    """
    return parse_if_range_header(self.headers.get("If-Range"))

@cached_property
def range(self) -> t.Optional[Range]:
    """The parsed `Range` header.

    .. versionadded:: 0.7

    :rtype: :class:`~werkzeug.datastructures.Range`
    """
    return parse_range_header(self.headers.get("Range"))
# User Agent

@cached_property
def user_agent(self) -> UserAgent:
    """The user agent. Use ``user_agent.string`` to get the header
    value. Set :attr:`user_agent_class` to a subclass of
    :class:`~werkzeug.user_agent.UserAgent` to provide parsing for
    the other properties or other extended data.

    .. versionchanged:: 2.0
        The built in parser is deprecated and will be removed in
        Werkzeug 2.1. A ``UserAgent`` subclass must be set to parse
        data from the string.
    """
    return self.user_agent_class(self.headers.get("User-Agent", ""))

# Authorization

@cached_property
def authorization(self) -> t.Optional[Authorization]:
    """The `Authorization` object in parsed form."""
    return parse_authorization_header(self.headers.get("Authorization"))
# CORS
# These read-only descriptors expose the request side of the CORS
# protocol; the matching writable attributes live on the response.
origin = header_property[str](
    "Origin",
    doc=(
        "The host that the request originated from. Set"
        " :attr:`~CORSResponseMixin.access_control_allow_origin` on"
        " the response to indicate which origins are allowed."
    ),
    read_only=True,
)
access_control_request_headers = header_property(
    "Access-Control-Request-Headers",
    load_func=parse_set_header,
    doc=(
        "Sent with a preflight request to indicate which headers"
        " will be sent with the cross origin request. Set"
        " :attr:`~CORSResponseMixin.access_control_allow_headers`"
        " on the response to indicate which headers are allowed."
    ),
    read_only=True,
)
access_control_request_method = header_property[str](
    "Access-Control-Request-Method",
    doc=(
        "Sent with a preflight request to indicate which method"
        " will be used for the cross origin request. Set"
        " :attr:`~CORSResponseMixin.access_control_allow_methods`"
        " on the response to indicate which methods are allowed."
    ),
    read_only=True,
)
@property
def is_json(self) -> bool:
    """Check if the mimetype indicates JSON data, either
    :mimetype:`application/json` or :mimetype:`application/*+json`.
    """
    mt = self.mimetype
    if mt == "application/json":
        return True
    return mt.startswith("application/") and mt.endswith("+json")

View file

@ -0,0 +1,704 @@
import typing as t
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from http import HTTPStatus
from .._internal import _to_str
from ..datastructures import Headers
from ..datastructures import HeaderSet
from ..http import dump_cookie
from ..http import HTTP_STATUS_CODES
from ..utils import get_content_type
from werkzeug.datastructures import CallbackDict
from werkzeug.datastructures import ContentRange
from werkzeug.datastructures import ContentSecurityPolicy
from werkzeug.datastructures import ResponseCacheControl
from werkzeug.datastructures import WWWAuthenticate
from werkzeug.http import COEP
from werkzeug.http import COOP
from werkzeug.http import dump_age
from werkzeug.http import dump_header
from werkzeug.http import dump_options_header
from werkzeug.http import http_date
from werkzeug.http import parse_age
from werkzeug.http import parse_cache_control_header
from werkzeug.http import parse_content_range_header
from werkzeug.http import parse_csp_header
from werkzeug.http import parse_date
from werkzeug.http import parse_options_header
from werkzeug.http import parse_set_header
from werkzeug.http import parse_www_authenticate_header
from werkzeug.http import quote_etag
from werkzeug.http import unquote_etag
from werkzeug.utils import header_property
def _set_property(name: str, doc: t.Optional[str] = None) -> property:
    """Build a property backed by a :class:`HeaderSet` stored in the
    named response header. Reading parses the header into a set that
    writes back on mutation; assigning a falsy value deletes the
    header, a string is stored verbatim, and any other value is
    serialized with ``dump_header``.
    """

    def _read(self: "Response") -> HeaderSet:
        def _sync(header_set: HeaderSet) -> None:
            # Mutations on the returned set are mirrored to the header.
            if not header_set and name in self.headers:
                del self.headers[name]
            elif header_set:
                self.headers[name] = header_set.to_header()

        return parse_set_header(self.headers.get(name), _sync)

    def _write(
        self: "Response",
        value: t.Optional[
            t.Union[str, t.Dict[str, t.Union[str, int]], t.Iterable[str]]
        ],
    ) -> None:
        if not value:
            del self.headers[name]
            return
        if isinstance(value, str):
            self.headers[name] = value
            return
        self.headers[name] = dump_header(value)

    return property(_read, _write, doc=doc)
class Response:
    """Represents the non-IO parts of an HTTP response, specifically the
    status and headers but not the body.

    This class is not meant for general use. It should only be used when
    implementing WSGI, ASGI, or another HTTP application spec. Werkzeug
    provides a WSGI implementation at :class:`werkzeug.wrappers.Response`.

    :param status: The status code for the response. Either an int, in
        which case the default status message is added, or a string in
        the form ``{code} {message}``, like ``404 Not Found``. Defaults
        to 200.
    :param headers: A :class:`~werkzeug.datastructures.Headers` object,
        or a list of ``(key, value)`` tuples that will be converted to a
        ``Headers`` object.
    :param mimetype: The mime type (content type without charset or
        other parameters) of the response. If the value starts with
        ``text/`` (or matches some other special cases), the charset
        will be added to create the ``content_type``.
    :param content_type: The full content type of the response.
        Overrides building the value from ``mimetype``.

    .. versionadded:: 2.0
    """

    #: the charset of the response.
    charset = "utf-8"
    #: the default status if none is provided.
    default_status = 200
    #: the default mimetype if none is provided.
    default_mimetype = "text/plain"
    #: Warn if a cookie header exceeds this size. The default, 4093, should be
    #: safely `supported by most browsers <cookie_>`_. A cookie larger than
    #: this size will still be sent, but it may be ignored or handled
    #: incorrectly by some browsers. Set to 0 to disable this check.
    #:
    #: .. versionadded:: 0.13
    #:
    #: .. _`cookie`: http://browsercookielimits.squawky.net/
    max_cookie_size = 4093

    # A :class:`Headers` object representing the response headers.
    headers: Headers

    def __init__(
        self,
        status: t.Optional[t.Union[int, str, HTTPStatus]] = None,
        headers: t.Optional[
            t.Union[
                t.Mapping[str, t.Union[str, int, t.Iterable[t.Union[str, int]]]],
                t.Iterable[t.Tuple[str, t.Union[str, int]]],
            ]
        ] = None,
        mimetype: t.Optional[str] = None,
        content_type: t.Optional[str] = None,
    ) -> None:
        # Reuse a Headers instance as-is; copy/convert anything else.
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)
        if content_type is None:
            # Derive the content type from the mimetype (or the class
            # default) only when the caller didn't set one explicitly.
            if mimetype is None and "content-type" not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers["Content-Type"] = content_type
        if status is None:
            status = self.default_status
        # Goes through the ``status`` setter, which normalizes the value.
        self.status = status  # type: ignore

    def __repr__(self) -> str:
        return f"<{type(self).__name__} [{self.status}]>"

    @property
    def status_code(self) -> int:
        """The HTTP status code as a number."""
        return self._status_code

    @status_code.setter
    def status_code(self, code: int) -> None:
        # Delegate to the ``status`` setter so both stay in sync.
        self.status = code  # type: ignore

    @property
    def status(self) -> str:
        """The HTTP status code as a string."""
        return self._status

    @status.setter
    def status(self, value: t.Union[str, int, HTTPStatus]) -> None:
        if not isinstance(value, (str, bytes, int, HTTPStatus)):
            raise TypeError("Invalid status argument")
        # ``_clean_status`` returns the normalized pair kept in sync.
        self._status, self._status_code = self._clean_status(value)

    def _clean_status(self, value: t.Union[str, int, HTTPStatus]) -> t.Tuple[str, int]:
        """Normalize a status value to a ``(status_line, code)`` pair.

        A bare code gets its standard reason phrase (or ``UNKNOWN``);
        a message without a leading code gets code ``0``.
        """
        if isinstance(value, HTTPStatus):
            value = int(value)
        status = _to_str(value, self.charset)
        split_status = status.split(None, 1)

        if len(split_status) == 0:
            raise ValueError("Empty status argument")

        if len(split_status) > 1:
            if split_status[0].isdigit():
                # code and message
                return status, int(split_status[0])

            # multi-word message
            return f"0 {status}", 0

        if split_status[0].isdigit():
            # code only
            status_code = int(split_status[0])

            try:
                status = f"{status_code} {HTTP_STATUS_CODES[status_code].upper()}"
            except KeyError:
                status = f"{status_code} UNKNOWN"

            return status, status_code

        # one-word message
        return f"0 {status}", 0

    def set_cookie(
        self,
        key: str,
        value: str = "",
        max_age: t.Optional[t.Union[timedelta, int]] = None,
        expires: t.Optional[t.Union[str, datetime, int, float]] = None,
        path: t.Optional[str] = "/",
        domain: t.Optional[str] = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: t.Optional[str] = None,
    ) -> None:
        """Sets a cookie.

        A warning is raised if the size of the cookie header exceeds
        :attr:`max_cookie_size`, but the header will still be set.

        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param secure: If ``True``, the cookie will only be available
            via HTTPS.
        :param httponly: Disallow JavaScript access to the cookie.
        :param samesite: Limit the scope of the cookie to only be
            attached to requests that are "same-site".
        """
        # ``add`` (not item assignment) allows multiple Set-Cookie headers.
        self.headers.add(
            "Set-Cookie",
            dump_cookie(
                key,
                value=value,
                max_age=max_age,
                expires=expires,
                path=path,
                domain=domain,
                secure=secure,
                httponly=httponly,
                charset=self.charset,
                max_size=self.max_cookie_size,
                samesite=samesite,
            ),
        )

    def delete_cookie(
        self,
        key: str,
        path: str = "/",
        domain: t.Optional[str] = None,
        secure: bool = False,
        httponly: bool = False,
        samesite: t.Optional[str] = None,
    ) -> None:
        """Delete a cookie. Fails silently if key doesn't exist.

        :param key: the key (name) of the cookie to be deleted.
        :param path: if the cookie that should be deleted was limited to a
                     path, the path has to be defined here.
        :param domain: if the cookie that should be deleted was limited to a
                       domain, that domain has to be defined here.
        :param secure: If ``True``, the cookie will only be available
            via HTTPS.
        :param httponly: Disallow JavaScript access to the cookie.
        :param samesite: Limit the scope of the cookie to only be
            attached to requests that are "same-site".
        """
        # Deleting is setting an already-expired, empty cookie.
        self.set_cookie(
            key,
            expires=0,
            max_age=0,
            path=path,
            domain=domain,
            secure=secure,
            httponly=httponly,
            samesite=samesite,
        )

    @property
    def is_json(self) -> bool:
        """Check if the mimetype indicates JSON data, either
        :mimetype:`application/json` or :mimetype:`application/*+json`.
        """
        mt = self.mimetype
        return mt is not None and (
            mt == "application/json"
            or mt.startswith("application/")
            and mt.endswith("+json")
        )

    # Common Descriptors

    @property
    def mimetype(self) -> t.Optional[str]:
        """The mimetype (content type without charset etc.)"""
        ct = self.headers.get("content-type")

        if ct:
            return ct.split(";")[0].strip()
        else:
            return None

    @mimetype.setter
    def mimetype(self, value: str) -> None:
        self.headers["Content-Type"] = get_content_type(value, self.charset)

    @property
    def mimetype_params(self) -> t.Dict[str, str]:
        """The mimetype parameters as dict. For example if the
        content type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.

        .. versionadded:: 0.5
        """

        def on_update(d: CallbackDict) -> None:
            # Rewrite the header whenever the returned dict is mutated.
            self.headers["Content-Type"] = dump_options_header(self.mimetype, d)

        d = parse_options_header(self.headers.get("content-type", ""))[1]
        return CallbackDict(d, on_update)

    location = header_property[str](
        "Location",
        doc="""The Location response-header field is used to redirect
        the recipient to a location other than the Request-URI for
        completion of the request or identification of a new
        resource.""",
    )
    age = header_property(
        "Age",
        None,
        parse_age,
        dump_age,  # type: ignore
        doc="""The Age response-header field conveys the sender's
        estimate of the amount of time since the response (or its
        revalidation) was generated at the origin server.

        Age values are non-negative decimal integers, representing time
        in seconds.""",
    )
    content_type = header_property[str](
        "Content-Type",
        doc="""The Content-Type entity-header field indicates the media
        type of the entity-body sent to the recipient or, in the case of
        the HEAD method, the media type that would have been sent had
        the request been a GET.""",
    )
    content_length = header_property(
        "Content-Length",
        None,
        int,
        str,
        doc="""The Content-Length entity-header field indicates the size
        of the entity-body, in decimal number of OCTETs, sent to the
        recipient or, in the case of the HEAD method, the size of the
        entity-body that would have been sent had the request been a
        GET.""",
    )
    content_location = header_property[str](
        "Content-Location",
        doc="""The Content-Location entity-header field MAY be used to
        supply the resource location for the entity enclosed in the
        message when that entity is accessible from a location separate
        from the requested resource's URI.""",
    )
    content_encoding = header_property[str](
        "Content-Encoding",
        doc="""The Content-Encoding entity-header field is used as a
        modifier to the media-type. When present, its value indicates
        what additional content codings have been applied to the
        entity-body, and thus what decoding mechanisms must be applied
        in order to obtain the media-type referenced by the Content-Type
        header field.""",
    )
    content_md5 = header_property[str](
        "Content-MD5",
        doc="""The Content-MD5 entity-header field, as defined in
        RFC 1864, is an MD5 digest of the entity-body for the purpose of
        providing an end-to-end message integrity check (MIC) of the
        entity-body. (Note: a MIC is good for detecting accidental
        modification of the entity-body in transit, but is not proof
        against malicious attacks.)""",
    )
    date = header_property(
        "Date",
        None,
        parse_date,
        http_date,
        doc="""The Date general-header field represents the date and
        time at which the message was originated, having the same
        semantics as orig-date in RFC 822.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    expires = header_property(
        "Expires",
        None,
        parse_date,
        http_date,
        doc="""The Expires entity-header field gives the date/time after
        which the response is considered stale. A stale cache entry may
        not normally be returned by a cache.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )
    last_modified = header_property(
        "Last-Modified",
        None,
        parse_date,
        http_date,
        doc="""The Last-Modified entity-header field indicates the date
        and time at which the origin server believes the variant was
        last modified.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """,
    )

    @property
    def retry_after(self) -> t.Optional[datetime]:
        """The Retry-After response-header field can be used with a
        503 (Service Unavailable) response to indicate how long the
        service is expected to be unavailable to the requesting client.

        Time in seconds until expiration or date.

        .. versionchanged:: 2.0
            The datetime object is timezone-aware.
        """
        value = self.headers.get("retry-after")
        if value is None:
            return None
        elif value.isdigit():
            # A bare number of seconds is converted to an absolute time.
            return datetime.now(timezone.utc) + timedelta(seconds=int(value))
        return parse_date(value)

    @retry_after.setter
    def retry_after(self, value: t.Optional[t.Union[datetime, int, str]]) -> None:
        if value is None:
            if "retry-after" in self.headers:
                del self.headers["retry-after"]
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers["Retry-After"] = value

    vary = _set_property(
        "Vary",
        doc="""The Vary field value indicates the set of request-header
        fields that fully determines, while the response is fresh,
        whether a cache is permitted to use the response to reply to a
        subsequent request without revalidation.""",
    )
    content_language = _set_property(
        "Content-Language",
        doc="""The Content-Language entity-header field describes the
        natural language(s) of the intended audience for the enclosed
        entity. Note that this might not be equivalent to all the
        languages used within the entity-body.""",
    )
    allow = _set_property(
        "Allow",
        doc="""The Allow entity-header field lists the set of methods
        supported by the resource identified by the Request-URI. The
        purpose of this field is strictly to inform the recipient of
        valid methods associated with the resource. An Allow header
        field MUST be present in a 405 (Method Not Allowed)
        response.""",
    )

    # ETag

    @property
    def cache_control(self) -> ResponseCacheControl:
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """

        def on_update(cache_control: ResponseCacheControl) -> None:
            # Mutating the returned object writes the header back.
            if not cache_control and "cache-control" in self.headers:
                del self.headers["cache-control"]
            elif cache_control:
                self.headers["Cache-Control"] = cache_control.to_header()

        return parse_cache_control_header(
            self.headers.get("cache-control"), on_update, ResponseCacheControl
        )

    def set_etag(self, etag: str, weak: bool = False) -> None:
        """Set the etag, and override the old one if there was one."""
        self.headers["ETag"] = quote_etag(etag, weak)

    def get_etag(self) -> t.Union[t.Tuple[str, bool], t.Tuple[None, None]]:
        """Return a tuple in the form ``(etag, is_weak)``. If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get("ETag"))

    accept_ranges = header_property[str](
        "Accept-Ranges",
        doc="""The `Accept-Ranges` header. Even though the name would
        indicate that multiple values are supported, it must be one
        string token only.

        The values ``'bytes'`` and ``'none'`` are common.

        .. versionadded:: 0.7""",
    )

    @property
    def content_range(self) -> ContentRange:
        """The ``Content-Range`` header as a
        :class:`~werkzeug.datastructures.ContentRange` object. Available
        even if the header is not set.

        .. versionadded:: 0.7
        """

        def on_update(rng: ContentRange) -> None:
            if not rng:
                del self.headers["content-range"]
            else:
                self.headers["Content-Range"] = rng.to_header()

        rv = parse_content_range_header(self.headers.get("content-range"), on_update)
        # always provide a content range object to make the descriptor
        # more user friendly. It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    @content_range.setter
    def content_range(self, value: t.Optional[t.Union[ContentRange, str]]) -> None:
        if not value:
            del self.headers["content-range"]
        elif isinstance(value, str):
            self.headers["Content-Range"] = value
        else:
            self.headers["Content-Range"] = value.to_header()

    # Authorization

    @property
    def www_authenticate(self) -> WWWAuthenticate:
        """The ``WWW-Authenticate`` header in a parsed form."""

        def on_update(www_auth: WWWAuthenticate) -> None:
            if not www_auth and "www-authenticate" in self.headers:
                del self.headers["www-authenticate"]
            elif www_auth:
                self.headers["WWW-Authenticate"] = www_auth.to_header()

        header = self.headers.get("www-authenticate")
        return parse_www_authenticate_header(header, on_update)

    # CSP

    @property
    def content_security_policy(self) -> ContentSecurityPolicy:
        """The ``Content-Security-Policy`` header as a
        :class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
        even if the header is not set.

        The Content-Security-Policy header adds an additional layer of
        security to help detect and mitigate certain types of attacks.
        """

        def on_update(csp: ContentSecurityPolicy) -> None:
            if not csp:
                del self.headers["content-security-policy"]
            else:
                self.headers["Content-Security-Policy"] = csp.to_header()

        rv = parse_csp_header(self.headers.get("content-security-policy"), on_update)
        if rv is None:
            rv = ContentSecurityPolicy(None, on_update=on_update)
        return rv

    @content_security_policy.setter
    def content_security_policy(
        self, value: t.Optional[t.Union[ContentSecurityPolicy, str]]
    ) -> None:
        if not value:
            del self.headers["content-security-policy"]
        elif isinstance(value, str):
            self.headers["Content-Security-Policy"] = value
        else:
            self.headers["Content-Security-Policy"] = value.to_header()

    @property
    def content_security_policy_report_only(self) -> ContentSecurityPolicy:
        """The ``Content-Security-policy-report-only`` header as a
        :class:`~werkzeug.datastructures.ContentSecurityPolicy` object. Available
        even if the header is not set.

        The Content-Security-Policy-Report-Only header adds a csp policy
        that is not enforced but is reported thereby helping detect
        certain types of attacks.
        """

        def on_update(csp: ContentSecurityPolicy) -> None:
            if not csp:
                del self.headers["content-security-policy-report-only"]
            else:
                # NOTE(review): mixed casing ("...policy-report-only") is
                # inconsistent with the other CSP headers; presumably the
                # Headers mapping is case-insensitive so behavior is
                # unaffected — confirm before normalizing.
                self.headers["Content-Security-policy-report-only"] = csp.to_header()

        rv = parse_csp_header(
            self.headers.get("content-security-policy-report-only"), on_update
        )
        if rv is None:
            rv = ContentSecurityPolicy(None, on_update=on_update)
        return rv

    @content_security_policy_report_only.setter
    def content_security_policy_report_only(
        self, value: t.Optional[t.Union[ContentSecurityPolicy, str]]
    ) -> None:
        if not value:
            del self.headers["content-security-policy-report-only"]
        elif isinstance(value, str):
            self.headers["Content-Security-policy-report-only"] = value
        else:
            self.headers["Content-Security-policy-report-only"] = value.to_header()

    # CORS

    @property
    def access_control_allow_credentials(self) -> bool:
        """Whether credentials can be shared by the browser to
        JavaScript code. As part of the preflight request it indicates
        whether credentials can be used on the cross origin request.
        """
        # The header's mere presence (value "true") is the signal.
        return "Access-Control-Allow-Credentials" in self.headers

    @access_control_allow_credentials.setter
    def access_control_allow_credentials(self, value: t.Optional[bool]) -> None:
        if value is True:
            self.headers["Access-Control-Allow-Credentials"] = "true"
        else:
            self.headers.pop("Access-Control-Allow-Credentials", None)

    access_control_allow_headers = header_property(
        "Access-Control-Allow-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be sent with the cross origin request.",
    )
    access_control_allow_methods = header_property(
        "Access-Control-Allow-Methods",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which methods can be used for the cross origin request.",
    )
    access_control_allow_origin = header_property[str](
        "Access-Control-Allow-Origin",
        doc="The origin or '*' for any origin that may make cross origin requests.",
    )
    access_control_expose_headers = header_property(
        "Access-Control-Expose-Headers",
        load_func=parse_set_header,
        dump_func=dump_header,
        doc="Which headers can be shared by the browser to JavaScript code.",
    )
    access_control_max_age = header_property(
        "Access-Control-Max-Age",
        load_func=int,
        dump_func=str,
        doc="The maximum age in seconds the access control settings can be cached for.",
    )
    cross_origin_opener_policy = header_property[COOP](
        "Cross-Origin-Opener-Policy",
        load_func=lambda value: COOP(value),
        dump_func=lambda value: value.value,
        default=COOP.UNSAFE_NONE,
        doc="""Allows control over sharing of browsing context group with cross-origin
        documents. Values must be a member of the :class:`werkzeug.http.COOP` enum.""",
    )
    cross_origin_embedder_policy = header_property[COEP](
        "Cross-Origin-Embedder-Policy",
        load_func=lambda value: COEP(value),
        dump_func=lambda value: value.value,
        default=COEP.UNSAFE_NONE,
        doc="""Prevents a document from loading any cross-origin resources that do not
        explicitly grant the document permission. Values must be a member of the
        :class:`werkzeug.http.COEP` enum.""",
    )

View file

@ -0,0 +1,142 @@
import typing as t
from .._internal import _encode_idna
from ..exceptions import SecurityError
from ..urls import uri_to_iri
from ..urls import url_quote
def host_is_trusted(hostname: str, trusted_list: t.Iterable[str]) -> bool:
    """Check if a host matches a list of trusted names.

    :param hostname: The name to check.
    :param trusted_list: A list of valid names to match. If a name
        starts with a dot it will match all subdomains.

    .. versionadded:: 0.9
    """
    if not hostname:
        return False

    if isinstance(trusted_list, str):
        trusted_list = [trusted_list]

    def _normalize(name: str) -> bytes:
        # Strip an optional port before IDNA-encoding for comparison.
        if ":" in name:
            name = name.rsplit(":", 1)[0]

        return _encode_idna(name)

    try:
        hostname_bytes = _normalize(hostname)
    except UnicodeError:
        return False

    for candidate in trusted_list:
        # A leading dot means "this domain and every subdomain".
        suffix_match = candidate.startswith(".")

        if suffix_match:
            candidate = candidate[1:]

        try:
            candidate_bytes = _normalize(candidate)
        except UnicodeError:
            return False

        if hostname_bytes == candidate_bytes:
            return True

        if suffix_match and hostname_bytes.endswith(b"." + candidate_bytes):
            return True

    return False
def get_host(
    scheme: str,
    host_header: t.Optional[str],
    server: t.Optional[t.Tuple[str, t.Optional[int]]] = None,
    trusted_hosts: t.Optional[t.Iterable[str]] = None,
) -> str:
    """Return the host for the given parameters.

    This first checks the ``host_header``. If it's not present, then
    ``server`` is used. The host will only contain the port if it is
    different than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host_header: The ``Host`` header value.
    :param server: Address of the server. ``(host, port)``, or
        ``(path, None)`` for unix sockets.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    if host_header is not None:
        host = host_header
    elif server is not None:
        host = server[0]
        port = server[1]

        if port is not None:
            host = f"{host}:{port}"
    else:
        host = ""

    # Drop the scheme's default port so "example.com:80" and
    # "example.com" compare equal over plain HTTP/WS.
    for schemes, suffix in (({"http", "ws"}, ":80"), ({"https", "wss"}, ":443")):
        if scheme in schemes and host.endswith(suffix):
            host = host[: -len(suffix)]
            break

    if trusted_hosts is not None and not host_is_trusted(host, trusted_hosts):
        raise SecurityError(f"Host {host!r} is not trusted.")

    return host
def get_current_url(
    scheme: str,
    host: str,
    root_path: t.Optional[str] = None,
    path: t.Optional[str] = None,
    query_string: t.Optional[bytes] = None,
) -> str:
    """Recreate the URL for a request. If an optional part isn't
    provided, it and subsequent parts are not included in the URL.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param scheme: The protocol the request used, like ``"https"``.
    :param host: The host the request was made to. See :func:`get_host`.
    :param root_path: Prefix that the application is mounted under. This
        is prepended to ``path``.
    :param path: The path part of the URL after ``root_path``.
    :param query_string: The portion of the URL after the "?".
    """
    parts = [scheme, "://", host]

    if root_path is None:
        parts.append("/")
        return uri_to_iri("".join(parts))

    parts.extend((url_quote(root_path.rstrip("/")), "/"))

    if path is None:
        return uri_to_iri("".join(parts))

    parts.append(url_quote(path.lstrip("/")))

    if query_string:
        parts.extend(("?", url_quote(query_string, safe=":&%=+$!*'(),")))

    return uri_to_iri("".join(parts))

View file

@ -0,0 +1,247 @@
import hashlib
import hmac
import os
import posixpath
import secrets
import typing as t
import warnings
if t.TYPE_CHECKING:
pass
# Alphabet that gen_salt() draws from: ASCII letters and digits.
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
# Default PBKDF2 round count used when the method string does not name one.
DEFAULT_PBKDF2_ITERATIONS = 260000
# Path separators other than "/" recognized by this OS (e.g. "\\" on
# Windows); safe_join() rejects any segment containing one of these.
_os_alt_seps: t.List[str] = list(
    sep for sep in [os.path.sep, os.path.altsep] if sep is not None and sep != "/"
)
def pbkdf2_hex(
    data: t.Union[str, bytes],
    salt: t.Union[str, bytes],
    iterations: int = DEFAULT_PBKDF2_ITERATIONS,
    keylen: t.Optional[int] = None,
    hashfunc: t.Optional[t.Union[str, t.Callable]] = None,
) -> str:
    """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided,
        the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
        string name of a known hash function, or a function from the
        hashlib module. Defaults to sha256.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Use :func:`hashlib.pbkdf2_hmac`
        instead.

    .. versionadded:: 0.9
    """
    warnings.warn(
        "'pbkdf2_hex' is deprecated and will be removed in Werkzeug"
        " 2.1. Use 'hashlib.pbkdf2_hmac().hex()' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    derived = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    return derived.hex()
def pbkdf2_bin(
    data: t.Union[str, bytes],
    salt: t.Union[str, bytes],
    iterations: int = DEFAULT_PBKDF2_ITERATIONS,
    keylen: t.Optional[int] = None,
    hashfunc: t.Optional[t.Union[str, t.Callable]] = None,
) -> bytes:
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` times and produces a
    key of `keylen` bytes. By default, SHA-256 is used as hash function;
    a different hashlib `hashfunc` can be provided.

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided
        the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
        string name of a known hash function or a function from the
        hashlib module. Defaults to sha256.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Use :func:`hashlib.pbkdf2_hmac`
        instead.

    .. versionadded:: 0.9
    """
    warnings.warn(
        "'pbkdf2_bin' is deprecated and will be removed in Werkzeug"
        " 2.1. Use 'hashlib.pbkdf2_hmac()' instead.",
        DeprecationWarning,
        stacklevel=2,
    )

    data_bytes = data.encode("utf8") if isinstance(data, str) else data
    salt_bytes = salt.encode("utf8") if isinstance(salt, str) else salt

    # Resolve the hash name: callables are probed for their name,
    # non-empty strings pass through, anything falsy means sha256.
    if callable(hashfunc):
        hash_name = hashfunc().name
    elif hashfunc:
        hash_name = hashfunc
    else:
        hash_name = "sha256"

    return hashlib.pbkdf2_hmac(hash_name, data_bytes, salt_bytes, iterations, keylen)
def safe_str_cmp(a: str, b: str) -> bool:
    """This function compares strings in somewhat constant time. This
    requires that the length of at least one string is known in advance.

    Returns `True` if the two strings are equal, or `False` if they are not.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Use
        :func:`hmac.compare_digest` instead.

    .. versionadded:: 0.7
    """
    warnings.warn(
        "'safe_str_cmp' is deprecated and will be removed in Werkzeug"
        " 2.1. Use 'hmac.compare_digest' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    a_bytes = a.encode("utf-8") if isinstance(a, str) else a
    b_bytes = b.encode("utf-8") if isinstance(b, str) else b
    return hmac.compare_digest(a_bytes, b_bytes)
def gen_salt(length: int) -> str:
    """Generate a cryptographically random string of ``length``
    characters drawn from ``SALT_CHARS``.

    :raise ValueError: if ``length`` is not positive.
    """
    if length > 0:
        # secrets (not random) so the salt is suitable for security use.
        chars = [secrets.choice(SALT_CHARS) for _ in range(length)]
        return "".join(chars)

    raise ValueError("Salt length must be positive")
def _hash_internal(method: str, salt: str, password: str) -> t.Tuple[str, str]:
    """Internal password hash helper. Supports plaintext without salt,
    unsalted and salted passwords. In case salted passwords are used
    hmac is used.

    Returns ``(hex_digest, actual_method)``.
    """
    if method == "plain":
        # "plain" stores the password verbatim.
        return password, method

    salt_bytes = salt.encode("utf-8")
    password_bytes = password.encode("utf-8")

    if method.startswith("pbkdf2:"):
        if not salt:
            raise ValueError("Salt is required for PBKDF2")

        # "pbkdf2:<hash>[:<iterations>]"
        args = method[7:].split(":")

        if len(args) not in (1, 2):
            raise ValueError("Invalid number of arguments for PBKDF2")

        hash_name = args.pop(0)

        if args:
            iterations = int(args[0] or 0)
        else:
            iterations = DEFAULT_PBKDF2_ITERATIONS

        digest = hashlib.pbkdf2_hmac(
            hash_name, password_bytes, salt_bytes, iterations
        )
        return digest.hex(), f"pbkdf2:{hash_name}:{iterations}"

    if salt:
        # Salted non-PBKDF2 hash: HMAC keyed with the salt.
        return hmac.new(salt_bytes, password_bytes, method).hexdigest(), method

    return hashlib.new(method, password_bytes).hexdigest(), method
def generate_password_hash(
    password: str, method: str = "pbkdf2:sha256", salt_length: int = 16
) -> str:
    """Hash a password with the given method and salt with a string of
    the given length. The format of the string returned includes the method
    that was used so that :func:`check_password_hash` can check the hash.

    The format for the hashed string looks like this::

        method$salt$hash

    This method can **not** generate unsalted passwords but it is possible
    to set param method='plain' in order to enforce plaintext passwords.
    If a salt is used, hmac is used internally to salt the password.

    If PBKDF2 is wanted it can be enabled by setting the method to
    ``pbkdf2:method:iterations`` where iterations is optional::

        pbkdf2:sha256:80000$salt$hash
        pbkdf2:sha256$salt$hash

    :param password: the password to hash.
    :param method: the hash method to use (one that hashlib supports). Can
        optionally be in the format ``pbkdf2:method:iterations``
        to enable PBKDF2.
    :param salt_length: the length of the salt in letters.
    """
    # "plain" never uses a salt; everything else gets a fresh random one.
    if method == "plain":
        salt = ""
    else:
        salt = gen_salt(salt_length)

    digest, actual_method = _hash_internal(method, salt, password)
    return f"{actual_method}${salt}${digest}"
def check_password_hash(pwhash: str, password: str) -> bool:
    """Check a password against a given salted and hashed password value.
    In order to support unsalted legacy passwords this method supports
    plain text passwords, md5 and sha1 hashes (both salted and unsalted).

    Returns `True` if the password matched, `False` otherwise.

    :param pwhash: a hashed string like returned by
        :func:`generate_password_hash`.
    :param password: the plaintext password to compare against the hash.
    """
    # A well-formed value is "method$salt$hash" — anything with fewer
    # than two "$" separators cannot match.
    if pwhash.count("$") < 2:
        return False

    method, salt, expected = pwhash.split("$", 2)
    actual = _hash_internal(method, salt, password)[0]
    # Constant-time comparison to avoid timing side channels.
    return hmac.compare_digest(actual, expected)
def safe_join(directory: str, *pathnames: str) -> t.Optional[str]:
    """Safely join zero or more untrusted path components to a base
    directory to avoid escaping the base directory.

    :param directory: The trusted base directory.
    :param pathnames: The untrusted path components relative to the
        base directory.

    :return: A safe path, otherwise ``None``.
    """
    parts = [directory]

    for segment in pathnames:
        if segment != "":
            segment = posixpath.normpath(segment)

        # Reject alternate OS separators, absolute paths, and any
        # attempt to traverse upward out of ``directory``.
        unsafe = (
            any(sep in segment for sep in _os_alt_seps)
            or os.path.isabs(segment)
            or segment == ".."
            or segment.startswith("../")
        )

        if unsafe:
            return None

        parts.append(segment)

    return posixpath.join(*parts)

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,240 @@
"""A small application that can be used to test a WSGI server and check
it for WSGI compliance.
"""
import base64
import os
import sys
import typing as t
from html import escape
from textwrap import wrap
from . import __version__ as _werkzeug_version
from .wrappers.request import Request
from .wrappers.response import Response
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
logo = Response(
base64.b64decode(
"""
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
="""
),
mimetype="image/png",
)
TEMPLATE = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
"""
def iter_sys_path() -> t.Iterator[t.Tuple[str, bool, bool]]:
    """Yield ``(display_path, is_not_dir, was_expanded)`` for every
    entry on ``sys.path``.

    ``display_path`` abbreviates the home directory as ``~`` on POSIX,
    ``is_not_dir`` flags entries that are not folders (e.g. zip
    importers), and ``was_expanded`` flags entries that were relative
    and resolved against the working directory.
    """
    posix = os.name == "posix"
    home = os.path.expanduser("~") if posix else ""

    def shorten(x: str) -> str:
        if posix and x.startswith(home):
            return f"~{x[len(home) :]}"

        return x

    cwd = os.path.abspath(os.getcwd())

    for entry in sys.path:
        resolved = os.path.join(cwd, entry or os.path.curdir)
        display = shorten(os.path.normpath(resolved))
        yield display, not os.path.isdir(resolved), resolved != entry
def render_testapp(req: Request) -> bytes:
    """Render the WSGI-information HTML page for ``req`` as UTF-8 bytes."""
    # pkg_resources is optional; without it the eggs section is empty.
    try:
        import pkg_resources
    except ImportError:
        eggs: t.Iterable[t.Any] = ()
    else:
        eggs = sorted(
            pkg_resources.working_set,
            key=lambda x: x.project_name.lower(),  # type: ignore
        )

    python_eggs = []

    for egg in eggs:
        # Some distributions expose no usable version metadata.
        try:
            version = egg.version
        except (ValueError, AttributeError):
            version = "unknown"

        python_eggs.append(
            f"<li>{escape(egg.project_name)} <small>[{escape(version)}]</small>"
        )

    wsgi_env = []

    for key, value in sorted(req.environ.items(), key=lambda x: repr(x[0]).lower()):
        wrapped = "".join(wrap(escape(repr(value))))
        wsgi_env.append(f"<tr><th>{escape(str(key))}<td><code>{wrapped}</code>")

    sys_path = []

    for item, virtual, expanded in iter_sys_path():
        css_classes = []

        if virtual:
            css_classes.append("virtual")

        if expanded:
            css_classes.append("exp")

        class_attr = f' class="{" ".join(css_classes)}"' if css_classes else ""
        sys_path.append(f"<li{class_attr}>{escape(item)}")

    context = {
        "python_version": "<br>".join(escape(sys.version).splitlines()),
        "platform": escape(sys.platform),
        "os": escape(os.name),
        "api_version": sys.api_version,
        "byteorder": sys.byteorder,
        "werkzeug_version": _werkzeug_version,
        "python_eggs": "\n".join(python_eggs),
        "wsgi_env": "\n".join(wsgi_env),
        "sys_path": "\n".join(sys_path),
    }
    return (TEMPLATE % context).encode("utf-8")
def test_app(
    environ: "WSGIEnvironment", start_response: "StartResponse"
) -> t.Iterable[bytes]:
    """Simple test application that dumps the environment. You can use
    it to check if Werkzeug is working properly:

    .. sourcecode:: pycon

        >>> from werkzeug.serving import run_simple
        >>> from werkzeug.testapp import test_app
        >>> run_simple('localhost', 3000, test_app)
         * Running on http://localhost:3000/

    The application displays important information from the WSGI environment,
    the Python interpreter and the installed libraries.
    """
    req = Request(environ, populate_request=False)

    # "?resource=logo" serves the embedded PNG instead of the info page.
    if req.args.get("resource") == "logo":
        return logo(environ, start_response)

    response = Response(render_testapp(req), mimetype="text/html")
    return response(environ, start_response)
if __name__ == "__main__":
    # Serve the test application locally for quick manual inspection.
    # NOTE(review): the relative import only resolves when executed as
    # "python -m werkzeug.testapp" — confirm the intended invocation.
    from .serving import run_simple

    run_simple("localhost", 5000, test_app, use_reloader=True)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,47 @@
import typing as t
class UserAgent:
    """Represents a parsed user agent header value.

    The default implementation does no parsing, only the :attr:`string`
    attribute is set. A subclass may parse the string to set the
    common attributes or expose other information. Set
    :attr:`werkzeug.wrappers.Request.user_agent_class` to use a
    subclass.

    :param string: The header value to parse.

    .. versionadded:: 2.0
        This replaces the previous ``useragents`` module, but does not
        provide a built-in parser.
    """

    platform: t.Optional[str] = None
    """The OS name, if it could be parsed from the string."""

    browser: t.Optional[str] = None
    """The browser name, if it could be parsed from the string."""

    version: t.Optional[str] = None
    """The browser version, if it could be parsed from the string."""

    language: t.Optional[str] = None
    """The browser language, if it could be parsed from the string."""

    def __init__(self, string: str) -> None:
        self.string: str = string
        """The original header value."""

    def __repr__(self) -> str:
        name = type(self).__name__
        return f"<{name} {self.browser}/{self.version}>"

    def __str__(self) -> str:
        return self.string

    def __bool__(self) -> bool:
        # Truthy only when a parser subclass identified a browser.
        return bool(self.browser)

    def to_header(self) -> str:
        """Convert to a header value."""
        return self.string

View file

@ -0,0 +1,215 @@
import re
import typing as t
import warnings
from .user_agent import UserAgent as _BaseUserAgent
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIEnvironment
class _UserAgentParser:
    """Regex-based best-effort user agent parser.

    ``platform_rules`` and ``browser_rules`` map a regex pattern to a
    canonical name; within each table the FIRST matching rule wins, so
    the ordering of entries is significant (e.g. "iphone" must be
    tested before the generic "mac"/"win" patterns would hit).
    """

    platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
        (" cros ", "chromeos"),
        ("iphone|ios", "iphone"),
        ("ipad", "ipad"),
        (r"darwin\b|mac\b|os\s*x", "macos"),
        ("win", "windows"),
        (r"android", "android"),
        ("netbsd", "netbsd"),
        ("openbsd", "openbsd"),
        ("freebsd", "freebsd"),
        ("dragonfly", "dragonflybsd"),
        ("(sun|i86)os", "solaris"),
        (r"x11\b|lin(\b|ux)?", "linux"),
        (r"nintendo\s+wii", "wii"),
        ("irix", "irix"),
        ("hp-?ux", "hpux"),
        ("aix", "aix"),
        ("sco|unix_sv", "sco"),
        ("bsd", "bsd"),
        ("amiga", "amiga"),
        ("blackberry|playbook", "blackberry"),
        ("symbian", "symbian"),
    )
    browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
        ("googlebot", "google"),
        ("msnbot", "msn"),
        ("yahoo", "yahoo"),
        ("ask jeeves", "ask"),
        (r"aol|america\s+online\s+browser", "aol"),
        (r"opera|opr", "opera"),
        ("edge|edg", "edge"),
        ("chrome|crios", "chrome"),
        ("seamonkey", "seamonkey"),
        ("firefox|firebird|phoenix|iceweasel", "firefox"),
        ("galeon", "galeon"),
        ("safari|version", "safari"),
        ("webkit", "webkit"),
        ("camino", "camino"),
        ("konqueror", "konqueror"),
        ("k-meleon", "kmeleon"),
        ("netscape", "netscape"),
        (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
        ("lynx", "lynx"),
        ("links", "links"),
        ("Baiduspider", "baidu"),
        ("bingbot", "bing"),
        ("mozilla", "mozilla"),
    )

    # Template: the browser pattern is substituted for {pattern}; the
    # capture group grabs an optional trailing version number.
    _browser_version_re = r"(?:{pattern})[/\sa-z(]*(\d+[.\da-z]+)?"
    # Two alternatives: a two-letter (optionally region-qualified) code
    # terminated by ";", or one enclosed in parens/brackets/semicolons.
    _language_re = re.compile(
        r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
        r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
    )

    def __init__(self) -> None:
        # Pre-compile every rule once; matching is case-insensitive.
        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]
        self.browsers = [
            (b, re.compile(self._browser_version_re.format(pattern=a), re.I))
            for a, b in self.browser_rules
        ]

    def __call__(
        self, user_agent: str
    ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:
        # Returns (platform, browser, version, language); each element
        # is None when its rule set found no match.
        platform: t.Optional[str]
        browser: t.Optional[str]
        version: t.Optional[str]
        language: t.Optional[str]

        for platform, regex in self.platforms:  # noqa: B007
            match = regex.search(user_agent)

            if match is not None:
                break
        else:
            platform = None

        # Except for Trident, all browser key words come after the last ')'
        last_closing_paren = 0

        if (
            not re.compile(r"trident/.+? rv:", re.I).search(user_agent)
            and ")" in user_agent
            and user_agent[-1] != ")"
        ):
            last_closing_paren = user_agent.rindex(")")

        for browser, regex in self.browsers:  # noqa: B007
            match = regex.search(user_agent[last_closing_paren:])

            if match is not None:
                version = match.group(1)
                break
        else:
            browser = version = None

        match = self._language_re.search(user_agent)

        if match is not None:
            language = match.group(1) or match.group(2)
        else:
            language = None

        return platform, browser, version, language
# It wasn't public, but users might have imported it anyway, show a
# warning if a user created an instance.
class UserAgentParser(_UserAgentParser):
    """A simple user agent parser. Used by the `UserAgent`.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Use a dedicated parser library
        instead.
    """

    def __init__(self) -> None:
        message = (
            "'UserAgentParser' is deprecated and will be removed in"
            " Werkzeug 2.1. Use a dedicated parser library instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__()
class _deprecated_property(property):
def __init__(self, fget: t.Callable[["_UserAgent"], t.Any]) -> None:
super().__init__(fget)
self.message = (
"The built-in user agent parser is deprecated and will be"
f" removed in Werkzeug 2.1. The {fget.__name__!r} property"
" will be 'None'. Subclass 'werkzeug.user_agent.UserAgent'"
" and set 'Request.user_agent_class' to use a different"
" parser."
)
def __get__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
warnings.warn(self.message, DeprecationWarning, stacklevel=3)
return super().__get__(*args, **kwargs)
# This is what Request.user_agent returns for now, only show warnings on
# attribute access, not creation.
class _UserAgent(_BaseUserAgent):
    _parser = _UserAgentParser()

    def __init__(self, string: str) -> None:
        super().__init__(string)
        (
            self._platform,
            self._browser,
            self._version,
            self._language,
        ) = self._parser(string)

    @_deprecated_property
    def platform(self) -> t.Optional[str]:  # type: ignore
        return self._platform

    @_deprecated_property
    def browser(self) -> t.Optional[str]:  # type: ignore
        return self._browser

    @_deprecated_property
    def version(self) -> t.Optional[str]:  # type: ignore
        return self._version

    @_deprecated_property
    def language(self) -> t.Optional[str]:  # type: ignore
        return self._language
# This is what users might be importing, show warnings on create.
class UserAgent(_UserAgent):
    """Represents a parsed user agent header value.

    This uses a basic parser to try to extract some information from the
    header.

    :param environ_or_string: The header value to parse, or a WSGI
        environ containing the header.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Subclass
        :class:`werkzeug.user_agent.UserAgent` (note the new module
        name) to use a dedicated parser instead.

    .. versionchanged:: 2.0
        Passing a WSGI environ is deprecated and will be removed in 2.1.
    """

    def __init__(self, environ_or_string: "t.Union[str, WSGIEnvironment]") -> None:
        # Environ input warns first (matching historical warning order),
        # then the module-level deprecation warning fires for every use.
        if not isinstance(environ_or_string, dict):
            string = environ_or_string
        else:
            warnings.warn(
                "Passing an environ to 'UserAgent' is deprecated and"
                " will be removed in Werkzeug 2.1. Pass the header"
                " value string instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            string = environ_or_string.get("HTTP_USER_AGENT", "")

        warnings.warn(
            "The 'werkzeug.useragents' module is deprecated and will be"
            " removed in Werkzeug 2.1. The new base API is"
            " 'werkzeug.user_agent.UserAgent'.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(string)

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,16 @@
from .accept import AcceptMixin
from .auth import AuthorizationMixin
from .auth import WWWAuthenticateMixin
from .base_request import BaseRequest
from .base_response import BaseResponse
from .common_descriptors import CommonRequestDescriptorsMixin
from .common_descriptors import CommonResponseDescriptorsMixin
from .etag import ETagRequestMixin
from .etag import ETagResponseMixin
from .request import PlainRequest
from .request import Request as Request
from .request import StreamOnlyMixin
from .response import Response as Response
from .response import ResponseStream
from .response import ResponseStreamMixin
from .user_agent import UserAgentMixin

View file

@ -0,0 +1,14 @@
import typing as t
import warnings
class AcceptMixin:
    """Deprecated no-op shim; ``Request`` now provides this directly."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'AcceptMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,26 @@
import typing as t
import warnings
class AuthorizationMixin:
    """Deprecated no-op shim; ``Request`` now provides this directly."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'AuthorizationMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)


class WWWAuthenticateMixin:
    """Deprecated no-op shim; ``Response`` now provides this directly."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'WWWAuthenticateMixin' is deprecated and will be removed"
            " in Werkzeug 2.1. 'Response' now includes the"
            " functionality directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,36 @@
import typing as t
import warnings
from .request import Request
class _FakeSubclassCheck(type):
    """Metaclass making ``issubclass``/``isinstance`` checks against
    ``BaseRequest`` behave like checks against ``Request``, warning on
    every use."""

    def __subclasscheck__(cls, subclass: t.Type) -> bool:
        message = (
            "'BaseRequest' is deprecated and will be removed in"
            " Werkzeug 2.1. Use 'issubclass(cls, Request)' instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return issubclass(subclass, Request)

    def __instancecheck__(cls, instance: t.Any) -> bool:
        message = (
            "'BaseRequest' is deprecated and will be removed in"
            " Werkzeug 2.1. Use 'isinstance(obj, Request)' instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return isinstance(instance, Request)


class BaseRequest(Request, metaclass=_FakeSubclassCheck):
    """Deprecated alias of :class:`Request`; warns on instantiation."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'BaseRequest' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,36 @@
import typing as t
import warnings
from .response import Response
class _FakeSubclassCheck(type):
    """Metaclass making ``issubclass``/``isinstance`` checks against
    ``BaseResponse`` behave like checks against ``Response``, warning on
    every use."""

    def __subclasscheck__(cls, subclass: t.Type) -> bool:
        message = (
            "'BaseResponse' is deprecated and will be removed in"
            " Werkzeug 2.1. Use 'issubclass(cls, Response)' instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return issubclass(subclass, Response)

    def __instancecheck__(cls, instance: t.Any) -> bool:
        message = (
            "'BaseResponse' is deprecated and will be removed in"
            " Werkzeug 2.1. Use 'isinstance(obj, Response)' instead."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return isinstance(instance, Response)


class BaseResponse(Response, metaclass=_FakeSubclassCheck):
    """Deprecated alias of :class:`Response`; warns on instantiation."""

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'BaseResponse' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Response' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,26 @@
import typing as t
import warnings
class CommonRequestDescriptorsMixin:
    """Deprecated empty mixin; the descriptors now live on ``Request``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'CommonRequestDescriptorsMixin' is deprecated and will be"
            " removed in Werkzeug 2.1. 'Request' now includes the"
            " functionality directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class CommonResponseDescriptorsMixin:
    """Deprecated empty mixin; the descriptors now live on ``Response``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'CommonResponseDescriptorsMixin' is deprecated and will be"
            " removed in Werkzeug 2.1. 'Response' now includes the"
            " functionality directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,26 @@
import typing as t
import warnings
class CORSRequestMixin:
    """Deprecated empty mixin; CORS support now lives on ``Request``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'CORSRequestMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class CORSResponseMixin:
    """Deprecated empty mixin; CORS support now lives on ``Response``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'CORSResponseMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Response' now includes the functionality"
            " directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,26 @@
import typing as t
import warnings
class ETagRequestMixin:
    """Deprecated empty mixin; ETag support now lives on ``Request``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'ETagRequestMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
class ETagResponseMixin:
    """Deprecated empty mixin; ETag support now lives on ``Response``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'ETagResponseMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Response' now includes the functionality"
            " directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,13 @@
import typing as t
import warnings
class JSONMixin:
    """Deprecated empty mixin; JSON support now lives on ``Request``.

    Instantiating it only emits a :class:`DeprecationWarning` and
    forwards construction to the next class in the MRO.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'JSONMixin' is deprecated and will be removed in Werkzeug"
            " 2.1. 'Request' now includes the functionality directly."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,660 @@
import functools
import json
import typing
import typing as t
import warnings
from io import BytesIO
from .._internal import _wsgi_decoding_dance
from ..datastructures import CombinedMultiDict
from ..datastructures import EnvironHeaders
from ..datastructures import FileStorage
from ..datastructures import ImmutableMultiDict
from ..datastructures import iter_multi_items
from ..datastructures import MultiDict
from ..formparser import default_stream_factory
from ..formparser import FormDataParser
from ..sansio.request import Request as _SansIORequest
from ..utils import cached_property
from ..utils import environ_property
from ..wsgi import _get_server
from ..wsgi import get_input_stream
from werkzeug.exceptions import BadRequest
if t.TYPE_CHECKING:
import typing_extensions as te
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class Request(_SansIORequest):
    """Represents an incoming WSGI HTTP request, with headers and body
    taken from the WSGI environment. Has properties and methods for
    using the functionality defined by various HTTP specs. The data in
    request objects is read-only.

    Text data is assumed to use UTF-8 encoding, which should be true for
    the vast majority of modern clients. Using an encoding set by the
    client is unsafe in Python due to extra encodings it provides, such
    as ``zip``. To change the assumed encoding, subclass and replace
    :attr:`charset`.

    :param environ: The WSGI environ is generated by the WSGI server and
        contains information about the server configuration and client
        request.
    :param populate_request: Add this request object to the WSGI environ
        as ``environ['werkzeug.request']``. Can be useful when
        debugging.
    :param shallow: Makes reading from :attr:`stream` (and any method
        that would read from it) raise a :exc:`RuntimeError`. Useful to
        prevent consuming the form data in middleware, which would make
        it unavailable to the final application.

    .. versionchanged:: 2.0
        Combine ``BaseRequest`` and mixins into a single ``Request``
        class. Using the old classes is deprecated and will be removed
        in Werkzeug 2.1.

    .. versionchanged:: 0.5
        Read-only mode is enforced with immutable classes for all data.
    """

    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :doc:`/request_data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_content_length: t.Optional[int] = None

    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :doc:`/request_data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size: t.Optional[int] = None

    #: The form data parser that should be used. Can be replaced to
    #: customize the form data parsing.
    form_data_parser_class: t.Type[FormDataParser] = FormDataParser

    #: Disable the :attr:`data` property to avoid reading from the input
    #: stream.
    #:
    #: .. deprecated:: 2.0
    #:     Will be removed in Werkzeug 2.1. Create the request with
    #:     ``shallow=True`` instead.
    #:
    #: .. versionadded:: 0.9
    disable_data_descriptor: t.Optional[bool] = None

    #: The WSGI environment containing HTTP headers and information from
    #: the WSGI server.
    environ: "WSGIEnvironment"

    #: Set when creating the request object. If ``True``, reading from
    #: the request body will cause a ``RuntimeError``. Useful to
    #: prevent modifying the stream from middleware.
    shallow: bool
    def __init__(
        self,
        environ: "WSGIEnvironment",
        populate_request: bool = True,
        shallow: bool = False,
    ) -> None:
        """Initialize the request from a WSGI ``environ`` mapping."""
        super().__init__(
            method=environ.get("REQUEST_METHOD", "GET"),
            scheme=environ.get("wsgi.url_scheme", "http"),
            server=_get_server(environ),
            root_path=_wsgi_decoding_dance(
                environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors
            ),
            path=_wsgi_decoding_dance(
                environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
            ),
            # The query string is kept as raw bytes; latin1 round-trips
            # every possible byte value of the WSGI string.
            query_string=environ.get("QUERY_STRING", "").encode("latin1"),
            headers=EnvironHeaders(environ),
            remote_addr=environ.get("REMOTE_ADDR"),
        )
        self.environ = environ

        # Deprecated class-level flag maps onto the shallow behavior.
        if self.disable_data_descriptor is not None:
            warnings.warn(
                "'disable_data_descriptor' is deprecated and will be"
                " removed in Werkzeug 2.1. Create the request with"
                " 'shallow=True' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            shallow = shallow or self.disable_data_descriptor

        self.shallow = shallow

        if populate_request and not shallow:
            self.environ["werkzeug.request"] = self
@classmethod
def from_values(cls, *args: t.Any, **kwargs: t.Any) -> "Request":
"""Create a new request object based on the values provided. If
environ is given missing values are filled from there. This method is
useful for small scripts when you need to simulate a request from an URL.
Do not use this method for unittesting, there is a full featured client
object (:class:`Client`) that allows to create multipart requests,
support for cookies etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from ..test import EnvironBuilder
charset = kwargs.pop("charset", cls.charset)
kwargs["charset"] = charset
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
    @classmethod
    def application(
        cls, f: t.Callable[["Request"], "WSGIApplication"]
    ) -> "WSGIApplication":
        """Decorate a function as responder that accepts the request as
        the last argument. This works like the :func:`responder`
        decorator but the function is passed the request object as the
        last argument and the request object will be closed
        automatically::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        As of Werkzeug 0.14 HTTP exceptions are automatically caught and
        converted to responses instead of failing.

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both standalone WSGI functions as well as bound methods and
        #: partially applied functions.
        from ..exceptions import HTTPException

        @functools.wraps(f)
        def application(*args):  # type: ignore
            # args[-2:] are always (environ, start_response); anything
            # before them is a bound/partial prefix such as ``self``.
            request = cls(args[-2])
            with request:
                try:
                    resp = f(*args[:-2] + (request,))
                except HTTPException as e:
                    # Turn HTTP errors into proper responses (0.14+).
                    resp = e.get_response(args[-2])
                return resp(*args[-2:])

        return t.cast("WSGIApplication", application)
def _get_file_stream(
self,
total_content_length: t.Optional[int],
content_type: t.Optional[str],
filename: t.Optional[str] = None,
content_length: t.Optional[int] = None,
) -> t.IO[bytes]:
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because webbrowsers do not provide
this value.
"""
return default_stream_factory(
total_content_length=total_content_length,
filename=filename,
content_type=content_type,
content_length=content_length,
)
@property
def want_form_data_parsed(self) -> bool:
"""``True`` if the request method carries content. By default
this is true if a ``Content-Type`` is sent.
.. versionadded:: 0.8
"""
return bool(self.environ.get("CONTENT_TYPE"))
def make_form_data_parser(self) -> FormDataParser:
"""Creates the form data parser. Instantiates the
:attr:`form_data_parser_class` with some parameters.
.. versionadded:: 0.8
"""
return self.form_data_parser_class(
self._get_file_stream,
self.charset,
self.encoding_errors,
self.max_form_memory_size,
self.max_content_length,
self.parameter_storage_class,
)
    def _load_form_data(self) -> None:
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if "form" in self.__dict__:
            return

        if self.want_form_data_parsed:
            parser = self.make_form_data_parser()
            data = parser.parse(
                self._get_stream_for_parsing(),
                self.mimetype,
                self.content_length,
                self.mimetype_params,
            )
        else:
            # No parseable content type: leave the stream untouched and
            # expose empty form/files containers.
            data = (
                self.stream,
                self.parameter_storage_class(),
                self.parameter_storage_class(),
            )

        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d["stream"], d["form"], d["files"] = data
def _get_stream_for_parsing(self) -> t.IO[bytes]:
"""This is the same as accessing :attr:`stream` with the difference
that if it finds cached data from calling :meth:`get_data` first it
will create a new stream out of the cached data.
.. versionadded:: 0.9.3
"""
cached_data = getattr(self, "_cached_data", None)
if cached_data is not None:
return BytesIO(cached_data)
return self.stream
def close(self) -> None:
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
object in a with statement which will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get("files")
for _key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self) -> "Request":
return self
def __exit__(self, exc_type, exc_value, tb) -> None: # type: ignore
self.close()
    @cached_property
    def stream(self) -> t.IO[bytes]:
        """
        If the incoming form data was not encoded with a known mimetype
        the data is stored unmodified in this stream for consumption. Most
        of the time it is a better idea to use :attr:`data` which will give
        you that data as a string. The stream only returns the data once.

        Unlike :attr:`input_stream` this stream is properly guarded that you
        can't accidentally read past the length of the input. Werkzeug will
        internally always refer to this stream to read data which makes it
        possible to wrap this object with a stream that does filtering.

        .. versionchanged:: 0.9
            This stream is now always available but might be consumed by the
            form parser later on. Previously the stream was only set if no
            parsing happened.
        """
        if self.shallow:
            raise RuntimeError(
                "This request was created with 'shallow=True', reading"
                " from the input stream is disabled."
            )

        return get_input_stream(self.environ)

    #: The raw, unguarded WSGI input stream.
    input_stream = environ_property[t.IO[bytes]](
        "wsgi.input",
        doc="""The WSGI input stream.

        In general it's a bad idea to use this one because you can
        easily read past the boundary. Use the :attr:`stream`
        instead.""",
    )
    @cached_property
    def data(self) -> bytes:
        """
        Contains the incoming request data as string in case it came with
        a mimetype Werkzeug does not handle.
        """
        return self.get_data(parse_form_data=True)

    @typing.overload
    def get_data(  # type: ignore
        self,
        cache: bool = True,
        as_text: "te.Literal[False]" = False,
        parse_form_data: bool = False,
    ) -> bytes:
        ...

    @typing.overload
    def get_data(
        self,
        cache: bool = True,
        as_text: "te.Literal[True]" = ...,
        parse_form_data: bool = False,
    ) -> str:
        ...

    def get_data(
        self, cache: bool = True, as_text: bool = False, parse_form_data: bool = False
    ) -> t.Union[bytes, str]:
        """This reads the buffered incoming data from the client into one
        bytes object. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.

        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.

        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke form data parsing function
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.

        If `as_text` is set to `True` the return value will be a decoded
        string.

        .. versionadded:: 0.9
        """
        rv = getattr(self, "_cached_data", None)

        if rv is None:
            if parse_form_data:
                # May consume the stream into form/files, leaving it empty.
                self._load_form_data()

            rv = self.stream.read()

            if cache:
                self._cached_data = rv

        if as_text:
            rv = rv.decode(self.charset, self.encoding_errors)

        return rv
    @cached_property
    def form(self) -> "ImmutableMultiDict[str, str]":
        """The form parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.

        Please keep in mind that file uploads will not end up here, but instead
        in the :attr:`files` attribute.

        .. versionchanged:: 0.9
            Previous to Werkzeug 0.9 this would only contain form data for POST
            and PUT requests.
        """
        # _load_form_data injects "form" into __dict__, so this second
        # attribute access hits the instance dict, not this property.
        self._load_form_data()
        return self.form
    @cached_property
    def values(self) -> "CombinedMultiDict[str, str]":
        """A :class:`werkzeug.datastructures.CombinedMultiDict` that
        combines :attr:`args` and :attr:`form`.

        .. versionchanged:: 2.0
            For GET requests, only ``args`` are present, not ``form``.
        """
        sources = [self.args]

        if self.method != "GET":
            # GET requests can have a body, and some caching proxies
            # might not treat that differently than a normal GET
            # request, allowing form data to "invisibly" affect the
            # cache without indication in the query string / URL.
            sources.append(self.form)

        args = []

        for d in sources:
            # CombinedMultiDict expects MultiDict members; wrap others.
            if not isinstance(d, MultiDict):
                d = MultiDict(d)

            args.append(d)

        return CombinedMultiDict(args)
    @cached_property
    def files(self) -> "ImmutableMultiDict[str, FileStorage]":
        """:class:`~werkzeug.datastructures.MultiDict` object containing
        all uploaded files. Each key in :attr:`files` is the name from the
        ``<input type="file" name="">``. Each value in :attr:`files` is a
        Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.

        It basically behaves like a standard file object you know from Python,
        with the difference that it also has a
        :meth:`~werkzeug.datastructures.FileStorage.save` function that can
        store the file on the filesystem.

        Note that :attr:`files` will only contain data if the request method was
        POST, PUT or PATCH and the ``<form>`` that posted to the request had
        ``enctype="multipart/form-data"``. It will be empty otherwise.

        See the :class:`~werkzeug.datastructures.MultiDict` /
        :class:`~werkzeug.datastructures.FileStorage` documentation for
        more details about the used data structure.
        """
        # _load_form_data injects "files" into __dict__, so this second
        # attribute access hits the instance dict, not this property.
        self._load_form_data()
        return self.files
    @property
    def script_root(self) -> str:
        """Alias for :attr:`self.root_path`. ``environ["SCRIPT_ROOT"]``
        without a trailing slash.
        """
        return self.root_path

    @cached_property
    def url_root(self) -> str:
        """Alias for :attr:`root_url`. The URL with scheme, host, and
        root path. For example, ``https://example.com/app/``.
        """
        return self.root_url

    #: Authenticated username supplied by the server, if any.
    remote_user = environ_property[str](
        "REMOTE_USER",
        doc="""If the server supports user authentication, and the
        script is protected, this attribute contains the username the
        user has authenticated as.""",
    )

    #: Server concurrency flags, straight from the WSGI environ.
    is_multithread = environ_property[bool](
        "wsgi.multithread",
        doc="""boolean that is `True` if the application is served by a
        multithreaded WSGI server.""",
    )

    is_multiprocess = environ_property[bool](
        "wsgi.multiprocess",
        doc="""boolean that is `True` if the application is served by a
        WSGI server that spawns multiple processes.""",
    )

    is_run_once = environ_property[bool](
        "wsgi.run_once",
        doc="""boolean that is `True` if the application will be
        executed only once in a process lifetime. This is the case for
        CGI for example, but it's not guaranteed that the execution only
        happens one time.""",
    )
    # JSON

    #: A module or other object that has ``dumps`` and ``loads``
    #: functions that match the API of the built-in :mod:`json` module.
    json_module = json

    @property
    def json(self) -> t.Optional[t.Any]:
        """The parsed JSON data if :attr:`mimetype` indicates JSON
        (:mimetype:`application/json`, see :attr:`is_json`).

        Calls :meth:`get_json` with default arguments.
        """
        return self.get_json()

    # Cached values for ``(silent=False, silent=True)``. Initialized
    # with sentinel values.
    _cached_json: t.Tuple[t.Any, t.Any] = (Ellipsis, Ellipsis)
    def get_json(
        self, force: bool = False, silent: bool = False, cache: bool = True
    ) -> t.Optional[t.Any]:
        """Parse :attr:`data` as JSON.

        If the mimetype does not indicate JSON
        (:mimetype:`application/json`, see :attr:`is_json`), this
        returns ``None``.

        If parsing fails, :meth:`on_json_loading_failed` is called and
        its return value is used as the return value.

        :param force: Ignore the mimetype and always try to parse JSON.
        :param silent: Silence parsing errors and return ``None``
            instead.
        :param cache: Store the parsed JSON to return for subsequent
            calls.
        """
        # ``silent`` (a bool) indexes the (normal, silent) cache slots.
        if cache and self._cached_json[silent] is not Ellipsis:
            return self._cached_json[silent]

        if not (force or self.is_json):
            return None

        data = self.get_data(cache=cache)

        try:
            rv = self.json_module.loads(data)
        except ValueError as e:
            if silent:
                rv = None

                if cache:
                    # Only the silent slot learns the failure result.
                    normal_rv, _ = self._cached_json
                    self._cached_json = (normal_rv, rv)
            else:
                rv = self.on_json_loading_failed(e)

                if cache:
                    # Only the normal slot stores the handler's value.
                    _, silent_rv = self._cached_json
                    self._cached_json = (rv, silent_rv)
        else:
            if cache:
                # Success fills both slots.
                self._cached_json = (rv, rv)

        return rv
def on_json_loading_failed(self, e: ValueError) -> t.Any:
"""Called if :meth:`get_json` parsing fails and isn't silenced.
If this method returns a value, it is used as the return value
for :meth:`get_json`. The default implementation raises
:exc:`~werkzeug.exceptions.BadRequest`.
"""
raise BadRequest(f"Failed to decode JSON object: {e}")
class StreamOnlyMixin:
    """Deprecated mixin that leaves only ``stream`` readable.

    Forces ``shallow=True`` on the request, which disables the ``data``,
    ``form``, and ``files`` properties.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Create the request with
        ``shallow=True`` instead.

    .. versionadded:: 0.9
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'StreamOnlyMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. Create the request with 'shallow=True'"
            " instead."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)
        kwargs["shallow"] = True
        super().__init__(*args, **kwargs)
class PlainRequest(StreamOnlyMixin, Request):
    """A request object without ``data``, ``form``, and ``files``.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Create the request with
        ``shallow=True`` instead.

    .. versionadded:: 0.9
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        deprecation = (
            "'PlainRequest' is deprecated and will be removed in"
            " Werkzeug 2.1. Create the request with 'shallow=True'"
            " instead."
        )
        warnings.warn(deprecation, DeprecationWarning, stacklevel=2)

        # Suppress the redundant StreamOnlyMixin DeprecationWarning the
        # chained constructor would otherwise emit.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            super().__init__(*args, **kwargs)

View file

@ -0,0 +1,891 @@
import json
import typing
import typing as t
import warnings
from http import HTTPStatus
from .._internal import _to_bytes
from ..datastructures import Headers
from ..http import remove_entity_headers
from ..sansio.response import Response as _SansIOResponse
from ..urls import iri_to_uri
from ..urls import url_join
from ..utils import cached_property
from ..wsgi import ClosingIterator
from ..wsgi import get_current_url
from werkzeug._internal import _get_environ
from werkzeug.http import generate_etag
from werkzeug.http import http_date
from werkzeug.http import is_resource_modified
from werkzeug.http import parse_etags
from werkzeug.http import parse_range_header
from werkzeug.wsgi import _RangeWrapper
if t.TYPE_CHECKING:
import typing_extensions as te
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
from .request import Request
def _warn_if_string(iterable: t.Iterable) -> None:
"""Helper for the response objects to check if the iterable returned
to the WSGI server is not a string.
"""
if isinstance(iterable, str):
warnings.warn(
"Response iterable was set to a string. This will appear to"
" work but means that the server will send the data to the"
" client one character at a time. This is almost never"
" intended behavior, use 'response.data' to assign strings"
" to the response object.",
stacklevel=2,
)
def _iter_encoded(
iterable: t.Iterable[t.Union[str, bytes]], charset: str
) -> t.Iterator[bytes]:
for item in iterable:
if isinstance(item, str):
yield item.encode(charset)
else:
yield item
def _clean_accept_ranges(accept_ranges: t.Union[bool, str]) -> str:
if accept_ranges is True:
return "bytes"
elif accept_ranges is False:
return "none"
elif isinstance(accept_ranges, str):
return accept_ranges
raise ValueError("Invalid accept_ranges value")
class Response(_SansIOResponse):
"""Represents an outgoing WSGI HTTP response with body, status, and
headers. Has properties and methods for using the functionality
defined by various HTTP specs.
The response body is flexible to support different use cases. The
simple form is passing bytes, or a string which will be encoded as
UTF-8. Passing an iterable of bytes or strings makes this a
streaming response. A generator is particularly useful for building
a CSV file in memory or using SSE (Server Sent Events). A file-like
object is also iterable, although the
:func:`~werkzeug.utils.send_file` helper should be used in that
case.
The response object is itself a WSGI application callable. When
called (:meth:`__call__`) with ``environ`` and ``start_response``,
it will pass its status and headers to ``start_response`` then
return its body as an iterable.
.. code-block:: python
from werkzeug.wrappers.response import Response
def index():
return Response("Hello, World!")
def application(environ, start_response):
path = environ.get("PATH_INFO") or "/"
if path == "/":
response = index()
else:
response = Response("Not Found", status=404)
return response(environ, start_response)
:param response: The data for the body of the response. A string or
bytes, or tuple or list of strings or bytes, for a fixed-length
response, or any other iterable of strings or bytes for a
streaming response. Defaults to an empty body.
:param status: The status code for the response. Either an int, in
which case the default status message is added, or a string in
the form ``{code} {message}``, like ``404 Not Found``. Defaults
to 200.
:param headers: A :class:`~werkzeug.datastructures.Headers` object,
or a list of ``(key, value)`` tuples that will be converted to a
``Headers`` object.
:param mimetype: The mime type (content type without charset or
other parameters) of the response. If the value starts with
``text/`` (or matches some other special cases), the charset
will be added to create the ``content_type``.
:param content_type: The full content type of the response.
Overrides building the value from ``mimetype``.
:param direct_passthrough: Pass the response body directly through
as the WSGI iterable. This can be used when the body is a binary
file or other iterator of bytes, to skip some unnecessary
checks. Use :func:`~werkzeug.utils.send_file` instead of setting
this manually.
.. versionchanged:: 2.0
Combine ``BaseResponse`` and mixins into a single ``Response``
class. Using the old classes is deprecated and will be removed
in Werkzeug 2.1.
.. versionchanged:: 0.5
The ``direct_passthrough`` parameter was added.
"""
#: if set to `False` accessing properties on the response object will
#: not try to consume the response iterator and convert it into a list.
#:
#: .. versionadded:: 0.6.2
#:
#: That attribute was previously called `implicit_seqence_conversion`.
#: (Notice the typo). If you did use this feature, you have to adapt
#: your code to the name change.
implicit_sequence_conversion = True
#: Should this response object correct the location header to be RFC
#: conformant? This is true by default.
#:
#: .. versionadded:: 0.8
autocorrect_location_header = True
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
#:
#: .. versionadded:: 0.8
automatically_set_content_length = True
#: The response body to send as the WSGI iterable. A list of strings
#: or bytes represents a fixed-length response, any other iterable
#: is a streaming response. Strings are encoded to bytes as UTF-8.
#:
#: Do not set to a plain string or bytes, that will cause sending
#: the response to be very inefficient as it will iterate one byte
#: at a time.
response: t.Union[t.Iterable[str], t.Iterable[bytes]]
def __init__(
self,
response: t.Optional[
t.Union[t.Iterable[bytes], bytes, t.Iterable[str], str]
] = None,
status: t.Optional[t.Union[int, str, HTTPStatus]] = None,
headers: t.Optional[
t.Union[
t.Mapping[str, t.Union[str, int, t.Iterable[t.Union[str, int]]]],
t.Iterable[t.Tuple[str, t.Union[str, int]]],
]
] = None,
mimetype: t.Optional[str] = None,
content_type: t.Optional[str] = None,
direct_passthrough: bool = False,
) -> None:
super().__init__(
status=status,
headers=headers,
mimetype=mimetype,
content_type=content_type,
)
#: Pass the response body directly through as the WSGI iterable.
#: This can be used when the body is a binary file or other
#: iterator of bytes, to skip some unnecessary checks. Use
#: :func:`~werkzeug.utils.send_file` instead of setting this
#: manually.
self.direct_passthrough = direct_passthrough
self._on_close: t.List[t.Callable[[], t.Any]] = []
# we set the response after the headers so that if a class changes
# the charset attribute, the data is set in the correct charset.
if response is None:
self.response = []
elif isinstance(response, (str, bytes, bytearray)):
self.set_data(response)
else:
self.response = response
def call_on_close(self, func: t.Callable[[], t.Any]) -> t.Callable[[], t.Any]:
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self) -> str:
if self.is_sequence:
body_info = f"{sum(map(len, self.iter_encoded()))} bytes"
else:
body_info = "streamed" if self.is_streamed else "likely-streamed"
return f"<{type(self).__name__} {body_info} [{self.status}]>"
@classmethod
def force_type(
cls, response: "Response", environ: t.Optional["WSGIEnvironment"] = None
) -> "Response":
"""Enforce that the WSGI response is a response object of the current
type. Werkzeug will use the :class:`Response` internally in many
situations like the exceptions. If you call :meth:`get_response` on an
exception you will get back a regular :class:`Response` object, even
if you are using a custom subclass.
This method can enforce a given response type, and it will also
convert arbitrary WSGI callables into response objects if an environ
is provided::
# convert a Werkzeug response object into an instance of the
# MyResponseClass subclass.
response = MyResponseClass.force_type(response)
# convert any WSGI application into a response object
response = MyResponseClass.force_type(response, environ)
This is especially useful if you want to post-process responses in
the main dispatcher and use functionality provided by your subclass.
Keep in mind that this will modify response objects in place if
possible!
:param response: a response object or wsgi application.
:param environ: a WSGI environment object.
:return: a response object.
"""
if not isinstance(response, Response):
if environ is None:
raise TypeError(
"cannot convert WSGI application into response"
" objects without an environ"
)
from ..test import run_wsgi_app
response = Response(*run_wsgi_app(response, environ))
response.__class__ = cls
return response
@classmethod
def from_app(
    cls, app: "WSGIApplication", environ: "WSGIEnvironment", buffered: bool = False
) -> "Response":
    """Build a response object by running a WSGI application.

    Works best with applications that return a generator. Applications
    that use the ``write()`` callable returned by ``start_response``
    are resolved automatically where possible; set ``buffered`` to
    `True` to force full buffering if the output looks wrong.

    :param app: the WSGI application to execute.
    :param environ: the WSGI environment to execute against.
    :param buffered: set to `True` to enforce buffering.
    :return: a response object.
    """
    from ..test import run_wsgi_app

    app_rv = run_wsgi_app(app, environ, buffered)
    return cls(*app_rv)
@typing.overload
def get_data(self, as_text: "te.Literal[False]" = False) -> bytes:
    ...

@typing.overload
def get_data(self, as_text: "te.Literal[True]") -> str:
    ...

def get_data(self, as_text: bool = False) -> t.Union[bytes, str]:
    """The string representation of the response body.

    Every call flattens and encodes the response iterable, which can
    be undesirable when streaming big data; disable the implicit
    flattening by setting :attr:`implicit_sequence_conversion` to
    `False`.

    :param as_text: decode the body to ``str`` with the response
        charset instead of returning raw bytes.

    .. versionadded:: 0.9
    """
    self._ensure_sequence()
    body = b"".join(self.iter_encoded())

    if as_text:
        return body.decode(self.charset)

    return body
def set_data(self, value: t.Union[bytes, str]) -> None:
    """Replace the response body with a new string or bytes value.

    Text is encoded to the response charset (utf-8 by default)
    immediately so the content length can be computed.

    .. versionadded:: 0.9
    """
    if isinstance(value, str):
        raw = value.encode(self.charset)
    else:
        raw = bytes(value)

    self.response = [raw]

    if self.automatically_set_content_length:
        self.headers["Content-Length"] = str(len(raw))
#: Read/write accessor for the body: reading invokes :meth:`get_data`,
#: assignment invokes :meth:`set_data`.
data = property(
    get_data,
    set_data,
    doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.",
)
def calculate_content_length(self) -> t.Optional[int]:
    """Return the byte length of the encoded body, or `None` when it
    cannot be determined (streaming / passthrough bodies).
    """
    try:
        self._ensure_sequence()
    except RuntimeError:
        # Body could not be buffered, so its size is unknowable here.
        return None

    total = 0
    for chunk in self.iter_encoded():
        total += len(chunk)
    return total
def _ensure_sequence(self, mutable: bool = False) -> None:
    """Guarantee that the response body is a buffered sequence.

    Called by methods that need random access to the body. If
    `mutable` is true, it will also ensure that the response sequence
    is a standard Python list so callers may modify it in place.

    :raises RuntimeError: if the body is in direct passthrough mode or
        implicit conversion is disabled.

    .. versionadded:: 0.6
    """
    if self.is_sequence:
        # if we need a mutable object, we ensure it's a list.
        if mutable and not isinstance(self.response, list):
            self.response = list(self.response)  # type: ignore
        return
    if self.direct_passthrough:
        raise RuntimeError(
            "Attempted implicit sequence conversion but the"
            " response object is in direct passthrough mode."
        )
    if not self.implicit_sequence_conversion:
        raise RuntimeError(
            "The response object required the iterable to be a"
            " sequence, but the implicit conversion was disabled."
            " Call make_sequence() yourself."
        )
    self.make_sequence()
def make_sequence(self) -> None:
    """Buffer and encode the response iterator into a list.

    This happens automatically when required unless
    `implicit_sequence_conversion` is disabled, in which case some
    properties raise until this method is called manually.

    .. versionadded:: 0.6
    """
    if self.is_sequence:
        return

    # Preserve the iterable's close() (if any) so consuming it here
    # still releases its resources when the response is torn down.
    close = getattr(self.response, "close", None)
    self.response = list(self.iter_encoded())
    if close is not None:
        self.call_on_close(close)
def iter_encoded(self) -> t.Iterator[bytes]:
    """Iter the response encoded with the encoding of the response.
    If the response object is invoked as WSGI application the return
    value of this method is used as application iterator unless
    :attr:`direct_passthrough` was activated.
    """
    if __debug__:
        # Warn developers who accidentally set a bare string body.
        _warn_if_string(self.response)
    # Encode in a separate function so that self.response is fetched
    # early. This allows us to wrap the response with the return
    # value from get_app_iter or iter_encoded.
    return _iter_encoded(self.response, self.charset)
@property
def is_streamed(self) -> bool:
    """`True` when the body is a stream without length information.

    That usually means a generator was passed to the response object
    and the number of iterations is unknown. Useful for skipping
    post-processing that only makes sense for buffered responses.
    """
    try:
        len(self.response)  # type: ignore
    except (TypeError, AttributeError):
        return True
    else:
        return False
@property
def is_sequence(self) -> bool:
    """`True` when the body is buffered, i.e. the response attribute
    is a ``list`` or ``tuple``.

    .. versionadded:: 0.6
    """
    return isinstance(self.response, (list, tuple))
def close(self) -> None:
    """Close the wrapped response if possible, then run any registered
    close callbacks. You can also use the object in a with statement
    which will automatically close it.

    .. versionadded:: 0.9
       Can now be used in a with statement.
    """
    if hasattr(self.response, "close"):
        self.response.close()  # type: ignore
    for func in self._on_close:
        func()
def __enter__(self) -> "Response":
    """Enter a ``with`` block; returns the response itself."""
    return self
def __exit__(self, exc_type, exc_value, tb):  # type: ignore
    """Exit the ``with`` block by closing the response."""
    self.close()
def freeze(self, no_etag: None = None) -> None:
    """Make the response object ready to be pickled. Does the
    following:

    * Buffer the response into a list, ignoring
      :attr:`implicit_sequence_conversion` and
      :attr:`direct_passthrough`.
    * Set the ``Content-Length`` header.
    * Generate an ``ETag`` header if one is not already set.

    .. versionchanged:: 2.0
        An ``ETag`` header is added, the ``no_etag`` parameter is
        deprecated and will be removed in Werkzeug 2.1.

    .. versionchanged:: 0.6
        The ``Content-Length`` header is set.
    """
    # Always freeze the encoded response body, ignore
    # implicit_sequence_conversion and direct_passthrough.
    body = list(self.iter_encoded())
    self.response = body
    self.headers["Content-Length"] = str(sum(len(chunk) for chunk in body))

    if no_etag is not None:
        warnings.warn(
            "The 'no_etag' parameter is deprecated and will be"
            " removed in Werkzeug 2.1.",
            DeprecationWarning,
            stacklevel=2,
        )

    self.add_etag()
def get_wsgi_headers(self, environ: "WSGIEnvironment") -> Headers:
    """This is automatically called right before the response is started
    and returns headers modified for the given environment. It returns a
    copy of the headers from the response with some modifications applied
    if necessary.

    For example the location header (if present) is joined with the root
    URL of the environment. Also the content length is automatically set
    to zero here for certain status codes.

    .. versionchanged:: 0.6
        Previously that function was called `fix_headers` and modified
        the response object in place. Also since 0.6, IRIs in location
        and content-location headers are handled properly.

        Also starting with 0.6, Werkzeug will attempt to set the content
        length if it is able to figure it out on its own. This is the
        case if all the strings in the response iterable are already
        encoded and the iterable is buffered.

    :param environ: the WSGI environment of the request.
    :return: returns a new :class:`~werkzeug.datastructures.Headers`
             object.
    """
    headers = Headers(self.headers)
    location: t.Optional[str] = None
    content_location: t.Optional[str] = None
    content_length: t.Optional[t.Union[str, int]] = None
    status = self.status_code

    # iterate over the headers to find all values in one go. Because
    # get_wsgi_headers is used each response that gives us a tiny
    # speedup.
    for key, value in headers:
        ikey = key.lower()
        if ikey == "location":
            location = value
        elif ikey == "content-location":
            content_location = value
        elif ikey == "content-length":
            content_length = value

    # make sure the location header is an absolute URL
    if location is not None:
        old_location = location
        if isinstance(location, str):
            # Safe conversion is necessary here as we might redirect
            # to a broken URI scheme (for instance itms-services).
            location = iri_to_uri(location, safe_conversion=True)
        if self.autocorrect_location_header:
            # Resolve relative redirect targets against the current URL.
            current_url = get_current_url(environ, strip_querystring=True)
            if isinstance(current_url, str):
                current_url = iri_to_uri(current_url)
            location = url_join(current_url, location)
        if location != old_location:
            headers["Location"] = location

    # make sure the content location is a URL
    if content_location is not None and isinstance(content_location, str):
        headers["Content-Location"] = iri_to_uri(content_location)

    if 100 <= status < 200 or status == 204:
        # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a
        # Content-Length header field in any response with a status
        # code of 1xx (Informational) or 204 (No Content)."
        headers.remove("Content-Length")
    elif status == 304:
        remove_entity_headers(headers)

    # if we can determine the content length automatically, we
    # should try to do that. But only if this does not involve
    # flattening the iterator or encoding of strings in the
    # response. We however should not do that if we have a 304
    # response.
    if (
        self.automatically_set_content_length
        and self.is_sequence
        and content_length is None
        and status not in (204, 304)
        and not (100 <= status < 200)
    ):
        try:
            content_length = sum(len(_to_bytes(x, "ascii")) for x in self.response)
        except UnicodeError:
            # Something other than bytes, can't safely figure out
            # the length of the response.
            pass
        else:
            headers["Content-Length"] = str(content_length)

    return headers
def get_app_iter(self, environ: "WSGIEnvironment") -> t.Iterable[bytes]:
    """Return the application iterator for the given environ.

    For ``HEAD`` requests and status codes that must not carry a body
    (1xx, 204, 304 per the HTTP specification) an empty iterable is
    returned instead of the body.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: a response iterable.
    """
    status = self.status_code
    body_forbidden = (
        environ["REQUEST_METHOD"] == "HEAD"
        or 100 <= status < 200
        or status in (204, 304)
    )

    if body_forbidden:
        iterable: t.Iterable[bytes] = ()
    elif self.direct_passthrough:
        # Passthrough hands the raw iterable straight to the server
        # (e.g. a server file wrapper) — intentionally no
        # ClosingIterator in this branch.
        if __debug__:
            _warn_if_string(self.response)
        return self.response  # type: ignore
    else:
        iterable = self.iter_encoded()

    return ClosingIterator(iterable, self.close)
def get_wsgi_response(
    self, environ: "WSGIEnvironment"
) -> t.Tuple[t.Iterable[bytes], str, t.List[t.Tuple[str, str]]]:
    """Return the final WSGI response as an
    ``(app_iter, status, headers)`` tuple, created specially for the
    given environment. For a ``HEAD`` request, for example, the
    iterator will be empty and only status and headers are relevant.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: an ``(app_iter, status, headers)`` tuple.
    """
    wsgi_headers = self.get_wsgi_headers(environ)
    body_iter = self.get_app_iter(environ)
    return body_iter, self.status, wsgi_headers.to_wsgi_list()
def __call__(
    self, environ: "WSGIEnvironment", start_response: "StartResponse"
) -> t.Iterable[bytes]:
    """Process this response as WSGI application.

    :param environ: the WSGI environment.
    :param start_response: the response callable provided by the WSGI
        server.
    :return: an application iterator
    """
    body_iter, status_line, header_list = self.get_wsgi_response(environ)
    start_response(status_line, header_list)
    return body_iter
# JSON

#: A module or other object that has ``dumps`` and ``loads``
#: functions that match the API of the built-in :mod:`json` module.
json_module = json
@property
def json(self) -> t.Optional[t.Any]:
    """The parsed JSON data if :attr:`mimetype` indicates JSON
    (:mimetype:`application/json`, see :attr:`is_json`).

    Calls :meth:`get_json` with default arguments.
    """
    return self.get_json()
def get_json(self, force: bool = False, silent: bool = False) -> t.Optional[t.Any]:
    """Parse :attr:`data` as JSON. Useful during testing.

    Returns ``None`` unless the mimetype indicates JSON
    (:mimetype:`application/json`, see :attr:`is_json`) or ``force``
    is set. Unlike :meth:`Request.get_json`, the result is not cached.

    :param force: Ignore the mimetype and always try to parse JSON.
    :param silent: Silence parsing errors and return ``None``
        instead.
    """
    if not force and not self.is_json:
        return None

    payload = self.get_data()

    try:
        return self.json_module.loads(payload)
    except ValueError:
        if silent:
            return None
        raise
# Stream

@cached_property
def stream(self) -> "ResponseStream":
    """The response iterable as write-only stream."""
    return ResponseStream(self)
def _wrap_range_response(self, start: int, length: int) -> None:
    """Wrap the body in a ``_RangeWrapper`` when serving a 206
    (Partial Content) response; no-op otherwise.
    """
    if self.status_code != 206:
        return
    self.response = _RangeWrapper(self.response, start, length)  # type: ignore
def _is_range_request_processable(self, environ: "WSGIEnvironment") -> bool:
    """Return ``True`` if `Range` header is present and if underlying
    resource is considered unchanged when compared with `If-Range` header.
    """
    return (
        # No If-Range header: the Range header alone decides.
        "HTTP_IF_RANGE" not in environ
        # ignore_if_range=False: the If-Range validator must match for
        # the resource to count as unchanged.
        or not is_resource_modified(
            environ,
            self.headers.get("etag"),
            None,
            self.headers.get("last-modified"),
            ignore_if_range=False,
        )
    ) and "HTTP_RANGE" in environ
def _process_range_request(
    self,
    environ: "WSGIEnvironment",
    complete_length: t.Optional[int] = None,
    accept_ranges: t.Optional[t.Union[bool, str]] = None,
) -> bool:
    """Handle Range Request related headers (RFC7233). If `Accept-Ranges`
    header is valid, and Range Request is processable, we set the headers
    as described by the RFC, and wrap the underlying response in a
    RangeWrapper.

    Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise.

    :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
             if `Range` header could not be parsed or satisfied.

    .. versionchanged:: 2.0
        Returns ``False`` if the length is 0.
    """
    from ..exceptions import RequestedRangeNotSatisfiable

    # Bail out (serve the full body) unless everything needed for a
    # partial response is present.
    if (
        accept_ranges is None
        or complete_length is None
        or complete_length == 0
        or not self._is_range_request_processable(environ)
    ):
        return False

    parsed_range = parse_range_header(environ.get("HTTP_RANGE"))

    if parsed_range is None:
        raise RequestedRangeNotSatisfiable(complete_length)

    range_tuple = parsed_range.range_for_length(complete_length)
    content_range_header = parsed_range.to_content_range_header(complete_length)

    if range_tuple is None or content_range_header is None:
        raise RequestedRangeNotSatisfiable(complete_length)

    # range_tuple is a half-open (start, stop) pair within the resource.
    content_length = range_tuple[1] - range_tuple[0]
    self.headers["Content-Length"] = content_length
    self.headers["Accept-Ranges"] = accept_ranges
    self.content_range = content_range_header  # type: ignore
    self.status_code = 206
    self._wrap_range_response(range_tuple[0], content_length)
    return True
def make_conditional(
    self,
    request_or_environ: t.Union["WSGIEnvironment", "Request"],
    accept_ranges: t.Union[bool, str] = False,
    complete_length: t.Optional[int] = None,
) -> "Response":
    """Make the response conditional to the request. This method works
    best if an etag was defined for the response already. The `add_etag`
    method can be used to do that. If called without etag just the date
    header is set.

    This does nothing if the request method in the request or environ is
    anything but GET or HEAD.

    For optimal performance when handling range requests, it's recommended
    that your response data object implements `seekable`, `seek` and `tell`
    methods as described by :py:class:`io.IOBase`. Objects returned by
    :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods.

    It does not remove the body of the response because that's something
    the :meth:`__call__` function does for us automatically.

    Returns self so that you can do ``return resp.make_conditional(req)``
    but modifies the object in-place.

    :param request_or_environ: a request object or WSGI environment to be
                               used to make the response conditional
                               against.
    :param accept_ranges: This parameter dictates the value of
                          `Accept-Ranges` header. If ``False`` (default),
                          the header is not set. If ``True``, it will be set
                          to ``"bytes"``. If ``None``, it will be set to
                          ``"none"``. If it's a string, it will use this
                          value.
    :param complete_length: Will be used only in valid Range Requests.
                            It will set `Content-Range` complete length
                            value and compute `Content-Length` real value.
                            This parameter is mandatory for successful
                            Range Requests completion.
    :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable`
             if `Range` header could not be parsed or satisfied.

    .. versionchanged:: 2.0
        Range processing is skipped if length is 0 instead of
        raising a 416 Range Not Satisfiable error.
    """
    environ = _get_environ(request_or_environ)
    if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
        # if the date is not in the headers, add it now. We however
        # will not override an already existing header. Unfortunately
        # this header will be overridden by many WSGI servers including
        # wsgiref.
        if "date" not in self.headers:
            self.headers["Date"] = http_date()
        accept_ranges = _clean_accept_ranges(accept_ranges)
        is206 = self._process_range_request(environ, complete_length, accept_ranges)
        if not is206 and not is_resource_modified(
            environ,
            self.headers.get("etag"),
            None,
            self.headers.get("last-modified"),
        ):
            # 412 when If-Match failed, otherwise a plain 304.
            if parse_etags(environ.get("HTTP_IF_MATCH")):
                self.status_code = 412
            else:
                self.status_code = 304
        if (
            self.automatically_set_content_length
            and "content-length" not in self.headers
        ):
            length = self.calculate_content_length()
            if length is not None:
                self.headers["Content-Length"] = length
    return self
def add_etag(self, overwrite: bool = False, weak: bool = False) -> None:
    """Add an etag for the current response if there is none yet.

    :param overwrite: replace an existing ``ETag`` header instead of
        keeping it.
    :param weak: generate a weak validator.

    .. versionchanged:: 2.0
        SHA-1 is used to generate the value. MD5 may not be
        available in some environments.
    """
    if not overwrite and "etag" in self.headers:
        return
    self.set_etag(generate_etag(self.get_data()), weak)
class ResponseStream:
    """A write-only, file-like object that appends directly to the
    response iterable of a :class:`Response` object.
    """

    mode = "wb+"

    def __init__(self, response: Response):
        self.response = response
        self.closed = False

    def _check_open(self) -> None:
        # Mirror the file API: operations on a closed stream fail.
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def write(self, value: bytes) -> int:
        self._check_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)  # type: ignore
        # The buffered body just grew, so any previously computed
        # Content-Length is stale.
        self.response.headers.pop("Content-Length", None)
        return len(value)

    def writelines(self, seq: t.Iterable[bytes]) -> None:
        for chunk in seq:
            self.write(chunk)

    def close(self) -> None:
        self.closed = True

    def flush(self) -> None:
        self._check_open()

    def isatty(self) -> bool:
        self._check_open()
        return False

    def tell(self) -> int:
        self.response._ensure_sequence()
        return sum(map(len, self.response.response))

    @property
    def encoding(self) -> str:
        return self.response.charset
class ResponseStreamMixin:
    """Deprecated compatibility shim; :class:`Response` now provides the
    stream functionality directly.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'ResponseStreamMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Response' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,14 @@
import typing as t
import warnings
class UserAgentMixin:
    """Deprecated compatibility shim; :class:`Request` now provides the
    user-agent functionality directly.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        message = (
            "'UserAgentMixin' is deprecated and will be removed in"
            " Werkzeug 2.1. 'Request' now includes the functionality"
            " directly."
        )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)

View file

@ -0,0 +1,982 @@
import io
import re
import typing as t
from functools import partial
from functools import update_wrapper
from itertools import chain
from ._internal import _make_encode_wrapper
from ._internal import _to_bytes
from ._internal import _to_str
from .sansio import utils as _sansio_utils
from .sansio.utils import host_is_trusted # noqa: F401 # Imported as part of API
from .urls import _URLTuple
from .urls import uri_to_iri
from .urls import url_join
from .urls import url_parse
from .urls import url_quote
if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
def responder(f: t.Callable[..., "WSGIApplication"]) -> "WSGIApplication":
    """Marks a function as responder. Decorate a function with it and it
    will automatically call the return value as WSGI application.

    Example::

        @responder
        def application(environ, start_response):
            return Response('Hello World!')
    """

    def wrapper(*args):
        # Call the decorated function, then invoke its return value as
        # a WSGI app using the trailing (environ, start_response) pair.
        return f(*args)(*args[-2:])

    return update_wrapper(wrapper, f)
def get_current_url(
    environ: "WSGIEnvironment",
    root_only: bool = False,
    strip_querystring: bool = False,
    host_only: bool = False,
    trusted_hosts: t.Optional[t.Iterable[str]] = None,
) -> str:
    """Recreate the URL for a request from the parts in a WSGI
    environment.

    The URL is an IRI, not a URI, so it may contain Unicode characters.
    Use :func:`~werkzeug.urls.iri_to_uri` to convert it to ASCII.

    :param environ: The WSGI environment to get the URL parts from.
    :param root_only: Only build the root path, don't include the
        remaining path or query string.
    :param strip_querystring: Don't include the query string.
    :param host_only: Only build the scheme and host.
    :param trusted_hosts: A list of trusted host names to validate the
        host against.
    """
    # Collect only the parts requested by the flags; the sansio helper
    # assembles the final URL string.
    parts = {
        "scheme": environ["wsgi.url_scheme"],
        "host": get_host(environ, trusted_hosts),
    }

    if not host_only:
        parts["root_path"] = environ.get("SCRIPT_NAME", "")

        if not root_only:
            parts["path"] = environ.get("PATH_INFO", "")

            if not strip_querystring:
                # WSGI strings are latin1-decoded bytes per PEP 3333.
                parts["query_string"] = environ.get("QUERY_STRING", "").encode("latin1")

    return _sansio_utils.get_current_url(**parts)
def _get_server(
environ: "WSGIEnvironment",
) -> t.Optional[t.Tuple[str, t.Optional[int]]]:
name = environ.get("SERVER_NAME")
if name is None:
return None
try:
port: t.Optional[int] = int(environ.get("SERVER_PORT", None))
except (TypeError, ValueError):
# unix socket
port = None
return name, port
def get_host(
    environ: "WSGIEnvironment", trusted_hosts: t.Optional[t.Iterable[str]] = None
) -> str:
    """Return the host for the given WSGI environment.

    The ``Host`` header is preferred, then ``SERVER_NAME`` if it's not
    set. The returned host will only contain the port if it is different
    than the standard port for the protocol.

    Optionally, verify that the host is trusted using
    :func:`host_is_trusted` and raise a
    :exc:`~werkzeug.exceptions.SecurityError` if it is not.

    :param environ: A WSGI environment dict.
    :param trusted_hosts: A list of trusted host names.

    :return: Host, with port if necessary.
    :raise ~werkzeug.exceptions.SecurityError: If the host is not
        trusted.
    """
    # Delegates to the sansio helper, which handles default-port
    # stripping and trusted-host validation.
    return _sansio_utils.get_host(
        environ["wsgi.url_scheme"],
        environ.get("HTTP_HOST"),
        _get_server(environ),
        trusted_hosts,
    )
def get_content_length(environ: "WSGIEnvironment") -> t.Optional[int]:
    """Returns the content length from the WSGI environment as
    integer. If it's not available or chunked transfer encoding is used,
    ``None`` is returned.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the content length from.
    """
    # Chunked bodies have no fixed length by definition.
    if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
        return None

    raw = environ.get("CONTENT_LENGTH")

    if raw is None:
        return None

    try:
        length = int(raw)
    except (ValueError, TypeError):
        return None

    # Negative values are nonsensical; clamp to zero.
    return max(0, length)
def get_input_stream(
    environ: "WSGIEnvironment", safe_fallback: bool = True
) -> t.IO[bytes]:
    """Return the input stream from the WSGI environment, wrapped so it
    is safe to read without consulting the content length.

    If no content length is set, the returned stream is empty for
    safety. WSGI servers that support chunked or infinite streams
    should set ``wsgi.input_terminated`` in the environ to signal the
    stream is safe to read to the end.

    .. versionadded:: 0.9

    :param environ: the WSGI environ to fetch the stream from.
    :param safe_fallback: use an empty stream as a safe fallback when
        the content length is not set. Disabling this allows infinite
        streams, which can be a denial-of-service risk.
    """
    stream = t.cast(t.IO[bytes], environ["wsgi.input"])
    content_length = get_content_length(environ)

    # "wsgi.input_terminated" means the server delimits the stream for
    # us, so reading to the end is safe regardless of length.
    if environ.get("wsgi.input_terminated"):
        return stream

    # Without a content length the stream could be infinite (malicious
    # or not); hand back an empty stream unless explicitly opted out.
    if content_length is None:
        return stream if not safe_fallback else io.BytesIO()

    # Otherwise cap reads at the declared content length.
    return t.cast(t.IO[bytes], LimitedStream(stream, content_length))
def get_query_string(environ: "WSGIEnvironment") -> str:
    """Returns the ``QUERY_STRING`` from the WSGI environment. This also
    takes care of the WSGI decoding dance. The string returned will be
    restricted to ASCII characters.

    :param environ: WSGI environment to get the query string from.

    .. versionadded:: 0.9
    """
    # WSGI strings are latin1-decoded bytes per PEP 3333.
    qs = environ.get("QUERY_STRING", "").encode("latin1")
    # QUERY_STRING really should be ascii safe but some browsers
    # will send us some unicode stuff (I am looking at you IE).
    # In that case we want to urllib quote it badly.
    return url_quote(qs, safe=":&%=+$!*'(),")
def get_path_info(
    environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> str:
    """Return the ``PATH_INFO`` from the WSGI environment and decode it
    unless ``charset`` is ``None``.

    :param environ: WSGI environment to get the path from.
    :param charset: The charset for the path info, or ``None`` if no
        decoding should be performed.
    :param errors: The decoding error handling.

    .. versionadded:: 0.9
    """
    # Round-trip through latin1 to recover the raw bytes per PEP 3333.
    path = environ.get("PATH_INFO", "").encode("latin1")
    return _to_str(path, charset, errors, allow_none_charset=True)  # type: ignore
def get_script_name(
    environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> str:
    """Return the ``SCRIPT_NAME`` from the WSGI environment and decode
    it unless `charset` is set to ``None``.

    :param environ: WSGI environment to get the path from.
    :param charset: The charset for the path, or ``None`` if no decoding
        should be performed.
    :param errors: The decoding error handling.

    .. versionadded:: 0.9
    """
    # Round-trip through latin1 to recover the raw bytes per PEP 3333.
    path = environ.get("SCRIPT_NAME", "").encode("latin1")
    return _to_str(path, charset, errors, allow_none_charset=True)  # type: ignore
def pop_path_info(
    environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> t.Optional[str]:
    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
    `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.

    If the `charset` is set to `None` bytes are returned.

    If there are empty segments (``'/foo//bar'``) these are ignored but
    properly pushed to the `SCRIPT_NAME`:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> pop_path_info(env)
    'a'
    >>> env['SCRIPT_NAME']
    '/foo/a'
    >>> pop_path_info(env)
    'b'
    >>> env['SCRIPT_NAME']
    '/foo/a/b'

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
        The path is now decoded and a charset and encoding
        parameter can be provided.

    :param environ: the WSGI environment that is modified.
    :param charset: The ``encoding`` parameter passed to
        :func:`bytes.decode`.
    :param errors: The ``errors`` parameter passed to
        :func:`bytes.decode`.
    """
    path = environ.get("PATH_INFO")
    if not path:
        return None

    script_name = environ.get("SCRIPT_NAME", "")

    # shift multiple leading slashes over
    old_path = path
    path = path.lstrip("/")
    if path != old_path:
        script_name += "/" * (len(old_path) - len(path))

    if "/" not in path:
        # Last segment: PATH_INFO is exhausted after this pop.
        environ["PATH_INFO"] = ""
        environ["SCRIPT_NAME"] = script_name + path
        rv = path.encode("latin1")
    else:
        segment, path = path.split("/", 1)
        environ["PATH_INFO"] = f"/{path}"
        environ["SCRIPT_NAME"] = script_name + segment
        rv = segment.encode("latin1")

    return _to_str(rv, charset, errors, allow_none_charset=True)  # type: ignore
def peek_path_info(
    environ: "WSGIEnvironment", charset: str = "utf-8", errors: str = "replace"
) -> t.Optional[str]:
    """Returns the next segment on the `PATH_INFO` or `None` if there
    is none. Works like :func:`pop_path_info` without modifying the
    environment:

    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
    >>> peek_path_info(env)
    'a'
    >>> peek_path_info(env)
    'a'

    If the `charset` is set to `None` bytes are returned.

    .. versionadded:: 0.5

    .. versionchanged:: 0.9
        The path is now decoded and a charset and encoding
        parameter can be provided.

    :param environ: the WSGI environment that is checked.
    """
    segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
    # NOTE(review): str.split always yields at least one element, so this
    # branch is always taken; an empty PATH_INFO returns '' rather than
    # None — confirm whether callers rely on that before changing it.
    if segments:
        return _to_str(  # type: ignore
            segments[0].encode("latin1"), charset, errors, allow_none_charset=True
        )
    return None
def extract_path_info(
    environ_or_baseurl: t.Union[str, "WSGIEnvironment"],
    path_or_url: t.Union[str, _URLTuple],
    charset: str = "utf-8",
    errors: str = "werkzeug.url_quote",
    collapse_http_schemes: bool = True,
) -> t.Optional[str]:
    """Extracts the path info from the given URL (or WSGI environment) and
    path. The path info returned is a string. The URLs might also be IRIs.

    If the path info could not be determined, `None` is returned.

    Some examples:

    >>> extract_path_info('http://example.com/app', '/app/hello')
    '/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello')
    '/hello'
    >>> extract_path_info('http://example.com/app',
    ...                   'https://example.com/app/hello',
    ...                   collapse_http_schemes=False) is None
    True

    Instead of providing a base URL you can also pass a WSGI environment.

    :param environ_or_baseurl: a WSGI environment dict, a base URL or
        base IRI. This is the root of the application.
    :param path_or_url: an absolute path from the server root, a
        relative path (in which case it's the path info) or a full URL.
    :param charset: the charset for byte data in URLs
    :param errors: the error handling on decode
    :param collapse_http_schemes: if set to `False` the algorithm does
        not assume that http and https on the same server point to the
        same resource.

    .. versionchanged:: 0.15
        The ``errors`` parameter defaults to leaving invalid bytes
        quoted instead of replacing them.

    .. versionadded:: 0.6
    """

    def _normalize_netloc(scheme: str, netloc: str) -> str:
        # Strip userinfo and drop the port when it is the scheme default
        # so that equal hosts compare equal.
        parts = netloc.split("@", 1)[-1].split(":", 1)
        port: t.Optional[str]

        if len(parts) == 2:
            netloc, port = parts
            if (scheme == "http" and port == "80") or (
                scheme == "https" and port == "443"
            ):
                port = None
        else:
            netloc = parts[0]
            port = None

        if port is not None:
            netloc += f":{port}"

        return netloc

    # make sure whatever we are working on is a IRI and parse it
    path = uri_to_iri(path_or_url, charset, errors)
    if isinstance(environ_or_baseurl, dict):
        environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True)
    base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
    base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
    cur_scheme, cur_netloc, cur_path = url_parse(url_join(base_iri, path))[:3]

    # normalize the network location
    base_netloc = _normalize_netloc(base_scheme, base_netloc)
    cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)

    # is that IRI even on a known HTTP scheme?
    if collapse_http_schemes:
        for scheme in base_scheme, cur_scheme:
            if scheme not in ("http", "https"):
                return None
    else:
        if not (base_scheme in ("http", "https") and base_scheme == cur_scheme):
            return None

    # are the netlocs compatible?
    if base_netloc != cur_netloc:
        return None

    # are we below the application path?
    base_path = base_path.rstrip("/")
    if not cur_path.startswith(base_path):
        return None

    return f"/{cur_path[len(base_path) :].lstrip('/')}"
class ClosingIterator:
    """Iterator wrapper that runs extra ``close`` callbacks.

    The WSGI specification requires middlewares and gateways to respect
    the `close` callback of the iterable returned by the application.
    This class makes it easy to attach additional close actions to a
    returned iterable::

        return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                              cleanup_locals])

    If there is just one close function it can be passed instead of the
    list.

    A closing iterator is not needed if the application uses response
    objects and finishes the processing if the response is started::

        try:
            return response(environ, start_response)
        finally:
            cleanup_session()
            cleanup_locals()
    """

    def __init__(
        self,
        iterable: t.Iterable[bytes],
        callbacks: t.Optional[
            t.Union[t.Callable[[], None], t.Iterable[t.Callable[[], None]]]
        ] = None,
    ) -> None:
        iterator = iter(iterable)
        self._next = t.cast(t.Callable[[], bytes], partial(next, iterator))

        # Normalize to a mutable list of zero-argument callables.
        if callbacks is None:
            cb_list: t.List[t.Callable[[], None]] = []
        elif callable(callbacks):
            cb_list = [callbacks]
        else:
            cb_list = list(callbacks)

        # The wrapped iterable's own close() must run first.
        iterable_close = getattr(iterable, "close", None)
        if iterable_close:
            cb_list.insert(0, iterable_close)

        self._callbacks = cb_list

    def __iter__(self) -> "ClosingIterator":
        return self

    def __next__(self) -> bytes:
        return self._next()

    def close(self) -> None:
        for callback in self._callbacks:
            callback()
def wrap_file(
environ: "WSGIEnvironment", file: t.IO[bytes], buffer_size: int = 8192
) -> t.Iterable[bytes]:
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`Response.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get("wsgi.file_wrapper", FileWrapper)( # type: ignore
file, buffer_size
)
class FileWrapper:
    """Convert a :class:`file`-like object into an iterable that yields
    ``buffer_size`` blocks until the file is fully read.

    You should not use this class directly but rather use the
    :func:`wrap_file` function that uses the WSGI server's file wrapper
    support if it's available.

    .. versionadded:: 0.5

    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.

    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
    :param buffer_size: number of bytes for one iteration.
    """

    def __init__(self, file: t.IO[bytes], buffer_size: int = 8192) -> None:
        self.file = file
        self.buffer_size = buffer_size

    def close(self) -> None:
        # Delegate to the underlying file only if it supports close().
        close = getattr(self.file, "close", None)
        if close is not None:
            close()

    def seekable(self) -> bool:
        # Prefer the file's own answer; fall back to the presence of a
        # seek() method.
        if hasattr(self.file, "seekable"):
            return self.file.seekable()
        return hasattr(self.file, "seek")

    def seek(self, *args: t.Any) -> None:
        seek = getattr(self.file, "seek", None)
        if seek is not None:
            seek(*args)

    def tell(self) -> t.Optional[int]:
        tell = getattr(self.file, "tell", None)
        if tell is None:
            return None
        return tell()

    def __iter__(self) -> "FileWrapper":
        return self

    def __next__(self) -> bytes:
        chunk = self.file.read(self.buffer_size)
        if not chunk:
            raise StopIteration()
        return chunk
class _RangeWrapper:
    # private for now, but should we make it public in the future ?
    """This class can be used to convert an iterable object into
    an iterable that will only yield a piece of the underlying content.
    It yields blocks until the underlying stream range is fully read.
    The yielded blocks will have a size that can't exceed the original
    iterator defined block size, but that can be smaller.
    If you're using this object together with a :class:`Response` you have
    to use the `direct_passthrough` mode.
    :param iterable: an iterable object with a :meth:`__next__` method.
    :param start_byte: byte from which read will start.
    :param byte_range: how many bytes to read.
    """
    def __init__(
        self,
        iterable: t.Union[t.Iterable[bytes], t.IO[bytes]],
        start_byte: int = 0,
        byte_range: t.Optional[int] = None,
    ):
        self.iterable = iter(iterable)
        self.byte_range = byte_range
        self.start_byte = start_byte
        # Absolute end offset (exclusive), or None for "until exhausted".
        self.end_byte = None
        if byte_range is not None:
            self.end_byte = start_byte + byte_range
        # Bytes consumed from the underlying iterable so far.
        self.read_length = 0
        # If the wrapped object can seek(), we can jump straight to
        # start_byte instead of reading and discarding chunks.
        self.seekable = (
            hasattr(iterable, "seekable") and iterable.seekable()  # type: ignore
        )
        self.end_reached = False
    def __iter__(self) -> "_RangeWrapper":
        return self
    def _next_chunk(self) -> bytes:
        # Pull one chunk from the underlying iterable, tracking how many
        # bytes were consumed and remembering exhaustion.
        try:
            chunk = next(self.iterable)
            self.read_length += len(chunk)
            return chunk
        except StopIteration:
            self.end_reached = True
            raise
    def _first_iteration(self) -> t.Tuple[t.Optional[bytes], int]:
        # Position the stream at start_byte.  Returns the first (possibly
        # trimmed) chunk, or None when seeking was used, plus the absolute
        # offset at which that chunk starts so callers can slice relative
        # to it.
        chunk = None
        if self.seekable:
            self.iterable.seek(self.start_byte)  # type: ignore
            self.read_length = self.iterable.tell()  # type: ignore
            contextual_read_length = self.read_length
        else:
            # Not seekable: read and discard until the chunk containing
            # start_byte is reached, then trim its leading bytes.
            while self.read_length <= self.start_byte:
                chunk = self._next_chunk()
            if chunk is not None:
                chunk = chunk[self.start_byte - self.read_length :]
            contextual_read_length = self.start_byte
        return chunk, contextual_read_length
    def _next(self) -> bytes:
        # Produce the next (possibly tail-trimmed) chunk of the range.
        if self.end_reached:
            raise StopIteration()
        chunk = None
        contextual_read_length = self.read_length
        if self.read_length == 0:
            chunk, contextual_read_length = self._first_iteration()
        if chunk is None:
            chunk = self._next_chunk()
        if self.end_byte is not None and self.read_length >= self.end_byte:
            # This chunk crosses the end of the range; cut it off there.
            self.end_reached = True
            return chunk[: self.end_byte - contextual_read_length]
        return chunk
    def __next__(self) -> bytes:
        chunk = self._next()
        if chunk:
            return chunk
        # An empty chunk means the range is done.
        self.end_reached = True
        raise StopIteration()
    def close(self) -> None:
        # Close the underlying iterable if it supports it (e.g. files).
        if hasattr(self.iterable, "close"):
            self.iterable.close()  # type: ignore
def _make_chunk_iter(
stream: t.Union[t.Iterable[bytes], t.IO[bytes]],
limit: t.Optional[int],
buffer_size: int,
) -> t.Iterator[bytes]:
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, str)):
raise TypeError(
"Passed a string or byte object instead of true iterator or stream."
)
if not hasattr(stream, "read"):
for item in stream:
if item:
yield item
return
stream = t.cast(t.IO[bytes], stream)
if not isinstance(stream, LimitedStream) and limit is not None:
stream = t.cast(t.IO[bytes], LimitedStream(stream, limit))
_read = stream.read
while True:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(
    stream: t.Union[t.Iterable[bytes], t.IO[bytes]],
    limit: t.Optional[int] = None,
    buffer_size: int = 10 * 1024,
    cap_at_buffer: bool = False,
) -> t.Iterator[bytes]:
    """Safely iterates line-based over an input stream.  If the input stream
    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally as opposite
    to the :meth:`~file.readline` method that is unsafe and can only be used
    in violation of the WSGI specification.  The same problem applies to the
    `__iter__` function of the input stream which calls :meth:`~file.readline`
    without arguments.
    If you need line-by-line processing it's strongly recommended to iterate
    over the input stream using this helper function.
    .. versionchanged:: 0.8
        This function now ensures that the limit was reached.
    .. versionadded:: 0.9
       added support for iterators as input stream.
    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterate to iterate over.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is a :class:`LimitedStream`.
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    # "" works as the sentinel for both str and bytes streams because
    # only its truthiness is checked.
    first_item = next(_iter, "")
    if not first_item:
        return
    # Build the empty/CR/LF markers in the same type (str or bytes) as
    # the stream's items.
    s = _make_encode_wrapper(first_item)
    empty = t.cast(bytes, s(""))
    cr = t.cast(bytes, s("\r"))
    lf = t.cast(bytes, s("\n"))
    crlf = t.cast(bytes, s("\r\n"))
    # Put the peeked first item back in front of the iterator.
    _iter = t.cast(t.Iterator[bytes], chain((first_item,), _iter))
    def _iter_basic_lines() -> t.Iterator[bytes]:
        # Yield "lines"; a line may still be split around a chunk
        # boundary (e.g. "foo\r" / "\n") -- the outer loop merges those.
        _join = empty.join
        buffer: t.List[bytes] = []
        while True:
            new_data = next(_iter, "")
            if not new_data:
                break
            new_buf: t.List[bytes] = []
            buf_size = 0
            # ``buffer`` carries the incomplete tail of the previous
            # chunk; splitlines(True) keeps the line endings attached.
            for item in t.cast(
                t.Iterator[bytes], chain(buffer, new_data.splitlines(True))
            ):
                new_buf.append(item)
                buf_size += len(item)
                if item and item[-1:] in crlf:
                    # A trailing CR or LF marks a complete line.
                    yield _join(new_buf)
                    new_buf = []
                elif cap_at_buffer and buf_size >= buffer_size:
                    # Flush over-long pieces in buffer_size slices.
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
            buffer = new_buf
        # Flush whatever remains (a final line without a newline).
        if buffer:
            yield _join(buffer)
    # This hackery is necessary to merge 'foo\r' and '\n' into one item
    # of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
    previous = empty
    for item in _iter_basic_lines():
        if item == lf and previous[-1:] == cr:
            previous += item
            item = empty
        if previous:
            yield previous
        previous = item
    if previous:
        yield previous
def make_chunk_iter(
    stream: t.Union[t.Iterable[bytes], t.IO[bytes]],
    separator: bytes,
    limit: t.Optional[int] = None,
    buffer_size: int = 10 * 1024,
    cap_at_buffer: bool = False,
) -> t.Iterator[bytes]:
    """Works like :func:`make_line_iter` but accepts a separator
    which divides chunks.  If you want newline based processing
    you should use :func:`make_line_iter` instead as it
    supports arbitrary newline markers.
    .. versionadded:: 0.8
    .. versionadded:: 0.9
       added support for iterators as input stream.
    .. versionadded:: 0.11.10
       added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterate to iterate over.
    :param separator: the separator that divides chunks.
    :param limit: the limit in bytes for the stream.  (Usually
                  content length.  Not necessary if the `stream`
                  is otherwise already limited).
    :param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set chunks are split if they are longer
                          than the buffer size.  Internally this is implemented
                          that the buffer size might be exhausted by a factor
                          of two however.
    """
    _iter = _make_chunk_iter(stream, limit, buffer_size)
    first_item = next(_iter, b"")
    if not first_item:
        return
    # Put the peeked first item back in front of the iterator.
    _iter = t.cast(t.Iterator[bytes], chain((first_item,), _iter))
    # Pick str or bytes helpers to match the type of the stream's items.
    # The capturing group keeps the separator itself in the split result
    # so it can be matched in the loop below.
    if isinstance(first_item, str):
        separator = _to_str(separator)
        _split = re.compile(f"({re.escape(separator)})").split
        _join = "".join
    else:
        separator = _to_bytes(separator)
        _split = re.compile(b"(" + re.escape(separator) + b")").split
        _join = b"".join
    # ``buffer`` carries the incomplete tail of the previous data chunk.
    buffer: t.List[bytes] = []
    while True:
        new_data = next(_iter, b"")
        if not new_data:
            break
        chunks = _split(new_data)
        new_buf: t.List[bytes] = []
        buf_size = 0
        for item in chain(buffer, chunks):
            if item == separator:
                # A complete chunk was accumulated; flush it.
                yield _join(new_buf)
                new_buf = []
                buf_size = 0
            else:
                buf_size += len(item)
                new_buf.append(item)
                if cap_at_buffer and buf_size >= buffer_size:
                    # Flush over-long pieces in buffer_size slices.
                    rv = _join(new_buf)
                    while len(rv) >= buffer_size:
                        yield rv[:buffer_size]
                        rv = rv[buffer_size:]
                    new_buf = [rv]
                    buf_size = len(rv)
        buffer = new_buf
    # Flush whatever remains after the final separator.
    if buffer:
        yield _join(buffer)
class LimitedStream(io.IOBase):
    """Wraps a stream so that it doesn't read more than n bytes.  If the
    stream is exhausted and the caller tries to get more bytes from it
    :func:`on_exhausted` is called which by default returns an empty
    string.  The return value of that function is forwarded
    to the reader function.  So if it returns an empty string
    :meth:`read` will return an empty string as well.

    The limit however must never be higher than what the stream can
    output.  Otherwise :meth:`readlines` will try to read past the
    limit.

    .. admonition:: Note on WSGI compliance

       calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because it passes a size argument to the
       readline methods.  Unfortunately the WSGI PEP is not safely
       implementable without a size argument to :meth:`readline`
       because there is no EOF marker in the stream.  As a result
       of that the use of :meth:`readline` is discouraged.

       For the same reason iterating over the :class:`LimitedStream`
       is not portable.  It internally calls :meth:`readline`.

       We strongly suggest using :meth:`read` only or using the
       :func:`make_line_iter` which safely iterates line-based
       over a WSGI input stream.

    :param stream: the stream to wrap.
    :param limit: the limit for the stream, must not be longer than
                  what the string can provide if the stream does not
                  end with `EOF` (like `wsgi.input`)
    """

    def __init__(self, stream: t.IO[bytes], limit: int) -> None:
        # Bind the underlying reader functions once; the wrapper only
        # ever touches the stream through these two callables.
        self._read = stream.read
        self._readline = stream.readline
        # Number of bytes consumed so far; never exceeds ``limit``.
        self._pos = 0
        self.limit = limit

    def __iter__(self) -> "LimitedStream":
        return self

    @property
    def is_exhausted(self) -> bool:
        """If the stream is exhausted this attribute is `True`."""
        return self._pos >= self.limit

    def on_exhausted(self) -> bytes:
        """This is called when the stream tries to read past the limit.
        The return value of this function is returned from the reading
        function.
        """
        # Read null bytes from the stream so that we get the
        # correct end of stream marker.
        return self._read(0)

    def on_disconnect(self) -> bytes:
        """What should happen if a disconnect is detected?  The return
        value of this function is returned from read functions in case
        the client went away.  By default a
        :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
        """
        from .exceptions import ClientDisconnected

        raise ClientDisconnected()

    def exhaust(self, chunk_size: int = 1024 * 64) -> None:
        """Exhaust the stream.  This consumes all the data left until the
        limit is reached.

        :param chunk_size: the size for a chunk.  It will read the chunk
                           until the stream is exhausted and throw away
                           the results.
        """
        to_read = self.limit - self._pos
        chunk = chunk_size

        while to_read > 0:
            # The last read may be smaller than chunk_size.
            chunk = min(to_read, chunk)
            self.read(chunk)
            to_read -= chunk

    def read(self, size: t.Optional[int] = None) -> bytes:
        """Read `size` bytes or if size is not provided everything is read.

        :param size: the number of bytes read.
        """
        if self._pos >= self.limit:
            return self.on_exhausted()

        if size is None or size == -1:  # -1 is for consistence with file
            size = self.limit

        # Never read past the limit, whatever the caller asked for.
        to_read = min(self.limit - self._pos, size)

        try:
            read = self._read(to_read)
        except (OSError, ValueError):
            return self.on_disconnect()

        # A short read means the client went away before sending the
        # promised number of bytes.
        if to_read and len(read) != to_read:
            return self.on_disconnect()

        self._pos += len(read)
        return read

    def readline(self, size: t.Optional[int] = None) -> bytes:
        """Reads one line from the stream."""
        if self._pos >= self.limit:
            return self.on_exhausted()

        # Cap the line length at the remaining budget.
        if size is None:
            size = self.limit - self._pos
        else:
            size = min(size, self.limit - self._pos)

        try:
            line = self._readline(size)
        except (ValueError, OSError):
            return self.on_disconnect()

        # An empty result while bytes were still expected means the
        # client disconnected.
        if size and not line:
            return self.on_disconnect()

        self._pos += len(line)
        return line

    def readlines(self, size: t.Optional[int] = None) -> t.List[bytes]:
        """Reads a file into a list of strings.  It calls :meth:`readline`
        until the file is read to the end.  It does support the optional
        `size` argument if the underlying stream supports it for
        `readline`.

        :param size: maximum number of bytes to read in total.  By
                     default everything up to the limit is read.
        """
        result = []

        # Absolute position at which reading must stop: the stream
        # limit, further capped by ``size`` when one is given.
        if size is None:
            end = self.limit
        else:
            end = min(self.limit, self._pos + size)

        while self._pos < end:
            # Pass the *remaining* budget so a long line cannot push the
            # total past ``size``.  (The previous implementation passed
            # the caller's original ``size`` to every readline call
            # because its bookkeeping never decremented the budget.)
            line = self.readline(end - self._pos)

            if not line:
                # Defensive guard against a non-advancing readline.
                break

            result.append(line)

        return result

    def tell(self) -> int:
        """Returns the position of the stream.

        .. versionadded:: 0.9
        """
        return self._pos

    def __next__(self) -> bytes:
        line = self.readline()

        if not line:
            raise StopIteration()

        return line

    def readable(self) -> bool:
        return True