diff --git a/bombsquad_server b/bombsquad_server
index eb81a00..a28b9de 100644
--- a/bombsquad_server
+++ b/bombsquad_server
@@ -35,6 +35,8 @@ if TYPE_CHECKING:
 VERSION_STR = '1.3'
 
 # Version history:
+# 1.3.1:
+# Windows binary is now named BombSquadHeadless.exe
 # 1.3:
 # Added show_tutorial config option
 # Added team_names config option
@@ -573,7 +575,7 @@ class ServerManagerApp:
         os.environ['BA_SERVER_WRAPPER_MANAGED'] = '1'
         print(f'{Clr.CYN}Launching server subprocess...{Clr.RST}',
               flush=True)
-        binary_name = ('bombsquad_headless.exe'
+        binary_name = ('BombSquadHeadless.exe'
                        if os.name == 'nt' else './bombsquad_headless')
         assert self._ba_root_path is not None
         self._subprocess = None
diff --git a/bombsquad_server.py b/bombsquad_server.py
index eb81a00..a28b9de 100644
--- a/bombsquad_server.py
+++ b/bombsquad_server.py
@@ -35,6 +35,8 @@ if TYPE_CHECKING:
 VERSION_STR = '1.3'
 
 # Version history:
+# 1.3.1:
+# Windows binary is now named BombSquadHeadless.exe
 # 1.3:
 # Added show_tutorial config option
 # Added team_names config option
@@ -573,7 +575,7 @@ class ServerManagerApp:
         os.environ['BA_SERVER_WRAPPER_MANAGED'] = '1'
         print(f'{Clr.CYN}Launching server subprocess...{Clr.RST}',
               flush=True)
-        binary_name = ('bombsquad_headless.exe'
+        binary_name = ('BombSquadHeadless.exe'
                        if os.name == 'nt' else './bombsquad_headless')
         assert self._ba_root_path is not None
         self._subprocess = None
diff --git a/dist/BombSquadHeadless.exe b/dist/BombSquadHeadless.exe
new file mode 100644
index 0000000..0423347
Binary files /dev/null and b/dist/BombSquadHeadless.exe differ
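The rename only affects Windows; here is a minimal standalone sketch (not part of the patch) of the selection logic the wrapper now uses, with other platforms keeping the lowercase name:

    import os

    def headless_binary_name() -> str:
        # Windows builds now ship as BombSquadHeadless.exe; Linux/macOS
        # builds keep the lowercase bombsquad_headless name.
        return ('BombSquadHeadless.exe'
                if os.name == 'nt' else './bombsquad_headless')
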
Binary files a/dist/ba_data/python/ba/__pycache__/__init__.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/__init__.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_account.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_account.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_achievement.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_achievement.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_activity.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_activity.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_activitytypes.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_activitytypes.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_actor.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_actor.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_ads.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_ads.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_analytics.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_analytics.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_app.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_app.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_appconfig.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_appconfig.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_appdelegate.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_appdelegate.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_apputils.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_apputils.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_benchmark.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_benchmark.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_campaign.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_campaign.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_collision.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_collision.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_coopgame.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_coopgame.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_coopsession.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_coopsession.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_dependency.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_dependency.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_dualteamsession.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_dualteamsession.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_enums.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_enums.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_error.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_error.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_freeforallsession.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_freeforallsession.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_gameactivity.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_gameactivity.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_gameresults.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_gameresults.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_gameutils.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_gameutils.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_general.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_general.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_hooks.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_hooks.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_input.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_input.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_keyboard.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_keyboard.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_language.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_language.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_level.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_level.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_lobby.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_lobby.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_map.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_map.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_math.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_math.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_messages.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_messages.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_meta.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_meta.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_multiteamsession.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_multiteamsession.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_music.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_music.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_net.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_net.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_nodeactor.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_nodeactor.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_player.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_player.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_playlist.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_playlist.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_plugin.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_plugin.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_powerup.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_powerup.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_profile.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_profile.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_score.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_score.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_servermode.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_servermode.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_session.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_session.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_settings.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_settings.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_stats.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_stats.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_store.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_store.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_team.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_team.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_teamgame.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_teamgame.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_tips.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_tips.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_tournament.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_tournament.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/_ui.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/_ui.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/ba/__pycache__/internal.cpython-38.opt-1.pyc and b/dist/ba_data/python/ba/__pycache__/internal.cpython-38.opt-1.pyc differ
-"""Enums generated by tools/update_python_enums_module in ba-internal.""" +"""Enum vals generated by batools.pythonenumsmodule; do not edit by hand.""" from enum import Enum diff --git a/dist/ba_data/python/ba/_input.py b/dist/ba_data/python/ba/_input.py index 533ebe8..bd318ca 100644 --- a/dist/ba_data/python/ba/_input.py +++ b/dist/ba_data/python/ba/_input.py @@ -29,6 +29,27 @@ def get_device_value(device: ba.InputDevice, name: str) -> Any: subplatform = app.subplatform appconfig = _ba.app.config + # iiRcade: hard-code for a/b/c/x for now... + if _ba.app.iircade_mode: + return { + 'triggerRun2': 19, + 'unassignedButtonsRun': False, + 'buttonPickUp': 100, + 'buttonBomb': 98, + 'buttonJump': 97, + 'buttonStart': 83, + 'buttonStart2': 109, + 'buttonPunch': 99, + 'buttonRun2': 102, + 'buttonRun1': 101, + 'triggerRun1': 18, + 'buttonLeft': 22, + 'buttonRight': 23, + 'buttonUp': 20, + 'buttonDown': 21, + 'buttonVRReorient': 110 + }.get(name, -1) + # If there's an entry in our config for this controller, use it. if 'Controllers' in appconfig: ccfgs = appconfig['Controllers'] diff --git a/dist/ba_data/python/ba/_lobby.py b/dist/ba_data/python/ba/_lobby.py index 577a0c4..313b6ad 100644 --- a/dist/ba_data/python/ba/_lobby.py +++ b/dist/ba_data/python/ba/_lobby.py @@ -32,10 +32,14 @@ class JoinInfo: from ba._nodeactor import NodeActor from ba._general import WeakCall self._state = 0 - self._press_to_punch: Union[str, ba.Lstr] = _ba.charstr( - SpecialChar.LEFT_BUTTON) - self._press_to_bomb: Union[str, ba.Lstr] = _ba.charstr( - SpecialChar.RIGHT_BUTTON) + self._press_to_punch: Union[str, + ba.Lstr] = ('C' if _ba.app.iircade_mode + else _ba.charstr( + SpecialChar.LEFT_BUTTON)) + self._press_to_bomb: Union[str, + ba.Lstr] = ('B' if _ba.app.iircade_mode else + _ba.charstr( + SpecialChar.RIGHT_BUTTON)) self._joinmsg = Lstr(resource='pressAnyButtonToJoinText') can_switch_teams = (len(lobby.sessionteams) > 1) diff --git a/dist/ba_data/python/ba/_tips.py b/dist/ba_data/python/ba/_tips.py index a6c04c7..7a2501d 100644 --- a/dist/ba_data/python/ba/_tips.py +++ b/dist/ba_data/python/ba/_tips.py @@ -71,18 +71,20 @@ def get_all_tips() -> List[str]: ('You can judge when a bomb is going to explode based on the\n' 'color of sparks from its fuse: yellow..orange..red..BOOM.'), ] - tips += [ - 'If your framerate is choppy, try turning down resolution\nor ' - 'visuals in the game\'s graphics settings.' - ] app = _ba.app - if app.platform in ('android', 'ios') and not app.on_tv: + if not app.iircade_mode: + tips += [ + 'If your framerate is choppy, try turning down resolution\nor ' + 'visuals in the game\'s graphics settings.' + ] + if (app.platform in ('android', 'ios') and not app.on_tv + and not app.iircade_mode): tips += [ ('If your device gets too warm or you\'d like to conserve ' 'battery power,\nturn down "Visuals" or "Resolution" ' 'in Settings->Graphics'), ] - if app.platform in ['mac', 'android']: + if app.platform in ['mac', 'android'] and not app.iircade_mode: tips += [ 'Tired of the soundtrack? Replace it with your own!' '\nSee Settings->Audio->Soundtrack' @@ -90,7 +92,7 @@ def get_all_tips() -> List[str]: # Hot-plugging is currently only on some platforms. # FIXME: Should add a platform entry for this so don't forget to update it. 
Binary files a/dist/ba_data/python/bacommon/__pycache__/net.cpython-38.opt-1.pyc and b/dist/ba_data/python/bacommon/__pycache__/net.cpython-38.opt-1.pyc differ
diff --git a/dist/ba_data/python/bacommon/net.py b/dist/ba_data/python/bacommon/net.py
index 13a2d45..1b5a7f2 100644
--- a/dist/ba_data/python/bacommon/net.py
+++ b/dist/ba_data/python/bacommon/net.py
@@ -39,7 +39,6 @@ class PrivateHostingState:
     """Combined state of whether we're hosting, whether we can, etc."""
     unavailable_error: Optional[str] = None
     party_code: Optional[str] = None
-    able_to_host: bool = False
     tickets_to_host_now: int = 0
     minutes_until_free_host: Optional[float] = None
     free_host_minutes_remaining: Optional[float] = None
Binary files a/dist/ba_data/python/bastd/__pycache__/__init__.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/__init__.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/__pycache__/appdelegate.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/appdelegate.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/__pycache__/gameutils.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/gameutils.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/__pycache__/mainmenu.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/mainmenu.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/__pycache__/maps.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/maps.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/__pycache__/tutorial.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/__pycache__/tutorial.cpython-38.opt-1.pyc differ
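Dropping able_to_host is tolerant on the read side: efro.dataclassio (see its diff below) parses dicts with allow_unknown_attrs=True by default, so data from an older peer that still contains the field can load into the slimmed dataclass. A sketch under that assumption, using a trimmed stand-in class rather than the real one:

    from dataclasses import dataclass
    from typing import Optional
    from efro.dataclassio import ioprepped, dataclass_from_dict

    @ioprepped
    @dataclass
    class HostingStateDemo:
        unavailable_error: Optional[str] = None
        party_code: Optional[str] = None

    # 'able_to_host' is unknown to the class now; it is tolerated on
    # input and can be discarded outright.
    state = dataclass_from_dict(HostingStateDemo,
                                {'party_code': 'ABC123',
                                 'able_to_host': True},
                                discard_unknown_attrs=True)
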
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/__init__.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/__init__.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/background.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/background.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/bomb.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/bomb.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/controlsguide.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/controlsguide.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/flag.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/flag.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/image.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/image.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/onscreencountdown.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/onscreencountdown.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/onscreentimer.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/onscreentimer.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/playerspaz.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/playerspaz.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/popuptext.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/popuptext.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/powerupbox.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/powerupbox.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/respawnicon.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/respawnicon.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/scoreboard.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/scoreboard.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/spaz.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/spaz.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/spazappearance.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/spazappearance.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/spazbot.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/spazbot.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/spazfactory.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/spazfactory.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/text.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/text.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/tipstext.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/tipstext.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/actor/__pycache__/zoomtext.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/actor/__pycache__/zoomtext.cpython-38.opt-1.pyc differ
diff --git a/dist/ba_data/python/bastd/actor/controlsguide.py b/dist/ba_data/python/bastd/actor/controlsguide.py
index 8f284a2..db0d8ce 100644
--- a/dist/ba_data/python/bastd/actor/controlsguide.py
+++ b/dist/ba_data/python/bastd/actor/controlsguide.py
@@ -57,6 +57,34 @@ class ControlsGuide(ba.Actor):
         self._update_timer: Optional[ba.Timer] = None
         self._title_text: Optional[ba.Node]
         clr: Sequence[float]
+        extra_pos_1: Optional[Tuple[float, float]]
+        extra_pos_2: Optional[Tuple[float, float]]
+        if ba.app.iircade_mode:
+            xtweak = 0.2
+            ytweak = 0.2
+            jump_pos = (position[0] + offs * (-1.2 + xtweak),
+                        position[1] + offs * (0.1 + ytweak))
+            bomb_pos = (position[0] + offs * (0.0 + xtweak),
+                        position[1] + offs * (0.5 + ytweak))
+            punch_pos = (position[0] + offs * (1.2 + xtweak),
+                         position[1] + offs * (0.5 + ytweak))
+
+            pickup_pos = (position[0] + offs * (-1.4 + xtweak),
+                          position[1] + offs * (-1.2 + ytweak))
+            extra_pos_1 = (position[0] + offs * (-0.2 + xtweak),
+                           position[1] + offs * (-0.8 + ytweak))
+            extra_pos_2 = (position[0] + offs * (1.0 + xtweak),
+                           position[1] + offs * (-0.8 + ytweak))
+            self._force_hide_button_names = True
+        else:
+            punch_pos = (position[0] - offs * 1.1, position[1])
+            jump_pos = (position[0], position[1] - offs)
+            bomb_pos = (position[0] + offs * 1.1, position[1])
+            pickup_pos = (position[0], position[1] + offs)
+            extra_pos_1 = None
+            extra_pos_2 = None
+            self._force_hide_button_names = False
+
         if show_title:
             self._title_text_pos_top = (position[0],
                                         position[1] + 139.0 * scale)
@@ -79,7 +107,7 @@ class ControlsGuide(ba.Actor):
                 })
         else:
             self._title_text = None
-        pos = (position[0], position[1] - offs)
+        pos = jump_pos
         clr = (0.4, 1, 0.4)
         self._jump_image = ba.newnode(
             'image',
@@ -104,8 +132,8 @@ class ControlsGuide(ba.Actor):
                 'position': (pos[0], pos[1] - offs5),
                 'color': clr
             })
-        pos = (position[0] - offs * 1.1, position[1])
         clr = (0.2, 0.6, 1) if ouya else (1, 0.7, 0.3)
+        pos = punch_pos
         self._punch_image = ba.newnode(
             'image',
             attrs={
@@ -129,7 +157,7 @@ class ControlsGuide(ba.Actor):
                 'position': (pos[0], pos[1] - offs5),
                 'color': clr
             })
-        pos = (position[0] + offs * 1.1, position[1])
+        pos = bomb_pos
         clr = (1, 0.3, 0.3)
         self._bomb_image = ba.newnode(
             'image',
@@ -154,7 +182,7 @@ class ControlsGuide(ba.Actor):
                 'position': (pos[0], pos[1] - offs5),
                 'color': clr
             })
-        pos = (position[0], position[1] + offs)
+        pos = pickup_pos
         clr = (1, 0.8, 0.3) if ouya else (0.8, 0.5, 1)
         self._pickup_image = ba.newnode(
             'image',
@@ -208,6 +236,36 @@ class ControlsGuide(ba.Actor):
                 'h_align': 'center',
                 'color': clr
             })
+
+        if extra_pos_1 is not None:
+            self._extra_image_1: Optional[ba.Node] = ba.newnode(
+                'image',
+                attrs={
+                    'texture': ba.gettexture('nub'),
+                    'absolute_scale': True,
+                    'host_only': True,
+                    'vr_depth': 10,
+                    'position': extra_pos_1,
+                    'scale': (image_size, image_size),
+                    'color': (0.5, 0.5, 0.5)
+                })
+        else:
+            self._extra_image_1 = None
+        if extra_pos_2 is not None:
+            self._extra_image_2: Optional[ba.Node] = ba.newnode(
+                'image',
+                attrs={
+                    'texture': ba.gettexture('nub'),
+                    'absolute_scale': True,
+                    'host_only': True,
+                    'vr_depth': 10,
+                    'position': extra_pos_2,
+                    'scale': (image_size, image_size),
+                    'color': (0.5, 0.5, 0.5)
+                })
+        else:
+            self._extra_image_2 = None
+
         self._nodes = [
             self._bomb_image, self._bomb_text, self._punch_image,
             self._punch_text, self._jump_image, self._jump_text,
@@ -217,6 +275,10 @@ class ControlsGuide(ba.Actor):
         if show_title:
             assert self._title_text
             self._nodes.append(self._title_text)
+        if self._extra_image_1 is not None:
+            self._nodes.append(self._extra_image_1)
+        if self._extra_image_2 is not None:
+            self._nodes.append(self._extra_image_2)
 
         # Start everything invisible.
         for node in self._nodes:
@@ -408,6 +470,12 @@ class ControlsGuide(ba.Actor):
                                            ('${D}', down_text),
                                            ('${R}', right_text),
                                            ('${RUN}', run_text)])
+        if self._force_hide_button_names:
+            jump_button_names.clear()
+            punch_button_names.clear()
+            bomb_button_names.clear()
+            pickup_button_names.clear()
+
         self._run_text.text = run_text
         w_text: Union[ba.Lstr, str]
         if only_remote and self._lifespan is None:
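For a feel of the new iircade layout, here is a hypothetical helper (not in the patch) that evaluates the six guide anchors used above for a given center and offset; the values mirror the diff:

    from typing import Dict, Tuple

    def iircade_guide_positions(
            position: Tuple[float, float],
            offs: float,
            xtweak: float = 0.2,
            ytweak: float = 0.2) -> Dict[str, Tuple[float, float]]:
        """Anchor points of the iircade controls-guide cluster above."""

        def at(dx: float, dy: float) -> Tuple[float, float]:
            return (position[0] + offs * (dx + xtweak),
                    position[1] + offs * (dy + ytweak))

        return {
            'jump': at(-1.2, 0.1),
            'bomb': at(0.0, 0.5),
            'punch': at(1.2, 0.5),
            'pickup': at(-1.4, -1.2),
            'extra1': at(-0.2, -0.8),
            'extra2': at(1.0, -0.8),
        }
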
Binary files a/dist/ba_data/python/bastd/ui/__pycache__/configerror.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/__pycache__/configerror.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/__pycache__/helpui.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/__pycache__/helpui.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/__pycache__/kiosk.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/__pycache__/kiosk.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/__pycache__/tournamententry.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/__pycache__/tournamententry.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/gather/__pycache__/abouttab.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/gather/__pycache__/abouttab.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/gather/__pycache__/manualtab.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/gather/__pycache__/manualtab.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/bastd/ui/gather/__pycache__/privatetab.cpython-38.opt-1.pyc and b/dist/ba_data/python/bastd/ui/gather/__pycache__/privatetab.cpython-38.opt-1.pyc differ
diff --git a/dist/ba_data/python/bastd/ui/gather/abouttab.py b/dist/ba_data/python/bastd/ui/gather/abouttab.py
index a76eb11..f185b90 100644
--- a/dist/ba_data/python/bastd/ui/gather/abouttab.py
+++ b/dist/ba_data/python/bastd/ui/gather/abouttab.py
@@ -31,10 +31,12 @@ class AboutGatherTab(GatherTab):
         region_left: float,
         region_bottom: float,
     ) -> ba.Widget:
+        party_button_label = ('X' if ba.app.iircade_mode else ba.charstr(
+            ba.SpecialChar.TOP_BUTTON))
         message = ba.Lstr(
             resource='gatherWindow.aboutDescriptionText',
             subs=[('${PARTY}', ba.charstr(ba.SpecialChar.PARTY_ICON)),
-                  ('${BUTTON}', ba.charstr(ba.SpecialChar.TOP_BUTTON))],
+                  ('${BUTTON}', party_button_label)],
         )
 
         # Let's not talk about sharing in vr-mode; its tricky to fit more
diff --git a/dist/ba_data/python/bastd/ui/gather/manualtab.py b/dist/ba_data/python/bastd/ui/gather/manualtab.py
index 02963a7..1d4a677 100644
--- a/dist/ba_data/python/bastd/ui/gather/manualtab.py
+++ b/dist/ba_data/python/bastd/ui/gather/manualtab.py
@@ -13,6 +13,7 @@ from bastd.ui.gather import GatherTab
 import _ba
 import ba
 
+
 if TYPE_CHECKING:
     from typing import Any, Optional, Dict, List, Tuple, Type, Union, Callable
     from bastd.ui.gather import GatherWindow
@@ -87,8 +88,10 @@ class ManualGatherTab(GatherTab):
         self._scrollwidget: Optional[ba.Widget] = None
         self._columnwidget: Optional[ba.Widget] = None
         self._favorite_selected: Optional[str] = None
-        self._favorite_rename_window: Optional[ba.Widget] = None
-        self._party_rename_text: Optional[ba.Widget] = None
+        self._favorite_edit_window: Optional[ba.Widget] = None
+        self._party_edit_name_text: Optional[ba.Widget] = None
+        self._party_edit_addr_text: Optional[ba.Widget] = None
+        self._party_edit_port_text: Optional[ba.Widget] = None
 
     def on_activate(
         self,
@@ -349,9 +352,9 @@ class ManualGatherTab(GatherTab):
                 button_type='square',
                 color=(0.6, 0.53, 0.63),
                 textcolor=(0.75, 0.7, 0.8),
-                on_activate_call=self._on_favorites_rename_press,
+                on_activate_call=self._on_favorites_edit_press,
                 text_scale=1.0 if uiscale is ba.UIScale.SMALL else 1.2,
-                label=ba.Lstr(resource='renameText'),
+                label=ba.Lstr(resource='editText'),
                 autoselect=True)
             btnv -= b_height + b_space_extra
             ba.buttonwidget(parent=self._container,
@@ -399,15 +402,15 @@ class ManualGatherTab(GatherTab):
                              call=ba.WeakCall(
                                  self._host_lookup_result)).start()
 
-    def _on_favorites_rename_press(self) -> None:
+    def _on_favorites_edit_press(self) -> None:
         if self._favorite_selected is None:
             self._no_favorite_selected_error()
             return
 
         c_width = 600
-        c_height = 250
+        c_height = 310
         uiscale = ba.app.ui.uiscale
-        self._favorite_rename_window = cnt = ba.containerwidget(
+        self._favorite_edit_window = cnt = ba.containerwidget(
            scale=(1.8 if uiscale is ba.UIScale.SMALL else
                   1.55 if uiscale is ba.UIScale.MEDIUM else 1.0),
            size=(c_width, c_height),
@@ -417,22 +420,84 @@ class ManualGatherTab(GatherTab):
                       size=(0, 0),
                       h_align='center',
                       v_align='center',
-                      text='Enter Name of Party',
+                      text=ba.Lstr(resource='editText'),
+                      color=(0.6, 1.0, 0.6),
                       maxwidth=c_width * 0.8,
                       position=(c_width * 0.5, c_height - 60))
-        self._party_rename_text = txt = ba.textwidget(
+
+        ba.textwidget(parent=cnt,
+                      position=(c_width * 0.2 - 15, c_height - 120),
+                      color=(0.6, 1.0, 0.6),
+                      scale=1.0,
+                      size=(0, 0),
+                      maxwidth=60,
+                      h_align='right',
+                      v_align='center',
+                      text=ba.Lstr(resource='nameText'))
+
+        self._party_edit_name_text = ba.textwidget(
             parent=cnt,
-            size=(c_width * 0.8, 40),
+            size=(c_width * 0.7, 40),
             h_align='left',
             v_align='center',
             text=ba.app.config['Saved Servers'][
                 self._favorite_selected]['name'],
             editable=True,
-            description='Server name text',
-            position=(c_width * 0.1, c_height - 140),
+            description=ba.Lstr(resource='nameText'),
+            position=(c_width * 0.2, c_height - 140),
             autoselect=True,
-            maxwidth=c_width * 0.7,
+            maxwidth=c_width * 0.6,
             max_chars=200)
+
+        ba.textwidget(parent=cnt,
+                      position=(c_width * 0.2 - 15, c_height - 180),
+                      color=(0.6, 1.0, 0.6),
+                      scale=1.0,
+                      size=(0, 0),
+                      maxwidth=60,
+                      h_align='right',
+                      v_align='center',
+                      text=ba.Lstr(resource='gatherWindow.'
+                                   'manualAddressText'))
+
+        self._party_edit_addr_text = ba.textwidget(
+            parent=cnt,
+            size=(c_width * 0.4, 40),
+            h_align='left',
+            v_align='center',
+            text=ba.app.config['Saved Servers'][
+                self._favorite_selected]['addr'],
+            editable=True,
+            description=ba.Lstr(resource='gatherWindow.manualAddressText'),
+            position=(c_width * 0.2, c_height - 200),
+            autoselect=True,
+            maxwidth=c_width * 0.35,
+            max_chars=200)
+
+        ba.textwidget(parent=cnt,
+                      position=(c_width * 0.7 - 10, c_height - 180),
+                      color=(0.6, 1.0, 0.6),
+                      scale=1.0,
+                      size=(0, 0),
+                      maxwidth=45,
+                      h_align='right',
+                      v_align='center',
+                      text=ba.Lstr(resource='gatherWindow.'
+                                   'portText'))
+
+        self._party_edit_port_text = ba.textwidget(
+            parent=cnt,
+            size=(c_width * 0.2, 40),
+            h_align='left',
+            v_align='center',
+            text=str(ba.app.config['Saved Servers'][self._favorite_selected]
+                     ['port']),
+            editable=True,
+            description=ba.Lstr(resource='gatherWindow.portText'),
+            position=(c_width * 0.7, c_height - 200),
+            autoselect=True,
+            maxwidth=c_width * 0.2,
+            max_chars=6)
         cbtn = ba.buttonwidget(
             parent=cnt,
             label=ba.Lstr(resource='cancelText'),
@@ -443,32 +508,40 @@ class ManualGatherTab(GatherTab):
             position=(30, 30),
             autoselect=True)
         okb = ba.buttonwidget(parent=cnt,
-                              label='Rename',
+                              label=ba.Lstr(resource='saveText'),
                               size=(180, 60),
                               position=(c_width - 230, 30),
-                              on_activate_call=ba.Call(
-                                  self._rename_saved_party),
+                              on_activate_call=ba.Call(self._edit_saved_party),
                               autoselect=True)
         ba.widget(edit=cbtn, right_widget=okb)
         ba.widget(edit=okb, left_widget=cbtn)
-        ba.textwidget(edit=txt, on_return_press_call=okb.activate)
         ba.containerwidget(edit=cnt, cancel_button=cbtn, start_button=okb)
 
-    def _rename_saved_party(self) -> None:
-
+    def _edit_saved_party(self) -> None:
         server = self._favorite_selected
         if self._favorite_selected is None:
             self._no_favorite_selected_error()
             return
-        if not self._party_rename_text:
+        if not self._party_edit_name_text or not self._party_edit_addr_text:
             return
-        new_name_raw = cast(str, ba.textwidget(query=self._party_rename_text))
+        new_name_raw = cast(str,
+                            ba.textwidget(query=self._party_edit_name_text))
+        new_addr_raw = cast(str,
+                            ba.textwidget(query=self._party_edit_addr_text))
+        new_port_raw = cast(str,
+                            ba.textwidget(query=self._party_edit_port_text))
        ba.app.config['Saved Servers'][server]['name'] = new_name_raw
+        ba.app.config['Saved Servers'][server]['addr'] = new_addr_raw
+        try:
+            ba.app.config['Saved Servers'][server]['port'] = int(new_port_raw)
+        except ValueError:
+            # An invalid port was entered; simply keep the old value for now.
+            pass
         ba.app.config.commit()
         ba.playsound(ba.getsound('gunCocking'))
         self._refresh_favorites()
-        ba.containerwidget(edit=self._favorite_rename_window,
+        ba.containerwidget(edit=self._favorite_edit_window,
                            transition='out_scale')
 
     def _on_favorite_delete_press(self) -> None:
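_edit_saved_party() above silently keeps the stored port whenever int() fails; a hypothetical stricter variant (not in the patch) would also range-check before committing:

    def parse_port(text: str, fallback: int) -> int:
        """Parse text as a TCP/UDP port, falling back on bad input."""
        try:
            port = int(text)
        except ValueError:
            return fallback
        # Valid ports are 1-65535; the widget itself only caps input
        # length at 6 characters, which still admits values like 999999.
        return port if 0 < port < 65536 else fallback
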
diff --git a/dist/ba_data/python/bastd/ui/gather/privatetab.py b/dist/ba_data/python/bastd/ui/gather/privatetab.py
index 7852555..941be6d 100644
--- a/dist/ba_data/python/bastd/ui/gather/privatetab.py
+++ b/dist/ba_data/python/bastd/ui/gather/privatetab.py
@@ -256,7 +256,10 @@ class PrivateGatherTab(GatherTab):
         self._debug_server_comm('querying private party state')
         if _ba.get_account_state() == 'signed_in':
             _ba.add_transaction(
-                {'type': 'PRIVATE_PARTY_QUERY'},
+                {
+                    'type': 'PRIVATE_PARTY_QUERY',
+                    'expire_time': time.time() + 20,
+                },
                 callback=ba.WeakCall(
                     self._hosting_state_idle_response),
             )
@@ -761,6 +764,7 @@ class PrivateGatherTab(GatherTab):
             _ba.add_transaction(
                 {
                     'type': 'PRIVATE_PARTY_CONNECT',
+                    'expire_time': time.time() + 20,
                     'code': code
                 },
                 callback=ba.WeakCall(self._connect_response),
@@ -806,15 +810,19 @@ class PrivateGatherTab(GatherTab):
                     'type': 'PRIVATE_PARTY_START',
                     'config': dataclass_to_dict(self._hostingconfig),
                     'region_pings': ba.app.net.region_pings,
+                    'expire_time': time.time() + 20,
                 },
                 callback=ba.WeakCall(self._hosting_state_response))
             _ba.run_transactions()
         else:
             self._last_action_send_time = time.time()
-            _ba.add_transaction({'type': 'PRIVATE_PARTY_STOP'},
-                                callback=ba.WeakCall(
-                                    self._hosting_state_response))
+            _ba.add_transaction(
+                {
+                    'type': 'PRIVATE_PARTY_STOP',
+                    'expire_time': time.time() + 20,
+                },
+                callback=ba.WeakCall(self._hosting_state_response))
             _ba.run_transactions()
 
         ba.playsound(ba.getsound('click01'))
diff --git a/dist/ba_data/python/bastd/ui/helpui.py b/dist/ba_data/python/bastd/ui/helpui.py
index b49e51c..2fda7f8 100644
--- a/dist/ba_data/python/bastd/ui/helpui.py
+++ b/dist/ba_data/python/bastd/ui/helpui.py
@@ -301,17 +301,20 @@ class HelpWindow(ba.Window):
                       maxwidth=100,
                       text=txt,
                       h_align='right',
-                      color=header,
                       v_align='center',
+                      color=header,
                       flatness=1.0)
 
         txt_scale = 0.7
         if not app.vr_mode:
-            txt = ba.Lstr(resource=self._r + '.controllersInfoText',
-                          subs=[('${APP_NAME}',
-                                 ba.Lstr(resource='titleText')),
-                                ('${REMOTE_APP_NAME}',
-                                 get_remote_app_name())]).evaluate()
+            infotxt = ('.controllersInfoTextRemoteOnly'
+                       if app.iircade_mode else '.controllersInfoText')
+            txt = ba.Lstr(
+                resource=self._r + infotxt,
+                fallback_resource=self._r + '.controllersInfoText',
+                subs=[('${APP_NAME}', ba.Lstr(resource='titleText')),
+                      ('${REMOTE_APP_NAME}', get_remote_app_name())
+                      ]).evaluate()
         else:
             txt = ba.Lstr(resource=self._r + '.devicesInfoText',
                           subs=[('${APP_NAME}',
Binary files a/dist/ba_data/python/efro/__pycache__/dataclassio.cpython-38.opt-1.pyc and b/dist/ba_data/python/efro/__pycache__/dataclassio.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/efro/__pycache__/terminal.cpython-38.opt-1.pyc and b/dist/ba_data/python/efro/__pycache__/terminal.cpython-38.opt-1.pyc differ
Binary files a/dist/ba_data/python/efro/__pycache__/util.cpython-38.opt-1.pyc and b/dist/ba_data/python/efro/__pycache__/util.cpython-38.opt-1.pyc differ
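Every private-party transaction above now carries an expire_time stamp 20 seconds out; presumably the master server drops transactions that arrive past their stamp (the server-side behavior is not shown in this patch). The client-side pattern, sketched with a hypothetical wrapper:

    import time

    def with_expiry(transaction: dict, lifetime: float = 20.0) -> dict:
        """Return a copy of the transaction stamped with an expire_time."""
        return dict(transaction, expire_time=time.time() + lifetime)

    query = with_expiry({'type': 'PRIVATE_PARTY_QUERY'})
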
diff --git a/dist/ba_data/python/efro/dataclassio.py b/dist/ba_data/python/efro/dataclassio.py
index 1ccde93..5b9b449 100644
--- a/dist/ba_data/python/efro/dataclassio.py
+++ b/dist/ba_data/python/efro/dataclassio.py
@@ -21,7 +21,10 @@ from enum import Enum
 import dataclasses
 import typing
 import datetime
-from typing import TYPE_CHECKING, TypeVar, Generic, get_type_hints
+from typing import TYPE_CHECKING, TypeVar, Generic
+# Note: can pull this from typing once we update to Python 3.9+
+# noinspection PyProtectedMember
+from typing_extensions import get_args, get_type_hints, _AnnotatedAlias
 
 from efro.util import enum_by_value
 
@@ -35,7 +38,7 @@ except ModuleNotFoundError:
     _pytz_utc = None  # pylint: disable=invalid-name
 
 if TYPE_CHECKING:
-    from typing import Any, Dict, Type, Tuple, Optional, List
+    from typing import Any, Dict, Type, Tuple, Optional, List, Set
 
 T = TypeVar('T')
 
@@ -54,7 +57,116 @@ PREP_ATTR = '_DCIOPREP'
 EXTRA_ATTRS_ATTR = '_DCIOEXATTRS'
 
 
-def dataclass_to_dict(obj: Any, coerce_to_float: bool = True) -> dict:
+class Codec(Enum):
+    """Specifies expected data format exported to or imported from."""
+
+    # Use only types that will translate cleanly to/from json: lists,
+    # dicts with str keys, bools, ints, floats, and None.
+    JSON = 'json'
+
+    # Mostly like JSON but passes bytes and datetime objects through
+    # as-is instead of converting them to json-friendly types.
+    FIRESTORE = 'firestore'
+
+
+class IOAttrs:
+    """For specifying io behavior in annotations."""
+
+    storagename: Optional[str] = None
+    store_default: bool = True
+    whole_days: bool = False
+    whole_hours: bool = False
+
+    def __init__(self,
+                 storagename: Optional[str] = storagename,
+                 store_default: bool = store_default,
+                 whole_days: bool = whole_days,
+                 whole_hours: bool = whole_hours):
+
+        # Only store values that differ from class defaults to keep
+        # our instances nice and lean.
+        cls = type(self)
+        if storagename != cls.storagename:
+            self.storagename = storagename
+        if store_default != cls.store_default:
+            self.store_default = store_default
+        if whole_days != cls.whole_days:
+            self.whole_days = whole_days
+        if whole_hours != cls.whole_hours:
+            self.whole_hours = whole_hours
+
+    def validate_for_field(self, cls: Type, field: dataclasses.Field) -> None:
+        """Ensure the IOAttrs instance is ok to use with the provided field."""
+
+        # Turning off store_default requires the field to have either
+        # a default_factory or a default
+        if not self.store_default:
+            default_factory: Any = field.default_factory  # type: ignore
+            if (default_factory is dataclasses.MISSING
+                    and field.default is dataclasses.MISSING):
+                raise TypeError(f'Field {field.name} of {cls} has'
+                                f' neither a default nor a default_factory;'
+                                f' store_default=False cannot be set for it.')
+
+    def validate_datetime(self, value: datetime.datetime,
+                          fieldpath: str) -> None:
+        """Ensure a datetime value meets our value requirements."""
+        if self.whole_days:
+            if any(x != 0 for x in (value.hour, value.minute, value.second,
+                                    value.microsecond)):
+                raise ValueError(
+                    f'Value {value} at {fieldpath} is not a whole day.')
+        if self.whole_hours:
+            if any(x != 0
+                   for x in (value.minute, value.second, value.microsecond)):
+                raise ValueError(f'Value {value} at {fieldpath}'
+                                 f' is not a whole hour.')
+
+
+class FieldStoragePathCapture:
+    """Utility for obtaining dataclass storage paths in a type safe way.
+
+    Given dataclass instance foo, FieldStoragePathCapture(foo).bar.eep
+    will return 'bar.eep' (or something like 'b.e' if storagenames are
+    overridden). This can be combined with type-checking tricks that
+    return foo in the type-checker's eyes while returning
+    FieldStoragePathCapture(foo) at runtime in order to grant a measure
+    of type safety to specifying field paths for things such as db
+    queries. Be aware, however, that the type-checker will incorrectly
+    think these lookups are returning actual attr values when they
+    are actually returning strings.
+    """
+
+    def __init__(self, obj: Any, path: List[str] = None):
+        if path is None:
+            path = []
+        if not dataclasses.is_dataclass(obj):
+            raise TypeError(f'Expected a dataclass type/instance;'
+                            f' got {type(obj)}.')
+        self._cls = obj if isinstance(obj, type) else type(obj)
+        self._path = path
+
+    def __getattr__(self, name: str) -> Any:
+        prep = PrepSession(explicit=False).prep_dataclass(self._cls,
+                                                          recursion_level=0)
+        try:
+            anntype = prep.annotations[name]
+        except KeyError as exc:
+            raise AttributeError(f'{type(self)} has no {name} field.') from exc
+        anntype, ioattrs = _parse_annotated(anntype)
+        storagename = (name if (ioattrs is None or ioattrs.storagename is None)
+                       else ioattrs.storagename)
+        origin = _get_origin(anntype)
+        path = self._path + [storagename]
+
+        if dataclasses.is_dataclass(origin):
+            return FieldStoragePathCapture(origin, path=path)
+        return '.'.join(path)
+
+
+def dataclass_to_dict(obj: Any,
+                      codec: Codec = Codec.JSON,
+                      coerce_to_float: bool = True) -> dict:
     """Given a dataclass object, return a json-friendly dict.
 
     All values will be checked to ensure they match the types specified
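The new IOAttrs is meant to ride along in typing.Annotated metadata (hence the get_type_hints(..., include_extras=True) call further below); a usage sketch, assuming typing_extensions.Annotated on Python 3.8 and assuming the outputter applies storagename when writing (the prep code below collects that mapping):

    from dataclasses import dataclass
    from typing_extensions import Annotated
    from efro.dataclassio import IOAttrs, ioprepped, dataclass_to_dict

    @ioprepped
    @dataclass
    class Score:
        # Stored under the short key 'p' instead of 'points'.
        points: Annotated[int, IOAttrs(storagename='p')] = 0

    print(dataclass_to_dict(Score(points=3)))  # -> {'p': 3}
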
""" - out = _Outputter(obj, create=True, coerce_to_float=coerce_to_float).run() + out = _Outputter(obj, + create=True, + codec=codec, + coerce_to_float=coerce_to_float).run() assert isinstance(out, dict) return out @@ -83,21 +198,24 @@ def dataclass_to_json(obj: Any, coerce_to_float: bool = True) -> str: """ import json return json.dumps( - dataclass_to_dict(obj=obj, coerce_to_float=coerce_to_float), + dataclass_to_dict(obj=obj, + coerce_to_float=coerce_to_float, + codec=Codec.JSON), separators=(',', ':'), ) def dataclass_from_dict(cls: Type[T], values: dict, + codec: Codec = Codec.JSON, coerce_to_float: bool = True, allow_unknown_attrs: bool = True, discard_unknown_attrs: bool = False) -> T: """Given a dict, return a dataclass of a given type. - The dict must be in the json-friendly format as emitted from - dataclass_to_dict. This means that sequence values such as tuples or - sets should be passed as lists, enums should be passed as their + The dict must be formatted to match the specified codec (generally + json-friendly object types). This means that sequence values such as + tuples or sets should be passed as lists, enums should be passed as their associated values, nested dataclasses should be passed as dicts, etc. All values are checked to ensure their types/values are valid. @@ -117,6 +235,7 @@ def dataclass_from_dict(cls: Type[T], case they will simply be discarded. """ return _Inputter(cls, + codec=codec, coerce_to_float=coerce_to_float, allow_unknown_attrs=allow_unknown_attrs, discard_unknown_attrs=discard_unknown_attrs).run(values) @@ -139,12 +258,15 @@ def dataclass_from_json(cls: Type[T], discard_unknown_attrs=discard_unknown_attrs) -def dataclass_validate(obj: Any, coerce_to_float: bool = True) -> None: +def dataclass_validate(obj: Any, + coerce_to_float: bool = True, + codec: Codec = Codec.JSON) -> None: """Ensure that values in a dataclass instance are the correct types.""" # Simply run an output pass but tell it not to generate data; # only run validation. - _Outputter(obj, create=False, coerce_to_float=coerce_to_float).run() + _Outputter(obj, create=False, codec=codec, + coerce_to_float=coerce_to_float).run() def ioprep(cls: Type) -> None: @@ -162,10 +284,7 @@ def ioprep(cls: Type) -> None: Prepping a dataclass involves evaluating its type annotations, which, as of PEP 563, are stored simply as strings. This evaluation is done in the module namespace containing the class, so all referenced types - must be defined at that level. The exception is Typing types (Optional, - Union, etc.) which are often defined under an 'if TYPE_CHECKING' - conditional and thus not available at runtime, so are explicitly made - available during annotation evaluation. + must be defined at that level. """ PrepSession(explicit=True).prep_dataclass(cls, recursion_level=0) @@ -176,7 +295,8 @@ def ioprepped(cls: Type[T]) -> Type[T]: Note that in some cases it may not be possible to prep a dataclass immediately (such as when its type annotations refer to forward-declared types). In these cases, dataclass_prep() should be explicitly called for - the class once it is safe to do so. + the class as soon as possible; ideally at module import time to expose any + errors as early as possible in execution. """ ioprep(cls) return cls @@ -192,6 +312,9 @@ class PrepData: # Resolved annotation data with 'live' classes. annotations: Dict[str, Any] + # Map of storage names to attr names. 
+ storage_names_to_attr_names: Dict[str, str] + class PrepSession: """Context for a prep.""" @@ -202,6 +325,7 @@ class PrepSession: def prep_dataclass(self, cls: Type, recursion_level: int) -> PrepData: """Run prep on a dataclass if necessary and return its prep data.""" + # We should only need to do this once per dataclass. existing_data = getattr(cls, PREP_ATTR, None) if existing_data is not None: assert isinstance(existing_data, PrepData) @@ -223,15 +347,15 @@ class PrepSession: 'efro.dataclassio: implicitly prepping dataclass: %s.' ' It is highly recommended to explicitly prep dataclasses' ' as soon as possible after definition (via' - ' efro.dataclassio.dataclass_prep() or the' - ' @efro.dataclassio.prepped decorator).', cls) + ' efro.dataclassio.ioprep() or the' + ' @efro.dataclassio.ioprepped decorator).', cls) try: - # Use default globalns which should be the class' module, - # but provide our own locals to cover things like typing.* - # which are generally not actually present at runtime for us. - # resolved_annotations = get_type_hints(cls, localns=localns) - resolved_annotations = get_type_hints(cls) + # NOTE: perhaps we want to expose the globalns/localns args + # to this? + # pylint: disable=unexpected-keyword-arg + resolved_annotations = get_type_hints(cls, include_extras=True) + # pylint: enable=unexpected-keyword-arg except Exception as exc: raise RuntimeError( f'dataclassio prep for {cls} failed with error: {exc}.' @@ -239,17 +363,47 @@ class PrepSession: f' at the module level or add them as part of an explicit' f' prep call.') from exc + # noinspection PyDataclass + fields = dataclasses.fields(cls) + fields_by_name = {f.name: f for f in fields} + + all_storage_names: Set[str] = set() + storage_names_to_attr_names: Dict[str, str] = {} + # Ok; we've resolved actual types for this dataclass. # now recurse through them, verifying that we support all contained # types and prepping any contained dataclass types. - for attrname, attrtype in resolved_annotations.items(): + for attrname, anntype in resolved_annotations.items(): + + anntype, ioattrs = _parse_annotated(anntype) + + # If we found attached IOAttrs data, make sure it contains + # valid values for the field it is attached to. + if ioattrs is not None: + ioattrs.validate_for_field(cls, fields_by_name[attrname]) + if ioattrs.storagename is not None: + storagename = ioattrs.storagename + storage_names_to_attr_names[ioattrs.storagename] = attrname + else: + storagename = attrname + else: + storagename = attrname + + # Make sure we don't have any clashes in our storage names. + if storagename in all_storage_names: + raise TypeError(f'Multiple attrs on {cls} are using' + f' storage-name \'{storagename}\'') + all_storage_names.add(storagename) + self.prep_type(cls, attrname, - attrtype, + anntype, recursion_level=recursion_level + 1) # Success! Store our resolved stuff with the class and we're done. - prepdata = PrepData(annotations=resolved_annotations) + prepdata = PrepData( + annotations=resolved_annotations, + storage_names_to_attr_names=storage_names_to_attr_names) setattr(cls, PREP_ATTR, prepdata) return prepdata @@ -320,7 +474,7 @@ class PrepSession: else: raise TypeError( f'Dict key type {childtypes[0]} for \'{attrname}\'' - f' on {cls} is not supported by dataclassio.') + f' on {cls.__name__} is not supported by dataclassio.') # For value types we support any of our normal types. 
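# A short sketch (not from this diff) of FieldStoragePathCapture resolving
# storage paths through the prep data gathered here; the Inner/Outer
# classes are hypothetical.
from dataclasses import dataclass, field
from typing_extensions import Annotated
from efro.dataclassio import ioprepped, IOAttrs, FieldStoragePathCapture


@ioprepped
@dataclass
class Inner:
    eep: Annotated[int, IOAttrs('e')] = 0


@ioprepped
@dataclass
class Outer:
    bar: Inner = field(default_factory=Inner)


paths = FieldStoragePathCapture(Outer())
assert paths.bar.eep == 'bar.e'  # storage names, not attr names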
if not childtypes or _get_origin(childtypes[1]) is typing.Any: @@ -344,7 +498,8 @@ class PrepSession: f' has no type args; dataclassio requires type args.') if childtypes[-1] is ...: raise TypeError(f'Found ellipsis as part of type for' - f' \'{attrname}\' on {cls}; these are not' + f' \'{attrname}\' on {cls.__name__};' + f' these are not' f' supported by dataclassio.') for childtype in childtypes: self.prep_type(cls, @@ -366,7 +521,11 @@ class PrepSession: self.prep_dataclass(origin, recursion_level=recursion_level + 1) return - raise TypeError(f"Attr '{attrname}' on {cls} contains type '{anntype}'" + if origin is bytes: + return + + raise TypeError(f"Attr '{attrname}' on {cls.__name__} contains" + f" type '{anntype}'" f' which is not supported by dataclassio.') def prep_union(self, cls: Type, attrname: str, anntype: Any, @@ -376,7 +535,7 @@ class PrepSession: if (len(typeargs) != 2 or len([c for c in typeargs if c is type(None)]) != 1): raise TypeError(f'Union {anntype} for attr \'{attrname}\' on' - f' {cls} is not supported by dataclassio;' + f' {cls.__name__} is not supported by dataclassio;' f' only 2 member Unions with one type being None' f' are supported.') for childtype in typeargs: @@ -406,7 +565,7 @@ class PrepSession: f' them to be uniform.') -def _is_valid_json(obj: Any) -> bool: +def _is_valid_for_codec(obj: Any, codec: Codec) -> bool: """Return whether a value consists solely of json-supported types. Note that this does not include things like tuples which are @@ -421,9 +580,15 @@ def _is_valid_json(obj: Any) -> bool: if objtype is dict: # JSON 'objects' supports only string dict keys, but all value types. return all( - type(k) is str and _is_valid_json(v) for k, v in obj.items()) + type(k) is str and _is_valid_for_codec(v, codec) + for k, v in obj.items()) if objtype is list: - return all(_is_valid_json(elem) for elem in obj) + return all(_is_valid_for_codec(elem, codec) for elem in obj) + + # A few things are valid in firestore but not json. + if issubclass(objtype, datetime.datetime) or objtype is bytes: + return codec is Codec.FIRESTORE + return False @@ -457,9 +622,11 @@ def _get_origin(anntype: Any) -> Any: class _Outputter: """Validates or exports data contained in a dataclass instance.""" - def __init__(self, obj: Any, create: bool, coerce_to_float: bool) -> None: + def __init__(self, obj: Any, create: bool, codec: Codec, + coerce_to_float: bool) -> None: self._obj = obj self._create = create + self._codec = codec self._coerce_to_float = coerce_to_float def run(self) -> Any: @@ -467,6 +634,8 @@ class _Outputter: return self._process_dataclass(type(self._obj), self._obj, '') def _process_dataclass(self, cls: Type, obj: Any, fieldpath: str) -> Any: + # pylint: disable=too-many-locals + # pylint: disable=too-many-branches prep = PrepSession(explicit=False).prep_dataclass(type(obj), recursion_level=0) fields = dataclasses.fields(obj) @@ -477,17 +646,41 @@ class _Outputter: subfieldpath = f'{fieldpath}.{fieldname}' else: subfieldpath = fieldname - fieldtype = prep.annotations[fieldname] + anntype = prep.annotations[fieldname] value = getattr(obj, fieldname) - outvalue = self._process_value(cls, subfieldpath, fieldtype, value) + + anntype, ioattrs = _parse_annotated(anntype) + + # If we're not storing default values for this fella, + # we can skip all output processing if we've got a default value. 
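# Sketch (not from this diff) of the Optional-only union rule enforced by
# prep_union above; both classes are hypothetical.
from dataclasses import dataclass
from typing import Optional, Union
from efro.dataclassio import ioprepped


@ioprepped
@dataclass
class Fine:
    val: Optional[int] = None  # accepted: 2-member union including None


try:
    @ioprepped
    @dataclass
    class Nope:
        val: Union[int, str] = 0
except TypeError:
    pass  # rejected: unions other than Optional are unsupported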
+ if ioattrs is not None and not ioattrs.store_default: + default_factory: Any = field.default_factory # type: ignore + if default_factory is not dataclasses.MISSING: + if default_factory() == value: + continue + elif field.default is not dataclasses.MISSING: + if field.default == value: + continue + else: + raise RuntimeError( + f'Field {fieldname} of {cls.__name__} has' + f' neither a default nor a default_factory;' + f' store_default=False cannot be set for it.' + f' (AND THIS SHOULD HAVE BEEN CAUGHT IN PREP!)') + + outvalue = self._process_value(cls, subfieldpath, anntype, value, + ioattrs) if self._create: assert out is not None - out[fieldname] = outvalue + storagename = (fieldname if + (ioattrs is None or ioattrs.storagename is None) + else ioattrs.storagename) + out[storagename] = outvalue # If there's extra-attrs stored on us, check/include them. extra_attrs = getattr(obj, EXTRA_ATTRS_ATTR, None) if isinstance(extra_attrs, dict): - if not _is_valid_json(extra_attrs): + if not _is_valid_for_codec(extra_attrs, self._codec): raise TypeError( f'Extra attrs on {fieldpath} contains data type(s)' f' not supported by json.') @@ -497,7 +690,7 @@ class _Outputter: return out def _process_value(self, cls: Type, fieldpath: str, anntype: Any, - value: Any) -> Any: + value: Any, ioattrs: Optional[IOAttrs]) -> Any: # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements @@ -505,11 +698,12 @@ class _Outputter: origin = _get_origin(anntype) if origin is typing.Any: - if not _is_valid_json(value): - raise TypeError(f'Invalid value type for \'{fieldpath}\';' - f" 'Any' typed values must be types directly" - f' supported by json; got' - f" '{type(value).__name__}'.") + if not _is_valid_for_codec(value, self._codec): + raise TypeError( + f'Invalid value type for \'{fieldpath}\';' + f" 'Any' typed values must contain types directly" + f' supported by the specified codec ({self._codec.name});' + f' found \'{type(value).__name__}\' which is not.') return value if self._create else None if origin is typing.Union: @@ -523,7 +717,7 @@ class _Outputter: ] assert len(childanntypes_l) == 1 return self._process_value(cls, fieldpath, childanntypes_l[0], - value) + value, ioattrs) # Everything below this point assumes the annotation type resolves # to a concrete type. (This should have been verified at prep time). @@ -553,11 +747,12 @@ class _Outputter: f' {len(childanntypes)}.') if self._create: return [ - self._process_value(cls, fieldpath, childanntypes[i], x) - for i, x in enumerate(value) + self._process_value(cls, fieldpath, childanntypes[i], x, + ioattrs) for i, x in enumerate(value) ] for i, x in enumerate(value): - self._process_value(cls, fieldpath, childanntypes[i], x) + self._process_value(cls, fieldpath, childanntypes[i], x, + ioattrs) return None if origin is list: @@ -566,13 +761,15 @@ class _Outputter: f' found a {type(value)}') childanntypes = typing.get_args(anntype) - # 'Any' type children; make sure they are valid json values. + # 'Any' type children; make sure they are valid values for + # the specified codec. if len(childanntypes) == 0 or childanntypes[0] is typing.Any: for i, child in enumerate(value): - if not _is_valid_json(child): + if not _is_valid_for_codec(child, self._codec): raise TypeError( f'Item {i} of {fieldpath} contains' - f' data type(s) not supported by json.') + f' data type(s) not supported by the specified' + f' codec ({self._codec.name}).') # Hmm; should we do a copy here? 
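# Sketch (not from this diff) of the 'Any'-child validation above: members
# must already be valid for the chosen codec; the Grab class is
# hypothetical.
from dataclasses import dataclass, field
from typing import Any, List
from efro.dataclassio import ioprepped, dataclass_to_dict


@ioprepped
@dataclass
class Grab:
    stuff: List[Any] = field(default_factory=list)


dataclass_to_dict(Grab(stuff=[1, 'a', None]))  # fine under the JSON codec
try:
    dataclass_to_dict(Grab(stuff=[{1: 2}]))  # dict with a non-str key
except TypeError:
    pass  # rejected: not representable in json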
return value if self._create else None @@ -580,11 +777,12 @@ class _Outputter: assert len(childanntypes) == 1 if self._create: return [ - self._process_value(cls, fieldpath, childanntypes[0], x) - for x in value + self._process_value(cls, fieldpath, childanntypes[0], x, + ioattrs) for x in value ] for x in value: - self._process_value(cls, fieldpath, childanntypes[0], x) + self._process_value(cls, fieldpath, childanntypes[0], x, + ioattrs) return None if origin is set: @@ -596,10 +794,11 @@ class _Outputter: # 'Any' type children; make sure they are valid Any values. if len(childanntypes) == 0 or childanntypes[0] is typing.Any: for child in value: - if not _is_valid_json(child): + if not _is_valid_for_codec(child, self._codec): raise TypeError( f'Set at {fieldpath} contains' - f' data type(s) not supported by json.') + f' data type(s) not supported by the' + f' specified codec ({self._codec.name}).') return list(value) if self._create else None # We contain elements of some specified type. @@ -608,15 +807,16 @@ class _Outputter: # Note: we output json-friendly values so this becomes # a list. return [ - self._process_value(cls, fieldpath, childanntypes[0], x) - for x in value + self._process_value(cls, fieldpath, childanntypes[0], x, + ioattrs) for x in value ] for x in value: - self._process_value(cls, fieldpath, childanntypes[0], x) + self._process_value(cls, fieldpath, childanntypes[0], x, + ioattrs) return None if origin is dict: - return self._process_dict(cls, fieldpath, anntype, value) + return self._process_dict(cls, fieldpath, anntype, value, ioattrs) if dataclasses.is_dataclass(origin): if not isinstance(value, origin): @@ -636,21 +836,42 @@ class _Outputter: if not isinstance(value, origin): raise TypeError(f'Expected a {origin} for {fieldpath};' f' found a {type(value)}.') - # We only support timezone-aware utc times. - if (value.tzinfo is not datetime.timezone.utc - and (_pytz_utc is None or value.tzinfo is not _pytz_utc)): - raise ValueError( - 'datetime values must have timezone set as timezone.utc') + _ensure_datetime_is_timezone_aware(value) + if ioattrs is not None: + ioattrs.validate_datetime(value, fieldpath) + if self._codec is Codec.FIRESTORE: + return value + assert self._codec is Codec.JSON return [ value.year, value.month, value.day, value.hour, value.minute, value.second, value.microsecond ] if self._create else None + if origin is bytes: + return self._process_bytes(cls, fieldpath, value) + raise TypeError( f"Field '{fieldpath}' of type '{anntype}' is unsupported here.") + def _process_bytes(self, cls: Type, fieldpath: str, value: bytes) -> Any: + import base64 + if not isinstance(value, bytes): + raise TypeError( + f'Expected bytes for {fieldpath} on {cls.__name__};' + f' found a {type(value)}.') + + if not self._create: + return None + + # In JSON we convert to base64, but firestore directly supports bytes. + if self._codec is Codec.JSON: + return base64.b64encode(value).decode() + + assert self._codec is Codec.FIRESTORE + return value + def _process_dict(self, cls: Type, fieldpath: str, anntype: Any, - value: dict) -> Any: + value: dict, ioattrs: Optional[IOAttrs]) -> Any: # pylint: disable=too-many-branches if not isinstance(value, dict): raise TypeError(f'Expected a dict for {fieldpath};' @@ -660,11 +881,14 @@ class _Outputter: # We treat 'Any' dicts simply as json; we don't do any translating. 
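# Sketch (not from this diff) of the two codec paths for the bytes and
# datetime handling above; the Blob class is hypothetical.
import datetime
from dataclasses import dataclass
from efro.dataclassio import ioprepped, dataclass_to_dict, Codec
from efro.util import utc_now


@ioprepped
@dataclass
class Blob:
    data: bytes
    when: datetime.datetime


blob = Blob(data=b'\x00\x01', when=utc_now())
jout = dataclass_to_dict(blob, codec=Codec.JSON)
# JSON: bytes become a base64 str and datetimes a list of 7 ints.
assert isinstance(jout['data'], str) and isinstance(jout['when'], list)
fout = dataclass_to_dict(blob, codec=Codec.FIRESTORE)
# Firestore: both pass through as-is (datetimes must still be utc-aware).
assert fout['data'] == blob.data and fout['when'] == blob.when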
if not childtypes or childtypes[0] is typing.Any: - if not isinstance(value, dict) or not _is_valid_json(value): + if not isinstance(value, dict) or not _is_valid_for_codec( + value, self._codec): raise TypeError( f'Invalid value for Dict[Any, Any]' - f' at \'{fieldpath}\' on {cls}; all keys and values' - f' must be json-compatible when dict type is Any.') + f' at \'{fieldpath}\' on {cls.__name__};' + f' all keys and values must be directly compatible' + f' with the specified codec ({self._codec.name})' + f' when dict type is Any.') return value if self._create else None # Ok; we've got a definite key type (which we verified as valid @@ -676,22 +900,26 @@ class _Outputter: if keyanntype is str: for key, val in value.items(): if not isinstance(key, str): - raise TypeError(f'Got invalid key type {type(key)} for' - f' dict key at \'{fieldpath}\' on {cls};' - f' expected {keyanntype}.') - outval = self._process_value(cls, fieldpath, valanntype, val) + raise TypeError( + f'Got invalid key type {type(key)} for' + f' dict key at \'{fieldpath}\' on {cls.__name__};' + f' expected {keyanntype}.') + outval = self._process_value(cls, fieldpath, valanntype, val, + ioattrs) if self._create: assert out is not None out[key] = outval - # int keys are stored in json as str versions of themselves. + # int keys are stored as str versions of themselves. elif keyanntype is int: for key, val in value.items(): if not isinstance(key, int): - raise TypeError(f'Got invalid key type {type(key)} for' - f' dict key at \'{fieldpath}\' on {cls};' - f' expected an int.') - outval = self._process_value(cls, fieldpath, valanntype, val) + raise TypeError( + f'Got invalid key type {type(key)} for' + f' dict key at \'{fieldpath}\' on {cls.__name__};' + f' expected an int.') + outval = self._process_value(cls, fieldpath, valanntype, val, + ioattrs) if self._create: assert out is not None out[str(key)] = outval @@ -699,10 +927,12 @@ class _Outputter: elif issubclass(keyanntype, Enum): for key, val in value.items(): if not isinstance(key, keyanntype): - raise TypeError(f'Got invalid key type {type(key)} for' - f' dict key at \'{fieldpath}\' on {cls};' - f' expected a {keyanntype}.') - outval = self._process_value(cls, fieldpath, valanntype, val) + raise TypeError( + f'Got invalid key type {type(key)} for' + f' dict key at \'{fieldpath}\' on {cls.__name__};' + f' expected a {keyanntype}.') + outval = self._process_value(cls, fieldpath, valanntype, val, + ioattrs) if self._create: assert out is not None out[str(key.value)] = outval @@ -716,10 +946,12 @@ class _Inputter(Generic[T]): def __init__(self, cls: Type[T], + codec: Codec, coerce_to_float: bool, allow_unknown_attrs: bool = True, discard_unknown_attrs: bool = False): self._cls = cls + self._codec = codec self._coerce_to_float = coerce_to_float self._allow_unknown_attrs = allow_unknown_attrs self._discard_unknown_attrs = discard_unknown_attrs @@ -735,7 +967,7 @@ class _Inputter(Generic[T]): return out def _value_from_input(self, cls: Type, fieldpath: str, anntype: Any, - value: Any) -> Any: + value: Any, ioattrs: Optional[IOAttrs]) -> Any: """Convert an assigned value to what a dataclass field expects.""" # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches @@ -743,11 +975,12 @@ class _Inputter(Generic[T]): origin = _get_origin(anntype) if origin is typing.Any: - if not _is_valid_json(value): + if not _is_valid_for_codec(value, self._codec): raise TypeError(f'Invalid value type for \'{fieldpath}\';' - f' \'Any\' typed values must be types directly' 
- f' supported by json; got' - f' \'{type(value).__name__}\'.') + f' \'Any\' typed values must contain only' + f' types directly supported by the specified' + f' codec ({self._codec.name}); found' + f' \'{type(value).__name__}\' which is not.') return value if origin is typing.Union: @@ -761,7 +994,7 @@ class _Inputter(Generic[T]): ] assert len(childanntypes_l) == 1 return self._value_from_input(cls, fieldpath, childanntypes_l[0], - value) + value, ioattrs) # Everything below this point assumes the annotation type resolves # to a concrete type. (This should have been verified at prep time). @@ -778,13 +1011,15 @@ class _Inputter(Generic[T]): if origin in {list, set}: return self._sequence_from_input(cls, fieldpath, anntype, value, - origin) + origin, ioattrs) if origin is tuple: - return self._tuple_from_input(cls, fieldpath, anntype, value) + return self._tuple_from_input(cls, fieldpath, anntype, value, + ioattrs) if origin is dict: - return self._dict_from_input(cls, fieldpath, anntype, value) + return self._dict_from_input(cls, fieldpath, anntype, value, + ioattrs) if dataclasses.is_dataclass(origin): return self._dataclass_from_input(origin, fieldpath, value) @@ -793,11 +1028,34 @@ class _Inputter(Generic[T]): return enum_by_value(origin, value) if issubclass(origin, datetime.datetime): - return self._datetime_from_input(cls, fieldpath, value) + return self._datetime_from_input(cls, fieldpath, value, ioattrs) + + if origin is bytes: + return self._bytes_from_input(origin, fieldpath, value) raise TypeError( f"Field '{fieldpath}' of type '{anntype}' is unsupported here.") + def _bytes_from_input(self, cls: Type, fieldpath: str, + value: Any) -> bytes: + """Given input data, returns bytes.""" + import base64 + + # For firestore, bytes are passed as-is. Otherwise they're encoded + # as base64. + if self._codec is Codec.FIRESTORE: + if not isinstance(value, bytes): + raise TypeError(f'Expected a bytes object for {fieldpath}' + f' on {cls.__name__}; got a {type(value)}.') + + return value + + assert self._codec is Codec.JSON + if not isinstance(value, str): + raise TypeError(f'Expected a string object for {fieldpath}' + f' on {cls.__name__}; got a {type(value)}.') + return base64.b64decode(value) + def _dataclass_from_input(self, cls: Type, fieldpath: str, values: dict) -> Any: """Given a dict, instantiates a dataclass of the given type. @@ -809,7 +1067,9 @@ class _Inputter(Generic[T]): """ # pylint: disable=too-many-locals if not isinstance(values, dict): - raise TypeError("Expected a dict for 'values' arg.") + raise TypeError( + f'Expected a dict for {fieldpath} on {cls.__name__};' + f' got a {type(values)}.') prep = PrepSession(explicit=False).prep_dataclass(cls, recursion_level=0) @@ -820,8 +1080,11 @@ class _Inputter(Generic[T]): fields = dataclasses.fields(cls) fields_by_name = {f.name: f for f in fields} args: Dict[str, Any] = {} - for key, value in values.items(): + for rawkey, value in values.items(): + key = prep.storage_names_to_attr_names.get(rawkey, rawkey) field = fields_by_name.get(key) + + # Store unknown attrs off to the side (or error if desired). if field is None: if self._allow_unknown_attrs: if self._discard_unknown_attrs: @@ -829,39 +1092,43 @@ class _Inputter(Generic[T]): # Treat this like 'Any' data; ensure that it is valid # raw json. 
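# Sketch (not from this diff) of the unknown-attr handling here: stray
# keys ride along as extra attrs and re-emerge on output; the Small class
# is hypothetical.
from dataclasses import dataclass
from efro.dataclassio import (ioprepped, dataclass_from_dict,
                              dataclass_to_dict)


@ioprepped
@dataclass
class Small:
    num: int = 0


obj = dataclass_from_dict(Small, {'num': 1, 'stray': True})
assert dataclass_to_dict(obj) == {'num': 1, 'stray': True}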
- if not _is_valid_json(value): + if not _is_valid_for_codec(value, self._codec): raise TypeError( - f'Unknown attr {key}' + f'Unknown attr \'{key}\'' f' on {fieldpath} contains data type(s)' - f' not supported by json.') + f' not supported by the specified codec' + f' ({self._codec.name}).') extra_attrs[key] = value else: raise AttributeError( f"'{cls.__name__}' has no '{key}' field.") else: fieldname = field.name - fieldtype = prep.annotations[fieldname] + anntype = prep.annotations[fieldname] + anntype, ioattrs = _parse_annotated(anntype) + subfieldpath = (f'{fieldpath}.{fieldname}' if fieldpath else fieldname) - args[key] = self._value_from_input(cls, subfieldpath, - fieldtype, value) + args[key] = self._value_from_input(cls, subfieldpath, anntype, + value, ioattrs) try: out = cls(**args) except Exception as exc: - raise RuntimeError( - f'Error instantiating class {cls} at {fieldpath}: {exc}' - ) from exc + raise RuntimeError(f'Error instantiating class {cls.__name__}' + f' at {fieldpath}: {exc}') from exc if extra_attrs: setattr(out, EXTRA_ATTRS_ATTR, extra_attrs) return out def _dict_from_input(self, cls: Type, fieldpath: str, anntype: Any, - value: Any) -> Any: + value: Any, ioattrs: Optional[IOAttrs]) -> Any: # pylint: disable=too-many-branches + # pylint: disable=too-many-locals if not isinstance(value, dict): - raise TypeError(f'Expected a dict for \'{fieldpath}\' on {cls};' - f' got a {type(value)}.') + raise TypeError( + f'Expected a dict for \'{fieldpath}\' on {cls.__name__};' + f' got a {type(value)}.') childtypes = typing.get_args(anntype) assert len(childtypes) in (0, 2) @@ -870,11 +1137,13 @@ class _Inputter(Generic[T]): # We treat 'Any' dicts simply as json; we don't do any translating. if not childtypes or childtypes[0] is typing.Any: - if not isinstance(value, dict) or not _is_valid_json(value): + if not isinstance(value, dict) or not _is_valid_for_codec( + value, self._codec): raise TypeError(f'Got invalid value for Dict[Any, Any]' - f' at \'{fieldpath}\' on {cls};' + f' at \'{fieldpath}\' on {cls.__name__};' f' all keys and values must be' - f' json-compatible.') + f' compatible with the specified codec' + f' ({self._codec.name}).') out = value else: out = {} @@ -889,10 +1158,10 @@ class _Inputter(Generic[T]): if not isinstance(key, str): raise TypeError( f'Got invalid key type {type(key)} for' - f' dict key at \'{fieldpath}\' on {cls};' + f' dict key at \'{fieldpath}\' on {cls.__name__};' f' expected a str.') out[key] = self._value_from_input(cls, fieldpath, - valanntype, val) + valanntype, val, ioattrs) # int keys are stored in json as str versions of themselves. 
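# Sketch (not from this diff) of the str<->int dict-key translation noted
# above; the Scores class is hypothetical.
from dataclasses import dataclass, field
from typing import Dict
from efro.dataclassio import (ioprepped, dataclass_to_dict,
                              dataclass_from_dict)


@ioprepped
@dataclass
class Scores:
    by_id: Dict[int, float] = field(default_factory=dict)


out = dataclass_to_dict(Scores(by_id={7: 1.5}))
assert out == {'by_id': {'7': 1.5}}  # int key stored as str
assert dataclass_from_dict(Scores, out).by_id == {7: 1.5}  # restored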
elif keyanntype is int: @@ -900,17 +1169,17 @@ class _Inputter(Generic[T]): if not isinstance(key, str): raise TypeError( f'Got invalid key type {type(key)} for' - f' dict key at \'{fieldpath}\' on {cls};' + f' dict key at \'{fieldpath}\' on {cls.__name__};' f' expected a str.') try: keyint = int(key) except ValueError as exc: raise TypeError( f'Got invalid key value {key} for' - f' dict key at \'{fieldpath}\' on {cls};' + f' dict key at \'{fieldpath}\' on {cls.__name__};' f' expected an int in string form.') from exc out[keyint] = self._value_from_input( - cls, fieldpath, valanntype, val) + cls, fieldpath, valanntype, val, ioattrs) elif issubclass(keyanntype, Enum): # In prep we verified that all these enums' values have @@ -925,11 +1194,12 @@ class _Inputter(Generic[T]): except ValueError as exc: raise ValueError( f'Got invalid key value {key} for' - f' dict key at \'{fieldpath}\' on {cls};' + f' dict key at \'{fieldpath}\'' + f' on {cls.__name__};' f' expected a value corresponding to' f' a {keyanntype}.') from exc out[enumval] = self._value_from_input( - cls, fieldpath, valanntype, val) + cls, fieldpath, valanntype, val, ioattrs) else: for key, val in value.items(): try: @@ -937,11 +1207,12 @@ class _Inputter(Generic[T]): except (ValueError, TypeError) as exc: raise ValueError( f'Got invalid key value {key} for' - f' dict key at \'{fieldpath}\' on {cls};' + f' dict key at \'{fieldpath}\'' + f' on {cls.__name__};' f' expected {keyanntype} value (though' f' in string form).') from exc out[enumval] = self._value_from_input( - cls, fieldpath, valanntype, val) + cls, fieldpath, valanntype, val, ioattrs) else: raise RuntimeError(f'Unhandled dict in-key-type {keyanntype}') @@ -949,7 +1220,8 @@ class _Inputter(Generic[T]): return out def _sequence_from_input(self, cls: Type, fieldpath: str, anntype: Any, - value: Any, seqtype: Type) -> Any: + value: Any, seqtype: Type, + ioattrs: Optional[IOAttrs]) -> Any: # Because we are json-centric, we expect a list for all sequences. if type(value) is not list: @@ -962,7 +1234,7 @@ class _Inputter(Generic[T]): # and then just grab them. if len(childanntypes) == 0 or childanntypes[0] is typing.Any: for i, child in enumerate(value): - if not _is_valid_json(child): + if not _is_valid_for_codec(child, self._codec): raise TypeError(f'Item {i} of {fieldpath} contains' f' data type(s) not supported by json.') return value if type(value) is seqtype else seqtype(value) @@ -971,26 +1243,43 @@ class _Inputter(Generic[T]): assert len(childanntypes) == 1 childanntype = childanntypes[0] return seqtype( - self._value_from_input(cls, fieldpath, childanntype, i) + self._value_from_input(cls, fieldpath, childanntype, i, ioattrs) for i in value) - def _datetime_from_input(self, cls: Type, fieldpath: str, - value: Any) -> Any: + def _datetime_from_input(self, cls: Type, fieldpath: str, value: Any, + ioattrs: Optional[IOAttrs]) -> Any: + + # For firestore we expect a datetime object. + if self._codec is Codec.FIRESTORE: + # Don't compare exact type here, as firestore can give us + # a subclass with extended precision. + if not isinstance(value, datetime.datetime): + raise TypeError( + f'Invalid input value for "{fieldpath}" on' + f' "{cls.__name__}";' + f' expected a datetime, got a {type(value).__name__}') + _ensure_datetime_is_timezone_aware(value) + return value + + assert self._codec is Codec.JSON # We expect a list of 7 ints. 
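# Sketch (not from this diff) of IOAttrs datetime validation kicking in
# during output; the DayStamp class is hypothetical.
import datetime
from dataclasses import dataclass
from typing_extensions import Annotated
from efro.dataclassio import ioprepped, dataclass_to_dict, IOAttrs
from efro.util import utc_today


@ioprepped
@dataclass
class DayStamp:
    day: Annotated[datetime.datetime, IOAttrs(whole_days=True)]


dataclass_to_dict(DayStamp(day=utc_today()))  # ok: exactly midnight utc
try:
    dataclass_to_dict(
        DayStamp(day=utc_today() + datetime.timedelta(hours=3)))
except ValueError:
    pass  # rejected: not a whole day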
if type(value) is not list: raise TypeError( - f'Invalid input value for "{fieldpath}" on "{cls}";' + f'Invalid input value for "{fieldpath}" on "{cls.__name__}";' f' expected a list, got a {type(value).__name__}') if len(value) != 7 or not all(isinstance(x, int) for x in value): raise TypeError( - f'Invalid input value for "{fieldpath}" on "{cls}";' + f'Invalid input value for "{fieldpath}" on "{cls.__name__}";' f' expected a list of 7 ints.') - return datetime.datetime( # type: ignore + out = datetime.datetime( # type: ignore *value, tzinfo=datetime.timezone.utc) + if ioattrs is not None: + ioattrs.validate_datetime(out, fieldpath) + return out def _tuple_from_input(self, cls: Type, fieldpath: str, anntype: Any, - value: Any) -> Any: + value: Any, ioattrs: Optional[IOAttrs]) -> Any: out: List = [] @@ -1015,14 +1304,48 @@ class _Inputter(Generic[T]): # 'Any' type children; make sure they are valid json values # and then just grab them. if childanntype is typing.Any: - if not _is_valid_json(childval): + if not _is_valid_for_codec(childval, self._codec): raise TypeError(f'Item {i} of {fieldpath} contains' f' data type(s) not supported by json.') out.append(childval) else: out.append( self._value_from_input(cls, fieldpath, childanntype, - childval)) + childval, ioattrs)) assert len(out) == len(childanntypes) return tuple(out) + + +def _ensure_datetime_is_timezone_aware(value: datetime.datetime) -> None: + # We only support timezone-aware utc times. + if (value.tzinfo is not datetime.timezone.utc + and (_pytz_utc is None or value.tzinfo is not _pytz_utc)): + raise ValueError( + 'datetime values must have timezone set as timezone.utc') + + +def _parse_annotated(anntype: Any) -> Tuple[Any, Optional[IOAttrs]]: + """Parse Annotated() constructs, returning annotated type & IOAttrs.""" + # If we get an Annotated[foo, bar, eep] we take + # foo as the actual type and we look for IOAttrs instances in + # bar/eep to affect our behavior. + ioattrs: Optional[IOAttrs] = None + if isinstance(anntype, _AnnotatedAlias): + annargs = get_args(anntype) + for annarg in annargs[1:]: + if isinstance(annarg, IOAttrs): + if ioattrs is not None: + raise RuntimeError( + 'Multiple IOAttrs instances found for a' + ' single annotation; this is not supported.') + ioattrs = annarg + + # I occasionally just throw a 'x' down when I mean IOAttrs('x'); + # catch these mistakes. 
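# Sketch (not from this diff) of the raw-value guard described in the
# comment above; the Oops class is hypothetical.
from dataclasses import dataclass
from typing_extensions import Annotated
from efro.dataclassio import ioprepped


try:
    @ioprepped
    @dataclass
    class Oops:
        val: Annotated[int, 'v'] = 0  # meant IOAttrs('v')
except RuntimeError:
    pass  # prep flags the bare str in the Annotated[] entry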
+ elif isinstance(annarg, (str, int, float, bool)): + raise RuntimeError( + f'Raw {type(annarg)} found in Annotated[] entry:' + f' {anntype}; this is probably not what you intended.') + anntype = annargs[0] + return anntype, ioattrs diff --git a/dist/ba_data/python/efro/entity/__pycache__/_entity.cpython-38.opt-1.pyc b/dist/ba_data/python/efro/entity/__pycache__/_entity.cpython-38.opt-1.pyc index 74189f5..e0d42e5 100644 Binary files a/dist/ba_data/python/efro/entity/__pycache__/_entity.cpython-38.opt-1.pyc and b/dist/ba_data/python/efro/entity/__pycache__/_entity.cpython-38.opt-1.pyc differ diff --git a/dist/ba_data/python/efro/entity/__pycache__/_value.cpython-38.opt-1.pyc b/dist/ba_data/python/efro/entity/__pycache__/_value.cpython-38.opt-1.pyc index 3d7e945..944c098 100644 Binary files a/dist/ba_data/python/efro/entity/__pycache__/_value.cpython-38.opt-1.pyc and b/dist/ba_data/python/efro/entity/__pycache__/_value.cpython-38.opt-1.pyc differ diff --git a/dist/ba_data/python/efro/entity/_entity.py b/dist/ba_data/python/efro/entity/_entity.py index b866964..25ba202 100644 --- a/dist/ba_data/python/efro/entity/_entity.py +++ b/dist/ba_data/python/efro/entity/_entity.py @@ -48,9 +48,8 @@ class EntityMixin: Note that it is more efficient to pass data to an Entity's constructor than it is to create a default Entity and then call this on it. """ - self.d_data = data assert isinstance(self, CompoundValue) - self.apply_fields_to_data(self.d_data, error=error) + self.d_data = self.filter_input(data, error=error) def copy_data(self, target: Union[CompoundValue, BoundCompoundValue]) -> None: diff --git a/dist/ba_data/python/efro/entity/_value.py b/dist/ba_data/python/efro/entity/_value.py index 5798179..5fa50a2 100644 --- a/dist/ba_data/python/efro/entity/_value.py +++ b/dist/ba_data/python/efro/entity/_value.py @@ -66,7 +66,7 @@ class SimpleValue(TypedValue[T]): self._allow_none = allow_none # We store _default_data in our internal data format so need - # to run user-facing value through our input filter. + # to run user-facing values through our input filter. # Make sure we do this last since filter_input depends on above vals. 
self._default_data: T = self.filter_input(default, error=True) diff --git a/dist/ba_data/python/efro/terminal.py b/dist/ba_data/python/efro/terminal.py index aafa34f..2fad689 100644 --- a/dist/ba_data/python/efro/terminal.py +++ b/dist/ba_data/python/efro/terminal.py @@ -125,7 +125,7 @@ def _windows_enable_color() -> bool: mode = mask = ENABLE_VIRTUAL_TERMINAL_PROCESSING try: return set_conout_mode(mode, mask) - except WindowsError as exc: + except WindowsError as exc: # type: ignore if exc.winerror == ERROR_INVALID_PARAMETER: raise NotImplementedError from exc raise diff --git a/dist/ba_data/python/efro/util.py b/dist/ba_data/python/efro/util.py index 9a0a8c4..162d2aa 100644 --- a/dist/ba_data/python/efro/util.py +++ b/dist/ba_data/python/efro/util.py @@ -71,6 +71,25 @@ def utc_now() -> datetime.datetime: return datetime.datetime.now(datetime.timezone.utc) +def utc_today() -> datetime.datetime: + """Get offset-aware midnight in the utc time zone.""" + now = datetime.datetime.now(datetime.timezone.utc) + return datetime.datetime(year=now.year, + month=now.month, + day=now.day, + tzinfo=now.tzinfo) + + +def utc_this_hour() -> datetime.datetime: + """Get offset-aware beginning of the current hour in the utc time zone.""" + now = datetime.datetime.now(datetime.timezone.utc) + return datetime.datetime(year=now.year, + month=now.month, + day=now.day, + hour=now.hour, + tzinfo=now.tzinfo) + + def empty_weakref(objtype: Type[T]) -> ReferenceType[T]: """Return an invalidated weak-reference for the specified type.""" # At runtime, all weakrefs are the same; our type arg is just diff --git a/dist/ba_root/.bsac2 b/dist/ba_root/.bsac2 index bda4b4b..d25e260 100644 Binary files a/dist/ba_root/.bsac2 and b/dist/ba_root/.bsac2 differ diff --git a/dist/ba_root/config.json b/dist/ba_root/config.json index d99fc8e..060ccf4 100644 --- a/dist/ba_root/config.json +++ b/dist/ba_root/config.json @@ -171,31 +171,31 @@ }, "Port": 43210, "Region Pings": { - "af-south-1": 309.84879999999924, - "ap-northeast-1": 148.42390000000006, - "ap-northeast-2": 161.85600000000022, - "ap-south-1": 42.165108800000134, - "ap-southeast-1": 79.17185659999987, - "ap-southeast-2": 177.35650000000015, - "ca-central-1": 233.7878, - "eu-central-1": 154.8980000000002, - "eu-north-1": 184.45529999999977, - "eu-south-1": 162.1055, - "eu-west-1": 169.56609999999995, - "eu-west-2": 161.36629999999917, - "eu-west-3": 156.3263999999993, - "me-south-1": 71.85055820000007, - "sa-east-1": 351.13220000000035, - "us-east-1": 235.26540000000028, - "us-east-2": 252.81119999999913, - "us-west-1": 269.8210999999997, - "us-west-2": 282.83270000000016 + "af-south-1": 307.60571399999986, + "ap-northeast-1": 151.92372059999946, + "ap-northeast-2": 161.63225000000023, + "ap-south-1": 40.56100719999999, + "ap-southeast-1": 94.25813800000009, + "ap-southeast-2": 173.65287819999975, + "ca-central-1": 233.5935730000004, + "eu-central-1": 167.27788639999903, + "eu-north-1": 215.4378117999997, + "eu-south-1": 155.38100080000012, + "eu-west-1": 178.21445299999917, + "eu-west-2": 168.02421059999926, + "eu-west-3": 162.10895180000057, + "me-south-1": 78.06156159999996, + "sa-east-1": 343.15721339999993, + "us-east-1": 244.98570420000055, + "us-east-2": 255.25865600000049, + "us-west-1": 266.49579560000063, + "us-west-2": 283.16640400000074 }, "Show Tutorial": false, "Signed In Last Session": false, "Team Game Max Players": 20, "Team Tournament Playlists": {}, - "launchCount": 93, + "launchCount": 97, "lc14173": 1, "lc14292": 1 } \ No newline at end of file diff --git a/dist/ba_root/config.json.prev b/dist/ba_root/config.json.prev index af5b1c1..cf2ef8d 100644 --- a/dist/ba_root/config.json.prev +++ b/dist/ba_root/config.json.prev @@ -1 +1,201 @@ -{"Achievements": {"Boom Goes the Dynamite": {"Complete": false}, "Boxer": {"Complete": false}, "Dual Wielding": {"Complete": false}, "Flawless Victory": {"Complete": false}, "Free Loader": {"Complete": true}, "Gold Miner": {"Complete": false}, "Got the Moves": {"Complete": false}, "In Control":
{"Complete": false}, "Last Stand God": {"Complete": false}, "Last Stand Master": {"Complete": false}, "Last Stand Wizard": {"Complete": false}, "Mine Games": {"Complete": false}, "Off You Go Then": {"Complete": false}, "Onslaught God": {"Complete": false}, "Onslaught Master": {"Complete": false}, "Onslaught Training Victory": {"Complete": false}, "Onslaught Wizard": {"Complete": false}, "Precision Bombing": {"Complete": false}, "Pro Boxer": {"Complete": false}, "Pro Football Shutout": {"Complete": false}, "Pro Football Victory": {"Complete": false}, "Pro Onslaught Victory": {"Complete": false}, "Pro Runaround Victory": {"Complete": false}, "Rookie Football Shutout": {"Complete": false}, "Rookie Football Victory": {"Complete": false}, "Rookie Onslaught Victory": {"Complete": false}, "Runaround God": {"Complete": false}, "Runaround Master": {"Complete": false}, "Runaround Wizard": {"Complete": false}, "Sharing is Caring": {"Complete": false}, "Stayin' Alive": {"Complete": false}, "Super Mega Punch": {"Complete": false}, "Super Punch": {"Complete": false}, "TNT Terror": {"Complete": false}, "Team Player": {"Complete": false}, "The Great Wall": {"Complete": false}, "The Wall": {"Complete": false}, "Uber Football Shutout": {"Complete": false}, "Uber Football Victory": {"Complete": false}, "Uber Onslaught Victory": {"Complete": false}, "Uber Runaround Victory": {"Complete": false}}, "Auto Account State": "Server", "Auto Balance Teams": true, "Campaigns": {}, "Default Player Profiles": {"Client Input Device #1": "__account__", "Client Input Device #10": "__account__", "Client Input Device #2": "__account__", "Client Input Device #3": "__account__", "Client Input Device #4": "__account__", "Client Input Device #5": "__account__", "Client Input Device #6": "__account__", "Client Input Device #7": "__account__", "Client Input Device #8": "AARAV SINGH", "Client Input Device #9": "__account__"}, "Free-for-All Max Players": 20, "Free-for-All Playlist Randomize": true, "Free-for-All Playlist Selection": "__default__", "Free-for-All Playlists": {}, "Idle Exit Minutes": null, "Local Account Name": "Server751316", "PSTR": 0, "Player Profiles": {"__account__": {"character": "Spaz", "color": [0.5, 0.25, 1.0], "highlight": [0.5, 0.25, 1.0]}}, "Plugins": {"characters_duplicate.unlock_characters": {"enabled": true}, "importcustomcharacters.HeySmoothy": {"enabled": true}}, "Port": 43210, "Region Pings": {"af-south-1": 307.74070000000006, "ap-northeast-1": 154.2659999999998, "ap-northeast-2": 158.4915999999996, "ap-south-1": 40.25631320000015, "ap-southeast-1": 86.4678368, "ap-southeast-2": 174.49279999999945, "ca-central-1": 236.5851000000001, "eu-central-1": 152.09510000000037, "eu-north-1": 212.78989999999976, "eu-south-1": 144.31680000000034, "eu-west-1": 178.33469999999974, "eu-west-2": 161.02659999999958, "eu-west-3": 170.33000000000075, "me-south-1": 74.62674959999978, "sa-east-1": 335.5498000000008, "us-east-1": 247.49200000000116, "us-east-2": 252.68529999999956, "us-west-1": 265.23389999999927, "us-west-2": 287.52839999999935}, "Show Tutorial": false, "Signed In Last Session": false, "Team Game Max Players": 20, "Team Tournament Playlists": {}, "launchCount": 92, "lc14173": 1, "lc14292": 1} \ No newline at end of file +{ + "Achievements": { + "Boom Goes the Dynamite": { + "Complete": false + }, + "Boxer": { + "Complete": false + }, + "Dual Wielding": { + "Complete": false + }, + "Flawless Victory": { + "Complete": false + }, + "Free Loader": { + "Complete": true + }, + "Gold Miner": { + "Complete": 
false + }, + "Got the Moves": { + "Complete": false + }, + "In Control": { + "Complete": false + }, + "Last Stand God": { + "Complete": false + }, + "Last Stand Master": { + "Complete": false + }, + "Last Stand Wizard": { + "Complete": false + }, + "Mine Games": { + "Complete": false + }, + "Off You Go Then": { + "Complete": false + }, + "Onslaught God": { + "Complete": false + }, + "Onslaught Master": { + "Complete": false + }, + "Onslaught Training Victory": { + "Complete": false + }, + "Onslaught Wizard": { + "Complete": false + }, + "Precision Bombing": { + "Complete": false + }, + "Pro Boxer": { + "Complete": false + }, + "Pro Football Shutout": { + "Complete": false + }, + "Pro Football Victory": { + "Complete": false + }, + "Pro Onslaught Victory": { + "Complete": false + }, + "Pro Runaround Victory": { + "Complete": false + }, + "Rookie Football Shutout": { + "Complete": false + }, + "Rookie Football Victory": { + "Complete": false + }, + "Rookie Onslaught Victory": { + "Complete": false + }, + "Runaround God": { + "Complete": false + }, + "Runaround Master": { + "Complete": false + }, + "Runaround Wizard": { + "Complete": false + }, + "Sharing is Caring": { + "Complete": false + }, + "Stayin' Alive": { + "Complete": false + }, + "Super Mega Punch": { + "Complete": false + }, + "Super Punch": { + "Complete": false + }, + "TNT Terror": { + "Complete": false + }, + "Team Player": { + "Complete": false + }, + "The Great Wall": { + "Complete": false + }, + "The Wall": { + "Complete": false + }, + "Uber Football Shutout": { + "Complete": false + }, + "Uber Football Victory": { + "Complete": false + }, + "Uber Onslaught Victory": { + "Complete": false + }, + "Uber Runaround Victory": { + "Complete": false + } + }, + "Auto Account State": "Server", + "Auto Balance Teams": true, + "Campaigns": {}, + "Default Player Profiles": { + "Client Input Device #1": "__account__", + "Client Input Device #10": "__account__", + "Client Input Device #2": "__account__", + "Client Input Device #3": "__account__", + "Client Input Device #4": "__account__", + "Client Input Device #5": "__account__", + "Client Input Device #6": "__account__", + "Client Input Device #7": "__account__", + "Client Input Device #8": "AARAV SINGH", + "Client Input Device #9": "__account__" + }, + "Free-for-All Max Players": 20, + "Free-for-All Playlist Randomize": true, + "Free-for-All Playlist Selection": "__default__", + "Free-for-All Playlists": {}, + "Idle Exit Minutes": null, + "Local Account Name": "Server751316", + "PSTR": 0, + "Player Profiles": { + "__account__": { + "character": "Spaz", + "color": [ + 0.5, + 0.25, + 1.0 + ], + "highlight": [ + 0.5, + 0.25, + 1.0 + ] + } + }, + "Plugins": { + "characters_duplicate.unlock_characters": { + "enabled": true + }, + "importcustomcharacters.HeySmoothy": { + "enabled": true + } + }, + "Port": 43210, + "Region Pings": { + "af-south-1": 307.5986999999998, + "ap-northeast-1": 150.9280999999998, + "ap-northeast-2": 161.70740000000006, + "ap-south-1": 40.56100719999999, + "ap-southeast-1": 94.25813800000009, + "ap-southeast-2": 173.9644, + "ca-central-1": 230.90169999999955, + "eu-central-1": 157.7155999999995, + "eu-north-1": 230.69570000000004, + "eu-south-1": 158.1695000000005, + "eu-west-1": 177.26940000000013, + "eu-west-2": 168.53659999999948, + "eu-west-3": 158.8932999999999, + "me-south-1": 78.06156159999996, + "sa-east-1": 336.63750000000016, + "us-east-1": 242.1279000000016, + "us-east-2": 256.9507, + "us-west-1": 267.4833000000003, + "us-west-2": 281.160400000001 + }, + 
"Show Tutorial": false, + "Signed In Last Session": false, + "Team Game Max Players": 20, + "Team Tournament Playlists": {}, + "launchCount": 97, + "lc14173": 1, + "lc14292": 1 +} \ No newline at end of file diff --git a/dist/ba_root/mods/__pycache__/importcustomcharacters.cpython-38.opt-1.pyc b/dist/ba_root/mods/__pycache__/importcustomcharacters.cpython-38.opt-1.pyc index 16e2c34..4918eea 100644 Binary files a/dist/ba_root/mods/__pycache__/importcustomcharacters.cpython-38.opt-1.pyc and b/dist/ba_root/mods/__pycache__/importcustomcharacters.cpython-38.opt-1.pyc differ diff --git a/dist/ba_root/mods/importcustomcharacters.py b/dist/ba_root/mods/importcustomcharacters.py index ef65e3a..0265e93 100644 --- a/dist/ba_root/mods/importcustomcharacters.py +++ b/dist/ba_root/mods/importcustomcharacters.py @@ -48,6 +48,7 @@ def registercharacter(name,char): class HeySmoothy(ba.Plugin): def __init__(self): + print("custom character importer") path=os.path.join(_ba.env()["python_directory_user"],"CustomCharacters" + os.sep) if not os.path.isdir(path): diff --git a/dist/bombsquad_headless.exe b/dist/bombsquad_headless.exe deleted file mode 100644 index b0f53f5..0000000 Binary files a/dist/bombsquad_headless.exe and /dev/null differ diff --git a/dist/lib/logging/__future__.py b/dist/lib/logging/__future__.py new file mode 100644 index 0000000..d7cb8ac --- /dev/null +++ b/dist/lib/logging/__future__.py @@ -0,0 +1,146 @@ +"""Record of phased-in incompatible language changes. + +Each line is of the form: + + FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease "," + CompilerFlag ")" + +where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples +of the same form as sys.version_info: + + (PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int + PY_MINOR_VERSION, # the 1; an int + PY_MICRO_VERSION, # the 0; an int + PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string + PY_RELEASE_SERIAL # the 3; an int + ) + +OptionalRelease records the first release in which + + from __future__ import FeatureName + +was accepted. + +In the case of MandatoryReleases that have not yet occurred, +MandatoryRelease predicts the release in which the feature will become part +of the language. + +Else MandatoryRelease records when the feature became part of the language; +in releases at or after that, modules no longer need + + from __future__ import FeatureName + +to use the feature in question, but may continue to use such imports. + +MandatoryRelease may also be None, meaning that a planned feature got +dropped. + +Instances of class _Feature have two corresponding methods, +.getOptionalRelease() and .getMandatoryRelease(). + +CompilerFlag is the (bitfield) flag that should be passed in the fourth +argument to the builtin function compile() to enable the feature in +dynamically compiled code. This flag is stored in the .compiler_flag +attribute on _Future instances. These values must match the appropriate +#defines of CO_xxx flags in Include/compile.h. + +No feature line is ever to be deleted from this file. +""" + +all_feature_names = [ + "nested_scopes", + "generators", + "division", + "absolute_import", + "with_statement", + "print_function", + "unicode_literals", + "barry_as_FLUFL", + "generator_stop", + "annotations", +] + +__all__ = ["all_feature_names"] + all_feature_names + +# The CO_xxx symbols are defined here under the same names defined in +# code.h and used by compile.h, so that an editor search will find them here. 
+# However, they're not exported in __all__, because they don't really belong to +# this module. +CO_NESTED = 0x0010 # nested_scopes +CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000) +CO_FUTURE_DIVISION = 0x20000 # division +CO_FUTURE_ABSOLUTE_IMPORT = 0x40000 # perform absolute imports by default +CO_FUTURE_WITH_STATEMENT = 0x80000 # with statement +CO_FUTURE_PRINT_FUNCTION = 0x100000 # print function +CO_FUTURE_UNICODE_LITERALS = 0x200000 # unicode string literals +CO_FUTURE_BARRY_AS_BDFL = 0x400000 +CO_FUTURE_GENERATOR_STOP = 0x800000 # StopIteration becomes RuntimeError in generators +CO_FUTURE_ANNOTATIONS = 0x1000000 # annotations become strings at runtime + +class _Feature: + def __init__(self, optionalRelease, mandatoryRelease, compiler_flag): + self.optional = optionalRelease + self.mandatory = mandatoryRelease + self.compiler_flag = compiler_flag + + def getOptionalRelease(self): + """Return first release in which this feature was recognized. + + This is a 5-tuple, of the same form as sys.version_info. + """ + + return self.optional + + def getMandatoryRelease(self): + """Return release in which this feature will become mandatory. + + This is a 5-tuple, of the same form as sys.version_info, or, if + the feature was dropped, is None. + """ + + return self.mandatory + + def __repr__(self): + return "_Feature" + repr((self.optional, + self.mandatory, + self.compiler_flag)) + +nested_scopes = _Feature((2, 1, 0, "beta", 1), + (2, 2, 0, "alpha", 0), + CO_NESTED) + +generators = _Feature((2, 2, 0, "alpha", 1), + (2, 3, 0, "final", 0), + CO_GENERATOR_ALLOWED) + +division = _Feature((2, 2, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_DIVISION) + +absolute_import = _Feature((2, 5, 0, "alpha", 1), + (3, 0, 0, "alpha", 0), + CO_FUTURE_ABSOLUTE_IMPORT) + +with_statement = _Feature((2, 5, 0, "alpha", 1), + (2, 6, 0, "alpha", 0), + CO_FUTURE_WITH_STATEMENT) + +print_function = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_PRINT_FUNCTION) + +unicode_literals = _Feature((2, 6, 0, "alpha", 2), + (3, 0, 0, "alpha", 0), + CO_FUTURE_UNICODE_LITERALS) + +barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2), + (4, 0, 0, "alpha", 0), + CO_FUTURE_BARRY_AS_BDFL) + +generator_stop = _Feature((3, 5, 0, "beta", 1), + (3, 7, 0, "alpha", 0), + CO_FUTURE_GENERATOR_STOP) + +annotations = _Feature((3, 7, 0, "beta", 1), + (4, 0, 0, "alpha", 0), + CO_FUTURE_ANNOTATIONS) diff --git a/dist/lib/logging/__phello__.foo.py b/dist/lib/logging/__phello__.foo.py new file mode 100644 index 0000000..8e8623e --- /dev/null +++ b/dist/lib/logging/__phello__.foo.py @@ -0,0 +1 @@ +# This file exists as a helper for the test.test_frozen module. 
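For context, the vendored __future__.py above explains that each _Feature's compiler_flag can be passed as the fourth argument of the builtin compile() to enable that feature in dynamically compiled code. A minimal illustrative sketch of that mechanism (plain Python, not part of this diff; the function name f and the placeholder SomeUndefinedName are made up for the demo):

    import __future__

    # Compile a snippet with the `annotations` future enabled by passing the
    # feature's compiler_flag as the `flags` argument of compile(), exactly as
    # the vendored docstring above describes.
    code = compile(
        "def f(x: SomeUndefinedName): pass",
        "<demo>",
        "exec",
        flags=__future__.annotations.compiler_flag,
    )
    ns = {}
    exec(code, ns)
    # With CO_FUTURE_ANNOTATIONS active, the annotation is stored as a string,
    # so the undefined name is never evaluated at function-definition time:
    print(ns["f"].__annotations__)  # -> {'x': 'SomeUndefinedName'}

Without the flag, executing the same source raises NameError, since Python 3.8 evaluates annotations eagerly when the def statement runs.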
diff --git a/dist/lib/logging/__pycache__/__future__.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/__future__.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0bf191f Binary files /dev/null and b/dist/lib/logging/__pycache__/__future__.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/__phello__.foo.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/__phello__.foo.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8a3a4d0 Binary files /dev/null and b/dist/lib/logging/__pycache__/__phello__.foo.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_bootlocale.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_bootlocale.cpython-38.opt-1.pyc new file mode 100644 index 0000000..cebdf12 Binary files /dev/null and b/dist/lib/logging/__pycache__/_bootlocale.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_collections_abc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_collections_abc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..c4ab11e Binary files /dev/null and b/dist/lib/logging/__pycache__/_collections_abc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_compat_pickle.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_compat_pickle.cpython-38.opt-1.pyc new file mode 100644 index 0000000..366dbcf Binary files /dev/null and b/dist/lib/logging/__pycache__/_compat_pickle.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_compression.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_compression.cpython-38.opt-1.pyc new file mode 100644 index 0000000..34c66c7 Binary files /dev/null and b/dist/lib/logging/__pycache__/_compression.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_dummy_thread.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_dummy_thread.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2a34e23 Binary files /dev/null and b/dist/lib/logging/__pycache__/_dummy_thread.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_markupbase.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_markupbase.cpython-38.opt-1.pyc new file mode 100644 index 0000000..183da10 Binary files /dev/null and b/dist/lib/logging/__pycache__/_markupbase.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_osx_support.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_osx_support.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e799447 Binary files /dev/null and b/dist/lib/logging/__pycache__/_osx_support.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_py_abc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_py_abc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..342bc17 Binary files /dev/null and b/dist/lib/logging/__pycache__/_py_abc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_pydecimal.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_pydecimal.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0e850d0 Binary files /dev/null and b/dist/lib/logging/__pycache__/_pydecimal.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_pyio.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_pyio.cpython-38.opt-1.pyc new file mode 100644 index 0000000..893e941 Binary files /dev/null and b/dist/lib/logging/__pycache__/_pyio.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_sitebuiltins.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_sitebuiltins.cpython-38.opt-1.pyc new file mode 100644 index 
0000000..f25a897 Binary files /dev/null and b/dist/lib/logging/__pycache__/_sitebuiltins.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_strptime.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_strptime.cpython-38.opt-1.pyc new file mode 100644 index 0000000..7df5354 Binary files /dev/null and b/dist/lib/logging/__pycache__/_strptime.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_threading_local.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_threading_local.cpython-38.opt-1.pyc new file mode 100644 index 0000000..800274b Binary files /dev/null and b/dist/lib/logging/__pycache__/_threading_local.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/_weakrefset.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/_weakrefset.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6081f3c Binary files /dev/null and b/dist/lib/logging/__pycache__/_weakrefset.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/abc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/abc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..65cfe4a Binary files /dev/null and b/dist/lib/logging/__pycache__/abc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/aifc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/aifc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..35a0eb7 Binary files /dev/null and b/dist/lib/logging/__pycache__/aifc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/antigravity.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/antigravity.cpython-38.opt-1.pyc new file mode 100644 index 0000000..aa15f5d Binary files /dev/null and b/dist/lib/logging/__pycache__/antigravity.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/argparse.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/argparse.cpython-38.opt-1.pyc new file mode 100644 index 0000000..26f957e Binary files /dev/null and b/dist/lib/logging/__pycache__/argparse.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/ast.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/ast.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2df5a49 Binary files /dev/null and b/dist/lib/logging/__pycache__/ast.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/asynchat.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/asynchat.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2175586 Binary files /dev/null and b/dist/lib/logging/__pycache__/asynchat.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/asyncore.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/asyncore.cpython-38.opt-1.pyc new file mode 100644 index 0000000..c127ff3 Binary files /dev/null and b/dist/lib/logging/__pycache__/asyncore.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/base64.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/base64.cpython-38.opt-1.pyc new file mode 100644 index 0000000..bbcbdda Binary files /dev/null and b/dist/lib/logging/__pycache__/base64.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/bdb.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/bdb.cpython-38.opt-1.pyc new file mode 100644 index 0000000..edabe3f Binary files /dev/null and b/dist/lib/logging/__pycache__/bdb.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/binhex.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/binhex.cpython-38.opt-1.pyc new file mode 100644 index 0000000..4c6b123 Binary files 
/dev/null and b/dist/lib/logging/__pycache__/binhex.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/bisect.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/bisect.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8eac1e9 Binary files /dev/null and b/dist/lib/logging/__pycache__/bisect.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/bz2.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/bz2.cpython-38.opt-1.pyc new file mode 100644 index 0000000..407ad09 Binary files /dev/null and b/dist/lib/logging/__pycache__/bz2.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/cProfile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/cProfile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..ed37fd5 Binary files /dev/null and b/dist/lib/logging/__pycache__/cProfile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/calendar.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/calendar.cpython-38.opt-1.pyc new file mode 100644 index 0000000..4210284 Binary files /dev/null and b/dist/lib/logging/__pycache__/calendar.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/cgi.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/cgi.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8ba6720 Binary files /dev/null and b/dist/lib/logging/__pycache__/cgi.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/cgitb.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/cgitb.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d0371cf Binary files /dev/null and b/dist/lib/logging/__pycache__/cgitb.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/chunk.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/chunk.cpython-38.opt-1.pyc new file mode 100644 index 0000000..b77b75a Binary files /dev/null and b/dist/lib/logging/__pycache__/chunk.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/cmd.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/cmd.cpython-38.opt-1.pyc new file mode 100644 index 0000000..067004c Binary files /dev/null and b/dist/lib/logging/__pycache__/cmd.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/code.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/code.cpython-38.opt-1.pyc new file mode 100644 index 0000000..4f053cf Binary files /dev/null and b/dist/lib/logging/__pycache__/code.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/codecs.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/codecs.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e4401a8 Binary files /dev/null and b/dist/lib/logging/__pycache__/codecs.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/codeop.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/codeop.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2ef3071 Binary files /dev/null and b/dist/lib/logging/__pycache__/codeop.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/colorsys.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/colorsys.cpython-38.opt-1.pyc new file mode 100644 index 0000000..146d1f8 Binary files /dev/null and b/dist/lib/logging/__pycache__/colorsys.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/compileall.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/compileall.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2601439 Binary files /dev/null and b/dist/lib/logging/__pycache__/compileall.cpython-38.opt-1.pyc differ diff --git 
a/dist/lib/logging/__pycache__/configparser.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/configparser.cpython-38.opt-1.pyc new file mode 100644 index 0000000..1ef5e71 Binary files /dev/null and b/dist/lib/logging/__pycache__/configparser.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/contextlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/contextlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9b1cf52 Binary files /dev/null and b/dist/lib/logging/__pycache__/contextlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/contextvars.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/contextvars.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3926f1f Binary files /dev/null and b/dist/lib/logging/__pycache__/contextvars.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/copy.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/copy.cpython-38.opt-1.pyc new file mode 100644 index 0000000..510c7aa Binary files /dev/null and b/dist/lib/logging/__pycache__/copy.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/copyreg.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/copyreg.cpython-38.opt-1.pyc new file mode 100644 index 0000000..ab6d124 Binary files /dev/null and b/dist/lib/logging/__pycache__/copyreg.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/crypt.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/crypt.cpython-38.opt-1.pyc new file mode 100644 index 0000000..18f51be Binary files /dev/null and b/dist/lib/logging/__pycache__/crypt.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/csv.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/csv.cpython-38.opt-1.pyc new file mode 100644 index 0000000..37e36cd Binary files /dev/null and b/dist/lib/logging/__pycache__/csv.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/dataclasses.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/dataclasses.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9842296 Binary files /dev/null and b/dist/lib/logging/__pycache__/dataclasses.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/datetime.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/datetime.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6f0846e Binary files /dev/null and b/dist/lib/logging/__pycache__/datetime.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/decimal.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/decimal.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e617e66 Binary files /dev/null and b/dist/lib/logging/__pycache__/decimal.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/difflib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/difflib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..f96648a Binary files /dev/null and b/dist/lib/logging/__pycache__/difflib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/dis.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/dis.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9aed396 Binary files /dev/null and b/dist/lib/logging/__pycache__/dis.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/doctest.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/doctest.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3bb9a30 Binary files /dev/null and b/dist/lib/logging/__pycache__/doctest.cpython-38.opt-1.pyc differ diff --git 
a/dist/lib/logging/__pycache__/dummy_threading.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/dummy_threading.cpython-38.opt-1.pyc new file mode 100644 index 0000000..b41eff8 Binary files /dev/null and b/dist/lib/logging/__pycache__/dummy_threading.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/enum.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/enum.cpython-38.opt-1.pyc new file mode 100644 index 0000000..83a6d73 Binary files /dev/null and b/dist/lib/logging/__pycache__/enum.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/filecmp.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/filecmp.cpython-38.opt-1.pyc new file mode 100644 index 0000000..369cec1 Binary files /dev/null and b/dist/lib/logging/__pycache__/filecmp.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/fileinput.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/fileinput.cpython-38.opt-1.pyc new file mode 100644 index 0000000..4013d1f Binary files /dev/null and b/dist/lib/logging/__pycache__/fileinput.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/fnmatch.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/fnmatch.cpython-38.opt-1.pyc new file mode 100644 index 0000000..09af3df Binary files /dev/null and b/dist/lib/logging/__pycache__/fnmatch.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/formatter.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/formatter.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e4ef0b9 Binary files /dev/null and b/dist/lib/logging/__pycache__/formatter.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/fractions.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/fractions.cpython-38.opt-1.pyc new file mode 100644 index 0000000..1587bdb Binary files /dev/null and b/dist/lib/logging/__pycache__/fractions.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/ftplib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/ftplib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..055c99b Binary files /dev/null and b/dist/lib/logging/__pycache__/ftplib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/functools.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/functools.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2364dd1 Binary files /dev/null and b/dist/lib/logging/__pycache__/functools.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/genericpath.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/genericpath.cpython-38.opt-1.pyc new file mode 100644 index 0000000..04870e5 Binary files /dev/null and b/dist/lib/logging/__pycache__/genericpath.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/getopt.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/getopt.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d715306 Binary files /dev/null and b/dist/lib/logging/__pycache__/getopt.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/getpass.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/getpass.cpython-38.opt-1.pyc new file mode 100644 index 0000000..97185cb Binary files /dev/null and b/dist/lib/logging/__pycache__/getpass.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/gettext.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/gettext.cpython-38.opt-1.pyc new file mode 100644 index 0000000..96866a9 Binary files /dev/null and b/dist/lib/logging/__pycache__/gettext.cpython-38.opt-1.pyc differ diff --git 
a/dist/lib/logging/__pycache__/glob.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/glob.cpython-38.opt-1.pyc new file mode 100644 index 0000000..ec9025f Binary files /dev/null and b/dist/lib/logging/__pycache__/glob.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/gzip.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/gzip.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d3e5ac3 Binary files /dev/null and b/dist/lib/logging/__pycache__/gzip.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/hashlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/hashlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..36df546 Binary files /dev/null and b/dist/lib/logging/__pycache__/hashlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/heapq.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/heapq.cpython-38.opt-1.pyc new file mode 100644 index 0000000..87aea63 Binary files /dev/null and b/dist/lib/logging/__pycache__/heapq.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/hmac.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/hmac.cpython-38.opt-1.pyc new file mode 100644 index 0000000..f51ccf7 Binary files /dev/null and b/dist/lib/logging/__pycache__/hmac.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/imghdr.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/imghdr.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3aabd20 Binary files /dev/null and b/dist/lib/logging/__pycache__/imghdr.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/imp.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/imp.cpython-38.opt-1.pyc new file mode 100644 index 0000000..09b41d9 Binary files /dev/null and b/dist/lib/logging/__pycache__/imp.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/inspect.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/inspect.cpython-38.opt-1.pyc new file mode 100644 index 0000000..69599de Binary files /dev/null and b/dist/lib/logging/__pycache__/inspect.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/io.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/io.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d536517 Binary files /dev/null and b/dist/lib/logging/__pycache__/io.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/ipaddress.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/ipaddress.cpython-38.opt-1.pyc new file mode 100644 index 0000000..525ed51 Binary files /dev/null and b/dist/lib/logging/__pycache__/ipaddress.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/keyword.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/keyword.cpython-38.opt-1.pyc new file mode 100644 index 0000000..21f7ca0 Binary files /dev/null and b/dist/lib/logging/__pycache__/keyword.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/linecache.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/linecache.cpython-38.opt-1.pyc new file mode 100644 index 0000000..632ad6c Binary files /dev/null and b/dist/lib/logging/__pycache__/linecache.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/locale.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/locale.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e14f8e9 Binary files /dev/null and b/dist/lib/logging/__pycache__/locale.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/lzma.cpython-38.opt-1.pyc 
b/dist/lib/logging/__pycache__/lzma.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0fed456 Binary files /dev/null and b/dist/lib/logging/__pycache__/lzma.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/mailbox.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/mailbox.cpython-38.opt-1.pyc new file mode 100644 index 0000000..909488f Binary files /dev/null and b/dist/lib/logging/__pycache__/mailbox.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/mailcap.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/mailcap.cpython-38.opt-1.pyc new file mode 100644 index 0000000..bd902ea Binary files /dev/null and b/dist/lib/logging/__pycache__/mailcap.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/mimetypes.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/mimetypes.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0d574a7 Binary files /dev/null and b/dist/lib/logging/__pycache__/mimetypes.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/modulefinder.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/modulefinder.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8490816 Binary files /dev/null and b/dist/lib/logging/__pycache__/modulefinder.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/netrc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/netrc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a8c6285 Binary files /dev/null and b/dist/lib/logging/__pycache__/netrc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/nntplib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/nntplib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..78eb01b Binary files /dev/null and b/dist/lib/logging/__pycache__/nntplib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/ntpath.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/ntpath.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e3907b4 Binary files /dev/null and b/dist/lib/logging/__pycache__/ntpath.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/nturl2path.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/nturl2path.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6ce2d3d Binary files /dev/null and b/dist/lib/logging/__pycache__/nturl2path.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/numbers.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/numbers.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8136e54 Binary files /dev/null and b/dist/lib/logging/__pycache__/numbers.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/opcode.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/opcode.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2779e5d Binary files /dev/null and b/dist/lib/logging/__pycache__/opcode.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/operator.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/operator.cpython-38.opt-1.pyc new file mode 100644 index 0000000..ba6abd9 Binary files /dev/null and b/dist/lib/logging/__pycache__/operator.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/optparse.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/optparse.cpython-38.opt-1.pyc new file mode 100644 index 0000000..026a6d3 Binary files /dev/null and b/dist/lib/logging/__pycache__/optparse.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/os.cpython-38.opt-1.pyc 
b/dist/lib/logging/__pycache__/os.cpython-38.opt-1.pyc new file mode 100644 index 0000000..89a0695 Binary files /dev/null and b/dist/lib/logging/__pycache__/os.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pathlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pathlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a0b78f0 Binary files /dev/null and b/dist/lib/logging/__pycache__/pathlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pdb.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pdb.cpython-38.opt-1.pyc new file mode 100644 index 0000000..1f91515 Binary files /dev/null and b/dist/lib/logging/__pycache__/pdb.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pickle.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pickle.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a4f11d2 Binary files /dev/null and b/dist/lib/logging/__pycache__/pickle.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pickletools.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pickletools.cpython-38.opt-1.pyc new file mode 100644 index 0000000..c2edc05 Binary files /dev/null and b/dist/lib/logging/__pycache__/pickletools.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pipes.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pipes.cpython-38.opt-1.pyc new file mode 100644 index 0000000..80a45f5 Binary files /dev/null and b/dist/lib/logging/__pycache__/pipes.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pkgutil.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pkgutil.cpython-38.opt-1.pyc new file mode 100644 index 0000000..29128d9 Binary files /dev/null and b/dist/lib/logging/__pycache__/pkgutil.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/platform.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/platform.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a2045c9 Binary files /dev/null and b/dist/lib/logging/__pycache__/platform.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/plistlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/plistlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..4f03d37 Binary files /dev/null and b/dist/lib/logging/__pycache__/plistlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/poplib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/poplib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6cc0fed Binary files /dev/null and b/dist/lib/logging/__pycache__/poplib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/posixpath.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/posixpath.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6d89aaa Binary files /dev/null and b/dist/lib/logging/__pycache__/posixpath.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pprint.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pprint.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6b187ac Binary files /dev/null and b/dist/lib/logging/__pycache__/pprint.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/profile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/profile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d223719 Binary files /dev/null and b/dist/lib/logging/__pycache__/profile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pstats.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pstats.cpython-38.opt-1.pyc new file 
mode 100644 index 0000000..47bb39d Binary files /dev/null and b/dist/lib/logging/__pycache__/pstats.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pty.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pty.cpython-38.opt-1.pyc new file mode 100644 index 0000000..b0d8841 Binary files /dev/null and b/dist/lib/logging/__pycache__/pty.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/py_compile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/py_compile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9c948e2 Binary files /dev/null and b/dist/lib/logging/__pycache__/py_compile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pyclbr.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pyclbr.cpython-38.opt-1.pyc new file mode 100644 index 0000000..21b02d8 Binary files /dev/null and b/dist/lib/logging/__pycache__/pyclbr.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/pydoc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/pydoc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..36ec757 Binary files /dev/null and b/dist/lib/logging/__pycache__/pydoc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/queue.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/queue.cpython-38.opt-1.pyc new file mode 100644 index 0000000..366e9a9 Binary files /dev/null and b/dist/lib/logging/__pycache__/queue.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/quopri.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/quopri.cpython-38.opt-1.pyc new file mode 100644 index 0000000..19c8830 Binary files /dev/null and b/dist/lib/logging/__pycache__/quopri.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/random.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/random.cpython-38.opt-1.pyc new file mode 100644 index 0000000..929dcd5 Binary files /dev/null and b/dist/lib/logging/__pycache__/random.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/re.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/re.cpython-38.opt-1.pyc new file mode 100644 index 0000000..664ee7f Binary files /dev/null and b/dist/lib/logging/__pycache__/re.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/reprlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/reprlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..993b8c8 Binary files /dev/null and b/dist/lib/logging/__pycache__/reprlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/rlcompleter.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/rlcompleter.cpython-38.opt-1.pyc new file mode 100644 index 0000000..b7dbaa9 Binary files /dev/null and b/dist/lib/logging/__pycache__/rlcompleter.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/runpy.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/runpy.cpython-38.opt-1.pyc new file mode 100644 index 0000000..5fbcd6c Binary files /dev/null and b/dist/lib/logging/__pycache__/runpy.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sched.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sched.cpython-38.opt-1.pyc new file mode 100644 index 0000000..84b675e Binary files /dev/null and b/dist/lib/logging/__pycache__/sched.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/secrets.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/secrets.cpython-38.opt-1.pyc new file mode 100644 index 0000000..de7e332 Binary files /dev/null and 
b/dist/lib/logging/__pycache__/secrets.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/selectors.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/selectors.cpython-38.opt-1.pyc new file mode 100644 index 0000000..132374d Binary files /dev/null and b/dist/lib/logging/__pycache__/selectors.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/shelve.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/shelve.cpython-38.opt-1.pyc new file mode 100644 index 0000000..eadd59c Binary files /dev/null and b/dist/lib/logging/__pycache__/shelve.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/shlex.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/shlex.cpython-38.opt-1.pyc new file mode 100644 index 0000000..844c728 Binary files /dev/null and b/dist/lib/logging/__pycache__/shlex.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/shutil.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/shutil.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3d45ffc Binary files /dev/null and b/dist/lib/logging/__pycache__/shutil.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/signal.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/signal.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3b89ad7 Binary files /dev/null and b/dist/lib/logging/__pycache__/signal.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/site.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/site.cpython-38.opt-1.pyc new file mode 100644 index 0000000..17f6e0d Binary files /dev/null and b/dist/lib/logging/__pycache__/site.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/smtpd.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/smtpd.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e015a68 Binary files /dev/null and b/dist/lib/logging/__pycache__/smtpd.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/smtplib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/smtplib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d65869a Binary files /dev/null and b/dist/lib/logging/__pycache__/smtplib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sndhdr.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sndhdr.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6401418 Binary files /dev/null and b/dist/lib/logging/__pycache__/sndhdr.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/socket.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/socket.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9327ad9 Binary files /dev/null and b/dist/lib/logging/__pycache__/socket.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/socketserver.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/socketserver.cpython-38.opt-1.pyc new file mode 100644 index 0000000..da0b284 Binary files /dev/null and b/dist/lib/logging/__pycache__/socketserver.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sre_compile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sre_compile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0a3d186 Binary files /dev/null and b/dist/lib/logging/__pycache__/sre_compile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sre_constants.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sre_constants.cpython-38.opt-1.pyc new file mode 100644 index 0000000..51a4a38 Binary files /dev/null and 
b/dist/lib/logging/__pycache__/sre_constants.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sre_parse.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sre_parse.cpython-38.opt-1.pyc new file mode 100644 index 0000000..40cb7a1 Binary files /dev/null and b/dist/lib/logging/__pycache__/sre_parse.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/ssl.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/ssl.cpython-38.opt-1.pyc new file mode 100644 index 0000000..816041f Binary files /dev/null and b/dist/lib/logging/__pycache__/ssl.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/stat.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/stat.cpython-38.opt-1.pyc new file mode 100644 index 0000000..65897aa Binary files /dev/null and b/dist/lib/logging/__pycache__/stat.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/statistics.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/statistics.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0c2a73d Binary files /dev/null and b/dist/lib/logging/__pycache__/statistics.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/string.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/string.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d899244 Binary files /dev/null and b/dist/lib/logging/__pycache__/string.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/stringprep.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/stringprep.cpython-38.opt-1.pyc new file mode 100644 index 0000000..9e63e64 Binary files /dev/null and b/dist/lib/logging/__pycache__/stringprep.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/struct.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/struct.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6dfcd7f Binary files /dev/null and b/dist/lib/logging/__pycache__/struct.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/subprocess.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/subprocess.cpython-38.opt-1.pyc new file mode 100644 index 0000000..c0de491 Binary files /dev/null and b/dist/lib/logging/__pycache__/subprocess.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sunau.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sunau.cpython-38.opt-1.pyc new file mode 100644 index 0000000..0fcd2fa Binary files /dev/null and b/dist/lib/logging/__pycache__/sunau.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/symbol.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/symbol.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2f28d22 Binary files /dev/null and b/dist/lib/logging/__pycache__/symbol.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/symtable.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/symtable.cpython-38.opt-1.pyc new file mode 100644 index 0000000..8765d5b Binary files /dev/null and b/dist/lib/logging/__pycache__/symtable.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/sysconfig.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/sysconfig.cpython-38.opt-1.pyc new file mode 100644 index 0000000..3a81836 Binary files /dev/null and b/dist/lib/logging/__pycache__/sysconfig.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tabnanny.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tabnanny.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d8d8daa Binary files /dev/null and 
b/dist/lib/logging/__pycache__/tabnanny.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tarfile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tarfile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..bb88533 Binary files /dev/null and b/dist/lib/logging/__pycache__/tarfile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/telnetlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/telnetlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..e4a82aa Binary files /dev/null and b/dist/lib/logging/__pycache__/telnetlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tempfile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tempfile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..29cae83 Binary files /dev/null and b/dist/lib/logging/__pycache__/tempfile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/textwrap.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/textwrap.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6761ea2 Binary files /dev/null and b/dist/lib/logging/__pycache__/textwrap.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/this.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/this.cpython-38.opt-1.pyc new file mode 100644 index 0000000..c5e4f71 Binary files /dev/null and b/dist/lib/logging/__pycache__/this.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/threading.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/threading.cpython-38.opt-1.pyc new file mode 100644 index 0000000..fbd4e99 Binary files /dev/null and b/dist/lib/logging/__pycache__/threading.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/timeit.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/timeit.cpython-38.opt-1.pyc new file mode 100644 index 0000000..aeef11a Binary files /dev/null and b/dist/lib/logging/__pycache__/timeit.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/token.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/token.cpython-38.opt-1.pyc new file mode 100644 index 0000000..20b7d78 Binary files /dev/null and b/dist/lib/logging/__pycache__/token.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tokenize.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tokenize.cpython-38.opt-1.pyc new file mode 100644 index 0000000..edb2158 Binary files /dev/null and b/dist/lib/logging/__pycache__/tokenize.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/trace.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/trace.cpython-38.opt-1.pyc new file mode 100644 index 0000000..f23f1db Binary files /dev/null and b/dist/lib/logging/__pycache__/trace.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/traceback.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/traceback.cpython-38.opt-1.pyc new file mode 100644 index 0000000..00d5641 Binary files /dev/null and b/dist/lib/logging/__pycache__/traceback.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tracemalloc.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tracemalloc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..da863e8 Binary files /dev/null and b/dist/lib/logging/__pycache__/tracemalloc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/tty.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/tty.cpython-38.opt-1.pyc new file mode 100644 index 0000000..494de8b Binary files /dev/null and 
b/dist/lib/logging/__pycache__/tty.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/types.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/types.cpython-38.opt-1.pyc new file mode 100644 index 0000000..f970c7a Binary files /dev/null and b/dist/lib/logging/__pycache__/types.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/typing.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/typing.cpython-38.opt-1.pyc new file mode 100644 index 0000000..34decb7 Binary files /dev/null and b/dist/lib/logging/__pycache__/typing.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/uu.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/uu.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2ee7ac0 Binary files /dev/null and b/dist/lib/logging/__pycache__/uu.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/uuid.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/uuid.cpython-38.opt-1.pyc new file mode 100644 index 0000000..6f792d6 Binary files /dev/null and b/dist/lib/logging/__pycache__/uuid.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/warnings.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/warnings.cpython-38.opt-1.pyc new file mode 100644 index 0000000..ecadb43 Binary files /dev/null and b/dist/lib/logging/__pycache__/warnings.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/wave.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/wave.cpython-38.opt-1.pyc new file mode 100644 index 0000000..7976573 Binary files /dev/null and b/dist/lib/logging/__pycache__/wave.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/weakref.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/weakref.cpython-38.opt-1.pyc new file mode 100644 index 0000000..645c304 Binary files /dev/null and b/dist/lib/logging/__pycache__/weakref.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/webbrowser.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/webbrowser.cpython-38.opt-1.pyc new file mode 100644 index 0000000..5ba3b3b Binary files /dev/null and b/dist/lib/logging/__pycache__/webbrowser.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/xdrlib.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/xdrlib.cpython-38.opt-1.pyc new file mode 100644 index 0000000..7a6e822 Binary files /dev/null and b/dist/lib/logging/__pycache__/xdrlib.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/zipapp.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/zipapp.cpython-38.opt-1.pyc new file mode 100644 index 0000000..409eb33 Binary files /dev/null and b/dist/lib/logging/__pycache__/zipapp.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/zipfile.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/zipfile.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a96d864 Binary files /dev/null and b/dist/lib/logging/__pycache__/zipfile.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/__pycache__/zipimport.cpython-38.opt-1.pyc b/dist/lib/logging/__pycache__/zipimport.cpython-38.opt-1.pyc new file mode 100644 index 0000000..12d9fd3 Binary files /dev/null and b/dist/lib/logging/__pycache__/zipimport.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/_bootlocale.py b/dist/lib/logging/_bootlocale.py new file mode 100644 index 0000000..3273a3b --- /dev/null +++ b/dist/lib/logging/_bootlocale.py @@ -0,0 +1,46 @@ +"""A minimal subset of the locale module used at interpreter startup +(imported by the _io module), 
in order to reduce startup time. + +Don't import directly from third-party code; use the `locale` module instead! +""" + +import sys +import _locale + +if sys.platform.startswith("win"): + def getpreferredencoding(do_setlocale=True): + if sys.flags.utf8_mode: + return 'UTF-8' + return _locale._getdefaultlocale()[1] +else: + try: + _locale.CODESET + except AttributeError: + if hasattr(sys, 'getandroidapilevel'): + # On Android langinfo.h and CODESET are missing, and UTF-8 is + # always used in mbstowcs() and wcstombs(). + def getpreferredencoding(do_setlocale=True): + return 'UTF-8' + else: + def getpreferredencoding(do_setlocale=True): + if sys.flags.utf8_mode: + return 'UTF-8' + # This path for legacy systems needs the more complex + # getdefaultlocale() function, import the full locale module. + import locale + return locale.getpreferredencoding(do_setlocale) + else: + def getpreferredencoding(do_setlocale=True): + assert not do_setlocale + if sys.flags.utf8_mode: + return 'UTF-8' + result = _locale.nl_langinfo(_locale.CODESET) + if not result and sys.platform == 'darwin': + # nl_langinfo can return an empty string + # when the setting has an invalid value. + # Default to UTF-8 in that case because + # UTF-8 is the default charset on OSX and + # returning nothing will crash the + # interpreter. + result = 'UTF-8' + return result diff --git a/dist/lib/logging/_collections_abc.py b/dist/lib/logging/_collections_abc.py new file mode 100644 index 0000000..2b2ddba --- /dev/null +++ b/dist/lib/logging/_collections_abc.py @@ -0,0 +1,1004 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) for collections, according to PEP 3119. + +Unit tests are in test_collections. +""" + +from abc import ABCMeta, abstractmethod +import sys + +__all__ = ["Awaitable", "Coroutine", + "AsyncIterable", "AsyncIterator", "AsyncGenerator", + "Hashable", "Iterable", "Iterator", "Generator", "Reversible", + "Sized", "Container", "Callable", "Collection", + "Set", "MutableSet", + "Mapping", "MutableMapping", + "MappingView", "KeysView", "ItemsView", "ValuesView", + "Sequence", "MutableSequence", + "ByteString", + ] + +# This module has been renamed from collections.abc to _collections_abc to +# speed up interpreter startup. Some of the types such as MutableMapping are +# required early but collections module imports a lot of other modules. +# See issue #19218 +__name__ = "collections.abc" + +# Private list of types that we want to register with the various ABCs +# so that they will pass tests like: +# it = iter(somebytearray) +# assert isinstance(it, Iterable) +# Note: in other implementations, these types might not be distinct +# and they may have their own implementation specific types that +# are not included on this list. +bytes_iterator = type(iter(b'')) +bytearray_iterator = type(iter(bytearray())) +#callable_iterator = ??? 
+dict_keyiterator = type(iter({}.keys())) +dict_valueiterator = type(iter({}.values())) +dict_itemiterator = type(iter({}.items())) +list_iterator = type(iter([])) +list_reverseiterator = type(iter(reversed([]))) +range_iterator = type(iter(range(0))) +longrange_iterator = type(iter(range(1 << 1000))) +set_iterator = type(iter(set())) +str_iterator = type(iter("")) +tuple_iterator = type(iter(())) +zip_iterator = type(iter(zip())) +## views ## +dict_keys = type({}.keys()) +dict_values = type({}.values()) +dict_items = type({}.items()) +## misc ## +mappingproxy = type(type.__dict__) +generator = type((lambda: (yield))()) +## coroutine ## +async def _coro(): pass +_coro = _coro() +coroutine = type(_coro) +_coro.close() # Prevent ResourceWarning +del _coro +## asynchronous generator ## +async def _ag(): yield +_ag = _ag() +async_generator = type(_ag) +del _ag + + +### ONE-TRICK PONIES ### + +def _check_methods(C, *methods): + mro = C.__mro__ + for method in methods: + for B in mro: + if method in B.__dict__: + if B.__dict__[method] is None: + return NotImplemented + break + else: + return NotImplemented + return True + +class Hashable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __hash__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Hashable: + return _check_methods(C, "__hash__") + return NotImplemented + + +class Awaitable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __await__(self): + yield + + @classmethod + def __subclasshook__(cls, C): + if cls is Awaitable: + return _check_methods(C, "__await__") + return NotImplemented + + +class Coroutine(Awaitable): + + __slots__ = () + + @abstractmethod + def send(self, value): + """Send a value into the coroutine. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the coroutine. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside coroutine. + """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("coroutine ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Coroutine: + return _check_methods(C, '__await__', 'send', 'throw', 'close') + return NotImplemented + + +Coroutine.register(coroutine) + + +class AsyncIterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __aiter__(self): + return AsyncIterator() + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterable: + return _check_methods(C, "__aiter__") + return NotImplemented + + +class AsyncIterator(AsyncIterable): + + __slots__ = () + + @abstractmethod + async def __anext__(self): + """Return the next item or raise StopAsyncIteration when exhausted.""" + raise StopAsyncIteration + + def __aiter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncIterator: + return _check_methods(C, "__anext__", "__aiter__") + return NotImplemented + + +class AsyncGenerator(AsyncIterator): + + __slots__ = () + + async def __anext__(self): + """Return the next item from the asynchronous generator. + When exhausted, raise StopAsyncIteration. + """ + return await self.asend(None) + + @abstractmethod + async def asend(self, value): + """Send a value into the asynchronous generator. 
+ Return next yielded value or raise StopAsyncIteration. + """ + raise StopAsyncIteration + + @abstractmethod + async def athrow(self, typ, val=None, tb=None): + """Raise an exception in the asynchronous generator. + Return next yielded value or raise StopAsyncIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + async def aclose(self): + """Raise GeneratorExit inside coroutine. + """ + try: + await self.athrow(GeneratorExit) + except (GeneratorExit, StopAsyncIteration): + pass + else: + raise RuntimeError("asynchronous generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is AsyncGenerator: + return _check_methods(C, '__aiter__', '__anext__', + 'asend', 'athrow', 'aclose') + return NotImplemented + + +AsyncGenerator.register(async_generator) + + +class Iterable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __iter__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterable: + return _check_methods(C, "__iter__") + return NotImplemented + + +class Iterator(Iterable): + + __slots__ = () + + @abstractmethod + def __next__(self): + 'Return the next item from the iterator. When exhausted, raise StopIteration' + raise StopIteration + + def __iter__(self): + return self + + @classmethod + def __subclasshook__(cls, C): + if cls is Iterator: + return _check_methods(C, '__iter__', '__next__') + return NotImplemented + +Iterator.register(bytes_iterator) +Iterator.register(bytearray_iterator) +#Iterator.register(callable_iterator) +Iterator.register(dict_keyiterator) +Iterator.register(dict_valueiterator) +Iterator.register(dict_itemiterator) +Iterator.register(list_iterator) +Iterator.register(list_reverseiterator) +Iterator.register(range_iterator) +Iterator.register(longrange_iterator) +Iterator.register(set_iterator) +Iterator.register(str_iterator) +Iterator.register(tuple_iterator) +Iterator.register(zip_iterator) + + +class Reversible(Iterable): + + __slots__ = () + + @abstractmethod + def __reversed__(self): + while False: + yield None + + @classmethod + def __subclasshook__(cls, C): + if cls is Reversible: + return _check_methods(C, "__reversed__", "__iter__") + return NotImplemented + + +class Generator(Iterator): + + __slots__ = () + + def __next__(self): + """Return the next item from the generator. + When exhausted, raise StopIteration. + """ + return self.send(None) + + @abstractmethod + def send(self, value): + """Send a value into the generator. + Return next yielded value or raise StopIteration. + """ + raise StopIteration + + @abstractmethod + def throw(self, typ, val=None, tb=None): + """Raise an exception in the generator. + Return next yielded value or raise StopIteration. + """ + if val is None: + if tb is None: + raise typ + val = typ() + if tb is not None: + val = val.with_traceback(tb) + raise val + + def close(self): + """Raise GeneratorExit inside generator. 
+ """ + try: + self.throw(GeneratorExit) + except (GeneratorExit, StopIteration): + pass + else: + raise RuntimeError("generator ignored GeneratorExit") + + @classmethod + def __subclasshook__(cls, C): + if cls is Generator: + return _check_methods(C, '__iter__', '__next__', + 'send', 'throw', 'close') + return NotImplemented + +Generator.register(generator) + + +class Sized(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __len__(self): + return 0 + + @classmethod + def __subclasshook__(cls, C): + if cls is Sized: + return _check_methods(C, "__len__") + return NotImplemented + + +class Container(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __contains__(self, x): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Container: + return _check_methods(C, "__contains__") + return NotImplemented + +class Collection(Sized, Iterable, Container): + + __slots__ = () + + @classmethod + def __subclasshook__(cls, C): + if cls is Collection: + return _check_methods(C, "__len__", "__iter__", "__contains__") + return NotImplemented + +class Callable(metaclass=ABCMeta): + + __slots__ = () + + @abstractmethod + def __call__(self, *args, **kwds): + return False + + @classmethod + def __subclasshook__(cls, C): + if cls is Callable: + return _check_methods(C, "__call__") + return NotImplemented + + +### SETS ### + + +class Set(Collection): + + """A set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__ and __len__. + + To override the comparisons (presumably for speed, as the + semantics are fixed), redefine __le__ and __ge__, + then the other operations will automatically follow suit. + """ + + __slots__ = () + + def __le__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) > len(other): + return False + for elem in self: + if elem not in other: + return False + return True + + def __lt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) < len(other) and self.__le__(other) + + def __gt__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) > len(other) and self.__ge__(other) + + def __ge__(self, other): + if not isinstance(other, Set): + return NotImplemented + if len(self) < len(other): + return False + for elem in other: + if elem not in self: + return False + return True + + def __eq__(self, other): + if not isinstance(other, Set): + return NotImplemented + return len(self) == len(other) and self.__le__(other) + + @classmethod + def _from_iterable(cls, it): + '''Construct an instance of the class from any iterable input. + + Must override this method if the class constructor signature + does not accept an iterable for an input. + ''' + return cls(it) + + def __and__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + return self._from_iterable(value for value in other if value in self) + + __rand__ = __and__ + + def isdisjoint(self, other): + 'Return True if two sets have a null intersection.' 
+ for value in other: + if value in self: + return False + return True + + def __or__(self, other): + if not isinstance(other, Iterable): + return NotImplemented + chain = (e for s in (self, other) for e in s) + return self._from_iterable(chain) + + __ror__ = __or__ + + def __sub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in self + if value not in other) + + def __rsub__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return self._from_iterable(value for value in other + if value not in self) + + def __xor__(self, other): + if not isinstance(other, Set): + if not isinstance(other, Iterable): + return NotImplemented + other = self._from_iterable(other) + return (self - other) | (other - self) + + __rxor__ = __xor__ + + def _hash(self): + """Compute the hash value of a set. + + Note that we don't define __hash__: not all sets are hashable. + But if you define a hashable set type, its __hash__ should + call this function. + + This must be compatible __eq__. + + All sets ought to compare equal if they contain the same + elements, regardless of how they are implemented, and + regardless of the order of the elements; so there's not much + freedom for __eq__ or __hash__. We match the algorithm used + by the built-in frozenset type. + """ + MAX = sys.maxsize + MASK = 2 * MAX + 1 + n = len(self) + h = 1927868237 * (n + 1) + h &= MASK + for x in self: + hx = hash(x) + h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167 + h &= MASK + h = h * 69069 + 907133923 + h &= MASK + if h > MAX: + h -= MASK + 1 + if h == -1: + h = 590923713 + return h + +Set.register(frozenset) + + +class MutableSet(Set): + """A mutable set is a finite, iterable container. + + This class provides concrete generic implementations of all + methods except for __contains__, __iter__, __len__, + add(), and discard(). + + To override the comparisons (presumably for speed, as the + semantics are fixed), all you have to do is redefine __le__ and + then the other operations will automatically follow suit. + """ + + __slots__ = () + + @abstractmethod + def add(self, value): + """Add an element.""" + raise NotImplementedError + + @abstractmethod + def discard(self, value): + """Remove an element. Do not raise an exception if absent.""" + raise NotImplementedError + + def remove(self, value): + """Remove an element. If not a member, raise a KeyError.""" + if value not in self: + raise KeyError(value) + self.discard(value) + + def pop(self): + """Return the popped value. Raise KeyError if empty.""" + it = iter(self) + try: + value = next(it) + except StopIteration: + raise KeyError from None + self.discard(value) + return value + + def clear(self): + """This is slow (creates N new iterators!) 
but effective.""" + try: + while True: + self.pop() + except KeyError: + pass + + def __ior__(self, it): + for value in it: + self.add(value) + return self + + def __iand__(self, it): + for value in (self - it): + self.discard(value) + return self + + def __ixor__(self, it): + if it is self: + self.clear() + else: + if not isinstance(it, Set): + it = self._from_iterable(it) + for value in it: + if value in self: + self.discard(value) + else: + self.add(value) + return self + + def __isub__(self, it): + if it is self: + self.clear() + else: + for value in it: + self.discard(value) + return self + +MutableSet.register(set) + + +### MAPPINGS ### + + +class Mapping(Collection): + + __slots__ = () + + """A Mapping is a generic container for associating key/value + pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __iter__, and __len__. + + """ + + @abstractmethod + def __getitem__(self, key): + raise KeyError + + def get(self, key, default=None): + 'D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None.' + try: + return self[key] + except KeyError: + return default + + def __contains__(self, key): + try: + self[key] + except KeyError: + return False + else: + return True + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return KeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return ItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return ValuesView(self) + + def __eq__(self, other): + if not isinstance(other, Mapping): + return NotImplemented + return dict(self.items()) == dict(other.items()) + + __reversed__ = None + +Mapping.register(mappingproxy) + + +class MappingView(Sized): + + __slots__ = '_mapping', + + def __init__(self, mapping): + self._mapping = mapping + + def __len__(self): + return len(self._mapping) + + def __repr__(self): + return '{0.__class__.__name__}({0._mapping!r})'.format(self) + + +class KeysView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, key): + return key in self._mapping + + def __iter__(self): + yield from self._mapping + +KeysView.register(dict_keys) + + +class ItemsView(MappingView, Set): + + __slots__ = () + + @classmethod + def _from_iterable(self, it): + return set(it) + + def __contains__(self, item): + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v is value or v == value + + def __iter__(self): + for key in self._mapping: + yield (key, self._mapping[key]) + +ItemsView.register(dict_items) + + +class ValuesView(MappingView, Collection): + + __slots__ = () + + def __contains__(self, value): + for key in self._mapping: + v = self._mapping[key] + if v is value or v == value: + return True + return False + + def __iter__(self): + for key in self._mapping: + yield self._mapping[key] + +ValuesView.register(dict_values) + + +class MutableMapping(Mapping): + + __slots__ = () + + """A MutableMapping is a generic container for associating + key/value pairs. + + This class provides concrete generic implementations of all + methods except for __getitem__, __setitem__, __delitem__, + __iter__, and __len__. 
+ + """ + + @abstractmethod + def __setitem__(self, key, value): + raise KeyError + + @abstractmethod + def __delitem__(self, key): + raise KeyError + + __marker = object() + + def pop(self, key, default=__marker): + '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value. + If key is not found, d is returned if given, otherwise KeyError is raised. + ''' + try: + value = self[key] + except KeyError: + if default is self.__marker: + raise + return default + else: + del self[key] + return value + + def popitem(self): + '''D.popitem() -> (k, v), remove and return some (key, value) pair + as a 2-tuple; but raise KeyError if D is empty. + ''' + try: + key = next(iter(self)) + except StopIteration: + raise KeyError from None + value = self[key] + del self[key] + return key, value + + def clear(self): + 'D.clear() -> None. Remove all items from D.' + try: + while True: + self.popitem() + except KeyError: + pass + + def update(self, other=(), /, **kwds): + ''' D.update([E, ]**F) -> None. Update D from mapping/iterable E and F. + If E present and has a .keys() method, does: for k in E: D[k] = E[k] + If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v + In either case, this is followed by: for k, v in F.items(): D[k] = v + ''' + if isinstance(other, Mapping): + for key in other: + self[key] = other[key] + elif hasattr(other, "keys"): + for key in other.keys(): + self[key] = other[key] + else: + for key, value in other: + self[key] = value + for key, value in kwds.items(): + self[key] = value + + def setdefault(self, key, default=None): + 'D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D' + try: + return self[key] + except KeyError: + self[key] = default + return default + +MutableMapping.register(dict) + + +### SEQUENCES ### + + +class Sequence(Reversible, Collection): + + """All the operations on a read-only sequence. + + Concrete subclasses must override __new__ or __init__, + __getitem__, and __len__. + """ + + __slots__ = () + + @abstractmethod + def __getitem__(self, index): + raise IndexError + + def __iter__(self): + i = 0 + try: + while True: + v = self[i] + yield v + i += 1 + except IndexError: + return + + def __contains__(self, value): + for v in self: + if v is value or v == value: + return True + return False + + def __reversed__(self): + for i in reversed(range(len(self))): + yield self[i] + + def index(self, value, start=0, stop=None): + '''S.index(value, [start, [stop]]) -> integer -- return first index of value. + Raises ValueError if the value is not present. + + Supporting start and stop arguments is optional, but + recommended. + ''' + if start is not None and start < 0: + start = max(len(self) + start, 0) + if stop is not None and stop < 0: + stop += len(self) + + i = start + while stop is None or i < stop: + try: + v = self[i] + if v is value or v == value: + return i + except IndexError: + break + i += 1 + raise ValueError + + def count(self, value): + 'S.count(value) -> integer -- return number of occurrences of value' + return sum(1 for v in self if v is value or v == value) + +Sequence.register(tuple) +Sequence.register(str) +Sequence.register(range) +Sequence.register(memoryview) + + +class ByteString(Sequence): + + """This unifies bytes and bytearray. + + XXX Should add all their methods. + """ + + __slots__ = () + +ByteString.register(bytes) +ByteString.register(bytearray) + + +class MutableSequence(Sequence): + + __slots__ = () + + """All the operations on a read-write sequence. 
+ + Concrete subclasses must provide __new__ or __init__, + __getitem__, __setitem__, __delitem__, __len__, and insert(). + + """ + + @abstractmethod + def __setitem__(self, index, value): + raise IndexError + + @abstractmethod + def __delitem__(self, index): + raise IndexError + + @abstractmethod + def insert(self, index, value): + 'S.insert(index, value) -- insert value before index' + raise IndexError + + def append(self, value): + 'S.append(value) -- append value to the end of the sequence' + self.insert(len(self), value) + + def clear(self): + 'S.clear() -> None -- remove all items from S' + try: + while True: + self.pop() + except IndexError: + pass + + def reverse(self): + 'S.reverse() -- reverse *IN PLACE*' + n = len(self) + for i in range(n//2): + self[i], self[n-i-1] = self[n-i-1], self[i] + + def extend(self, values): + 'S.extend(iterable) -- extend sequence by appending elements from the iterable' + if values is self: + values = list(values) + for v in values: + self.append(v) + + def pop(self, index=-1): + '''S.pop([index]) -> item -- remove and return item at index (default last). + Raise IndexError if list is empty or index is out of range. + ''' + v = self[index] + del self[index] + return v + + def remove(self, value): + '''S.remove(value) -- remove first occurrence of value. + Raise ValueError if the value is not present. + ''' + del self[self.index(value)] + + def __iadd__(self, values): + self.extend(values) + return self + +MutableSequence.register(list) +MutableSequence.register(bytearray) # Multiply inheriting, see ByteString diff --git a/dist/lib/logging/_compat_pickle.py b/dist/lib/logging/_compat_pickle.py new file mode 100644 index 0000000..f68496a --- /dev/null +++ b/dist/lib/logging/_compat_pickle.py @@ -0,0 +1,251 @@ +# This module is used to map the old Python 2 names to the new names used in +# Python 3 for the pickle module. This needed to make pickle streams +# generated with Python 2 loadable by Python 3. + +# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import +# lib2to3 and use the mapping defined there, because lib2to3 uses pickle. +# Thus, this could cause the module to be imported recursively. 
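These mapping tables are the ones Lib/pickle.py consults in Unpickler.find_class() when fix_imports is true and the pickle protocol is 2 or lower. A quick sketch of the observable effect follows; the byte strings are hand-written protocol-0 pickles used purely for illustration:

    import pickle
    import copyreg

    # 'c<module>\n<name>\n' is the protocol-0 GLOBAL opcode; '.' is STOP.
    # With the default fix_imports=True, ('__builtin__', 'unicode') is
    # rewritten through NAME_MAPPING to ('builtins', 'str').
    assert pickle.loads(b'c__builtin__\nunicode\n.') is str

    # Plain module renames go through IMPORT_MAPPING, e.g. copy_reg -> copyreg.
    assert pickle.loads(b'ccopy_reg\n_reconstructor\n.') is copyreg._reconstructor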
+IMPORT_MAPPING = { + '__builtin__' : 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'SocketServer': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + 'tkFileDialog': 'tkinter.filedialog', + 'tkSimpleDialog': 'tkinter.simpledialog', + 'tkColorChooser': 'tkinter.colorchooser', + 'tkCommonDialog': 'tkinter.commondialog', + 'Dialog': 'tkinter.dialog', + 'Tkdnd': 'tkinter.dnd', + 'tkFont': 'tkinter.font', + 'tkMessageBox': 'tkinter.messagebox', + 'ScrolledText': 'tkinter.scrolledtext', + 'Tkconstants': 'tkinter.constants', + 'Tix': 'tkinter.tix', + 'ttk': 'tkinter.ttk', + 'Tkinter': 'tkinter', + 'markupbase': '_markupbase', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + 'dbhash': 'dbm.bsd', + 'dumbdbm': 'dbm.dumb', + 'dbm': 'dbm.ndbm', + 'gdbm': 'dbm.gnu', + 'xmlrpclib': 'xmlrpc.client', + 'SimpleXMLRPCServer': 'xmlrpc.server', + 'httplib': 'http.client', + 'htmlentitydefs' : 'html.entities', + 'HTMLParser' : 'html.parser', + 'Cookie': 'http.cookies', + 'cookielib': 'http.cookiejar', + 'BaseHTTPServer': 'http.server', + 'test.test_support': 'test.support', + 'commands': 'subprocess', + 'urlparse' : 'urllib.parse', + 'robotparser' : 'urllib.robotparser', + 'urllib2': 'urllib.request', + 'anydbm': 'dbm', + '_abcoll' : 'collections.abc', +} + + +# This contains rename rules that are easy to handle. We ignore the more +# complex stuff (e.g. mapping the names in the urllib and types modules). +# These rules should be run before import names are fixed. +NAME_MAPPING = { + ('__builtin__', 'xrange'): ('builtins', 'range'), + ('__builtin__', 'reduce'): ('functools', 'reduce'), + ('__builtin__', 'intern'): ('sys', 'intern'), + ('__builtin__', 'unichr'): ('builtins', 'chr'), + ('__builtin__', 'unicode'): ('builtins', 'str'), + ('__builtin__', 'long'): ('builtins', 'int'), + ('itertools', 'izip'): ('builtins', 'zip'), + ('itertools', 'imap'): ('builtins', 'map'), + ('itertools', 'ifilter'): ('builtins', 'filter'), + ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'), + ('itertools', 'izip_longest'): ('itertools', 'zip_longest'), + ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'), + ('UserList', 'UserList'): ('collections', 'UserList'), + ('UserString', 'UserString'): ('collections', 'UserString'), + ('whichdb', 'whichdb'): ('dbm', 'whichdb'), + ('_socket', 'fromfd'): ('socket', 'fromfd'), + ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'), + ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'), + ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'), + ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'), + ('urllib', 'getproxies'): ('urllib.request', 'getproxies'), + ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'), + ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'), + ('urllib', 'quote'): ('urllib.parse', 'quote'), + ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'), + ('urllib', 'unquote'): ('urllib.parse', 'unquote'), + ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'), + ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'), + ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'), + ('urllib', 'urlopen'): ('urllib.request', 'urlopen'), + ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'), + ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'), + ('urllib2', 'URLError'): ('urllib.error', 'URLError'), +} + +PYTHON2_EXCEPTIONS = ( + 
"ArithmeticError", + "AssertionError", + "AttributeError", + "BaseException", + "BufferError", + "BytesWarning", + "DeprecationWarning", + "EOFError", + "EnvironmentError", + "Exception", + "FloatingPointError", + "FutureWarning", + "GeneratorExit", + "IOError", + "ImportError", + "ImportWarning", + "IndentationError", + "IndexError", + "KeyError", + "KeyboardInterrupt", + "LookupError", + "MemoryError", + "NameError", + "NotImplementedError", + "OSError", + "OverflowError", + "PendingDeprecationWarning", + "ReferenceError", + "RuntimeError", + "RuntimeWarning", + # StandardError is gone in Python 3, so we map it to Exception + "StopIteration", + "SyntaxError", + "SyntaxWarning", + "SystemError", + "SystemExit", + "TabError", + "TypeError", + "UnboundLocalError", + "UnicodeDecodeError", + "UnicodeEncodeError", + "UnicodeError", + "UnicodeTranslateError", + "UnicodeWarning", + "UserWarning", + "ValueError", + "Warning", + "ZeroDivisionError", +) + +try: + WindowsError +except NameError: + pass +else: + PYTHON2_EXCEPTIONS += ("WindowsError",) + +for excname in PYTHON2_EXCEPTIONS: + NAME_MAPPING[("exceptions", excname)] = ("builtins", excname) + +MULTIPROCESSING_EXCEPTIONS = ( + 'AuthenticationError', + 'BufferTooShort', + 'ProcessError', + 'TimeoutError', +) + +for excname in MULTIPROCESSING_EXCEPTIONS: + NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname) + +# Same, but for 3.x to 2.x +REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) +assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING) +REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) +assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING) + +# Non-mutual mappings. + +IMPORT_MAPPING.update({ + 'cPickle': 'pickle', + '_elementtree': 'xml.etree.ElementTree', + 'FileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleHTTPServer': 'http.server', + 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', +}) + +REVERSE_IMPORT_MAPPING.update({ + '_bz2': 'bz2', + '_dbm': 'dbm', + '_functools': 'functools', + '_gdbm': 'gdbm', + '_pickle': 'pickle', +}) + +NAME_MAPPING.update({ + ('__builtin__', 'basestring'): ('builtins', 'str'), + ('exceptions', 'StandardError'): ('builtins', 'Exception'), + ('UserDict', 'UserDict'): ('collections', 'UserDict'), + ('socket', '_socketobject'): ('socket', 'SocketType'), +}) + +REVERSE_NAME_MAPPING.update({ + ('_functools', 'reduce'): ('__builtin__', 'reduce'), + ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), + ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), + ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), + ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), + ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), + ('xmlrpc.server', 'XMLRPCDocGenerator'): + ('DocXMLRPCServer', 'XMLRPCDocGenerator'), + ('xmlrpc.server', 'DocXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), + ('xmlrpc.server', 'DocXMLRPCServer'): + ('DocXMLRPCServer', 'DocXMLRPCServer'), + ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), + ('http.server', 'SimpleHTTPRequestHandler'): + ('SimpleHTTPServer', 
'SimpleHTTPRequestHandler'), + ('http.server', 'CGIHTTPRequestHandler'): + ('CGIHTTPServer', 'CGIHTTPRequestHandler'), + ('_socket', 'socket'): ('socket', '_socketobject'), +}) + +PYTHON3_OSERROR_EXCEPTIONS = ( + 'BrokenPipeError', + 'ChildProcessError', + 'ConnectionAbortedError', + 'ConnectionError', + 'ConnectionRefusedError', + 'ConnectionResetError', + 'FileExistsError', + 'FileNotFoundError', + 'InterruptedError', + 'IsADirectoryError', + 'NotADirectoryError', + 'PermissionError', + 'ProcessLookupError', + 'TimeoutError', +) + +for excname in PYTHON3_OSERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') + +PYTHON3_IMPORTERROR_EXCEPTIONS = ( + 'ModuleNotFoundError', +) + +for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') diff --git a/dist/lib/logging/_compression.py b/dist/lib/logging/_compression.py new file mode 100644 index 0000000..b00f31b --- /dev/null +++ b/dist/lib/logging/_compression.py @@ -0,0 +1,152 @@ +"""Internal classes used by the gzip, lzma and bz2 modules""" + +import io + + +BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size + + +class BaseStream(io.BufferedIOBase): + """Mode-checking helper functions.""" + + def _check_not_closed(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _check_can_read(self): + if not self.readable(): + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if not self.writable(): + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if not self.readable(): + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + if not self.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + +class DecompressReader(io.RawIOBase): + """Adapts the decompressor API to a RawIOBase reader API""" + + def readable(self): + return True + + def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args): + self._fp = fp + self._eof = False + self._pos = 0 # Current offset in decompressed stream + + # Set to size of decompressed stream once it is known, for SEEK_END + self._size = -1 + + # Save the decompressor factory and arguments. + # If the file contains multiple compressed streams, each + # stream will need a separate decompressor object. A new decompressor + # object is also needed when implementing a backwards seek(). + self._decomp_factory = decomp_factory + self._decomp_args = decomp_args + self._decompressor = self._decomp_factory(**self._decomp_args) + + # Exception class to catch from decompressor signifying invalid + # trailing data to ignore + self._trailing_error = trailing_error + + def close(self): + self._decompressor = None + return super().close() + + def seekable(self): + return self._fp.seekable() + + def readinto(self, b): + with memoryview(b) as view, view.cast("B") as byte_view: + data = self.read(len(byte_view)) + byte_view[:len(data)] = data + return len(data) + + def read(self, size=-1): + if size < 0: + return self.readall() + + if not size or self._eof: + return b"" + data = None # Default if EOF is encountered + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. 
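+ # Three cases are handled below: the current stream has ended but raw
+ # data remains (e.g. a multi-member gzip file), so a fresh decompressor
+ # is started on the leftover bytes; the decompressor needs more input,
+ # so another BUFFER_SIZE block is read (EOFError if the file is
+ # truncated mid-stream); or enough buffered input is already available
+ # and decompression proceeds without touching the file.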
+ while True: + if self._decompressor.eof: + rawblock = (self._decompressor.unused_data or + self._fp.read(BUFFER_SIZE)) + if not rawblock: + break + # Continue to next stream. + self._decompressor = self._decomp_factory( + **self._decomp_args) + try: + data = self._decompressor.decompress(rawblock, size) + except self._trailing_error: + # Trailing data isn't a valid compressed stream; ignore it. + break + else: + if self._decompressor.needs_input: + rawblock = self._fp.read(BUFFER_SIZE) + if not rawblock: + raise EOFError("Compressed file ended before the " + "end-of-stream marker was reached") + else: + rawblock = b"" + data = self._decompressor.decompress(rawblock, size) + if data: + break + if not data: + self._eof = True + self._size = self._pos + return b"" + self._pos += len(data) + return data + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0) + self._eof = False + self._pos = 0 + self._decompressor = self._decomp_factory(**self._decomp_args) + + def seek(self, offset, whence=io.SEEK_SET): + # Recalculate offset as an absolute file position. + if whence == io.SEEK_SET: + pass + elif whence == io.SEEK_CUR: + offset = self._pos + offset + elif whence == io.SEEK_END: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + while self.read(io.DEFAULT_BUFFER_SIZE): + pass + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: {}".format(whence)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. + while offset > 0: + data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) + if not data: + break + offset -= len(data) + + return self._pos + + def tell(self): + """Return the current file position.""" + return self._pos diff --git a/dist/lib/logging/_dummy_thread.py b/dist/lib/logging/_dummy_thread.py new file mode 100644 index 0000000..2e46a07 --- /dev/null +++ b/dist/lib/logging/_dummy_thread.py @@ -0,0 +1,193 @@ +"""Drop-in replacement for the thread module. + +Meant to be used as a brain-dead substitute so that threaded code does +not need to be rewritten for when the thread module is not present. + +Suggested usage is:: + + try: + import _thread + except ImportError: + import _dummy_thread as _thread + +""" +# Exports only things specified by thread documentation; +# skipping obsolete synonyms allocate(), start_new(), exit_thread(). +__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', + 'interrupt_main', 'LockType', 'RLock'] + +# A dummy value +TIMEOUT_MAX = 2**31 + +# NOTE: this module can be imported early in the extension building process, +# and so top level imports of other modules should be avoided. Instead, all +# imports are done when needed on a function-by-function basis. Since threads +# are disabled, the import lock should not be an issue anyway (??). + +error = RuntimeError + +def start_new_thread(function, args, kwargs={}): + """Dummy implementation of _thread.start_new_thread(). + + Compatibility is maintained by making sure that ``args`` is a + tuple and ``kwargs`` is a dictionary. If an exception is raised + and it is SystemExit (which can be done by _thread.exit()) it is + caught and nothing is done; all other exceptions are printed out + by using traceback.print_exc(). + + If the executed function calls interrupt_main the KeyboardInterrupt will be + raised when the function returns. 
+ + """ + if type(args) != type(tuple()): + raise TypeError("2nd arg must be a tuple") + if type(kwargs) != type(dict()): + raise TypeError("3rd arg must be a dict") + global _main + _main = False + try: + function(*args, **kwargs) + except SystemExit: + pass + except: + import traceback + traceback.print_exc() + _main = True + global _interrupt + if _interrupt: + _interrupt = False + raise KeyboardInterrupt + +def exit(): + """Dummy implementation of _thread.exit().""" + raise SystemExit + +def get_ident(): + """Dummy implementation of _thread.get_ident(). + + Since this module should only be used when _threadmodule is not + available, it is safe to assume that the current process is the + only thread. Thus a constant can be safely returned. + """ + return 1 + +def allocate_lock(): + """Dummy implementation of _thread.allocate_lock().""" + return LockType() + +def stack_size(size=None): + """Dummy implementation of _thread.stack_size().""" + if size is not None: + raise error("setting thread stack size not supported") + return 0 + +def _set_sentinel(): + """Dummy implementation of _thread._set_sentinel().""" + return LockType() + +class LockType(object): + """Class implementing dummy implementation of _thread.LockType. + + Compatibility is maintained by maintaining self.locked_status + which is a boolean that stores the state of the lock. Pickling of + the lock, though, should not be done since if the _thread module is + then used with an unpickled ``lock()`` from here problems could + occur from this class not having atomic methods. + + """ + + def __init__(self): + self.locked_status = False + + def acquire(self, waitflag=None, timeout=-1): + """Dummy implementation of acquire(). + + For blocking calls, self.locked_status is automatically set to + True and returned appropriately based on value of + ``waitflag``. If it is non-blocking, then the value is + actually checked and not set if it is already acquired. This + is all done so that threading.Condition's assert statements + aren't triggered and throw a little fit. + + """ + if waitflag is None or waitflag: + self.locked_status = True + return True + else: + if not self.locked_status: + self.locked_status = True + return True + else: + if timeout > 0: + import time + time.sleep(timeout) + return False + + __enter__ = acquire + + def __exit__(self, typ, val, tb): + self.release() + + def release(self): + """Release the dummy lock.""" + # XXX Perhaps shouldn't actually bother to test? Could lead + # to problems for complex, threaded code. + if not self.locked_status: + raise error + self.locked_status = False + return True + + def locked(self): + return self.locked_status + + def __repr__(self): + return "<%s %s.%s object at %s>" % ( + "locked" if self.locked_status else "unlocked", + self.__class__.__module__, + self.__class__.__qualname__, + hex(id(self)) + ) + + +class RLock(LockType): + """Dummy implementation of threading._RLock. + + Re-entrant lock can be aquired multiple times and needs to be released + just as many times. This dummy implemention does not check wheter the + current thread actually owns the lock, but does accounting on the call + counts. + """ + def __init__(self): + super().__init__() + self._levels = 0 + + def acquire(self, waitflag=None, timeout=-1): + """Aquire the lock, can be called multiple times in succession. + """ + locked = super().acquire(waitflag, timeout) + if locked: + self._levels += 1 + return locked + + def release(self): + """Release needs to be called once for every call to acquire(). 
+ """ + if self._levels == 0: + raise error + if self._levels == 1: + super().release() + self._levels -= 1 + +# Used to signal that interrupt_main was called in a "thread" +_interrupt = False +# True when not executing in a "thread" +_main = True + +def interrupt_main(): + """Set _interrupt flag to True to have start_new_thread raise + KeyboardInterrupt upon exiting.""" + if _main: + raise KeyboardInterrupt + else: + global _interrupt + _interrupt = True diff --git a/dist/lib/logging/_markupbase.py b/dist/lib/logging/_markupbase.py new file mode 100644 index 0000000..2af5f1c --- /dev/null +++ b/dist/lib/logging/_markupbase.py @@ -0,0 +1,395 @@ +"""Shared support for scanning document type declarations in HTML and XHTML. + +This module is used as a foundation for the html.parser module. It has no +documented public API and should not be used directly. + +""" + +import re + +_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match +_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match +_commentclose = re.compile(r'--\s*>') +_markedsectionclose = re.compile(r']\s*]\s*>') + +# An analysis of the MS-Word extensions is available at +# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf + +_msmarkedsectionclose = re.compile(r']\s*>') + +del re + + +class ParserBase: + """Parser base class which provides some common support methods used + by the SGML/HTML and XHTML parsers.""" + + def __init__(self): + if self.__class__ is ParserBase: + raise RuntimeError( + "_markupbase.ParserBase must be subclassed") + + def error(self, message): + raise NotImplementedError( + "subclasses of ParserBase must override error()") + + def reset(self): + self.lineno = 1 + self.offset = 0 + + def getpos(self): + """Return current line number and offset.""" + return self.lineno, self.offset + + # Internal -- update line number and offset. This should be + # called for each piece of data exactly once, in order -- in other + # words the concatenation of all the input strings to this + # function should be exactly the entire input. + def updatepos(self, i, j): + if i >= j: + return j + rawdata = self.rawdata + nlines = rawdata.count("\n", i, j) + if nlines: + self.lineno = self.lineno + nlines + pos = rawdata.rindex("\n", i, j) # Should not fail + self.offset = j-(pos+1) + else: + self.offset = self.offset + j-i + return j + + _decl_otherchars = '' + + # Internal -- parse declaration (for use by subclasses). + def parse_declaration(self, i): + # This is some sort of declaration; in "HTML as + # deployed," this should only be the document type + # declaration (""). + # ISO 8879:1986, however, has more complex + # declaration syntax for elements in , including: + # --comment-- + # [marked section] + # name in the following list: ENTITY, DOCTYPE, ELEMENT, + # ATTLIST, NOTATION, SHORTREF, USEMAP, + # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM + rawdata = self.rawdata + j = i + 2 + assert rawdata[i:j] == "": + # the empty comment + return j + 1 + if rawdata[j:j+1] in ("-", ""): + # Start of comment followed by buffer boundary, + # or just a buffer boundary. 
+ return -1 + # A simple, practical version could look like: ((name|stringlit) S*) + '>' + n = len(rawdata) + if rawdata[j:j+2] == '--': #comment + # Locate --.*-- as the body of the comment + return self.parse_comment(i) + elif rawdata[j] == '[': #marked section + # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section + # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA + # Note that this is extended by Microsoft Office "Save as Web" function + # to include [if...] and [endif]. + return self.parse_marked_section(i) + else: #all other declaration elements + decltype, j = self._scan_name(j, i) + if j < 0: + return j + if decltype == "doctype": + self._decl_otherchars = '' + while j < n: + c = rawdata[j] + if c == ">": + # end of declaration syntax + data = rawdata[i+2:j] + if decltype == "doctype": + self.handle_decl(data) + else: + # According to the HTML5 specs sections "8.2.4.44 Bogus + # comment state" and "8.2.4.45 Markup declaration open + # state", a comment token should be emitted. + # Calling unknown_decl provides more flexibility though. + self.unknown_decl(data) + return j + 1 + if c in "\"'": + m = _declstringlit_match(rawdata, j) + if not m: + return -1 # incomplete + j = m.end() + elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": + name, j = self._scan_name(j, i) + elif c in self._decl_otherchars: + j = j + 1 + elif c == "[": + # this could be handled in a separate doctype parser + if decltype == "doctype": + j = self._parse_doctype_subset(j + 1, i) + elif decltype in {"attlist", "linktype", "link", "element"}: + # must tolerate []'d groups in a content model in an element declaration + # also in data attribute specifications of attlist declaration + # also link type declaration subsets in linktype declarations + # also link attribute specification lists in link declarations + self.error("unsupported '[' char in %s declaration" % decltype) + else: + self.error("unexpected '[' char in declaration") + else: + self.error( + "unexpected %r char in declaration" % rawdata[j]) + if j < 0: + return j + return -1 # incomplete + + # Internal -- parse a marked section + # Override this to handle MS-word extension syntax content + def parse_marked_section(self, i, report=1): + rawdata= self.rawdata + assert rawdata[i:i+3] == ' ending + match= _markedsectionclose.search(rawdata, i+3) + elif sectName in {"if", "else", "endif"}: + # look for MS Office ]> ending + match= _msmarkedsectionclose.search(rawdata, i+3) + else: + self.error('unknown status keyword %r in marked section' % rawdata[i+3:j]) + if not match: + return -1 + if report: + j = match.start(0) + self.unknown_decl(rawdata[i+3: j]) + return match.end(0) + + # Internal -- parse comment, return length or -1 if not terminated + def parse_comment(self, i, report=1): + rawdata = self.rawdata + if rawdata[i:i+4] != ' + --> --> + + ''' + +__UNDEF__ = [] # a special sentinel object +def small(text): + if text: + return '' + text + '' + else: + return '' + +def strong(text): + if text: + return '' + text + '' + else: + return '' + +def grey(text): + if text: + return '' + text + '' + else: + return '' + +def lookup(name, frame, locals): + """Find the value for a given name in the given environment.""" + if name in locals: + return 'local', locals[name] + if name in frame.f_globals: + return 'global', frame.f_globals[name] + if '__builtins__' in frame.f_globals: + builtins = frame.f_globals['__builtins__'] + if type(builtins) is type({}): + if name in builtins: + return 'builtin', 
builtins[name] + else: + if hasattr(builtins, name): + return 'builtin', getattr(builtins, name) + return None, __UNDEF__ + +def scanvars(reader, frame, locals): + """Scan one logical line of Python and look up values of variables used.""" + vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ + for ttype, token, start, end, line in tokenize.generate_tokens(reader): + if ttype == tokenize.NEWLINE: break + if ttype == tokenize.NAME and token not in keyword.kwlist: + if lasttoken == '.': + if parent is not __UNDEF__: + value = getattr(parent, token, __UNDEF__) + vars.append((prefix + token, prefix, value)) + else: + where, value = lookup(token, frame, locals) + vars.append((token, where, value)) + elif token == '.': + prefix += lasttoken + '.' + parent = value + else: + parent, prefix = None, '' + lasttoken = token + return vars + +def html(einfo, context=5): + """Return a nice HTML document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading( + '<big><big>%s</big></big>' % + strong(pydoc.html.escape(str(etype))), + '#ffffff', '#6622aa', pyver + '
<br>' + date) + ''' +<p>A problem occurred in a Python script. Here is the sequence of +function calls leading up to the error, in the order they occurred.</p>
''' + + indent = '<tt>' + small('&nbsp;' * 5) + '&nbsp;</tt>' + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + if file: + file = os.path.abspath(file) + link = '<a href="file://%s">%s</a>' % (file, pydoc.html.escape(file)) + else: + file = link = '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + strong(pydoc.html.escape(func)) + if func != "<module>": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.html.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = ['<tr><td bgcolor="#d8bbff">%s%s %s</td></tr>' % + ('<big>&nbsp;</big>', link, call)] + if index is not None: + i = lnum - index + for line in lines: + num = small('&nbsp;' * (5-len(str(i))) + str(i)) + '&nbsp;' + if i in highlight: + line = '<tt>=&gt;%s%s</tt>' % (num, pydoc.html.preformat(line)) + rows.append('<tr><td bgcolor="#ffccee">%s</td></tr>' % line) + else: + line = '<tt>&nbsp;&nbsp;%s%s</tt>' % (num, pydoc.html.preformat(line)) + rows.append('<tr><td>%s</td></tr>' % grey(line)) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where in ('global', 'builtin'): + name = ('<em>%s</em> ' % where) + strong(name) + elif where == 'local': + name = strong(name) + else: + name = where + strong(name.split('.')[-1]) + dump.append('%s&nbsp;= %s' % (name, pydoc.html.repr(value))) + else: + dump.append(name + ' <em>undefined</em>') + + rows.append('<tr><td>%s</td></tr>' % small(grey(', '.join(dump)))) + frames.append(''' +<table width="100%%" cellspacing=0 cellpadding=0 border=0> +%s
</table>''' % '\n'.join(rows)) + + exception = ['<p>%s: %s' % (strong(pydoc.html.escape(str(etype))), + pydoc.html.escape(str(evalue)))] + for name in dir(evalue): + if name[:1] == '_': continue + value = pydoc.html.repr(getattr(evalue, name)) + exception.append('\n<br>
%s%s&nbsp;=\n%s' % (indent, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + + +<!-- The above is a description of an error in a Python program, formatted + for a Web browser because the 'cgitb' module was enabled. In case you + are not reading this in a Web browser, here is the original traceback: + +%s +--> +''' % pydoc.html.escape( + ''.join(traceback.format_exception(etype, evalue, etb))) +def text(einfo, context=5): + """Return a plain text document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' +A problem occurred in a Python script. Here is the sequence of +function calls leading up to the error, in the order they occurred. +''' + + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + file = file and os.path.abspath(file) or '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + func + if func != "<module>": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.text.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = [' %s %s' % (file, call)] + if index is not None: + i = lnum - index + for line in lines: + num = '%5d ' % i + rows.append(num+line.rstrip()) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where == 'global': name = 'global ' + name + elif where != 'local': name = where + name.split('.')[-1] + dump.append('%s = %s' % (name, pydoc.text.repr(value))) + else: + dump.append(name + ' undefined') + + rows.append('\n'.join(dump)) + frames.append('\n%s\n' % '\n'.join(rows)) + + exception = ['%s: %s' % (str(etype), str(evalue))] + for name in dir(evalue): + value = pydoc.text.repr(getattr(evalue, name)) + exception.append('\n%s%s = %s' % (" "*4, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + +The above is a description of an error in a Python program. Here is +the original traceback: + +%s +''' % ''.join(traceback.format_exception(etype, evalue, etb)) + +class Hook: + """A hook to replace sys.excepthook that shows tracebacks in HTML.""" + + def __init__(self, display=1, logdir=None, context=5, file=None, + format="html"): + self.display = display # send tracebacks to browser if true + self.logdir = logdir # log tracebacks to files if not None + self.context = context # number of source code lines per frame + self.file = file or sys.stdout # place to send the output + self.format = format + + def __call__(self, etype, evalue, etb): + self.handle((etype, evalue, etb)) + + def handle(self, info=None): + info = info or sys.exc_info() + if self.format == "html": + self.file.write(reset()) + + formatter = (self.format=="html") and html or text + plain = False + try: + doc = formatter(info, self.context) + except: # just in case something goes wrong + doc = ''.join(traceback.format_exception(*info)) + plain = True + + if self.display: + if plain: + doc = pydoc.html.escape(doc) + self.file.write('
<pre>' + doc + '</pre>\n') + else: + self.file.write(doc + '\n') + else: + self.file.write('<p>A problem occurred in a Python script.\n') + if self.logdir is not None: + suffix = ['.txt', '.html'][self.format=="html"] + (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir) + + try: + with os.fdopen(fd, 'w') as file: + file.write(doc) + msg = '%s contains the description of this error.' % path + except: + msg = 'Tried to save traceback to %s, but failed.' % path + + if self.format == 'html': + self.file.write('<p>%s</p>
\n' % msg) + else: + self.file.write(msg + '\n') + try: + self.file.flush() + except: pass + +handler = Hook().handle +def enable(display=1, logdir=None, context=5, format="html"): + """Install an exception handler that formats tracebacks as HTML. + + The optional argument 'display' can be set to 0 to suppress sending the + traceback to the browser, and 'logdir' can be set to a directory to cause + tracebacks to be written to files there.""" + sys.excepthook = Hook(display=display, logdir=logdir, + context=context, format=format) diff --git a/dist/lib/logging/chunk.py b/dist/lib/logging/chunk.py new file mode 100644 index 0000000..870c39f --- /dev/null +++ b/dist/lib/logging/chunk.py @@ -0,0 +1,169 @@ +"""Simple class to read IFF chunks. + +An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File +Format)) has the following structure: + ++----------------+ +| ID (4 bytes) | ++----------------+ +| size (4 bytes) | ++----------------+ +| data | +| ... | ++----------------+ + +The ID is a 4-byte string which identifies the type of chunk. + +The size field (a 32-bit value, encoded using big-endian byte order) +gives the size of the whole chunk, including the 8-byte header. + +Usually an IFF-type file consists of one or more chunks. The proposed +usage of the Chunk class defined here is to instantiate an instance at +the start of each chunk and read from the instance until it reaches +the end, after which a new instance can be instantiated. At the end +of the file, creating a new instance will fail with an EOFError +exception. + +Usage: +while True: + try: + chunk = Chunk(file) + except EOFError: + break + chunktype = chunk.getname() + while True: + data = chunk.read(nbytes) + if not data: + pass + # do something with data + +The interface is file-like. The implemented methods are: +read, close, seek, tell, isatty. +Extra methods are: skip() (called by close, skips to the end of the chunk), +getname() (returns the name (ID) of the chunk) + +The __init__ method has one required argument, a file-like object +(including a chunk instance), and one optional argument, a flag which +specifies whether or not chunks are aligned on 2-byte boundaries. The +default is 1, i.e. aligned. +""" + +class Chunk: + def __init__(self, file, align=True, bigendian=True, inclheader=False): + import struct + self.closed = False + self.align = align # whether to align to word (2-byte) boundaries + if bigendian: + strflag = '>' + else: + strflag = '<' + self.file = file + self.chunkname = file.read(4) + if len(self.chunkname) < 4: + raise EOFError + try: + self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0] + except struct.error: + raise EOFError from None + if inclheader: + self.chunksize = self.chunksize - 8 # subtract header + self.size_read = 0 + try: + self.offset = self.file.tell() + except (AttributeError, OSError): + self.seekable = False + else: + self.seekable = True + + def getname(self): + """Return the name (ID) of the current chunk.""" + return self.chunkname + + def getsize(self): + """Return the size of the current chunk.""" + return self.chunksize + + def close(self): + if not self.closed: + try: + self.skip() + finally: + self.closed = True + + def isatty(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return False + + def seek(self, pos, whence=0): + """Seek to specified position into the chunk. + Default position is 0 (start of chunk). + If the file is not seekable, this will result in an error. 
+ """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if not self.seekable: + raise OSError("cannot seek") + if whence == 1: + pos = pos + self.size_read + elif whence == 2: + pos = pos + self.chunksize + if pos < 0 or pos > self.chunksize: + raise RuntimeError + self.file.seek(self.offset + pos, 0) + self.size_read = pos + + def tell(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return self.size_read + + def read(self, size=-1): + """Read at most size bytes from the chunk. + If size is omitted or negative, read until the end + of the chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.size_read >= self.chunksize: + return b'' + if size < 0: + size = self.chunksize - self.size_read + if size > self.chunksize - self.size_read: + size = self.chunksize - self.size_read + data = self.file.read(size) + self.size_read = self.size_read + len(data) + if self.size_read == self.chunksize and \ + self.align and \ + (self.chunksize & 1): + dummy = self.file.read(1) + self.size_read = self.size_read + len(dummy) + return data + + def skip(self): + """Skip the rest of the chunk. + If you are not interested in the contents of the chunk, + this method should be called so that the file points to + the start of the next chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.seekable: + try: + n = self.chunksize - self.size_read + # maybe fix alignment + if self.align and (self.chunksize & 1): + n = n + 1 + self.file.seek(n, 1) + self.size_read = self.size_read + n + return + except OSError: + pass + while self.size_read < self.chunksize: + n = min(8192, self.chunksize - self.size_read) + dummy = self.read(n) + if not dummy: + raise EOFError diff --git a/dist/lib/logging/cmd.py b/dist/lib/logging/cmd.py new file mode 100644 index 0000000..859e910 --- /dev/null +++ b/dist/lib/logging/cmd.py @@ -0,0 +1,401 @@ +"""A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. 
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages.  If empty, no ruler line is drawn.  It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup.  This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+"""
+
+import string, sys
+
+__all__ = ["Cmd"]
+
+PROMPT = '(Cmd) '
+IDENTCHARS = string.ascii_letters + string.digits + '_'
+
+class Cmd:
+    """A simple framework for writing line-oriented command interpreters.
+
+    These are often useful for test harnesses, administrative tools, and
+    prototypes that will later be wrapped in a more sophisticated interface.
+
+    A Cmd instance or subclass instance is a line-oriented interpreter
+    framework.  There is no good reason to instantiate Cmd itself; rather,
+    it's useful as a superclass of an interpreter class you define yourself
+    in order to inherit Cmd's methods and encapsulate action methods.
+
+    """
+    prompt = PROMPT
+    identchars = IDENTCHARS
+    ruler = '='
+    lastcmd = ''
+    intro = None
+    doc_leader = ""
+    doc_header = "Documented commands (type help <topic>):"
+    misc_header = "Miscellaneous help topics:"
+    undoc_header = "Undocumented commands:"
+    nohelp = "*** No help on %s"
+    use_rawinput = 1
+
+    def __init__(self, completekey='tab', stdin=None, stdout=None):
+        """Instantiate a line-oriented interpreter framework.
+
+        The optional argument 'completekey' is the readline name of a
+        completion key; it defaults to the Tab key. If completekey is
+        not None and the readline module is available, command completion
+        is done automatically. The optional arguments stdin and stdout
+        specify alternate input and output file objects; if not specified,
+        sys.stdin and sys.stdout are used.
+
+        """
+        if stdin is not None:
+            self.stdin = stdin
+        else:
+            self.stdin = sys.stdin
+        if stdout is not None:
+            self.stdout = stdout
+        else:
+            self.stdout = sys.stdout
+        self.cmdqueue = []
+        self.completekey = completekey
+
+    def cmdloop(self, intro=None):
+        """Repeatedly issue a prompt, accept input, parse an initial prefix
+        off the received input, and dispatch to action methods, passing them
+        the remainder of the line as argument.
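+
+        Commands queued in self.cmdqueue are consumed before the prompt is
+        shown, so callers can pre-seed input (an illustrative sketch):
+
+            interp = Cmd()
+            interp.cmdqueue.append('help')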
+ + """ + + self.preloop() + if self.use_rawinput and self.completekey: + try: + import readline + self.old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind(self.completekey+": complete") + except ImportError: + pass + try: + if intro is not None: + self.intro = intro + if self.intro: + self.stdout.write(str(self.intro)+"\n") + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue.pop(0) + else: + if self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line.rstrip('\r\n') + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + finally: + if self.use_rawinput and self.completekey: + try: + import readline + readline.set_completer(self.old_completer) + except ImportError: + pass + + + def precmd(self, line): + """Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + """ + return line + + def postcmd(self, stop, line): + """Hook method executed just after a command dispatch is finished.""" + return stop + + def preloop(self): + """Hook method executed once when the cmdloop() method is called.""" + pass + + def postloop(self): + """Hook method executed once when the cmdloop() method is about to + return. + + """ + pass + + def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + """ + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + if cmd is None: + return self.default(line) + self.lastcmd = line + if line == 'EOF' : + self.lastcmd = '' + if cmd == '': + return self.default(line) + else: + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + return self.default(line) + return func(arg) + + def emptyline(self): + """Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + """ + if self.lastcmd: + return self.onecmd(self.lastcmd) + + def default(self, line): + """Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + """ + self.stdout.write('*** Unknown syntax: %s\n'%line) + + def completedefault(self, *ignored): + """Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. 
+ + """ + return [] + + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:] for a in self.get_names() if a.startswith(dotext)] + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + """ + if state == 0: + import readline + origline = readline.get_line_buffer() + line = origline.lstrip() + stripped = len(origline) - len(line) + begidx = readline.get_begidx() - stripped + endidx = readline.get_endidx() - stripped + if begidx>0: + cmd, args, foo = self.parseline(line) + if cmd == '': + compfunc = self.completedefault + else: + try: + compfunc = getattr(self, 'complete_' + cmd) + except AttributeError: + compfunc = self.completedefault + else: + compfunc = self.completenames + self.completion_matches = compfunc(text, line, begidx, endidx) + try: + return self.completion_matches[state] + except IndexError: + return None + + def get_names(self): + # This method used to pull in base class attributes + # at a time dir() didn't do it yet. + return dir(self.__class__) + + def complete_help(self, *args): + commands = set(self.completenames(*args)) + topics = set(a[5:] for a in self.get_names() + if a.startswith('help_' + args[0])) + return list(commands | topics) + + def do_help(self, arg): + 'List available commands with "help" or detailed help with "help cmd".' + if arg: + # XXX check arg syntax + try: + func = getattr(self, 'help_' + arg) + except AttributeError: + try: + doc=getattr(self, 'do_' + arg).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (arg,))) + return + func() + else: + names = self.get_names() + cmds_doc = [] + cmds_undoc = [] + help = {} + for name in names: + if name[:5] == 'help_': + help[name[5:]]=1 + names.sort() + # There can be duplicates if routines overridden + prevname = '' + for name in names: + if name[:3] == 'do_': + if name == prevname: + continue + prevname = name + cmd=name[3:] + if cmd in help: + cmds_doc.append(cmd) + del help[cmd] + elif getattr(self, name).__doc__: + cmds_doc.append(cmd) + else: + cmds_undoc.append(cmd) + self.stdout.write("%s\n"%str(self.doc_leader)) + self.print_topics(self.doc_header, cmds_doc, 15,80) + self.print_topics(self.misc_header, list(help.keys()),15,80) + self.print_topics(self.undoc_header, cmds_undoc, 15,80) + + def print_topics(self, header, cmds, cmdlen, maxcol): + if cmds: + self.stdout.write("%s\n"%str(header)) + if self.ruler: + self.stdout.write("%s\n"%str(self.ruler * len(header))) + self.columnize(cmds, maxcol-1) + self.stdout.write("\n") + + def columnize(self, list, displaywidth=80): + """Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). 
+ """ + if not list: + self.stdout.write("\n") + return + + nonstrings = [i for i in range(len(list)) + if not isinstance(list[i], str)] + if nonstrings: + raise TypeError("list[i] not a string for i in %s" + % ", ".join(map(str, nonstrings))) + size = len(list) + if size == 1: + self.stdout.write('%s\n'%str(list[0])) + return + # Try every row count from 1 upwards + for nrows in range(1, len(list)): + ncols = (size+nrows-1) // nrows + colwidths = [] + totwidth = -2 + for col in range(ncols): + colwidth = 0 + for row in range(nrows): + i = row + nrows*col + if i >= size: + break + x = list[i] + colwidth = max(colwidth, len(x)) + colwidths.append(colwidth) + totwidth += colwidth + 2 + if totwidth > displaywidth: + break + if totwidth <= displaywidth: + break + else: + nrows = len(list) + ncols = 1 + colwidths = [0] + for row in range(nrows): + texts = [] + for col in range(ncols): + i = row + nrows*col + if i >= size: + x = "" + else: + x = list[i] + texts.append(x) + while texts and not texts[-1]: + del texts[-1] + for col in range(len(texts)): + texts[col] = texts[col].ljust(colwidths[col]) + self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/dist/lib/logging/code.py b/dist/lib/logging/code.py new file mode 100644 index 0000000..76000f8 --- /dev/null +++ b/dist/lib/logging/code.py @@ -0,0 +1,315 @@ +"""Utilities needed to emulate Python's interactive interpreter. + +""" + +# Inspired by similar code by Jeff Epler and Fredrik Lundh. + + +import sys +import traceback +from codeop import CommandCompiler, compile_command + +__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", + "compile_command"] + +class InteractiveInterpreter: + """Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + """ + + def __init__(self, locals=None): + """Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + """ + if locals is None: + locals = {"__name__": "__console__", "__doc__": None} + self.locals = locals + self.compile = CommandCompiler() + + def runsource(self, source, filename="", symbol="single"): + """Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. + + """ + try: + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self.runcode(code) + return False + + def runcode(self, code): + """Execute a code object. 
+
+        When an exception occurs, self.showtraceback() is called to
+        display a traceback.  All exceptions are caught except
+        SystemExit, which is reraised.
+
+        A note about KeyboardInterrupt: this exception may occur
+        elsewhere in this code, and may not always be caught.  The
+        caller should be prepared to deal with it.
+
+        """
+        try:
+            exec(code, self.locals)
+        except SystemExit:
+            raise
+        except:
+            self.showtraceback()
+
+    def showsyntaxerror(self, filename=None):
+        """Display the syntax error that just occurred.
+
+        This doesn't display a stack trace because there isn't one.
+
+        If a filename is given, it is stuffed in the exception instead
+        of what was there before (because Python's parser always uses
+        "<string>" when reading from a string).
+
+        The output is written by self.write(), below.
+
+        """
+        type, value, tb = sys.exc_info()
+        sys.last_type = type
+        sys.last_value = value
+        sys.last_traceback = tb
+        if filename and type is SyntaxError:
+            # Work hard to stuff the correct filename in the exception
+            try:
+                msg, (dummy_filename, lineno, offset, line) = value.args
+            except ValueError:
+                # Not the format we expect; leave it alone
+                pass
+            else:
+                # Stuff in the right filename
+                value = SyntaxError(msg, (filename, lineno, offset, line))
+                sys.last_value = value
+        if sys.excepthook is sys.__excepthook__:
+            lines = traceback.format_exception_only(type, value)
+            self.write(''.join(lines))
+        else:
+            # If someone has set sys.excepthook, we let that take precedence
+            # over self.write
+            sys.excepthook(type, value, tb)
+
+    def showtraceback(self):
+        """Display the exception that just occurred.
+
+        We remove the first stack item because it is our own code.
+
+        The output is written by self.write(), below.
+
+        """
+        sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
+        sys.last_traceback = last_tb
+        try:
+            lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
+            if sys.excepthook is sys.__excepthook__:
+                self.write(''.join(lines))
+            else:
+                # If someone has set sys.excepthook, we let that take precedence
+                # over self.write
+                sys.excepthook(ei[0], ei[1], last_tb)
+        finally:
+            last_tb = ei = None
+
+    def write(self, data):
+        """Write a string.
+
+        The base implementation writes to sys.stderr; a subclass may
+        replace this with a different implementation.
+
+        """
+        sys.stderr.write(data)
+
+
+class InteractiveConsole(InteractiveInterpreter):
+    """Closely emulate the behavior of the interactive Python interpreter.
+
+    This class builds on InteractiveInterpreter and adds prompting
+    using the familiar sys.ps1 and sys.ps2, and input buffering.
+
+    """
+
+    def __init__(self, locals=None, filename="<console>"):
+        """Constructor.
+
+        The optional locals argument will be passed to the
+        InteractiveInterpreter base class.
+
+        The optional filename argument should specify the (file)name
+        of the input stream; it will show up in tracebacks.
+
+        """
+        InteractiveInterpreter.__init__(self, locals)
+        self.filename = filename
+        self.resetbuffer()
+
+    def resetbuffer(self):
+        """Reset the input buffer."""
+        self.buffer = []
+
+    def interact(self, banner=None, exitmsg=None):
+        """Closely emulate the interactive Python console.
+
+        The optional banner argument specifies the banner to print
+        before the first interaction; by default it prints a banner
+        similar to the one printed by the real Python interpreter,
+        followed by the current class name in parentheses (so as not
+        to confuse this with the real interpreter -- since it's so
+        close!).
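+
+        A typical embedding call (illustrative, not from the original
+        docs) is:
+
+            InteractiveConsole(locals()).interact('debug console')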
+ + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' + if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None, exitmsg=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. + + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + """ + console = InteractiveConsole(local) + if readfunc is not None: + console.raw_input = readfunc + else: + try: + import readline + except ImportError: + pass + console.interact(banner, exitmsg) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-q', action='store_true', + help="don't print version and copyright messages") + args = parser.parse_args() + if args.q or sys.flags.quiet: + banner = '' + else: + banner = None + interact(banner) diff --git a/dist/lib/logging/codecs.py b/dist/lib/logging/codecs.py new file mode 100644 index 0000000..7f23e97 --- /dev/null +++ b/dist/lib/logging/codecs.py @@ -0,0 +1,1126 @@ +""" codecs -- Python Codec Registry, API and helpers. 
+ + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" + +import builtins +import sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", + "StreamReader", "StreamWriter", + "StreamReaderWriter", "StreamRecoder", + "getencoder", "getdecoder", "getincrementalencoder", + "getincrementaldecoder", "getreader", "getwriter", + "encode", "decode", "iterencode", "iterdecode", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", + "backslashreplace_errors", "namereplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python 3.4 to blacklist the known non-Unicode + # codecs in the standard library. A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None, + *, _is_text_encoding=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at %#x>" % \ + (self.__class__.__module__, self.__class__.__qualname__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. 
These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences. + 'namereplace' - Replace with \\N{...} escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. + + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). 
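+
+    Concrete subclasses implement _buffer_encode(); encode() then carries
+    any unconsumed input across calls, e.g. (illustrative):
+
+        enc.encode('partial input')     # may buffer and return less
+        enc.encode('', final=True)      # the final call flushes the buffer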
+ """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. 
+ """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences. + 'namereplace' - Replace with \\N{...} escape sequences. + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Flushes and resets the codec buffers used for keeping state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. 
These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'backslashreplace' - Replace with backslashed escape sequences; + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + if chars < 0: + # For compatibility with other read() methods that take a + # single argument + chars = size + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + if not data: + break + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. 
+ + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. 
+ + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with StreamReaderWriter(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. 
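+
+        For instance (illustrative), the EncodedFile() helper later in
+        this module builds exactly such a recoder:
+
+            EncodedFile(open('f.bin', 'rb+'), 'latin-1', 'utf-8')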
+ + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = b''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + # Seeks must be propagated to both the readers and writers + # as they might need to reset their internal buffers. + self.reader.seek(offset, whence) + self.writer.seek(offset, whence) + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + Underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to -1 which means that the default buffer size will + be used. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + + try: + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + except: + file.close() + raise + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. 
+ + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. + + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. + + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. 
+ + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. + """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \\u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") + namereplace_errors = lookup_error("namereplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + namereplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/dist/lib/logging/codeop.py b/dist/lib/logging/codeop.py new file mode 100644 index 0000000..3c2bb60 --- /dev/null +++ b/dist/lib/logging/codeop.py @@ -0,0 +1,176 @@ +r"""Utilities to compile possibly incomplete Python source code. + +This module provides two interfaces, broadly similar to the builtin +function compile(), which take program text, a filename and a 'mode' +and: + +- Return code object if the command is complete and valid +- Return None if the command is incomplete +- Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + +Approach: + +First, check if the source consists entirely of blank lines and +comments; if so, replace it with 'pass', because the built-in +parser doesn't always do the right thing for these. + +Compile three times: as is, with \n, and with \n\n appended. If it +compiles as is, it's complete. If it compiles with one \n appended, +we expect more. If it doesn't compile either way, we compare the +error we get when compiling with \n or \n\n appended. If the errors +are the same, the code is broken. But if the errors are different, we +expect more. Not intuitive; not even guaranteed to hold in future +releases; but this matches the compiler's behavior from Python 1.4 +through 2.2, at least. 
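+
+For example (an illustrative sketch of the semantics described above):
+
+    compile_command('x = 1')     # complete and valid: returns a code object
+    compile_command('if x:')     # incomplete: returns None
+    compile_command('x +')       # broken: raises SyntaxError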
+
+Caveat:
+
+It is possible (but not likely) that the parser stops parsing with a
+successful outcome before reaching the end of the source; in this
+case, trailing symbols may be ignored instead of causing an error.
+For example, a backslash followed by two newlines may be followed by
+arbitrary garbage.  This will be fixed once the API for the parser is
+better.
+
+The two interfaces are:
+
+compile_command(source, filename, symbol):
+
+    Compiles a single command in the manner described above.
+
+CommandCompiler():
+
+    Instances of this class have __call__ methods identical in
+    signature to compile_command; the difference is that if the
+    instance compiles program text containing a __future__ statement,
+    the instance 'remembers' and compiles all subsequent program texts
+    with the statement in force.
+
+The module also provides another class:
+
+Compile():
+
+    Instances of this class act like the built-in function compile,
+    but with 'memory' in the sense described above.
+"""
+
+import __future__
+import warnings
+
+_features = [getattr(__future__, fname)
+             for fname in __future__.all_feature_names]
+
+__all__ = ["compile_command", "Compile", "CommandCompiler"]
+
+PyCF_DONT_IMPLY_DEDENT = 0x200          # Matches pythonrun.h
+
+def _maybe_compile(compiler, source, filename, symbol):
+    # Check for source consisting of only blank lines and comments
+    for line in source.split("\n"):
+        line = line.strip()
+        if line and line[0] != '#':
+            break               # Leave it alone
+    else:
+        if symbol != "eval":
+            source = "pass"     # Replace it with a 'pass' statement
+
+    err = err1 = err2 = None
+    code = code1 = code2 = None
+
+    try:
+        code = compiler(source, filename, symbol)
+    except SyntaxError as err:
+        pass
+
+    # Suppress warnings after the first compile to avoid duplication.
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore")
+        try:
+            code1 = compiler(source + "\n", filename, symbol)
+        except SyntaxError as e:
+            err1 = e
+
+        try:
+            code2 = compiler(source + "\n\n", filename, symbol)
+        except SyntaxError as e:
+            err2 = e
+
+    try:
+        if code:
+            return code
+        if not code1 and repr(err1) == repr(err2):
+            raise err1
+    finally:
+        err1 = err2 = None
+
+def _compile(source, filename, symbol):
+    return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
+
+def compile_command(source, filename="<input>", symbol="single"):
+    r"""Compile a command and determine whether it is incomplete.
+
+    Arguments:
+
+    source -- the source string; may contain \n characters
+    filename -- optional filename from which source was read; default
+                "<input>"
+    symbol -- optional grammar start symbol; "single" (default), "exec"
+              or "eval"
+
+    Return value / exceptions raised:
+
+    - Return a code object if the command is complete and valid
+    - Return None if the command is incomplete
+    - Raise SyntaxError, ValueError or OverflowError if the command is a
+      syntax error (OverflowError and ValueError can be produced by
+      malformed literals).
+ """ + return _maybe_compile(_compile, source, filename, symbol) + +class Compile: + """Instances of this class behave much like the built-in compile + function, but if one is used to compile text containing a future + statement, it "remembers" and compiles all subsequent program texts + with the statement in force.""" + def __init__(self): + self.flags = PyCF_DONT_IMPLY_DEDENT + + def __call__(self, source, filename, symbol): + codeob = compile(source, filename, symbol, self.flags, 1) + for feature in _features: + if codeob.co_flags & feature.compiler_flag: + self.flags |= feature.compiler_flag + return codeob + +class CommandCompiler: + """Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force.""" + + def __init__(self,): + self.compiler = Compile() + + def __call__(self, source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. + + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; + default "" + symbol -- optional grammar start symbol; "single" (default) or + "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(self.compiler, source, filename, symbol) diff --git a/dist/lib/logging/collections/__init__.py b/dist/lib/logging/collections/__init__.py new file mode 100644 index 0000000..a78a47c --- /dev/null +++ b/dist/lib/logging/collections/__init__.py @@ -0,0 +1,1279 @@ +'''This module implements specialized container datatypes providing +alternatives to Python's general purpose built-in containers, dict, +list, set, and tuple. 
+ +* namedtuple factory function for creating tuple subclasses with named fields +* deque list-like container with fast appends and pops on either end +* ChainMap dict-like class for creating a single view of multiple mappings +* Counter dict subclass for counting hashable objects +* OrderedDict dict subclass that remembers the order entries were added +* defaultdict dict subclass that calls a factory function to supply missing values +* UserDict wrapper around dictionary objects for easier dict subclassing +* UserList wrapper around list objects for easier list subclassing +* UserString wrapper around string objects for easier string subclassing + +''' + +__all__ = ['deque', 'defaultdict', 'namedtuple', 'UserDict', 'UserList', + 'UserString', 'Counter', 'OrderedDict', 'ChainMap'] + +import _collections_abc +from operator import itemgetter as _itemgetter, eq as _eq +from keyword import iskeyword as _iskeyword +import sys as _sys +import heapq as _heapq +from _weakref import proxy as _proxy +from itertools import repeat as _repeat, chain as _chain, starmap as _starmap +from reprlib import recursive_repr as _recursive_repr + +try: + from _collections import deque +except ImportError: + pass +else: + _collections_abc.MutableSequence.register(deque) + +try: + from _collections import defaultdict +except ImportError: + pass + + +def __getattr__(name): + # For backwards compatibility, continue to make the collections ABCs + # through Python 3.6 available through the collections module. + # Note, no new collections ABCs were added in Python 3.7 + if name in _collections_abc.__all__: + obj = getattr(_collections_abc, name) + import warnings + warnings.warn("Using or importing the ABCs from 'collections' instead " + "of from 'collections.abc' is deprecated since Python 3.3, " + "and in 3.9 it will stop working", + DeprecationWarning, stacklevel=2) + globals()[name] = obj + return obj + raise AttributeError(f'module {__name__!r} has no attribute {name!r}') + +################################################################################ +### OrderedDict +################################################################################ + +class _OrderedDictKeysView(_collections_abc.KeysView): + + def __reversed__(self): + yield from reversed(self._mapping) + +class _OrderedDictItemsView(_collections_abc.ItemsView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield (key, self._mapping[key]) + +class _OrderedDictValuesView(_collections_abc.ValuesView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield self._mapping[key] + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. 
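[editor's note] The sentinel-based circular linked list described in the comments above is what makes the order-aware operations cheap: each reordering relinks a single node instead of shifting entries. A small behavioral sketch of what that buys, using plain CPython and nothing specific to this patch:

    from collections import OrderedDict

    od = OrderedDict.fromkeys('abcde')
    od.move_to_end('b')               # O(1): relink one node at the tail
    print(list(od))                   # ['a', 'c', 'd', 'e', 'b']
    od.move_to_end('b', last=False)   # O(1): relink one node at the head
    print(list(od))                   # ['b', 'a', 'c', 'd', 'e']
    print(od.popitem(last=False))     # ('b', None) -- FIFO pop from the head
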
+ + def __init__(self, other=(), /, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries. Keyword argument order is preserved. + ''' + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(other, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + link.prev = None + link.next = None + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''Remove and return a (key, value) pair from the dictionary. + + Pairs are returned in LIFO order if last is true or FIFO order if false. + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last is false). + + Raise KeyError if the element does not exist. 
+ ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + soft_link = link_next.prev + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + root.prev = soft_link + last.next = link + else: + first = root.next + link.prev = root + link.next = first + first.prev = soft_link + root.next = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = _collections_abc.MutableMapping.update + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return _OrderedDictKeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return _OrderedDictItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return _OrderedDictValuesView(self) + + __ne__ = _collections_abc.MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + '''Insert key with a value of default if key is not in the dictionary. + + Return the value for key if key is in the dictionary, else default. + ''' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + return self.__class__, (), inst_dict or None, None, iter(self.items()) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''Create a new ordered dictionary with keys from iterable and values set to value. + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(map(_eq, self, other)) + return dict.__eq__(self, other) + + +try: + from _collections import OrderedDict +except ImportError: + # Leave the pure Python version in place. + pass + + +################################################################################ +### namedtuple +################################################################################ + +try: + from _collections import _tuplegetter +except ImportError: + _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) + +def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): + """Returns a new subclass of tuple with named fields. 
+ + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessible by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Validate the field names. At the user's option, either generate an error + # message or automatically replace the field name with a valid name. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() + field_names = list(map(str, field_names)) + typename = _sys.intern(str(typename)) + + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not name.isidentifier() + or _iskeyword(name) + or name.startswith('_') + or name in seen): + field_names[index] = f'_{index}' + seen.add(name) + + for name in [typename] + field_names: + if type(name) is not str: + raise TypeError('Type names and field names must be strings') + if not name.isidentifier(): + raise ValueError('Type names and field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Encountered duplicate field name: {name!r}') + seen.add(name) + + field_defaults = {} + if defaults is not None: + defaults = tuple(defaults) + if len(defaults) > len(field_names): + raise TypeError('Got more default values than field names') + field_defaults = dict(reversed(list(zip(reversed(field_names), + reversed(defaults))))) + + # Variables used in the methods and docstrings + field_names = tuple(map(_sys.intern, field_names)) + num_fields = len(field_names) + arg_list = repr(field_names).replace("'", "")[1:-1] + repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' + tuple_new = tuple.__new__ + _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip + + # Create all the named tuple methods to be added to the class namespace + + s = f'def __new__(_cls, {arg_list}): return _tuple_new(_cls, ({arg_list}))' + namespace = {'_tuple_new': tuple_new, '__name__': f'namedtuple_{typename}'} + # Note: exec() has the side-effect of interning the field names + exec(s, namespace) + __new__ = namespace['__new__'] + __new__.__doc__ = f'Create new instance of {typename}({arg_list})' + if defaults is not None: + __new__.__defaults__ = defaults + + @classmethod + def _make(cls, iterable): + result = tuple_new(cls, iterable) + if _len(result) != num_fields: + raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') + return result + + _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' + 'or iterable') + + def _replace(self, /, **kwds): + result = self._make(_map(kwds.pop, field_names, self)) + if kwds: + raise ValueError(f'Got unexpected field names: {list(kwds)!r}') + return result + + _replace.__doc__ = (f'Return a new {typename} object replacing specified ' + 'fields with new values') + + def __repr__(self): + 'Return a nicely formatted representation 
string' + return self.__class__.__name__ + repr_fmt % self + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + return _dict(_zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return _tuple(self) + + # Modify function metadata to help with introspection and debugging + for method in (__new__, _make.__func__, _replace, + __repr__, _asdict, __getnewargs__): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({arg_list})', + '__slots__': (), + '_fields': field_names, + '_field_defaults': field_defaults, + # alternate spelling for backward compatibility + '_fields_defaults': field_defaults, + '__new__': __new__, + '_make': _make, + '_replace': _replace, + '__repr__': __repr__, + '_asdict': _asdict, + '__getnewargs__': __getnewargs__, + } + for index, name in enumerate(field_names): + doc = _sys.intern(f'Alias for field number {index}') + class_namespace[name] = _tuplegetter(index, doc) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython), or where the user has + # specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... 
c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, /, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + super(Counter, self).__init__() + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abracadabra').most_common(3) + [('a', 5), ('b', 2), ('r', 2)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + return _heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because the semantics + # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). + # Initializing counters to zero values isn't necessary because zero + # is already the default value for counter lookups. Initializing + # to one is easily accomplished with Counter(set(iterable)). For + # more exotic cases, create a dictionary first using a dictionary + # comprehension or dict.fromkeys(). + raise NotImplementedError( + 'Counter.fromkeys() is undefined. 
Use Counter(iterable) instead.') + + def update(self, iterable=None, /, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, _collections_abc.Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + super(Counter, self).update(iterable) # fast path when counter is empty + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, /, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, _collections_abc.Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __repr__(self): + if not self: + return '%s()' % self.__class__.__name__ + try: + items = ', '.join(map('%r: %r'.__mod__, self.most_common())) + return '%s({%s})' % (self.__class__.__name__, items) + except TypeError: + # handle case where values are not orderable + return '{0}({1!r})'.format(self.__class__.__name__, dict(self)) + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + # + # Rich comparison operators for multiset subset and superset tests + # are deliberately omitted due to semantic conflicts with the + # existing inherited dict equality method. Subset and superset + # semantics ignore zero counts and require that p≤q ∧ p≥q → p=q; + # however, that would not be the case for p=Counter(a=1, b=0) + # and q=Counter(a=1) where the dictionaries are not equal. + + def __add__(self, other): + '''Add counts from two counters. 
+ + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. + + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + def __pos__(self): + 'Adds an empty counter, effectively stripping negative and zero counts' + result = Counter() + for elem, count in self.items(): + if count > 0: + result[elem] = count + return result + + def __neg__(self): + '''Subtracts from an empty counter. Strips positive and zero counts, + and flips the sign on negative counts. + + ''' + result = Counter() + for elem, count in self.items(): + if count < 0: + result[elem] = 0 - count + return result + + def _keep_positive(self): + '''Internal method to strip elements with a negative or zero count''' + nonpositive = [elem for elem, count in self.items() if not count > 0] + for elem in nonpositive: + del self[elem] + return self + + def __iadd__(self, other): + '''Inplace add from another counter, keeping only positive counts. + + >>> c = Counter('abbb') + >>> c += Counter('bcc') + >>> c + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] += count + return self._keep_positive() + + def __isub__(self, other): + '''Inplace subtract counter, but keep only results with positive counts. + + >>> c = Counter('abbbc') + >>> c -= Counter('bccd') + >>> c + Counter({'b': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] -= count + return self._keep_positive() + + def __ior__(self, other): + '''Inplace union is the maximum of value from either counter. 
+ + >>> c = Counter('abbb') + >>> c |= Counter('bcc') + >>> c + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + for elem, other_count in other.items(): + count = self[elem] + if other_count > count: + self[elem] = other_count + return self._keep_positive() + + def __iand__(self, other): + '''Inplace intersection is the minimum of corresponding counts. + + >>> c = Counter('abbb') + >>> c &= Counter('bcc') + >>> c + Counter({'b': 1}) + + ''' + for elem, count in self.items(): + other_count = other[elem] + if other_count < count: + self[elem] = other_count + return self._keep_positive() + + +######################################################################## +### ChainMap +######################################################################## + +class ChainMap(_collections_abc.MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + be accessed or updated using the *maps* attribute. There is no other + state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. + + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + d = {} + for mapping in reversed(self.maps): + d.update(mapping) # reuses stored hash values if possible + return iter(d) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self, m=None): # like Django's Context.push() + '''New ChainMap with a new map followed by all previous maps. + If no map is provided, an empty dict is used. + ''' + if m is None: + m = {} + return self.__class__(m, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' 
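[editor's note] An aside on ChainMap's read/write asymmetry described in its docstring above: lookups search the whole chain, while mutations touch only maps[0]. A concrete sketch using only the class defined in this file (the mapping contents are illustrative):

    from collections import ChainMap

    defaults = {'color': 'red', 'user': 'guest'}
    overrides = {}
    cm = ChainMap(overrides, defaults)

    print(cm['color'])        # 'red' -- found by searching down the chain
    cm['color'] = 'blue'      # writes go to the first mapping only
    print(overrides)          # {'color': 'blue'}
    print(defaults['color'])  # 'red' -- the underlying default is untouched
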
+ try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' + self.maps[0].clear() + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(_collections_abc.MutableMapping): + + # Start by filling-out the abstract methods + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'UserDict' object " + "needs an argument") + self, *args = args + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + if args: + dict = args[0] + elif 'dict' in kwargs: + dict = kwargs.pop('dict') + import warnings + warnings.warn("Passing 'dict' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + dict = None + self.data = {} + if dict is not None: + self.update(dict) + if kwargs: + self.update(kwargs) + __init__.__text_signature__ = '($self, dict=None, /, **kwargs)' + + def __len__(self): return len(self.data) + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + def __setitem__(self, key, item): self.data[key] = item + def __delitem__(self, key): del self.data[key] + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): return repr(self.data) + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"].copy() + return inst + + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + + +################################################################################ +### UserList +################################################################################ + +class UserList(_collections_abc.MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? 
+ if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + def __repr__(self): return repr(self.data) + def __lt__(self, other): return self.data < self.__cast(other) + def __le__(self, other): return self.data <= self.__cast(other) + def __eq__(self, other): return self.data == self.__cast(other) + def __gt__(self, other): return self.data > self.__cast(other) + def __ge__(self, other): return self.data >= self.__cast(other) + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + def __contains__(self, item): return item in self.data + def __len__(self): return len(self.data) + def __getitem__(self, i): + if isinstance(i, slice): + return self.__class__(self.data[i]) + else: + return self.data[i] + def __setitem__(self, i, item): self.data[i] = item + def __delitem__(self, i): del self.data[i] + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __imul__(self, n): + self.data *= n + return self + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"][:] + return inst + def append(self, item): self.data.append(item) + def insert(self, i, item): self.data.insert(i, item) + def pop(self, i=-1): return self.data.pop(i) + def remove(self, item): self.data.remove(item) + def clear(self): self.data.clear() + def copy(self): return self.__class__(self) + def count(self, item): return self.data.count(item) + def index(self, item, *args): return self.data.index(item, *args) + def reverse(self): self.data.reverse() + def sort(self, /, *args, **kwds): self.data.sort(*args, **kwds) + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + + +################################################################################ +### UserString +################################################################################ + +class UserString(_collections_abc.Sequence): + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + def __str__(self): return str(self.data) + def __repr__(self): return repr(self.data) + def __int__(self): return int(self.data) + def __float__(self): return float(self.data) + def __complex__(self): return complex(self.data) + def __hash__(self): return hash(self.data) + def __getnewargs__(self): + return (self.data[:],) + + def __eq__(self, string): + if isinstance(string, UserString): + return self.data == string.data + return self.data == string + def __lt__(self, string): + if 
isinstance(string, UserString): + return self.data < string.data + return self.data < string + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): return len(self.data) + def __getitem__(self, index): return self.__class__(self.data[index]) + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + def __mul__(self, n): + return self.__class__(self.data*n) + __rmul__ = __mul__ + def __mod__(self, args): + return self.__class__(self.data % args) + def __rmod__(self, template): + return self.__class__(str(template) % self) + # the following methods are defined in alphabetical order: + def capitalize(self): return self.__class__(self.data.capitalize()) + def casefold(self): + return self.__class__(self.data.casefold()) + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + def encode(self, encoding='utf-8', errors='strict'): + encoding = 'utf-8' if encoding is None else encoding + errors = 'strict' if errors is None else errors + return self.data.encode(encoding, errors) + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + def format(self, /, *args, **kwds): + return self.data.format(*args, **kwds) + def format_map(self, mapping): + return self.data.format_map(mapping) + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + def isalpha(self): return self.data.isalpha() + def isalnum(self): return self.data.isalnum() + def isascii(self): return self.data.isascii() + def isdecimal(self): return self.data.isdecimal() + def isdigit(self): return self.data.isdigit() + def isidentifier(self): return self.data.isidentifier() + def islower(self): return self.data.islower() + def isnumeric(self): return self.data.isnumeric() + def isprintable(self): return self.data.isprintable() + def isspace(self): return self.data.isspace() + def istitle(self): return self.data.istitle() + def isupper(self): return self.data.isupper() + def join(self, seq): return self.data.join(seq) + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + def lower(self): return self.__class__(self.data.lower()) + def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars)) + maketrans = str.maketrans + def partition(self, sep): + return self.data.partition(sep) + def replace(self, old, new, maxsplit=-1): + if 
isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + def rpartition(self, sep): + return self.data.rpartition(sep) + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + def splitlines(self, keepends=False): return self.data.splitlines(keepends) + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + def strip(self, chars=None): return self.__class__(self.data.strip(chars)) + def swapcase(self): return self.__class__(self.data.swapcase()) + def title(self): return self.__class__(self.data.title()) + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + def upper(self): return self.__class__(self.data.upper()) + def zfill(self, width): return self.__class__(self.data.zfill(width)) diff --git a/dist/lib/logging/collections/__pycache__/__init__.cpython-38.opt-1.pyc b/dist/lib/logging/collections/__pycache__/__init__.cpython-38.opt-1.pyc new file mode 100644 index 0000000..d1be6c9 Binary files /dev/null and b/dist/lib/logging/collections/__pycache__/__init__.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/collections/__pycache__/abc.cpython-38.opt-1.pyc b/dist/lib/logging/collections/__pycache__/abc.cpython-38.opt-1.pyc new file mode 100644 index 0000000..65996c4 Binary files /dev/null and b/dist/lib/logging/collections/__pycache__/abc.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/collections/abc.py b/dist/lib/logging/collections/abc.py new file mode 100644 index 0000000..891600d --- /dev/null +++ b/dist/lib/logging/collections/abc.py @@ -0,0 +1,2 @@ +from _collections_abc import * +from _collections_abc import __all__ diff --git a/dist/lib/logging/colorsys.py b/dist/lib/logging/colorsys.py new file mode 100644 index 0000000..b93e384 --- /dev/null +++ b/dist/lib/logging/colorsys.py @@ -0,0 +1,164 @@ +"""Conversion functions between RGB and other color systems. + +This modules provides two functions for each color system ABC: + + rgb_to_abc(r, g, b) --> a, b, c + abc_to_rgb(a, b, c) --> r, g, b + +All inputs and outputs are triples of floats in the range [0.0...1.0] +(with the exception of I and Q, which covers a slightly larger range). +Inputs outside the valid range may cause exceptions or invalid outputs. 
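[editor's note] Since all of these converters operate on float triples in [0.0, 1.0], a round-trip check is the natural usage example. A sketch (input values chosen to be exactly representable in binary floating point, so the round trip here is exact):

    import colorsys

    r, g, b = 0.25, 0.5, 0.5
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    print(h, s, v)                       # (0.5, 0.5, 0.5)
    print(colorsys.hsv_to_rgb(h, s, v))  # (0.25, 0.5, 0.5) -- round-trips exactly
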
+ +Supported color systems: +RGB: Red, Green, Blue components +YIQ: Luminance, Chrominance (used by composite video signals) +HLS: Hue, Luminance, Saturation +HSV: Hue, Saturation, Value +""" + +# References: +# http://en.wikipedia.org/wiki/YIQ +# http://en.wikipedia.org/wiki/HLS_color_space +# http://en.wikipedia.org/wiki/HSV_color_space + +__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", + "rgb_to_hsv","hsv_to_rgb"] + +# Some floating point constants + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +# YIQ: used by composite video signals (linear combinations of RGB) +# Y: perceived grey level (0.0 == black, 1.0 == white) +# I, Q: color components +# +# There are a great many versions of the constants used in these formulae. +# The ones in this library uses constants from the FCC version of NTSC. + +def rgb_to_yiq(r, g, b): + y = 0.30*r + 0.59*g + 0.11*b + i = 0.74*(r-y) - 0.27*(b-y) + q = 0.48*(r-y) + 0.41*(b-y) + return (y, i, q) + +def yiq_to_rgb(y, i, q): + # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) + # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) + # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 + + r = y + 0.9468822170900693*i + 0.6235565819861433*q + g = y - 0.27478764629897834*i - 0.6356910791873801*q + b = y - 1.1085450346420322*i + 1.7090069284064666*q + + if r < 0.0: + r = 0.0 + if g < 0.0: + g = 0.0 + if b < 0.0: + b = 0.0 + if r > 1.0: + r = 1.0 + if g > 1.0: + g = 1.0 + if b > 1.0: + b = 1.0 + return (r, g, b) + + +# HLS: Hue, Luminance, Saturation +# H: position in the spectrum +# L: color lightness +# S: color saturation + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + # XXX Can optimize (maxc+minc) and (maxc-minc) + l = (minc+maxc)/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = (maxc-minc) / (maxc+minc) + else: + s = (maxc-minc) / (2.0-maxc-minc) + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + + +# HSV: Hue, Saturation, Value +# H: position in the spectrum +# S: color saturation ("purity") +# V: color brightness + +def rgb_to_hsv(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + v = maxc + if minc == maxc: + return 0.0, 0.0, v + s = (maxc-minc) / maxc + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, s, v + +def hsv_to_rgb(h, s, v): + if s == 0.0: + return v, v, v + i = int(h*6.0) # XXX assume int() truncates! 
+ f = (h*6.0) - i + p = v*(1.0 - s) + q = v*(1.0 - s*f) + t = v*(1.0 - s*(1.0-f)) + i = i%6 + if i == 0: + return v, t, p + if i == 1: + return q, v, p + if i == 2: + return p, v, t + if i == 3: + return p, q, v + if i == 4: + return t, p, v + if i == 5: + return v, p, q + # Cannot get here diff --git a/dist/lib/logging/compileall.py b/dist/lib/logging/compileall.py new file mode 100644 index 0000000..bfac8ef --- /dev/null +++ b/dist/lib/logging/compileall.py @@ -0,0 +1,333 @@ +"""Module/script to byte-compile all .py files to .pyc files. + +When called as a script with arguments, this compiles the directories +given as arguments recursively; the -l option prevents it from +recursing into directories. + +Without arguments, if compiles all modules on sys.path, without +recursing into subdirectories. (Even though it should do so for +packages -- for now, you'll have to deal with packages separately.) + +See module py_compile for details of the actual byte-compilation. +""" +import os +import sys +import importlib.util +import py_compile +import struct + +from functools import partial + +__all__ = ["compile_dir","compile_file","compile_path"] + +def _walk_dir(dir, ddir=None, maxlevels=10, quiet=0): + if quiet < 2 and isinstance(dir, os.PathLike): + dir = os.fspath(dir) + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + if quiet < 2: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if ddir is not None: + dfile = os.path.join(ddir, name) + else: + dfile = None + if not os.path.isdir(fullname): + yield fullname, ddir + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, ddir=dfile, + maxlevels=maxlevels - 1, quiet=quiet) + +def compile_dir(dir, maxlevels=10, ddir=None, force=False, rx=None, + quiet=0, legacy=False, optimize=-1, workers=1, + invalidation_mode=None): + """Byte-compile all modules in the given directory tree. + + Arguments (only dir is required): + + dir: the directory to byte-compile + maxlevels: maximum recursion level (default 10) + ddir: the directory that will be prepended to the path to the + file as it is compiled into each byte-code file. 
+ force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: optimization level or -1 for level of the interpreter + workers: maximum number of parallel workers + invalidation_mode: how the up-to-dateness of the pyc will be checked + """ + ProcessPoolExecutor = None + if workers < 0: + raise ValueError('workers must be greater or equal to 0') + if workers != 1: + try: + # Only import when needed, as low resource platforms may + # fail to import it + from concurrent.futures import ProcessPoolExecutor + except ImportError: + workers = 1 + files_and_ddirs = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels, + ddir=ddir) + success = True + if workers != 1 and ProcessPoolExecutor is not None: + # If workers == 0, let ProcessPoolExecutor choose + workers = workers or None + with ProcessPoolExecutor(max_workers=workers) as executor: + results = executor.map( + partial(_compile_file_tuple, + force=force, rx=rx, quiet=quiet, + legacy=legacy, optimize=optimize, + invalidation_mode=invalidation_mode, + ), + files_and_ddirs) + success = min(results, default=True) + else: + for file, dfile in files_and_ddirs: + if not compile_file(file, dfile, force, rx, quiet, + legacy, optimize, invalidation_mode): + success = False + return success + +def _compile_file_tuple(file_and_dfile, **kwargs): + """Needs to be toplevel for ProcessPoolExecutor.""" + file, dfile = file_and_dfile + return compile_file(file, dfile, **kwargs) + +def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None): + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile + ddir: if given, the directory name compiled in to the + byte-code file. 
+ force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: optimization level or -1 for level of the interpreter + invalidation_mode: how the up-to-dateness of the pyc will be checked + """ + success = True + if quiet < 2 and isinstance(fullname, os.PathLike): + fullname = os.fspath(fullname) + name = os.path.basename(fullname) + if ddir is not None: + dfile = os.path.join(ddir, name) + else: + dfile = None + if rx is not None: + mo = rx.search(fullname) + if mo: + return success + if os.path.isfile(fullname): + if legacy: + cfile = fullname + 'c' + else: + if optimize >= 0: + opt = optimize if optimize >= 1 else '' + cfile = importlib.util.cache_from_source( + fullname, optimization=opt) + else: + cfile = importlib.util.cache_from_source(fullname) + cache_dir = os.path.dirname(cfile) + head, tail = name[:-3], name[-3:] + if tail == '.py': + if not force: + try: + mtime = int(os.stat(fullname).st_mtime) + expect = struct.pack('<4sll', importlib.util.MAGIC_NUMBER, + 0, mtime) + with open(cfile, 'rb') as chandle: + actual = chandle.read(12) + if expect == actual: + return success + except OSError: + pass + if not quiet: + print('Compiling {!r}...'.format(fullname)) + try: + ok = py_compile.compile(fullname, cfile, dfile, True, + optimize=optimize, + invalidation_mode=invalidation_mode) + except py_compile.PyCompileError as err: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + # escape non-printable characters in msg + msg = err.msg.encode(sys.stdout.encoding, + errors='backslashreplace') + msg = msg.decode(sys.stdout.encoding) + print(msg) + except (SyntaxError, UnicodeError, OSError) as e: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + print(e.__class__.__name__ + ':', e) + else: + if ok == 0: + success = False + return success + +def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None): + """Byte-compile all module on sys.path. + + Arguments (all optional): + + skip_curdir: if true, skip current directory (default True) + maxlevels: max recursion level (default 0) + force: as for compile_dir() (default False) + quiet: as for compile_dir() (default 0) + legacy: as for compile_dir() (default False) + optimize: as for compile_dir() (default -1) + invalidation_mode: as for compiler_dir() + """ + success = True + for dir in sys.path: + if (not dir or dir == os.curdir) and skip_curdir: + if quiet < 2: + print('Skipping current directory') + else: + success = success and compile_dir( + dir, + maxlevels, + None, + force, + quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + ) + return success + + +def main(): + """Script main program.""" + import argparse + + parser = argparse.ArgumentParser( + description='Utilities to support installing Python libraries.') + parser.add_argument('-l', action='store_const', const=0, + default=10, dest='maxlevels', + help="don't recurse into subdirectories") + parser.add_argument('-r', type=int, dest='recursion', + help=('control the maximum recursion level. 
' + 'if `-l` and `-r` options are specified, ' + 'then `-r` takes precedence.')) + parser.add_argument('-f', action='store_true', dest='force', + help='force rebuild even if timestamps are up to date') + parser.add_argument('-q', action='count', dest='quiet', default=0, + help='output only error messages; -qq will suppress ' + 'the error messages as well.') + parser.add_argument('-b', action='store_true', dest='legacy', + help='use legacy (pre-PEP3147) compiled file locations') + parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, + help=('directory to prepend to file paths for use in ' + 'compile-time tracebacks and in runtime ' + 'tracebacks in cases where the source file is ' + 'unavailable')) + parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, + help=('skip files matching the regular expression; ' + 'the regexp is searched for in the full path ' + 'of each file considered for compilation')) + parser.add_argument('-i', metavar='FILE', dest='flist', + help=('add all the files and directories listed in ' + 'FILE to the list considered for compilation; ' + 'if "-", names are read from stdin')) + parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', + help=('zero or more file and directory names ' + 'to compile; if no arguments given, defaults ' + 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + invalidation_modes = [mode.name.lower().replace('_', '-') + for mode in py_compile.PycInvalidationMode] + parser.add_argument('--invalidation-mode', + choices=sorted(invalidation_modes), + help=('set .pyc invalidation mode; defaults to ' + '"checked-hash" if the SOURCE_DATE_EPOCH ' + 'environment variable is set, and ' + '"timestamp" otherwise.')) + + args = parser.parse_args() + compile_dests = args.compile_dest + + if args.rx: + import re + args.rx = re.compile(args.rx) + + + if args.recursion is not None: + maxlevels = args.recursion + else: + maxlevels = args.maxlevels + + # if flist is provided then load it + if args.flist: + try: + with (sys.stdin if args.flist=='-' else open(args.flist)) as f: + for line in f: + compile_dests.append(line.strip()) + except OSError: + if args.quiet < 2: + print("Error reading file list {}".format(args.flist)) + return False + + if args.invalidation_mode: + ivl_mode = args.invalidation_mode.replace('-', '_').upper() + invalidation_mode = py_compile.PycInvalidationMode[ivl_mode] + else: + invalidation_mode = None + + success = True + try: + if compile_dests: + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy, + invalidation_mode=invalidation_mode): + success = False + else: + if not compile_dir(dest, maxlevels, args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers, + invalidation_mode=invalidation_mode): + success = False + return success + else: + return compile_path(legacy=args.legacy, force=args.force, + quiet=args.quiet, + invalidation_mode=invalidation_mode) + except KeyboardInterrupt: + if args.quiet < 2: + print("\n[interrupted]") + return False + return True + + +if __name__ == '__main__': + exit_status = int(not main()) + sys.exit(exit_status) diff --git a/dist/lib/logging/concurrent/__init__.py b/dist/lib/logging/concurrent/__init__.py new file mode 100644 index 0000000..196d378 --- /dev/null +++ b/dist/lib/logging/concurrent/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. 
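[editor's note] The compileall module above can be driven programmatically or from the command line; a minimal sketch of both (the directory path is illustrative, echoing this patch's layout):

    # Programmatic use: byte-compile a tree, errors-only output, forced rebuild.
    import compileall

    ok = compileall.compile_dir('dist/lib/logging', quiet=1, force=True)
    print('all compiled' if ok else 'some files failed')

From a shell, the rough equivalent with the flags defined above is: python -m compileall -f -q dist/lib/logging
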
diff --git a/dist/lib/logging/concurrent/__pycache__/__init__.cpython-38.opt-1.pyc b/dist/lib/logging/concurrent/__pycache__/__init__.cpython-38.opt-1.pyc new file mode 100644 index 0000000..cb37ba0 Binary files /dev/null and b/dist/lib/logging/concurrent/__pycache__/__init__.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/concurrent/futures/__init__.py b/dist/lib/logging/concurrent/futures/__init__.py new file mode 100644 index 0000000..d746aea --- /dev/null +++ b/dist/lib/logging/concurrent/futures/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + InvalidStateError, + BrokenExecutor, + Future, + Executor, + wait, + as_completed) + +__all__ = ( + 'FIRST_COMPLETED', + 'FIRST_EXCEPTION', + 'ALL_COMPLETED', + 'CancelledError', + 'TimeoutError', + 'BrokenExecutor', + 'Future', + 'Executor', + 'wait', + 'as_completed', + 'ProcessPoolExecutor', + 'ThreadPoolExecutor', +) + + +def __dir__(): + return __all__ + ('__author__', '__doc__') + + +def __getattr__(name): + global ProcessPoolExecutor, ThreadPoolExecutor + + if name == 'ProcessPoolExecutor': + from .process import ProcessPoolExecutor as pe + ProcessPoolExecutor = pe + return pe + + if name == 'ThreadPoolExecutor': + from .thread import ThreadPoolExecutor as te + ThreadPoolExecutor = te + return te + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/dist/lib/logging/concurrent/futures/__pycache__/__init__.cpython-38.opt-1.pyc b/dist/lib/logging/concurrent/futures/__pycache__/__init__.cpython-38.opt-1.pyc new file mode 100644 index 0000000..1d1acff Binary files /dev/null and b/dist/lib/logging/concurrent/futures/__pycache__/__init__.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/concurrent/futures/__pycache__/_base.cpython-38.opt-1.pyc b/dist/lib/logging/concurrent/futures/__pycache__/_base.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a007e39 Binary files /dev/null and b/dist/lib/logging/concurrent/futures/__pycache__/_base.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/concurrent/futures/__pycache__/process.cpython-38.opt-1.pyc b/dist/lib/logging/concurrent/futures/__pycache__/process.cpython-38.opt-1.pyc new file mode 100644 index 0000000..2b1efad Binary files /dev/null and b/dist/lib/logging/concurrent/futures/__pycache__/process.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/concurrent/futures/__pycache__/thread.cpython-38.opt-1.pyc b/dist/lib/logging/concurrent/futures/__pycache__/thread.cpython-38.opt-1.pyc new file mode 100644 index 0000000..a12207a Binary files /dev/null and b/dist/lib/logging/concurrent/futures/__pycache__/thread.cpython-38.opt-1.pyc differ diff --git a/dist/lib/logging/concurrent/futures/_base.py b/dist/lib/logging/concurrent/futures/_base.py new file mode 100644 index 0000000..6001e3b --- /dev/null +++ b/dist/lib/logging/concurrent/futures/_base.py @@ -0,0 +1,643 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. 
+ +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import collections +import logging +import threading +import time + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. +LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super().add_result(future) + self.event.set() + + def add_exception(self, future): + super().add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super().add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super().__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super().add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super().add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super().add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + 
self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + """ + if timeout is not None: + end_time = timeout + time.monotonic() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs,)) + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.monotonic() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)) + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. 
If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. + """ + with _AcquireFutures(fs): + done = set(f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + not_done = set(fs) - done + + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, set(fs) - done) + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. 
+ """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True of the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + raise self._exception + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + try: + fn(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. + """ + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + else: + raise TimeoutError() + + # The following methods should only be used by Executors and in tests. 
+ def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(*args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + if len(args) >= 2: + pass + elif not args: + raise TypeError("descriptor 'submit' of 'Executor' object " + "needs an argument") + elif 'fn' in kwargs: + import warnings + warnings.warn("Passing 'fn' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError('submit expected at least 1 positional argument, ' + 'got %d' % (len(args)-1)) + + raise NotImplementedError() + submit.__text_signature__ = '($self, fn, /, *args, **kwargs)' + + def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. 
If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if timeout is not None: + end_time = timeout + time.monotonic() + + fs = [self.submit(fn, *args) for args in zip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + # Careful not to keep a reference to the popped future + if timeout is None: + yield fs.pop().result() + else: + yield fs.pop().result(end_time - time.monotonic()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False + + +class BrokenExecutor(RuntimeError): + """ + Raised when a executor has become non-functional after a severe failure. + """ diff --git a/dist/lib/logging/concurrent/futures/process.py b/dist/lib/logging/concurrent/futures/process.py new file mode 100644 index 0000000..2b2b78e --- /dev/null +++ b/dist/lib/logging/concurrent/futures/process.py @@ -0,0 +1,704 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. + +The following diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... | +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. 
NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import atexit +import os +from concurrent.futures import _base +import queue +from queue import Full +import multiprocessing as mp +import multiprocessing.connection +from multiprocessing.queues import Queue +import threading +import weakref +from functools import partial +import itertools +import sys +import traceback + +# Workers are created as daemon threads and processes. This is done to allow the +# interpreter to exit when there are still idle processes in a +# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, +# allowing workers to die with the interpreter has two undesirable properties: +# - The workers would still be running during interpreter shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads/processes finish. + +_threads_wakeups = weakref.WeakKeyDictionary() +_global_shutdown = False + + +class _ThreadWakeup: + def __init__(self): + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + self._writer.close() + self._reader.close() + + def wakeup(self): + self._writer.send_bytes(b"") + + def clear(self): + while self._reader.poll(): + self._reader.recv_bytes() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + items = list(_threads_wakeups.items()) + for _, thread_wakeup in items: + thread_wakeup.wakeup() + for t, _ in items: + t.join() + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). +EXTRA_QUEUED_CALLS = 1 + + +# On Windows, WaitForMultipleObjects is used to wait for processes to finish. +# It can wait on, at most, 63 objects. 
There is an overhead of two objects: +# - the result queue reader +# - the thread wakeup reader +_MAX_WINDOWS_WORKERS = 63 - 2 + +# Hack to embed stringification of remote traceback in local traceback + +class _RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class _ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _SafeQueue(Queue): + """Safe Queue set exception to the future object linked to a job""" + def __init__(self, max_size=0, *, ctx, pending_work_items): + self.pending_work_items = pending_work_items + super().__init__(max_size, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + tb = traceback.format_exception(type(e), e, e.__traceback__) + e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb))) + work_item = self.pending_work_items.pop(obj.work_id, None) + # work_item can be None if another process terminated. In this case, + # the queue_manager_thread fails all work_items with BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(e) + else: + super()._on_queue_feeder_error(e, obj) + + +def _get_chunks(*iterables, chunksize): + """ Iterates over zip()ed iterables in chunks. """ + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + +def _process_chunk(fn, chunk): + """ Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. + + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None): + """Safely send back the given result or exception""" + try: + result_queue.put(_ResultItem(work_id, result=result, + exception=exception)) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + result_queue.put(_ResultItem(work_id, exception=exc)) + + +def _process_worker(call_queue, result_queue, initializer, initargs): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. + result_queue: A ctx.Queue of _ResultItems that will written + to by the worker. 
+ initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(os.getpid()) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + _sendback_result(result_queue, call_item.work_id, exception=exc) + else: + _sendback_result(result_queue, call_item.work_id, result=r) + del r + + # Liberate the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + +def _add_call_item_to_queue(pending_work_items, + work_ids, + call_queue): + """Fills call_queue with _WorkItems from pending_work_items. + + This function never blocks. + + Args: + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids + are consumed and the corresponding _WorkItems from + pending_work_items are transformed into _CallItems and put in + call_queue. + call_queue: A multiprocessing.Queue that will be filled with _CallItems + derived from _WorkItems. + """ + while True: + if call_queue.full(): + return + try: + work_id = work_ids.get(block=False) + except queue.Empty: + return + else: + work_item = pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del pending_work_items[work_id] + continue + + +def _queue_management_worker(executor_reference, + processes, + pending_work_items, + work_ids_queue, + call_queue, + result_queue, + thread_wakeup): + """Manages the communication between this process and the worker processes. + + This function is run in a local thread. + + Args: + executor_reference: A weakref.ref to the ProcessPoolExecutor that owns + this thread. Used to determine if the ProcessPoolExecutor has been + garbage collected and that this function can exit. + process: A list of the ctx.Process instances used as + workers. + pending_work_items: A dict mapping work ids to _WorkItems e.g. + {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). + call_queue: A ctx.Queue that will be filled with _CallItems + derived from _WorkItems for processing by the process workers. + result_queue: A ctx.SimpleQueue of _ResultItems generated by the + process workers. + thread_wakeup: A _ThreadWakeup to allow waking up the + queue_manager_thread from the main Thread and avoid deadlocks + caused by permanently locked queues. + """ + executor = None + + def shutting_down(): + return (_global_shutdown or executor is None + or executor._shutdown_thread) + + def shutdown_worker(): + # This is an upper bound on the number of children alive. + n_children_alive = sum(p.is_alive() for p in processes.values()) + n_children_to_stop = n_children_alive + n_sentinels_sent = 0 + # Send the right number of sentinels, to make sure all children are + # properly terminated. 
+ while n_sentinels_sent < n_children_to_stop and n_children_alive > 0: + for i in range(n_children_to_stop - n_sentinels_sent): + try: + call_queue.put_nowait(None) + n_sentinels_sent += 1 + except Full: + break + n_children_alive = sum(p.is_alive() for p in processes.values()) + + # Release the queue's resources as soon as possible. + call_queue.close() + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on Mac OS X. + for p in processes.values(): + p.join() + + result_reader = result_queue._reader + wakeup_reader = thread_wakeup._reader + readers = [result_reader, wakeup_reader] + + while True: + _add_call_item_to_queue(pending_work_items, + work_ids_queue, + call_queue) + + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake up + # signal send. The wake up signals come either from new tasks being + # submitted, from the executor being shutdown/gc-ed, or from the + # shutdown of the python interpreter. + worker_sentinels = [p.sentinel for p in processes.values()] + ready = mp.connection.wait(readers + worker_sentinels) + + cause = None + is_broken = True + if result_reader in ready: + try: + result_item = result_reader.recv() + is_broken = False + except BaseException as e: + cause = traceback.format_exception(type(e), e, e.__traceback__) + + elif wakeup_reader in ready: + is_broken = False + result_item = None + thread_wakeup.clear() + if is_broken: + # Mark the process pool broken so that submits fail right now. + executor = executor_reference() + if executor is not None: + executor._broken = ('A child process terminated ' + 'abruptly, the process pool is not ' + 'usable anymore') + executor._shutdown_thread = True + executor = None + bpe = BrokenProcessPool("A process in the process pool was " + "terminated abruptly while the future was " + "running or pending.") + if cause is not None: + bpe.__cause__ = _RemoteTraceback( + f"\n'''\n{''.join(cause)}'''") + # All futures in flight must be marked failed + for work_id, work_item in pending_work_items.items(): + work_item.future.set_exception(bpe) + # Delete references to object. See issue16284 + del work_item + pending_work_items.clear() + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + for p in processes.values(): + p.terminate() + shutdown_worker() + return + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID + # (avoids marking the executor broken) + assert shutting_down() + p = processes.pop(result_item) + p.join() + if not processes: + shutdown_worker() + return + elif result_item is not None: + work_item = pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + # Delete references to object. See issue16284 + del work_item + # Delete reference to result_item + del result_item + + # Check whether we should start shutting down. + executor = executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. + if shutting_down(): + try: + # Flag the executor as shutting down as early as possible if it + # is not gc-ed yet. 
+ if executor is not None: + executor._shutdown_thread = True + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not pending_work_items: + shutdown_worker() + return + except Full: + # This is not a problem: we will eventually be woken up (in + # result_queue.get()) and be able to send a sentinel again. + pass + executor = None + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ("system provides too few semaphores (%d" + " available, 256 necessary)" % nsems_max) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. + """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +class BrokenProcessPool(_base.BrokenExecutor): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None, mp_context=None, + initializer=None, initargs=()): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + mp_context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. + initializer: A callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = os.cpu_count() or 1 + if sys.platform == 'win32': + self._max_workers = min(_MAX_WINDOWS_WORKERS, + self._max_workers) + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + elif (sys.platform == 'win32' and + max_workers > _MAX_WINDOWS_WORKERS): + raise ValueError( + f"max_workers must be <= {_MAX_WINDOWS_WORKERS}") + + self._max_workers = max_workers + + if mp_context is None: + mp_context = mp.get_context() + self._mp_context = mp_context + + if initializer is not None and not callable(initializer): + raise TypeError("initializer must be a callable") + self._initializer = initializer + self._initargs = initargs + + # Management thread + self._queue_management_thread = None + + # Map of pids to processes + self._processes = {} + + # Shutdown is a two-step process. + self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._broken = False + self._queue_count = 0 + self._pending_work_items = {} + + # Create communication channels for the executor + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. 
But don't make it too big + # because futures in the call queue cannot be cancelled. + queue_size = self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, ctx=self._mp_context, + pending_work_items=self._pending_work_items) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + self._result_queue = mp_context.SimpleQueue() + self._work_ids = queue.Queue() + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of queue_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send the wakeup signal to the queue_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + self._queue_management_thread_wakeup = _ThreadWakeup() + + def _start_queue_management_thread(self): + if self._queue_management_thread is None: + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item. + def weakref_cb(_, + thread_wakeup=self._queue_management_thread_wakeup): + mp.util.debug('Executor collected: triggering callback for' + ' QueueManager wakeup') + thread_wakeup.wakeup() + # Start the processes so that their sentinels are known. + self._adjust_process_count() + self._queue_management_thread = threading.Thread( + target=_queue_management_worker, + args=(weakref.ref(self, weakref_cb), + self._processes, + self._pending_work_items, + self._work_ids, + self._call_queue, + self._result_queue, + self._queue_management_thread_wakeup), + name="QueueManagerThread") + self._queue_management_thread.daemon = True + self._queue_management_thread.start() + _threads_wakeups[self._queue_management_thread] = \ + self._queue_management_thread_wakeup + + def _adjust_process_count(self): + for _ in range(len(self._processes), self._max_workers): + p = self._mp_context.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue, + self._initializer, + self._initargs)) + p.start() + self._processes[p.pid] = p + + def submit(*args, **kwargs): + if len(args) >= 2: + self, fn, *args = args + elif not args: + raise TypeError("descriptor 'submit' of 'ProcessPoolExecutor' object " + "needs an argument") + elif 'fn' in kwargs: + fn = kwargs.pop('fn') + self, *args = args + import warnings + warnings.warn("Passing 'fn' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError('submit expected at least 1 positional argument, ' + 'got %d' % (len(args)-1)) + + with self._shutdown_lock: + if self._broken: + raise BrokenProcessPool(self._broken) + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + if _global_shutdown: + raise RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._queue_management_thread_wakeup.wakeup() + + self._start_queue_management_thread() + return f + submit.__text_signature__ = _base.Executor.submit.__text_signature__ + submit.__doc__ = _base.Executor.submit.__doc__ + + 
def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map(partial(_process_chunk, fn), + _get_chunks(*iterables, chunksize=chunksize), + timeout=timeout) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown_thread = True + if self._queue_management_thread: + # Wake up queue management thread + self._queue_management_thread_wakeup.wakeup() + if wait: + self._queue_management_thread.join() + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. + self._queue_management_thread = None + if self._call_queue is not None: + self._call_queue.close() + if wait: + self._call_queue.join_thread() + self._call_queue = None + self._result_queue = None + self._processes = None + + if self._queue_management_thread_wakeup: + self._queue_management_thread_wakeup.close() + self._queue_management_thread_wakeup = None + + shutdown.__doc__ = _base.Executor.shutdown.__doc__ + +atexit.register(_python_exit) diff --git a/dist/lib/logging/concurrent/futures/thread.py b/dist/lib/logging/concurrent/futures/thread.py new file mode 100644 index 0000000..9e669b2 --- /dev/null +++ b/dist/lib/logging/concurrent/futures/thread.py @@ -0,0 +1,237 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import atexit +from concurrent.futures import _base +import itertools +import queue +import threading +import weakref +import os + +# Workers are created as daemon threads. This is done to allow the interpreter +# to exit when there are still idle threads in a ThreadPoolExecutor's thread +# pool (i.e. shutdown() was not called). However, allowing workers to die with +# the interpreter has two undesirable properties: +# - The workers would still be running during interpreter shutdown, +# meaning that they would fail in unpredictable ways. +# - The workers could be killed while evaluating a work item, which could +# be bad if the callable being evaluated has external side-effects e.g. +# writing to a file. +# +# To work around this problem, an exit handler is installed which tells the +# workers to exit when their work queues are empty and then waits until the +# threads finish. 
+ +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False + +def _python_exit(): + global _shutdown + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +atexit.register(_python_exit) + + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException as exc: + self.future.set_exception(exc) + # Break a reference cycle with the exception 'exc' + self = None + else: + self.future.set_result(result) + + +def _worker(executor_reference, work_queue, initializer, initargs): + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + executor = executor_reference() + if executor is not None: + executor._initializer_failed() + return + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + + # attempt to increment idle count + executor = executor_reference() + if executor is not None: + executor._idle_semaphore.release() + del executor + continue + + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Flag the executor as shutting down as early as possible if it + # is not gc-ed yet. + if executor is not None: + executor._shutdown = True + # Notify other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + + +class BrokenThreadPool(_base.BrokenExecutor): + """ + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + """ + + +class ThreadPoolExecutor(_base.Executor): + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().__next__ + + def __init__(self, max_workers=None, thread_name_prefix='', + initializer=None, initargs=()): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. + """ + if max_workers is None: + # ThreadPoolExecutor is often used to: + # * CPU bound task which releases GIL + # * I/O bound task (which releases GIL, of course) + # + # We use cpu_count + 4 for both types of tasks. + # But we limit it to 32 to avoid consuming surprisingly large resources + # on many-core machines. 
+ max_workers = min(32, (os.cpu_count() or 1) + 4) + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + if initializer is not None and not callable(initializer): + raise TypeError("initializer must be a callable") + + self._max_workers = max_workers + self._work_queue = queue.SimpleQueue() + self._idle_semaphore = threading.Semaphore(0) + self._threads = set() + self._broken = False + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + self._initializer = initializer + self._initargs = initargs + + def submit(*args, **kwargs): + if len(args) >= 2: + self, fn, *args = args + elif not args: + raise TypeError("descriptor 'submit' of 'ThreadPoolExecutor' object " + "needs an argument") + elif 'fn' in kwargs: + fn = kwargs.pop('fn') + self, *args = args + import warnings + warnings.warn("Passing 'fn' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError('submit expected at least 1 positional argument, ' + 'got %d' % (len(args)-1)) + + with self._shutdown_lock: + if self._broken: + raise BrokenThreadPool(self._broken) + + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + if _shutdown: + raise RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__text_signature__ = _base.Executor.submit.__text_signature__ + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self._idle_semaphore.acquire(timeout=0): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. + def weakref_cb(_, q=self._work_queue): + q.put(None) + + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue, + self._initializer, + self._initargs)) + t.daemon = True + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def _initializer_failed(self): + with self._shutdown_lock: + self._broken = ('A thread initializer failed, the thread pool ' + 'is not usable anymore') + # Drain work queue and mark pending futures failed + while True: + try: + work_item = self._work_queue.get_nowait() + except queue.Empty: + break + if work_item is not None: + work_item.future.set_exception(BrokenThreadPool(self._broken)) + + def shutdown(self, wait=True): + with self._shutdown_lock: + self._shutdown = True + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/dist/lib/logging/configparser.py b/dist/lib/logging/configparser.py new file mode 100644 index 0000000..924cc56 --- /dev/null +++ b/dist/lib/logging/configparser.py @@ -0,0 +1,1363 @@ +"""Configuration file parser. + +A configuration file consists of sections, led by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. 
+ +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=<unset>, converters=<unset>): + Create the parser. When `defaults' is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type' is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters' is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes' is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values' is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value' is True (default: False), options without + values are accepted; the value presented for these is None. + + When `default_section' is given, the name of the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. RawConfigParser objects don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildout`` + inspired ExtendedInterpolation implementation. + + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the iterable of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name' attribute, the string `<???>' is used). + + read_string(string) + Read configuration from a given string. 
+ + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars' argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option' is a key in + `vars', the value from `vars' is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters' is True (the default), delimiters + between keys and values are surrounded by spaces. +""" + +from collections.abc import MutableMapping +from collections import ChainMap as _ChainMap +import functools +import io +import itertools +import os +import re +import sys +import warnings + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +_default_dict = dict +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. 
+ """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. + """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {!r} in section {!r} contains " + "an interpolation key {!r} which is not a valid option name. " + "Raw value: {!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {!r} " + "in section {!r} contains an interpolation key which " + "cannot be substituted in {} steps. Raw value: {!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. 
" + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %r' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in future versions. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None' as +# a valid fallback value. +_UNSET = object() + + +class Interpolation: + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. 
+    """
+
+    _KEYCRE = re.compile(r"%\(([^)]+)\)s")
+
+    def before_get(self, parser, section, option, value, defaults):
+        L = []
+        self._interpolate_some(parser, option, L, value, section, defaults, 1)
+        return ''.join(L)
+
+    def before_set(self, parser, section, option, value):
+        tmp_value = value.replace('%%', '')  # escaped percent signs
+        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
+        if '%' in tmp_value:
+            raise ValueError("invalid interpolation syntax in %r at "
+                             "position %d" % (value, tmp_value.find('%')))
+        return value
+
+    def _interpolate_some(self, parser, option, accum, rest, section, map,
+                          depth):
+        rawval = parser.get(section, option, raw=True, fallback=rest)
+        if depth > MAX_INTERPOLATION_DEPTH:
+            raise InterpolationDepthError(option, section, rawval)
+        while rest:
+            p = rest.find("%")
+            if p < 0:
+                accum.append(rest)
+                return
+            if p > 0:
+                accum.append(rest[:p])
+                rest = rest[p:]
+            # p is no longer used
+            c = rest[1:2]
+            if c == "%":
+                accum.append("%")
+                rest = rest[2:]
+            elif c == "(":
+                m = self._KEYCRE.match(rest)
+                if m is None:
+                    raise InterpolationSyntaxError(option, section,
+                        "bad interpolation variable reference %r" % rest)
+                var = parser.optionxform(m.group(1))
+                rest = rest[m.end():]
+                try:
+                    v = map[var]
+                except KeyError:
+                    raise InterpolationMissingOptionError(
+                        option, section, rawval, var) from None
+                if "%" in v:
+                    self._interpolate_some(parser, option, accum, v,
+                                           section, map, depth + 1)
+                else:
+                    accum.append(v)
+            else:
+                raise InterpolationSyntaxError(
+                    option, section,
+                    "'%%' must be followed by '%%' or '(', "
+                    "found: %r" % (rest,))
+
+
+class ExtendedInterpolation(Interpolation):
+    """Advanced variant of interpolation, supports the syntax used by
+    `zc.buildout'. Enables interpolation between sections.
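+
+    For example (editor's sketch, not from the original docstring; the names
+    are invented), ${section:option} reaches across sections:
+
+        cp = ConfigParser(interpolation=ExtendedInterpolation())
+        cp.read_dict({'common': {'root': '/srv'},
+                      'web': {'docs': '${common:root}/www'}})
+        assert cp.get('web', 'docs') == '/srv/www'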
+    """
+
+    _KEYCRE = re.compile(r"\$\{([^}]+)\}")
+
+    def before_get(self, parser, section, option, value, defaults):
+        L = []
+        self._interpolate_some(parser, option, L, value, section, defaults, 1)
+        return ''.join(L)
+
+    def before_set(self, parser, section, option, value):
+        tmp_value = value.replace('$$', '')  # escaped dollar signs
+        tmp_value = self._KEYCRE.sub('', tmp_value)  # valid syntax
+        if '$' in tmp_value:
+            raise ValueError("invalid interpolation syntax in %r at "
+                             "position %d" % (value, tmp_value.find('$')))
+        return value
+
+    def _interpolate_some(self, parser, option, accum, rest, section, map,
+                          depth):
+        rawval = parser.get(section, option, raw=True, fallback=rest)
+        if depth > MAX_INTERPOLATION_DEPTH:
+            raise InterpolationDepthError(option, section, rawval)
+        while rest:
+            p = rest.find("$")
+            if p < 0:
+                accum.append(rest)
+                return
+            if p > 0:
+                accum.append(rest[:p])
+                rest = rest[p:]
+            # p is no longer used
+            c = rest[1:2]
+            if c == "$":
+                accum.append("$")
+                rest = rest[2:]
+            elif c == "{":
+                m = self._KEYCRE.match(rest)
+                if m is None:
+                    raise InterpolationSyntaxError(option, section,
+                        "bad interpolation variable reference %r" % rest)
+                path = m.group(1).split(':')
+                rest = rest[m.end():]
+                sect = section
+                opt = option
+                try:
+                    if len(path) == 1:
+                        opt = parser.optionxform(path[0])
+                        v = map[opt]
+                    elif len(path) == 2:
+                        sect = path[0]
+                        opt = parser.optionxform(path[1])
+                        v = parser.get(sect, opt, raw=True)
+                    else:
+                        raise InterpolationSyntaxError(
+                            option, section,
+                            "More than one ':' found: %r" % (rest,))
+                except (KeyError, NoSectionError, NoOptionError):
+                    raise InterpolationMissingOptionError(
+                        option, section, rawval, ":".join(path)) from None
+                if "$" in v:
+                    self._interpolate_some(parser, opt, accum, v, sect,
+                                           dict(parser.items(sect, raw=True)),
+                                           depth + 1)
+                else:
+                    accum.append(v)
+            else:
+                raise InterpolationSyntaxError(
+                    option, section,
+                    "'$' must be followed by '$' or '{', "
+                    "found: %r" % (rest,))
+
+
+class LegacyInterpolation(Interpolation):
+    """Deprecated interpolation used in old versions of ConfigParser.
+    Use BasicInterpolation or ExtendedInterpolation instead."""
+
+    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
+
+    def before_get(self, parser, section, option, value, vars):
+        rawval = value
+        depth = MAX_INTERPOLATION_DEPTH
+        while depth:                    # Loop through this until it's done
+            depth -= 1
+            if value and "%(" in value:
+                replace = functools.partial(self._interpolation_replace,
+                                            parser=parser)
+                value = self._KEYCRE.sub(replace, value)
+                try:
+                    value = value % vars
+                except KeyError as e:
+                    raise InterpolationMissingOptionError(
+                        option, section, rawval, e.args[0]) from None
+            else:
+                break
+        if value and "%(" in value:
+            raise InterpolationDepthError(option, section, rawval)
+        return value
+
+    def before_set(self, parser, section, option, value):
+        return value
+
+    @staticmethod
+    def _interpolation_replace(match, parser):
+        s = match.group(1)
+        if s is None:
+            return match.group()
+        else:
+            return "%%(%s)s" % parser.optionxform(s)
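+
+
+# All interpolation variants above share MAX_INTERPOLATION_DEPTH as their
+# recursion bound. A value that keeps referring to itself exhausts it
+# (editor's sketch, not part of the original module; the names are invented):
+#
+#     cp = ConfigParser()
+#     cp.read_dict({'s': {'x': '%(x)s again'}})
+#     cp.get('s', 'x')  # raises InterpolationDepthError after 10 steps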
+
+
+class RawConfigParser(MutableMapping):
+    """ConfigParser that does not do interpolation."""
+
+    # Regular expressions for parsing section headers and options
+    _SECT_TMPL = r"""
+        \[                                 # [
+        (?P<header>[^]]+)                  # very permissive!
+        \]                                 # ]
+        """
+    _OPT_TMPL = r"""
+        (?P<option>