diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/INSTALLER b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/LICENSE.txt b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/LICENSE.txt
new file mode 100644
index 00000000..07806f8a
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/LICENSE.txt
@@ -0,0 +1,19 @@
+This is the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+Copyright (c) Alex Grönholm
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this
+software and associated documentation files (the "Software"), to deal in the Software
+without restriction, including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
+to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or
+substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/METADATA b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/METADATA
new file mode 100644
index 00000000..ba40f6be
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/METADATA
@@ -0,0 +1,133 @@
+Metadata-Version: 2.1
+Name: APScheduler
+Version: 3.6.3
+Summary: In-process task scheduler with Cron-like capabilities
+Home-page: https://github.com/agronholm/apscheduler
+Author: Alex Grönholm
+Author-email: apscheduler@nextday.fi
+License: MIT
+Keywords: scheduling cron
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Dist: setuptools (>=0.7)
+Requires-Dist: six (>=1.4.0)
+Requires-Dist: pytz
+Requires-Dist: tzlocal (>=1.2)
+Requires-Dist: futures ; python_version == "2.7"
+Requires-Dist: funcsigs ; python_version == "2.7"
+Provides-Extra: asyncio
+Requires-Dist: trollius ; (python_version == "2.7") and extra == 'asyncio'
+Provides-Extra: doc
+Requires-Dist: sphinx ; extra == 'doc'
+Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
+Provides-Extra: gevent
+Requires-Dist: gevent ; extra == 'gevent'
+Provides-Extra: mongodb
+Requires-Dist: pymongo (>=2.8) ; extra == 'mongodb'
+Provides-Extra: redis
+Requires-Dist: redis (>=3.0) ; extra == 'redis'
+Provides-Extra: rethinkdb
+Requires-Dist: rethinkdb (>=2.4.0) ; extra == 'rethinkdb'
+Provides-Extra: sqlalchemy
+Requires-Dist: sqlalchemy (>=0.8) ; extra == 'sqlalchemy'
+Provides-Extra: testing
+Requires-Dist: pytest ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-tornado5 ; extra == 'testing'
+Requires-Dist: mock ; (python_version == "2.7") and extra == 'testing'
+Requires-Dist: pytest-asyncio (<0.6) ; (python_version == "3.4") and extra == 'testing'
+Requires-Dist: pytest-asyncio ; (python_version >= "3.5") and extra == 'testing'
+Provides-Extra: tornado
+Requires-Dist: tornado (>=4.3) ; extra == 'tornado'
+Provides-Extra: twisted
+Requires-Dist: twisted ; extra == 'twisted'
+Provides-Extra: zookeeper
+Requires-Dist: kazoo ; extra == 'zookeeper'
+
+.. image:: https://travis-ci.com/agronholm/apscheduler.svg?branch=master
+ :target: https://travis-ci.com/agronholm/apscheduler
+ :alt: Build Status
+.. image:: https://coveralls.io/repos/github/agronholm/apscheduler/badge.svg?branch=master
+ :target: https://coveralls.io/github/agronholm/apscheduler?branch=master
+ :alt: Code Coverage
+
+Advanced Python Scheduler (APScheduler) is a Python library that lets you schedule your Python code
+to be executed later, either just once or periodically. You can add new jobs or remove old ones on
+the fly as you please. If you store your jobs in a database, they will also survive scheduler
+restarts and maintain their state. When the scheduler is restarted, it will then run all the jobs
+it should have run while it was offline [#f1]_.
+
+Among other things, APScheduler can be used as a cross-platform, application specific replacement
+to platform specific schedulers, such as the cron daemon or the Windows task scheduler. Please
+note, however, that APScheduler is **not** a daemon or service itself, nor does it come with any
+command line tools. It is primarily meant to be run inside existing applications. That said,
+APScheduler does provide some building blocks for you to build a scheduler service or to run a
+dedicated scheduler process.
+
+APScheduler has three built-in scheduling systems you can use:
+
+* Cron-style scheduling (with optional start/end times)
+* Interval-based execution (runs jobs on even intervals, with optional start/end times)
+* One-off delayed execution (runs jobs once, on a set date/time)
+
+You can mix and match scheduling systems and the backends where the jobs are stored any way you
+like. Supported backends for storing jobs include:
+
+* Memory
+* `SQLAlchemy `_ (any RDBMS supported by SQLAlchemy works)
+* `MongoDB `_
+* `Redis `_
+* `RethinkDB `_
+* `ZooKeeper `_
+
+APScheduler also integrates with several common Python frameworks, like:
+
+* `asyncio `_ (:pep:`3156`)
+* `gevent `_
+* `Tornado `_
+* `Twisted `_
+* `Qt `_ (using either
+ `PyQt `_ or
+ `PySide `_)
+
+.. [#f1] The cutoff period for this is also configurable.
+
+
+Documentation
+-------------
+
+Documentation can be found `here `_.
+
+
+Source
+------
+
+The source can be browsed at `Github `_.
+
+
+Reporting bugs
+--------------
+
+A `bug tracker `_ is provided by Github.
+
+
+Getting help
+------------
+
+If you have problems or other questions, you can either:
+
+* Ask in the `apscheduler `_ room on Gitter
+* Ask on the `APScheduler Google group `_, or
+* Ask on `StackOverflow `_ and tag your
+ question with the ``apscheduler`` tag
+
+
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/RECORD b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/RECORD
new file mode 100644
index 00000000..60fa4acb
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/RECORD
@@ -0,0 +1,83 @@
+APScheduler-3.6.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+APScheduler-3.6.3.dist-info/LICENSE.txt,sha256=YWP3mH37ONa8MgzitwsvArhivEESZRbVUu8c1DJH51g,1130
+APScheduler-3.6.3.dist-info/METADATA,sha256=VHah1X4AqMCGgcvEm06M-pAqmNC9q4tOQRbUv3b0Jh0,5398
+APScheduler-3.6.3.dist-info/RECORD,,
+APScheduler-3.6.3.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+APScheduler-3.6.3.dist-info/entry_points.txt,sha256=7RgkYN_OYyCUQtIGhj-UNcelnIjsNm7nC9rogdMQh3U,1148
+APScheduler-3.6.3.dist-info/top_level.txt,sha256=O3oMCWxG-AHkecUoO6Ze7-yYjWrttL95uHO8-RFdYvE,12
+apscheduler/__init__.py,sha256=qFEK2ysRBcLiYmm3deyJJ1avUOugaM_nCGHMD42WMBw,380
+apscheduler/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/__pycache__/events.cpython-36.pyc,,
+apscheduler/__pycache__/job.cpython-36.pyc,,
+apscheduler/__pycache__/util.cpython-36.pyc,,
+apscheduler/events.py,sha256=KRMTDQUS6d2uVnrQvPoz3ZPV5V9XKsCAZLsgx913FFo,3593
+apscheduler/executors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+apscheduler/executors/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/executors/__pycache__/asyncio.cpython-36.pyc,,
+apscheduler/executors/__pycache__/base.cpython-36.pyc,,
+apscheduler/executors/__pycache__/base_py3.cpython-36.pyc,,
+apscheduler/executors/__pycache__/debug.cpython-36.pyc,,
+apscheduler/executors/__pycache__/gevent.cpython-36.pyc,,
+apscheduler/executors/__pycache__/pool.cpython-36.pyc,,
+apscheduler/executors/__pycache__/tornado.cpython-36.pyc,,
+apscheduler/executors/__pycache__/twisted.cpython-36.pyc,,
+apscheduler/executors/asyncio.py,sha256=ji5f6Qm2uGhov-3w52CXHZi8jc5U_gS56lisQylKTBQ,2087
+apscheduler/executors/base.py,sha256=hogiMc_t-huw6BMod0HEeY2FhRNmAAUyNNuBHvIX31M,5336
+apscheduler/executors/base_py3.py,sha256=s_4siAjBHrr7JZnm64VVow9zyvs2JBc-VRPkPuDeBTI,1775
+apscheduler/executors/debug.py,sha256=15_ogSBzl8RRCfBYDnkIV2uMH8cLk1KImYmBa_NVGpc,573
+apscheduler/executors/gevent.py,sha256=aulrNmoefyBgrOkH9awRhFiXIDnSCnZ4U0o0_JXIXgc,777
+apscheduler/executors/pool.py,sha256=q9TC6KzwWI9tpLNxQhdrKRWFtsN5dmx_Vegu23BV-Sk,1672
+apscheduler/executors/tornado.py,sha256=DU75VaQ9R6nBuy8lbPUvDKUgsuJcZqwAvURC5vg3r6w,1780
+apscheduler/executors/twisted.py,sha256=bRoU0C4BoVcS6_BjKD5wfUs0IJpGkmLsRAcMH2rJJss,778
+apscheduler/job.py,sha256=zT9_GuOpxuxEPVZU38tantw9383tAPRBPoH6dd4uHGA,11088
+apscheduler/jobstores/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+apscheduler/jobstores/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/base.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/memory.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/mongodb.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/redis.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/rethinkdb.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/sqlalchemy.cpython-36.pyc,,
+apscheduler/jobstores/__pycache__/zookeeper.cpython-36.pyc,,
+apscheduler/jobstores/base.py,sha256=DXzSW9XscueHZHMvy1qFiG-vYqUl_MMv0n0uBSZWXGo,4523
+apscheduler/jobstores/memory.py,sha256=ZxWiKsqfsCHFvac-6X9BztuhnuSxlOYi1dhT6g-pjQo,3655
+apscheduler/jobstores/mongodb.py,sha256=e9KNzPFrjiVpiM3iPT_c0ONxZQT70VCF2rDXW0-22zk,5296
+apscheduler/jobstores/redis.py,sha256=kjQDIzPXz-Yq976U9HK3aMkcCI_QRLKgTADQWKewtik,5483
+apscheduler/jobstores/rethinkdb.py,sha256=k1rSLYJqejuhQxJY3pXwHAQYcpZ1QFJsoQ8n0oEu5MM,5863
+apscheduler/jobstores/sqlalchemy.py,sha256=5H5T05cQ2ZtkRuRb8hKkcLzZSQneAT13NMKXby3nzWE,6122
+apscheduler/jobstores/zookeeper.py,sha256=BzyqZ08XIDcbu5frQWGmDVEHAEScNxjt8oML6Tty8j8,6406
+apscheduler/schedulers/__init__.py,sha256=jM63xA_K7GSToBenhsz-SCcqfhk1pdEVb6ajwoO5Kqg,406
+apscheduler/schedulers/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/asyncio.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/background.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/base.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/blocking.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/gevent.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/qt.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/tornado.cpython-36.pyc,,
+apscheduler/schedulers/__pycache__/twisted.cpython-36.pyc,,
+apscheduler/schedulers/asyncio.py,sha256=0j0mcDpf-zI_vQHcUCZZtBfEEZEiocEOZ767efIZ5YM,2082
+apscheduler/schedulers/background.py,sha256=dGX0T0z6T6HzZHG7njWgp90SFHpetZ4ZBUV2gGOSqoc,1505
+apscheduler/schedulers/base.py,sha256=EUGbQ5R2jGA4PEEehU2ASuKVe0SsLqtWESAtTqAJW50,42863
+apscheduler/schedulers/blocking.py,sha256=c-5YR-dKn3D82tPt38t50KGPJrAiC852v8ai2Vwanmg,924
+apscheduler/schedulers/gevent.py,sha256=csPBvV75FGcboXXsdex6fCD7J54QgBddYNdWj62ZO9g,1031
+apscheduler/schedulers/qt.py,sha256=AhHU62ybOOVSD4OhMwoPRRUCoM5cf5q26uD3hPglfnc,1297
+apscheduler/schedulers/tornado.py,sha256=D9Vaq3Ee9EFiXa1jDy9tedI048gR_YT_LAFUWqO_uEw,1926
+apscheduler/schedulers/twisted.py,sha256=D5EBjjMRtMBxy0_aAURcULAI8Ky2IvCTr9tK9sO1rYk,1844
+apscheduler/triggers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+apscheduler/triggers/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/triggers/__pycache__/base.cpython-36.pyc,,
+apscheduler/triggers/__pycache__/combining.cpython-36.pyc,,
+apscheduler/triggers/__pycache__/date.cpython-36.pyc,,
+apscheduler/triggers/__pycache__/interval.cpython-36.pyc,,
+apscheduler/triggers/base.py,sha256=WMo5f2g14fjO5VzpIxFQtk47Z9VEUDDPSxjoPL9FGSQ,1837
+apscheduler/triggers/combining.py,sha256=WTEnaEkBHysF1009sCvBaQa99hiy9l5Oz-hHyjy3jv8,3473
+apscheduler/triggers/cron/__init__.py,sha256=a8ASzvM7ci-djOI2jIL2XErL6zEx4Wr1012aD1XJw_w,9246
+apscheduler/triggers/cron/__pycache__/__init__.cpython-36.pyc,,
+apscheduler/triggers/cron/__pycache__/expressions.cpython-36.pyc,,
+apscheduler/triggers/cron/__pycache__/fields.cpython-36.pyc,,
+apscheduler/triggers/cron/expressions.py,sha256=hu1kq0mKvivIw7U0D0Nnrbuk3q01dCuhZ7SHRPw6qhI,9184
+apscheduler/triggers/cron/fields.py,sha256=NWPClh1NgSOpTlJ3sm1TXM_ViC2qJGKWkd_vg0xsw7o,3510
+apscheduler/triggers/date.py,sha256=RrfB1PNO9G9e91p1BOf-y_TseVHQQR-KJPhNdPpAHcU,1705
+apscheduler/triggers/interval.py,sha256=LiIunGOd96yaiAceG1XGP8eY3JxSyHDWCipVhQWMzDU,4381
+apscheduler/util.py,sha256=bQLVYP-RHtjypxol40a_JPT1Ta9BYSlTNdsDTc7dNMU,13963
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/WHEEL b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/WHEEL
new file mode 100644
index 00000000..8b701e93
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/entry_points.txt b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/entry_points.txt
new file mode 100644
index 00000000..d2bd62b9
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/entry_points.txt
@@ -0,0 +1,24 @@
+[apscheduler.executors]
+asyncio = apscheduler.executors.asyncio:AsyncIOExecutor [asyncio]
+debug = apscheduler.executors.debug:DebugExecutor
+gevent = apscheduler.executors.gevent:GeventExecutor [gevent]
+processpool = apscheduler.executors.pool:ProcessPoolExecutor
+threadpool = apscheduler.executors.pool:ThreadPoolExecutor
+tornado = apscheduler.executors.tornado:TornadoExecutor [tornado]
+twisted = apscheduler.executors.twisted:TwistedExecutor [twisted]
+
+[apscheduler.jobstores]
+memory = apscheduler.jobstores.memory:MemoryJobStore
+mongodb = apscheduler.jobstores.mongodb:MongoDBJobStore [mongodb]
+redis = apscheduler.jobstores.redis:RedisJobStore [redis]
+rethinkdb = apscheduler.jobstores.rethinkdb:RethinkDBJobStore [rethinkdb]
+sqlalchemy = apscheduler.jobstores.sqlalchemy:SQLAlchemyJobStore [sqlalchemy]
+zookeeper = apscheduler.jobstores.zookeeper:ZooKeeperJobStore [zookeeper]
+
+[apscheduler.triggers]
+and = apscheduler.triggers.combining:AndTrigger
+cron = apscheduler.triggers.cron:CronTrigger
+date = apscheduler.triggers.date:DateTrigger
+interval = apscheduler.triggers.interval:IntervalTrigger
+or = apscheduler.triggers.combining:OrTrigger
+
diff --git a/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/top_level.txt b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/top_level.txt
new file mode 100644
index 00000000..d31d10dd
--- /dev/null
+++ b/venv/Lib/site-packages/APScheduler-3.6.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+apscheduler
diff --git a/venv/Lib/site-packages/apscheduler/__init__.py b/venv/Lib/site-packages/apscheduler/__init__.py
new file mode 100644
index 00000000..968169a9
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/__init__.py
@@ -0,0 +1,10 @@
+from pkg_resources import get_distribution, DistributionNotFound
+
+try:
+ release = get_distribution('APScheduler').version.split('-')[0]
+except DistributionNotFound:
+ release = '3.5.0'
+
+version_info = tuple(int(x) if x.isdigit() else x for x in release.split('.'))
+version = __version__ = '.'.join(str(x) for x in version_info[:3])
+del get_distribution, DistributionNotFound
diff --git a/venv/Lib/site-packages/apscheduler/events.py b/venv/Lib/site-packages/apscheduler/events.py
new file mode 100644
index 00000000..016da03c
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/events.py
@@ -0,0 +1,94 @@
+__all__ = ('EVENT_SCHEDULER_STARTED', 'EVENT_SCHEDULER_SHUTDOWN', 'EVENT_SCHEDULER_PAUSED',
+ 'EVENT_SCHEDULER_RESUMED', 'EVENT_EXECUTOR_ADDED', 'EVENT_EXECUTOR_REMOVED',
+ 'EVENT_JOBSTORE_ADDED', 'EVENT_JOBSTORE_REMOVED', 'EVENT_ALL_JOBS_REMOVED',
+ 'EVENT_JOB_ADDED', 'EVENT_JOB_REMOVED', 'EVENT_JOB_MODIFIED', 'EVENT_JOB_EXECUTED',
+ 'EVENT_JOB_ERROR', 'EVENT_JOB_MISSED', 'EVENT_JOB_SUBMITTED', 'EVENT_JOB_MAX_INSTANCES',
+ 'SchedulerEvent', 'JobEvent', 'JobExecutionEvent', 'JobSubmissionEvent')
+
+
+EVENT_SCHEDULER_STARTED = EVENT_SCHEDULER_START = 2 ** 0
+EVENT_SCHEDULER_SHUTDOWN = 2 ** 1
+EVENT_SCHEDULER_PAUSED = 2 ** 2
+EVENT_SCHEDULER_RESUMED = 2 ** 3
+EVENT_EXECUTOR_ADDED = 2 ** 4
+EVENT_EXECUTOR_REMOVED = 2 ** 5
+EVENT_JOBSTORE_ADDED = 2 ** 6
+EVENT_JOBSTORE_REMOVED = 2 ** 7
+EVENT_ALL_JOBS_REMOVED = 2 ** 8
+EVENT_JOB_ADDED = 2 ** 9
+EVENT_JOB_REMOVED = 2 ** 10
+EVENT_JOB_MODIFIED = 2 ** 11
+EVENT_JOB_EXECUTED = 2 ** 12
+EVENT_JOB_ERROR = 2 ** 13
+EVENT_JOB_MISSED = 2 ** 14
+EVENT_JOB_SUBMITTED = 2 ** 15
+EVENT_JOB_MAX_INSTANCES = 2 ** 16
+EVENT_ALL = (EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_PAUSED |
+ EVENT_SCHEDULER_RESUMED | EVENT_EXECUTOR_ADDED | EVENT_EXECUTOR_REMOVED |
+ EVENT_JOBSTORE_ADDED | EVENT_JOBSTORE_REMOVED | EVENT_ALL_JOBS_REMOVED |
+ EVENT_JOB_ADDED | EVENT_JOB_REMOVED | EVENT_JOB_MODIFIED | EVENT_JOB_EXECUTED |
+ EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_SUBMITTED | EVENT_JOB_MAX_INSTANCES)
+
+
+class SchedulerEvent(object):
+ """
+ An event that concerns the scheduler itself.
+
+ :ivar code: the type code of this event
+ :ivar alias: alias of the job store or executor that was added or removed (if applicable)
+ """
+
+ def __init__(self, code, alias=None):
+ super(SchedulerEvent, self).__init__()
+ self.code = code
+ self.alias = alias
+
+ def __repr__(self):
+ return '<%s (code=%d)>' % (self.__class__.__name__, self.code)
+
+
+class JobEvent(SchedulerEvent):
+ """
+ An event that concerns a job.
+
+ :ivar code: the type code of this event
+ :ivar job_id: identifier of the job in question
+ :ivar jobstore: alias of the job store containing the job in question
+ """
+
+ def __init__(self, code, job_id, jobstore):
+ super(JobEvent, self).__init__(code)
+ self.code = code
+ self.job_id = job_id
+ self.jobstore = jobstore
+
+
+class JobSubmissionEvent(JobEvent):
+ """
+ An event that concerns the submission of a job to its executor.
+
+ :ivar scheduled_run_times: a list of datetimes when the job was intended to run
+ """
+
+ def __init__(self, code, job_id, jobstore, scheduled_run_times):
+ super(JobSubmissionEvent, self).__init__(code, job_id, jobstore)
+ self.scheduled_run_times = scheduled_run_times
+
+
+class JobExecutionEvent(JobEvent):
+ """
+ An event that concerns the running of a job within its executor.
+
+ :ivar scheduled_run_time: the time when the job was scheduled to be run
+ :ivar retval: the return value of the successfully executed job
+ :ivar exception: the exception raised by the job
+ :ivar traceback: a formatted traceback for the exception
+ """
+
+ def __init__(self, code, job_id, jobstore, scheduled_run_time, retval=None, exception=None,
+ traceback=None):
+ super(JobExecutionEvent, self).__init__(code, job_id, jobstore)
+ self.scheduled_run_time = scheduled_run_time
+ self.retval = retval
+ self.exception = exception
+ self.traceback = traceback
diff --git a/venv/Lib/site-packages/apscheduler/executors/__init__.py b/venv/Lib/site-packages/apscheduler/executors/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/apscheduler/executors/asyncio.py b/venv/Lib/site-packages/apscheduler/executors/asyncio.py
new file mode 100644
index 00000000..06fc7f96
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/asyncio.py
@@ -0,0 +1,59 @@
+from __future__ import absolute_import
+
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_job
+from apscheduler.util import iscoroutinefunction_partial
+
+try:
+ from apscheduler.executors.base_py3 import run_coroutine_job
+except ImportError:
+ run_coroutine_job = None
+
+
+class AsyncIOExecutor(BaseExecutor):
+ """
+ Runs jobs in the default executor of the event loop.
+
+ If the job function is a native coroutine function, it is scheduled to be run directly in the
+ event loop as soon as possible. All other functions are run in the event loop's default
+ executor which is usually a thread pool.
+
+ Plugin alias: ``asyncio``
+ """
+
+ def start(self, scheduler, alias):
+ super(AsyncIOExecutor, self).start(scheduler, alias)
+ self._eventloop = scheduler._eventloop
+ self._pending_futures = set()
+
+ def shutdown(self, wait=True):
+ # There is no way to honor wait=True without converting this method into a coroutine method
+ for f in self._pending_futures:
+ if not f.done():
+ f.cancel()
+
+ self._pending_futures.clear()
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ self._pending_futures.discard(f)
+ try:
+ events = f.result()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ if iscoroutinefunction_partial(job.func):
+ if run_coroutine_job is not None:
+ coro = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
+ f = self._eventloop.create_task(coro)
+ else:
+ raise Exception('Executing coroutine based jobs is not supported with Trollius')
+ else:
+ f = self._eventloop.run_in_executor(None, run_job, job, job._jobstore_alias, run_times,
+ self._logger.name)
+
+ f.add_done_callback(callback)
+ self._pending_futures.add(f)
diff --git a/venv/Lib/site-packages/apscheduler/executors/base.py b/venv/Lib/site-packages/apscheduler/executors/base.py
new file mode 100644
index 00000000..4c09fc11
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/base.py
@@ -0,0 +1,146 @@
+from abc import ABCMeta, abstractmethod
+from collections import defaultdict
+from datetime import datetime, timedelta
+from traceback import format_tb
+import logging
+import sys
+
+from pytz import utc
+import six
+
+from apscheduler.events import (
+ JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)
+
+
+class MaxInstancesReachedError(Exception):
+ def __init__(self, job):
+ super(MaxInstancesReachedError, self).__init__(
+ 'Job "%s" has already reached its maximum number of instances (%d)' %
+ (job.id, job.max_instances))
+
+
+class BaseExecutor(six.with_metaclass(ABCMeta, object)):
+ """Abstract base class that defines the interface that every executor must implement."""
+
+ _scheduler = None
+ _lock = None
+ _logger = logging.getLogger('apscheduler.executors')
+
+ def __init__(self):
+ super(BaseExecutor, self).__init__()
+ self._instances = defaultdict(lambda: 0)
+
+ def start(self, scheduler, alias):
+ """
+ Called by the scheduler when the scheduler is being started or when the executor is being
+ added to an already running scheduler.
+
+ :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
+ this executor
+ :param str|unicode alias: alias of this executor as it was assigned to the scheduler
+
+ """
+ self._scheduler = scheduler
+ self._lock = scheduler._create_lock()
+ self._logger = logging.getLogger('apscheduler.executors.%s' % alias)
+
+ def shutdown(self, wait=True):
+ """
+ Shuts down this executor.
+
+ :param bool wait: ``True`` to wait until all submitted jobs
+ have been executed
+ """
+
+ def submit_job(self, job, run_times):
+ """
+ Submits job for execution.
+
+ :param Job job: job to execute
+ :param list[datetime] run_times: list of datetimes specifying
+ when the job should have been run
+ :raises MaxInstancesReachedError: if the maximum number of
+ allowed instances for this job has been reached
+
+ """
+ assert self._lock is not None, 'This executor has not been started yet'
+ with self._lock:
+ if self._instances[job.id] >= job.max_instances:
+ raise MaxInstancesReachedError(job)
+
+ self._do_submit_job(job, run_times)
+ self._instances[job.id] += 1
+
+ @abstractmethod
+ def _do_submit_job(self, job, run_times):
+ """Performs the actual task of scheduling `run_job` to be called."""
+
+ def _run_job_success(self, job_id, events):
+ """
+ Called by the executor with the list of generated events when :func:`run_job` has been
+ successfully called.
+
+ """
+ with self._lock:
+ self._instances[job_id] -= 1
+ if self._instances[job_id] == 0:
+ del self._instances[job_id]
+
+ for event in events:
+ self._scheduler._dispatch_event(event)
+
+ def _run_job_error(self, job_id, exc, traceback=None):
+ """Called by the executor with the exception if there is an error calling `run_job`."""
+ with self._lock:
+ self._instances[job_id] -= 1
+ if self._instances[job_id] == 0:
+ del self._instances[job_id]
+
+ exc_info = (exc.__class__, exc, traceback)
+ self._logger.error('Error running job %s', job_id, exc_info=exc_info)
+
+
+def run_job(job, jobstore_alias, run_times, logger_name):
+ """
+ Called by executors to run the job. Returns a list of scheduler events to be dispatched by the
+ scheduler.
+
+ """
+ events = []
+ logger = logging.getLogger(logger_name)
+ for run_time in run_times:
+ # See if the job missed its run time window, and handle
+ # possible misfires accordingly
+ if job.misfire_grace_time is not None:
+ difference = datetime.now(utc) - run_time
+ grace_time = timedelta(seconds=job.misfire_grace_time)
+ if difference > grace_time:
+ events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
+ run_time))
+ logger.warning('Run time of job "%s" was missed by %s', job, difference)
+ continue
+
+ logger.info('Running job "%s" (scheduled at %s)', job, run_time)
+ try:
+ retval = job.func(*job.args, **job.kwargs)
+ except BaseException:
+ exc, tb = sys.exc_info()[1:]
+ formatted_tb = ''.join(format_tb(tb))
+ events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
+ exception=exc, traceback=formatted_tb))
+ logger.exception('Job "%s" raised an exception', job)
+
+ # This is to prevent cyclic references that would lead to memory leaks
+ if six.PY2:
+ sys.exc_clear()
+ del tb
+ else:
+ import traceback
+ traceback.clear_frames(tb)
+ del tb
+ else:
+ events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
+ retval=retval))
+ logger.info('Job "%s" executed successfully', job)
+
+ return events
diff --git a/venv/Lib/site-packages/apscheduler/executors/base_py3.py b/venv/Lib/site-packages/apscheduler/executors/base_py3.py
new file mode 100644
index 00000000..61abd842
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/base_py3.py
@@ -0,0 +1,41 @@
+import logging
+import sys
+from datetime import datetime, timedelta
+from traceback import format_tb
+
+from pytz import utc
+
+from apscheduler.events import (
+ JobExecutionEvent, EVENT_JOB_MISSED, EVENT_JOB_ERROR, EVENT_JOB_EXECUTED)
+
+
+async def run_coroutine_job(job, jobstore_alias, run_times, logger_name):
+ """Coroutine version of run_job()."""
+ events = []
+ logger = logging.getLogger(logger_name)
+ for run_time in run_times:
+ # See if the job missed its run time window, and handle possible misfires accordingly
+ if job.misfire_grace_time is not None:
+ difference = datetime.now(utc) - run_time
+ grace_time = timedelta(seconds=job.misfire_grace_time)
+ if difference > grace_time:
+ events.append(JobExecutionEvent(EVENT_JOB_MISSED, job.id, jobstore_alias,
+ run_time))
+ logger.warning('Run time of job "%s" was missed by %s', job, difference)
+ continue
+
+ logger.info('Running job "%s" (scheduled at %s)', job, run_time)
+ try:
+ retval = await job.func(*job.args, **job.kwargs)
+ except BaseException:
+ exc, tb = sys.exc_info()[1:]
+ formatted_tb = ''.join(format_tb(tb))
+ events.append(JobExecutionEvent(EVENT_JOB_ERROR, job.id, jobstore_alias, run_time,
+ exception=exc, traceback=formatted_tb))
+ logger.exception('Job "%s" raised an exception', job)
+ else:
+ events.append(JobExecutionEvent(EVENT_JOB_EXECUTED, job.id, jobstore_alias, run_time,
+ retval=retval))
+ logger.info('Job "%s" executed successfully', job)
+
+ return events
diff --git a/venv/Lib/site-packages/apscheduler/executors/debug.py b/venv/Lib/site-packages/apscheduler/executors/debug.py
new file mode 100644
index 00000000..ac739aeb
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/debug.py
@@ -0,0 +1,20 @@
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class DebugExecutor(BaseExecutor):
+ """
+ A special executor that executes the target callable directly instead of deferring it to a
+ thread or process.
+
+ Plugin alias: ``debug``
+ """
+
+ def _do_submit_job(self, job, run_times):
+ try:
+ events = run_job(job, job._jobstore_alias, run_times, self._logger.name)
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
diff --git a/venv/Lib/site-packages/apscheduler/executors/gevent.py b/venv/Lib/site-packages/apscheduler/executors/gevent.py
new file mode 100644
index 00000000..1235bb6e
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/gevent.py
@@ -0,0 +1,30 @@
+from __future__ import absolute_import
+import sys
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+try:
+ import gevent
+except ImportError: # pragma: nocover
+ raise ImportError('GeventExecutor requires gevent installed')
+
+
+class GeventExecutor(BaseExecutor):
+ """
+ Runs jobs as greenlets.
+
+ Plugin alias: ``gevent``
+ """
+
+ def _do_submit_job(self, job, run_times):
+ def callback(greenlet):
+ try:
+ events = greenlet.get()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\
+ link(callback)
diff --git a/venv/Lib/site-packages/apscheduler/executors/pool.py b/venv/Lib/site-packages/apscheduler/executors/pool.py
new file mode 100644
index 00000000..2f4ef455
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/pool.py
@@ -0,0 +1,54 @@
+from abc import abstractmethod
+import concurrent.futures
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class BasePoolExecutor(BaseExecutor):
+ @abstractmethod
+ def __init__(self, pool):
+ super(BasePoolExecutor, self).__init__()
+ self._pool = pool
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ exc, tb = (f.exception_info() if hasattr(f, 'exception_info') else
+ (f.exception(), getattr(f.exception(), '__traceback__', None)))
+ if exc:
+ self._run_job_error(job.id, exc, tb)
+ else:
+ self._run_job_success(job.id, f.result())
+
+ f = self._pool.submit(run_job, job, job._jobstore_alias, run_times, self._logger.name)
+ f.add_done_callback(callback)
+
+ def shutdown(self, wait=True):
+ self._pool.shutdown(wait)
+
+
+class ThreadPoolExecutor(BasePoolExecutor):
+ """
+ An executor that runs jobs in a concurrent.futures thread pool.
+
+ Plugin alias: ``threadpool``
+
+ :param max_workers: the maximum number of spawned threads.
+ """
+
+ def __init__(self, max_workers=10):
+ pool = concurrent.futures.ThreadPoolExecutor(int(max_workers))
+ super(ThreadPoolExecutor, self).__init__(pool)
+
+
+class ProcessPoolExecutor(BasePoolExecutor):
+ """
+ An executor that runs jobs in a concurrent.futures process pool.
+
+ Plugin alias: ``processpool``
+
+ :param max_workers: the maximum number of spawned processes.
+ """
+
+ def __init__(self, max_workers=10):
+ pool = concurrent.futures.ProcessPoolExecutor(int(max_workers))
+ super(ProcessPoolExecutor, self).__init__(pool)
diff --git a/venv/Lib/site-packages/apscheduler/executors/tornado.py b/venv/Lib/site-packages/apscheduler/executors/tornado.py
new file mode 100644
index 00000000..3b97eec9
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/tornado.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+
+import sys
+from concurrent.futures import ThreadPoolExecutor
+
+from tornado.gen import convert_yielded
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+try:
+ from apscheduler.executors.base_py3 import run_coroutine_job
+ from apscheduler.util import iscoroutinefunction_partial
+except ImportError:
+ def iscoroutinefunction_partial(func):
+ return False
+
+
+class TornadoExecutor(BaseExecutor):
+ """
+ Runs jobs either in a thread pool or directly on the I/O loop.
+
+ If the job function is a native coroutine function, it is scheduled to be run directly in the
+ I/O loop as soon as possible. All other functions are run in a thread pool.
+
+ Plugin alias: ``tornado``
+
+ :param int max_workers: maximum number of worker threads in the thread pool
+ """
+
+ def __init__(self, max_workers=10):
+ super(TornadoExecutor, self).__init__()
+ self.executor = ThreadPoolExecutor(max_workers)
+
+ def start(self, scheduler, alias):
+ super(TornadoExecutor, self).start(scheduler, alias)
+ self._ioloop = scheduler._ioloop
+
+ def _do_submit_job(self, job, run_times):
+ def callback(f):
+ try:
+ events = f.result()
+ except BaseException:
+ self._run_job_error(job.id, *sys.exc_info()[1:])
+ else:
+ self._run_job_success(job.id, events)
+
+ if iscoroutinefunction_partial(job.func):
+ f = run_coroutine_job(job, job._jobstore_alias, run_times, self._logger.name)
+ else:
+ f = self.executor.submit(run_job, job, job._jobstore_alias, run_times,
+ self._logger.name)
+
+ f = convert_yielded(f)
+ f.add_done_callback(callback)
diff --git a/venv/Lib/site-packages/apscheduler/executors/twisted.py b/venv/Lib/site-packages/apscheduler/executors/twisted.py
new file mode 100644
index 00000000..c7bcf647
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/executors/twisted.py
@@ -0,0 +1,25 @@
+from __future__ import absolute_import
+
+from apscheduler.executors.base import BaseExecutor, run_job
+
+
+class TwistedExecutor(BaseExecutor):
+ """
+ Runs jobs in the reactor's thread pool.
+
+ Plugin alias: ``twisted``
+ """
+
+ def start(self, scheduler, alias):
+ super(TwistedExecutor, self).start(scheduler, alias)
+ self._reactor = scheduler._reactor
+
+ def _do_submit_job(self, job, run_times):
+ def callback(success, result):
+ if success:
+ self._run_job_success(job.id, result)
+ else:
+ self._run_job_error(job.id, result.value, result.tb)
+
+ self._reactor.getThreadPool().callInThreadWithCallback(
+ callback, run_job, job, job._jobstore_alias, run_times, self._logger.name)
diff --git a/venv/Lib/site-packages/apscheduler/job.py b/venv/Lib/site-packages/apscheduler/job.py
new file mode 100644
index 00000000..d676ca89
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/job.py
@@ -0,0 +1,301 @@
+from inspect import ismethod, isclass
+from uuid import uuid4
+
+import six
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ ref_to_obj, obj_to_ref, datetime_repr, repr_escape, get_callable_name, check_callable_args,
+ convert_to_datetime)
+
+try:
+ from collections.abc import Iterable, Mapping
+except ImportError:
+ from collections import Iterable, Mapping
+
+
+class Job(object):
+ """
+ Contains the options given when scheduling callables and its current schedule and other state.
+ This class should never be instantiated by the user.
+
+ :var str id: the unique identifier of this job
+ :var str name: the description of this job
+ :var func: the callable to execute
+ :var tuple|list args: positional arguments to the callable
+ :var dict kwargs: keyword arguments to the callable
+ :var bool coalesce: whether to only run the job once when several run times are due
+ :var trigger: the trigger object that controls the schedule of this job
+ :var str executor: the name of the executor that will run this job
+ :var int misfire_grace_time: the time (in seconds) how much this job's execution is allowed to
+ be late
+ :var int max_instances: the maximum number of concurrently executing instances allowed for this
+ job
+ :var datetime.datetime next_run_time: the next scheduled run time of this job
+
+ .. note::
+ The ``misfire_grace_time`` has some non-obvious effects on job execution. See the
+ :ref:`missed-job-executions` section in the documentation for an in-depth explanation.
+ """
+
+ __slots__ = ('_scheduler', '_jobstore_alias', 'id', 'trigger', 'executor', 'func', 'func_ref',
+ 'args', 'kwargs', 'name', 'misfire_grace_time', 'coalesce', 'max_instances',
+ 'next_run_time')
+
+ def __init__(self, scheduler, id=None, **kwargs):
+ super(Job, self).__init__()
+ self._scheduler = scheduler
+ self._jobstore_alias = None
+ self._modify(id=id or uuid4().hex, **kwargs)
+
+ def modify(self, **changes):
+ """
+ Makes the given changes to this job and saves it in the associated job store.
+
+ Accepted keyword arguments are the same as the variables on this class.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.modify_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.modify_job(self.id, self._jobstore_alias, **changes)
+ return self
+
+ def reschedule(self, trigger, **trigger_args):
+ """
+ Shortcut for switching the trigger on this job.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.reschedule_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.reschedule_job(self.id, self._jobstore_alias, trigger, **trigger_args)
+ return self
+
+ def pause(self):
+ """
+ Temporarily suspend the execution of this job.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.pause_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.pause_job(self.id, self._jobstore_alias)
+ return self
+
+ def resume(self):
+ """
+ Resume the schedule of this job if previously paused.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.resume_job`
+
+ :return Job: this job instance
+
+ """
+ self._scheduler.resume_job(self.id, self._jobstore_alias)
+ return self
+
+ def remove(self):
+ """
+ Unschedules this job and removes it from its associated job store.
+
+ .. seealso:: :meth:`~apscheduler.schedulers.base.BaseScheduler.remove_job`
+
+ """
+ self._scheduler.remove_job(self.id, self._jobstore_alias)
+
+ @property
+ def pending(self):
+ """
+ Returns ``True`` if the referenced job is still waiting to be added to its designated job
+ store.
+
+ """
+ return self._jobstore_alias is None
+
+ #
+ # Private API
+ #
+
+ def _get_run_times(self, now):
+ """
+ Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive).
+
+ :type now: datetime.datetime
+ :rtype: list[datetime.datetime]
+
+ """
+ run_times = []
+ next_run_time = self.next_run_time
+ while next_run_time and next_run_time <= now:
+ run_times.append(next_run_time)
+ next_run_time = self.trigger.get_next_fire_time(next_run_time, now)
+
+ return run_times
+
+ def _modify(self, **changes):
+ """
+ Validates the changes to the Job and makes the modifications if and only if all of them
+ validate.
+
+ """
+ approved = {}
+
+ if 'id' in changes:
+ value = changes.pop('id')
+ if not isinstance(value, six.string_types):
+ raise TypeError("id must be a nonempty string")
+ if hasattr(self, 'id'):
+ raise ValueError('The job ID may not be changed')
+ approved['id'] = value
+
+ if 'func' in changes or 'args' in changes or 'kwargs' in changes:
+ func = changes.pop('func') if 'func' in changes else self.func
+ args = changes.pop('args') if 'args' in changes else self.args
+ kwargs = changes.pop('kwargs') if 'kwargs' in changes else self.kwargs
+
+ if isinstance(func, six.string_types):
+ func_ref = func
+ func = ref_to_obj(func)
+ elif callable(func):
+ try:
+ func_ref = obj_to_ref(func)
+ except ValueError:
+ # If this happens, this Job won't be serializable
+ func_ref = None
+ else:
+ raise TypeError('func must be a callable or a textual reference to one')
+
+ if not hasattr(self, 'name') and changes.get('name', None) is None:
+ changes['name'] = get_callable_name(func)
+
+ if isinstance(args, six.string_types) or not isinstance(args, Iterable):
+ raise TypeError('args must be a non-string iterable')
+ if isinstance(kwargs, six.string_types) or not isinstance(kwargs, Mapping):
+ raise TypeError('kwargs must be a dict-like object')
+
+ check_callable_args(func, args, kwargs)
+
+ approved['func'] = func
+ approved['func_ref'] = func_ref
+ approved['args'] = args
+ approved['kwargs'] = kwargs
+
+ if 'name' in changes:
+ value = changes.pop('name')
+ if not value or not isinstance(value, six.string_types):
+ raise TypeError("name must be a nonempty string")
+ approved['name'] = value
+
+ if 'misfire_grace_time' in changes:
+ value = changes.pop('misfire_grace_time')
+ if value is not None and (not isinstance(value, six.integer_types) or value <= 0):
+ raise TypeError('misfire_grace_time must be either None or a positive integer')
+ approved['misfire_grace_time'] = value
+
+ if 'coalesce' in changes:
+ value = bool(changes.pop('coalesce'))
+ approved['coalesce'] = value
+
+ if 'max_instances' in changes:
+ value = changes.pop('max_instances')
+ if not isinstance(value, six.integer_types) or value <= 0:
+ raise TypeError('max_instances must be a positive integer')
+ approved['max_instances'] = value
+
+ if 'trigger' in changes:
+ trigger = changes.pop('trigger')
+ if not isinstance(trigger, BaseTrigger):
+ raise TypeError('Expected a trigger instance, got %s instead' %
+ trigger.__class__.__name__)
+
+ approved['trigger'] = trigger
+
+ if 'executor' in changes:
+ value = changes.pop('executor')
+ if not isinstance(value, six.string_types):
+ raise TypeError('executor must be a string')
+ approved['executor'] = value
+
+ if 'next_run_time' in changes:
+ value = changes.pop('next_run_time')
+ approved['next_run_time'] = convert_to_datetime(value, self._scheduler.timezone,
+ 'next_run_time')
+
+ if changes:
+ raise AttributeError('The following are not modifiable attributes of Job: %s' %
+ ', '.join(changes))
+
+ for key, value in six.iteritems(approved):
+ setattr(self, key, value)
+
+ def __getstate__(self):
+ # Don't allow this Job to be serialized if the function reference could not be determined
+ if not self.func_ref:
+ raise ValueError(
+ 'This Job cannot be serialized since the reference to its callable (%r) could not '
+ 'be determined. Consider giving a textual reference (module:function name) '
+ 'instead.' % (self.func,))
+
+ # Instance methods cannot survive serialization as-is, so store the "self" argument
+ # explicitly
+ if ismethod(self.func) and not isclass(self.func.__self__):
+ args = (self.func.__self__,) + tuple(self.args)
+ else:
+ args = self.args
+
+ return {
+ 'version': 1,
+ 'id': self.id,
+ 'func': self.func_ref,
+ 'trigger': self.trigger,
+ 'executor': self.executor,
+ 'args': args,
+ 'kwargs': self.kwargs,
+ 'name': self.name,
+ 'misfire_grace_time': self.misfire_grace_time,
+ 'coalesce': self.coalesce,
+ 'max_instances': self.max_instances,
+ 'next_run_time': self.next_run_time
+ }
+
+ def __setstate__(self, state):
+ if state.get('version', 1) > 1:
+ raise ValueError('Job has version %s, but only version 1 can be handled' %
+ state['version'])
+
+ self.id = state['id']
+ self.func_ref = state['func']
+ self.func = ref_to_obj(self.func_ref)
+ self.trigger = state['trigger']
+ self.executor = state['executor']
+ self.args = state['args']
+ self.kwargs = state['kwargs']
+ self.name = state['name']
+ self.misfire_grace_time = state['misfire_grace_time']
+ self.coalesce = state['coalesce']
+ self.max_instances = state['max_instances']
+ self.next_run_time = state['next_run_time']
+
+ def __eq__(self, other):
+ if isinstance(other, Job):
+ return self.id == other.id
+ return NotImplemented
+
+ def __repr__(self):
+        return '<Job (id=%s name=%s)>' % (repr_escape(self.id), repr_escape(self.name))
+
+ def __str__(self):
+ return repr_escape(self.__unicode__())
+
+ def __unicode__(self):
+ if hasattr(self, 'next_run_time'):
+ status = ('next run at: ' + datetime_repr(self.next_run_time) if
+ self.next_run_time else 'paused')
+ else:
+ status = 'pending'
+
+ return u'%s (trigger: %s, %s)' % (self.name, self.trigger, status)
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/__init__.py b/venv/Lib/site-packages/apscheduler/jobstores/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/base.py b/venv/Lib/site-packages/apscheduler/jobstores/base.py
new file mode 100644
index 00000000..9cff66c4
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/base.py
@@ -0,0 +1,143 @@
+from abc import ABCMeta, abstractmethod
+import logging
+
+import six
+
+
+class JobLookupError(KeyError):
+ """Raised when the job store cannot find a job for update or removal."""
+
+ def __init__(self, job_id):
+ super(JobLookupError, self).__init__(u'No job by the id of %s was found' % job_id)
+
+
+class ConflictingIdError(KeyError):
+ """Raised when the uniqueness of job IDs is being violated."""
+
+ def __init__(self, job_id):
+ super(ConflictingIdError, self).__init__(
+ u'Job identifier (%s) conflicts with an existing job' % job_id)
+
+
+class TransientJobError(ValueError):
+ """
+ Raised when an attempt to add transient (with no func_ref) job to a persistent job store is
+ detected.
+ """
+
+ def __init__(self, job_id):
+ super(TransientJobError, self).__init__(
+ u'Job (%s) cannot be added to this job store because a reference to the callable '
+ u'could not be determined.' % job_id)
+
+
+class BaseJobStore(six.with_metaclass(ABCMeta)):
+ """Abstract base class that defines the interface that every job store must implement."""
+
+ _scheduler = None
+ _alias = None
+ _logger = logging.getLogger('apscheduler.jobstores')
+
+ def start(self, scheduler, alias):
+ """
+ Called by the scheduler when the scheduler is being started or when the job store is being
+ added to an already running scheduler.
+
+ :param apscheduler.schedulers.base.BaseScheduler scheduler: the scheduler that is starting
+ this job store
+ :param str|unicode alias: alias of this job store as it was assigned to the scheduler
+ """
+
+ self._scheduler = scheduler
+ self._alias = alias
+ self._logger = logging.getLogger('apscheduler.jobstores.%s' % alias)
+
+ def shutdown(self):
+ """Frees any resources still bound to this job store."""
+
+ def _fix_paused_jobs_sorting(self, jobs):
+ for i, job in enumerate(jobs):
+ if job.next_run_time is not None:
+ if i > 0:
+ paused_jobs = jobs[:i]
+ del jobs[:i]
+ jobs.extend(paused_jobs)
+ break
+
+ @abstractmethod
+ def lookup_job(self, job_id):
+ """
+ Returns a specific job, or ``None`` if it isn't found..
+
+ The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
+ the returned job to point to the scheduler and itself, respectively.
+
+ :param str|unicode job_id: identifier of the job
+ :rtype: Job
+ """
+
+ @abstractmethod
+ def get_due_jobs(self, now):
+ """
+ Returns the list of jobs that have ``next_run_time`` earlier or equal to ``now``.
+ The returned jobs must be sorted by next run time (ascending).
+
+ :param datetime.datetime now: the current (timezone aware) datetime
+ :rtype: list[Job]
+ """
+
+ @abstractmethod
+ def get_next_run_time(self):
+ """
+ Returns the earliest run time of all the jobs stored in this job store, or ``None`` if
+ there are no active jobs.
+
+ :rtype: datetime.datetime
+ """
+
+ @abstractmethod
+ def get_all_jobs(self):
+ """
+ Returns a list of all jobs in this job store.
+ The returned jobs should be sorted by next run time (ascending).
+ Paused jobs (next_run_time == None) should be sorted last.
+
+ The job store is responsible for setting the ``scheduler`` and ``jobstore`` attributes of
+ the returned jobs to point to the scheduler and itself, respectively.
+
+ :rtype: list[Job]
+ """
+
+ @abstractmethod
+ def add_job(self, job):
+ """
+ Adds the given job to this store.
+
+ :param Job job: the job to add
+ :raises ConflictingIdError: if there is another job in this store with the same ID
+ """
+
+ @abstractmethod
+ def update_job(self, job):
+ """
+ Replaces the job in the store with the given newer version.
+
+ :param Job job: the job to update
+ :raises JobLookupError: if the job does not exist
+ """
+
+ @abstractmethod
+ def remove_job(self, job_id):
+ """
+ Removes the given job from this store.
+
+ :param str|unicode job_id: identifier of the job
+ :raises JobLookupError: if the job does not exist
+ """
+
+ @abstractmethod
+ def remove_all_jobs(self):
+ """Removes all jobs from this store."""
+
+ def __repr__(self):
+ return '<%s>' % self.__class__.__name__
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/memory.py b/venv/Lib/site-packages/apscheduler/jobstores/memory.py
new file mode 100644
index 00000000..abfe7c6c
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/memory.py
@@ -0,0 +1,108 @@
+from __future__ import absolute_import
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import datetime_to_utc_timestamp
+
+
+class MemoryJobStore(BaseJobStore):
+ """
+ Stores jobs in an array in RAM. Provides no persistence support.
+
+ Plugin alias: ``memory``
+ """
+
+ def __init__(self):
+ super(MemoryJobStore, self).__init__()
+ # list of (job, timestamp), sorted by next_run_time and job id (ascending)
+ self._jobs = []
+ self._jobs_index = {} # id -> (job, timestamp) lookup table
+
+ def lookup_job(self, job_id):
+ return self._jobs_index.get(job_id, (None, None))[0]
+
+ def get_due_jobs(self, now):
+ now_timestamp = datetime_to_utc_timestamp(now)
+ pending = []
+ for job, timestamp in self._jobs:
+ if timestamp is None or timestamp > now_timestamp:
+ break
+ pending.append(job)
+
+ return pending
+
+ def get_next_run_time(self):
+ return self._jobs[0][0].next_run_time if self._jobs else None
+
+ def get_all_jobs(self):
+ return [j[0] for j in self._jobs]
+
+ def add_job(self, job):
+ if job.id in self._jobs_index:
+ raise ConflictingIdError(job.id)
+
+ timestamp = datetime_to_utc_timestamp(job.next_run_time)
+ index = self._get_job_index(timestamp, job.id)
+ self._jobs.insert(index, (job, timestamp))
+ self._jobs_index[job.id] = (job, timestamp)
+
+ def update_job(self, job):
+ old_job, old_timestamp = self._jobs_index.get(job.id, (None, None))
+ if old_job is None:
+ raise JobLookupError(job.id)
+
+ # If the next run time has not changed, simply replace the job in its present index.
+ # Otherwise, reinsert the job to the list to preserve the ordering.
+ old_index = self._get_job_index(old_timestamp, old_job.id)
+ new_timestamp = datetime_to_utc_timestamp(job.next_run_time)
+ if old_timestamp == new_timestamp:
+ self._jobs[old_index] = (job, new_timestamp)
+ else:
+ del self._jobs[old_index]
+ new_index = self._get_job_index(new_timestamp, job.id)
+ self._jobs.insert(new_index, (job, new_timestamp))
+
+ self._jobs_index[old_job.id] = (job, new_timestamp)
+
+ def remove_job(self, job_id):
+ job, timestamp = self._jobs_index.get(job_id, (None, None))
+ if job is None:
+ raise JobLookupError(job_id)
+
+ index = self._get_job_index(timestamp, job_id)
+ del self._jobs[index]
+ del self._jobs_index[job.id]
+
+ def remove_all_jobs(self):
+ self._jobs = []
+ self._jobs_index = {}
+
+ def shutdown(self):
+ self.remove_all_jobs()
+
+ def _get_job_index(self, timestamp, job_id):
+ """
+ Returns the index of the given job, or if it's not found, the index where the job should be
+ inserted based on the given timestamp.
+
+ :type timestamp: int
+ :type job_id: str
+
+ """
+ lo, hi = 0, len(self._jobs)
+ timestamp = float('inf') if timestamp is None else timestamp
+ while lo < hi:
+ mid = (lo + hi) // 2
+ mid_job, mid_timestamp = self._jobs[mid]
+ mid_timestamp = float('inf') if mid_timestamp is None else mid_timestamp
+ if mid_timestamp > timestamp:
+ hi = mid
+ elif mid_timestamp < timestamp:
+ lo = mid + 1
+ elif mid_job.id > job_id:
+ hi = mid
+ elif mid_job.id < job_id:
+ lo = mid + 1
+ else:
+ return mid
+
+ return lo
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/mongodb.py b/venv/Lib/site-packages/apscheduler/jobstores/mongodb.py
new file mode 100644
index 00000000..7dbc3b12
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/mongodb.py
@@ -0,0 +1,141 @@
+from __future__ import absolute_import
+import warnings
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
+from apscheduler.job import Job
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: nocover
+ import pickle
+
+try:
+ from bson.binary import Binary
+ from pymongo.errors import DuplicateKeyError
+ from pymongo import MongoClient, ASCENDING
+except ImportError: # pragma: nocover
+ raise ImportError('MongoDBJobStore requires PyMongo installed')
+
+
+class MongoDBJobStore(BaseJobStore):
+ """
+ Stores jobs in a MongoDB database. Any leftover keyword arguments are directly passed to
+    pymongo's `MongoClient
+    <http://api.mongodb.org/python/current/api/pymongo/mongo_client.html#pymongo.mongo_client.MongoClient>`_.
+
+ Plugin alias: ``mongodb``
+
+ :param str database: database to store jobs in
+ :param str collection: collection to store jobs in
+ :param client: a :class:`~pymongo.mongo_client.MongoClient` instance to use instead of
+ providing connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(self, database='apscheduler', collection='jobs', client=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
+ super(MongoDBJobStore, self).__init__()
+ self.pickle_protocol = pickle_protocol
+
+ if not database:
+ raise ValueError('The "database" parameter must not be empty')
+ if not collection:
+ raise ValueError('The "collection" parameter must not be empty')
+
+ if client:
+ self.client = maybe_ref(client)
+ else:
+ connect_args.setdefault('w', 1)
+ self.client = MongoClient(**connect_args)
+
+ self.collection = self.client[database][collection]
+
+ def start(self, scheduler, alias):
+ super(MongoDBJobStore, self).start(scheduler, alias)
+ self.collection.ensure_index('next_run_time', sparse=True)
+
+ @property
+ def connection(self):
+ warnings.warn('The "connection" member is deprecated -- use "client" instead',
+ DeprecationWarning)
+ return self.client
+
+ def lookup_job(self, job_id):
+ document = self.collection.find_one(job_id, ['job_state'])
+ return self._reconstitute_job(document['job_state']) if document else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ return self._get_jobs({'next_run_time': {'$lte': timestamp}})
+
+ def get_next_run_time(self):
+ document = self.collection.find_one({'next_run_time': {'$ne': None}},
+ projection=['next_run_time'],
+ sort=[('next_run_time', ASCENDING)])
+ return utc_timestamp_to_datetime(document['next_run_time']) if document else None
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs({})
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ try:
+ self.collection.insert({
+ '_id': job.id,
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+ })
+ except DuplicateKeyError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ changes = {
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': Binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+ }
+ result = self.collection.update({'_id': job.id}, {'$set': changes})
+ if result and result['n'] == 0:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ result = self.collection.remove(job_id)
+ if result and result['n'] == 0:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ self.collection.remove()
+
+ def shutdown(self):
+ self.client.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, conditions):
+ jobs = []
+ failed_job_ids = []
+ for document in self.collection.find(conditions, ['_id', 'job_state'],
+ sort=[('next_run_time', ASCENDING)]):
+ try:
+ jobs.append(self._reconstitute_job(document['job_state']))
+ except BaseException:
+ self._logger.exception('Unable to restore job "%s" -- removing it',
+ document['_id'])
+ failed_job_ids.append(document['_id'])
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ self.collection.remove({'_id': {'$in': failed_job_ids}})
+
+ return jobs
+
+ def __repr__(self):
+ return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/redis.py b/venv/Lib/site-packages/apscheduler/jobstores/redis.py
new file mode 100644
index 00000000..5bb69d63
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/redis.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+from datetime import datetime
+
+from pytz import utc
+import six
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
+from apscheduler.job import Job
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: nocover
+ import pickle
+
+try:
+ from redis import Redis
+except ImportError: # pragma: nocover
+ raise ImportError('RedisJobStore requires redis installed')
+
+
+class RedisJobStore(BaseJobStore):
+ """
+ Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's
+ :class:`~redis.StrictRedis`.
+
+ Plugin alias: ``redis``
+
+ :param int db: the database number to store jobs in
+ :param str jobs_key: key to store jobs in
+ :param str run_times_key: key to store the jobs' run times in
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
+ pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
+ super(RedisJobStore, self).__init__()
+
+ if db is None:
+ raise ValueError('The "db" parameter must not be empty')
+ if not jobs_key:
+ raise ValueError('The "jobs_key" parameter must not be empty')
+ if not run_times_key:
+ raise ValueError('The "run_times_key" parameter must not be empty')
+
+ self.pickle_protocol = pickle_protocol
+ self.jobs_key = jobs_key
+ self.run_times_key = run_times_key
+ self.redis = Redis(db=int(db), **connect_args)
+
+ def lookup_job(self, job_id):
+ job_state = self.redis.hget(self.jobs_key, job_id)
+ return self._reconstitute_job(job_state) if job_state else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
+ if job_ids:
+ job_states = self.redis.hmget(self.jobs_key, *job_ids)
+ return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
+ return []
+
+ def get_next_run_time(self):
+ next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
+ if next_run_time:
+ return utc_timestamp_to_datetime(next_run_time[0][1])
+
+ def get_all_jobs(self):
+ job_states = self.redis.hgetall(self.jobs_key)
+ jobs = self._reconstitute_jobs(six.iteritems(job_states))
+ paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
+ return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
+
+ def add_job(self, job):
+ if self.redis.hexists(self.jobs_key, job.id):
+ raise ConflictingIdError(job.id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.multi()
+ pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
+ self.pickle_protocol))
+ if job.next_run_time:
+ pipe.zadd(self.run_times_key,
+ {job.id: datetime_to_utc_timestamp(job.next_run_time)})
+
+ pipe.execute()
+
+ def update_job(self, job):
+ if not self.redis.hexists(self.jobs_key, job.id):
+ raise JobLookupError(job.id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(),
+ self.pickle_protocol))
+ if job.next_run_time:
+ pipe.zadd(self.run_times_key,
+ {job.id: datetime_to_utc_timestamp(job.next_run_time)})
+ else:
+ pipe.zrem(self.run_times_key, job.id)
+
+ pipe.execute()
+
+ def remove_job(self, job_id):
+ if not self.redis.hexists(self.jobs_key, job_id):
+ raise JobLookupError(job_id)
+
+ with self.redis.pipeline() as pipe:
+ pipe.hdel(self.jobs_key, job_id)
+ pipe.zrem(self.run_times_key, job_id)
+ pipe.execute()
+
+ def remove_all_jobs(self):
+ with self.redis.pipeline() as pipe:
+ pipe.delete(self.jobs_key)
+ pipe.delete(self.run_times_key)
+ pipe.execute()
+
+ def shutdown(self):
+ self.redis.connection_pool.disconnect()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _reconstitute_jobs(self, job_states):
+ jobs = []
+ failed_job_ids = []
+ for job_id, job_state in job_states:
+ try:
+ jobs.append(self._reconstitute_job(job_state))
+ except BaseException:
+ self._logger.exception('Unable to restore job "%s" -- removing it', job_id)
+ failed_job_ids.append(job_id)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ with self.redis.pipeline() as pipe:
+ pipe.hdel(self.jobs_key, *failed_job_ids)
+ pipe.zrem(self.run_times_key, *failed_job_ids)
+ pipe.execute()
+
+ return jobs
+
+ def __repr__(self):
+ return '<%s>' % self.__class__.__name__
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/rethinkdb.py b/venv/Lib/site-packages/apscheduler/jobstores/rethinkdb.py
new file mode 100644
index 00000000..d8a78cde
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/rethinkdb.py
@@ -0,0 +1,155 @@
+from __future__ import absolute_import
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
+from apscheduler.job import Job
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: nocover
+ import pickle
+
+try:
+ from rethinkdb import RethinkDB
+except ImportError: # pragma: nocover
+ raise ImportError('RethinkDBJobStore requires rethinkdb installed')
+
+
+class RethinkDBJobStore(BaseJobStore):
+ """
+ Stores jobs in a RethinkDB database. Any leftover keyword arguments are directly passed to
+ rethinkdb's `RethinkdbClient `_.
+
+ Plugin alias: ``rethinkdb``
+
+ :param str database: database to store jobs in
+ :param str collection: collection to store jobs in
+ :param client: a :class:`rethinkdb.net.Connection` instance to use instead of providing
+ connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(self, database='apscheduler', table='jobs', client=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
+ super(RethinkDBJobStore, self).__init__()
+
+ if not database:
+ raise ValueError('The "database" parameter must not be empty')
+ if not table:
+ raise ValueError('The "table" parameter must not be empty')
+
+ self.database = database
+ self.table_name = table
+ self.table = None
+ self.client = client
+ self.pickle_protocol = pickle_protocol
+ self.connect_args = connect_args
+ self.r = RethinkDB()
+ self.conn = None
+
+ def start(self, scheduler, alias):
+ super(RethinkDBJobStore, self).start(scheduler, alias)
+
+ if self.client:
+ self.conn = maybe_ref(self.client)
+ else:
+ self.conn = self.r.connect(db=self.database, **self.connect_args)
+
+ if self.database not in self.r.db_list().run(self.conn):
+ self.r.db_create(self.database).run(self.conn)
+
+ if self.table_name not in self.r.table_list().run(self.conn):
+ self.r.table_create(self.table_name).run(self.conn)
+
+ if 'next_run_time' not in self.r.table(self.table_name).index_list().run(self.conn):
+ self.r.table(self.table_name).index_create('next_run_time').run(self.conn)
+
+ self.table = self.r.db(self.database).table(self.table_name)
+
+ def lookup_job(self, job_id):
+ results = list(self.table.get_all(job_id).pluck('job_state').run(self.conn))
+ return self._reconstitute_job(results[0]['job_state']) if results else None
+
+ def get_due_jobs(self, now):
+ return self._get_jobs(self.r.row['next_run_time'] <= datetime_to_utc_timestamp(now))
+
+ def get_next_run_time(self):
+ results = list(
+ self.table
+ .filter(self.r.row['next_run_time'] != None) # noqa
+ .order_by(self.r.asc('next_run_time'))
+ .map(lambda x: x['next_run_time'])
+ .limit(1)
+ .run(self.conn)
+ )
+ return utc_timestamp_to_datetime(results[0]) if results else None
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs()
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ job_dict = {
+ 'id': job.id,
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+ }
+ results = self.table.insert(job_dict).run(self.conn)
+ if results['errors'] > 0:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ changes = {
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': self.r.binary(pickle.dumps(job.__getstate__(), self.pickle_protocol))
+ }
+ results = self.table.get_all(job.id).update(changes).run(self.conn)
+ skipped = False in map(lambda x: results[x] == 0, results.keys())
+ if results['skipped'] > 0 or results['errors'] > 0 or not skipped:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ results = self.table.get_all(job_id).delete().run(self.conn)
+ if results['deleted'] + results['skipped'] != 1:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ self.table.delete().run(self.conn)
+
+ def shutdown(self):
+ self.conn.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, predicate=None):
+ jobs = []
+ failed_job_ids = []
+ query = (self.table.filter(self.r.row['next_run_time'] != None).filter(predicate) # noqa
+ if predicate else self.table)
+ query = query.order_by('next_run_time', 'id').pluck('id', 'job_state')
+
+ for document in query.run(self.conn):
+ try:
+ jobs.append(self._reconstitute_job(document['job_state']))
+ except Exception:
+ self._logger.exception('Unable to restore job "%s" -- removing it', document['id'])
+ failed_job_ids.append(document['id'])
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ self.r.expr(failed_job_ids).for_each(
+ lambda job_id: self.table.get_all(job_id).delete()).run(self.conn)
+
+ return jobs
+
+ def __repr__(self):
+ connection = self.conn
+ return '<%s (connection=%s)>' % (self.__class__.__name__, connection)
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/sqlalchemy.py b/venv/Lib/site-packages/apscheduler/jobstores/sqlalchemy.py
new file mode 100644
index 00000000..fecbd834
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/sqlalchemy.py
@@ -0,0 +1,154 @@
+from __future__ import absolute_import
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
+from apscheduler.job import Job
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: nocover
+ import pickle
+
+try:
+ from sqlalchemy import (
+ create_engine, Table, Column, MetaData, Unicode, Float, LargeBinary, select)
+ from sqlalchemy.exc import IntegrityError
+ from sqlalchemy.sql.expression import null
+except ImportError: # pragma: nocover
+ raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
+
+
+class SQLAlchemyJobStore(BaseJobStore):
+ """
+ Stores jobs in a database table using SQLAlchemy.
+ The table will be created if it doesn't exist in the database.
+
+ Plugin alias: ``sqlalchemy``
+
+ :param str url: connection string (see
+ :ref:`SQLAlchemy documentation ` on this)
+ :param engine: an SQLAlchemy :class:`~sqlalchemy.engine.Engine` to use instead of creating a
+ new one based on ``url``
+ :param str tablename: name of the table to store jobs in
+ :param metadata: a :class:`~sqlalchemy.schema.MetaData` instance to use instead of creating a
+ new one
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ :param str tableschema: name of the (existing) schema in the target database where the table
+ should be
+ :param dict engine_options: keyword arguments to :func:`~sqlalchemy.create_engine`
+ (ignored if ``engine`` is given)
+ """
+
+ def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL, tableschema=None, engine_options=None):
+ super(SQLAlchemyJobStore, self).__init__()
+ self.pickle_protocol = pickle_protocol
+ metadata = maybe_ref(metadata) or MetaData()
+
+ if engine:
+ self.engine = maybe_ref(engine)
+ elif url:
+ self.engine = create_engine(url, **(engine_options or {}))
+ else:
+ raise ValueError('Need either "engine" or "url" defined')
+
+ # 191 = max key length in MySQL for InnoDB/utf8mb4 tables,
+ # 25 = precision that translates to an 8-byte float
+ self.jobs_t = Table(
+ tablename, metadata,
+ Column('id', Unicode(191, _warn_on_bytestring=False), primary_key=True),
+ Column('next_run_time', Float(25), index=True),
+ Column('job_state', LargeBinary, nullable=False),
+ schema=tableschema
+ )
+
+ def start(self, scheduler, alias):
+ super(SQLAlchemyJobStore, self).start(scheduler, alias)
+ self.jobs_t.create(self.engine, True)
+
+ def lookup_job(self, job_id):
+ selectable = select([self.jobs_t.c.job_state]).where(self.jobs_t.c.id == job_id)
+ job_state = self.engine.execute(selectable).scalar()
+ return self._reconstitute_job(job_state) if job_state else None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ return self._get_jobs(self.jobs_t.c.next_run_time <= timestamp)
+
+ def get_next_run_time(self):
+ selectable = select([self.jobs_t.c.next_run_time]).\
+ where(self.jobs_t.c.next_run_time != null()).\
+ order_by(self.jobs_t.c.next_run_time).limit(1)
+ next_run_time = self.engine.execute(selectable).scalar()
+ return utc_timestamp_to_datetime(next_run_time)
+
+ def get_all_jobs(self):
+ jobs = self._get_jobs()
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ insert = self.jobs_t.insert().values(**{
+ 'id': job.id,
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
+ })
+ try:
+ self.engine.execute(insert)
+ except IntegrityError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ update = self.jobs_t.update().values(**{
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol)
+ }).where(self.jobs_t.c.id == job.id)
+ result = self.engine.execute(update)
+ if result.rowcount == 0:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ delete = self.jobs_t.delete().where(self.jobs_t.c.id == job_id)
+ result = self.engine.execute(delete)
+ if result.rowcount == 0:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ delete = self.jobs_t.delete()
+ self.engine.execute(delete)
+
+ def shutdown(self):
+ self.engine.dispose()
+
+ def _reconstitute_job(self, job_state):
+ job_state = pickle.loads(job_state)
+ job_state['jobstore'] = self
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self, *conditions):
+ jobs = []
+ selectable = select([self.jobs_t.c.id, self.jobs_t.c.job_state]).\
+ order_by(self.jobs_t.c.next_run_time)
+ selectable = selectable.where(*conditions) if conditions else selectable
+ failed_job_ids = set()
+ for row in self.engine.execute(selectable):
+ try:
+ jobs.append(self._reconstitute_job(row.job_state))
+ except BaseException:
+ self._logger.exception('Unable to restore job "%s" -- removing it', row.id)
+ failed_job_ids.add(row.id)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ delete = self.jobs_t.delete().where(self.jobs_t.c.id.in_(failed_job_ids))
+ self.engine.execute(delete)
+
+ return jobs
+
+ def __repr__(self):
+ return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
diff --git a/venv/Lib/site-packages/apscheduler/jobstores/zookeeper.py b/venv/Lib/site-packages/apscheduler/jobstores/zookeeper.py
new file mode 100644
index 00000000..2cca83e8
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/jobstores/zookeeper.py
@@ -0,0 +1,179 @@
+from __future__ import absolute_import
+
+import os
+from datetime import datetime
+
+from pytz import utc
+from kazoo.exceptions import NoNodeError, NodeExistsError
+
+from apscheduler.jobstores.base import BaseJobStore, JobLookupError, ConflictingIdError
+from apscheduler.util import maybe_ref, datetime_to_utc_timestamp, utc_timestamp_to_datetime
+from apscheduler.job import Job
+
+try:
+ import cPickle as pickle
+except ImportError: # pragma: nocover
+ import pickle
+
+try:
+ from kazoo.client import KazooClient
+except ImportError: # pragma: nocover
+ raise ImportError('ZooKeeperJobStore requires Kazoo installed')
+
+
+class ZooKeeperJobStore(BaseJobStore):
+ """
+ Stores jobs in a ZooKeeper tree. Any leftover keyword arguments are directly passed to
+ kazoo's `KazooClient
+ `_.
+
+ Plugin alias: ``zookeeper``
+
+ :param str path: path to store jobs in
+ :param client: a :class:`~kazoo.client.KazooClient` instance to use instead of
+ providing connection arguments
+ :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the
+ highest available
+ """
+
+ def __init__(self, path='/apscheduler', client=None, close_connection_on_exit=False,
+ pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
+ super(ZooKeeperJobStore, self).__init__()
+ self.pickle_protocol = pickle_protocol
+ self.close_connection_on_exit = close_connection_on_exit
+
+ if not path:
+ raise ValueError('The "path" parameter must not be empty')
+
+ self.path = path
+
+ if client:
+ self.client = maybe_ref(client)
+ else:
+ self.client = KazooClient(**connect_args)
+ self._ensured_path = False
+
+ def _ensure_paths(self):
+ if not self._ensured_path:
+ self.client.ensure_path(self.path)
+ self._ensured_path = True
+
+ def start(self, scheduler, alias):
+ super(ZooKeeperJobStore, self).start(scheduler, alias)
+ if not self.client.connected:
+ self.client.start()
+
+ def lookup_job(self, job_id):
+ self._ensure_paths()
+ node_path = os.path.join(self.path, job_id)
+ try:
+ content, _ = self.client.get(node_path)
+ doc = pickle.loads(content)
+ job = self._reconstitute_job(doc['job_state'])
+ return job
+ except BaseException:
+ return None
+
+ def get_due_jobs(self, now):
+ timestamp = datetime_to_utc_timestamp(now)
+ jobs = [job_def['job'] for job_def in self._get_jobs()
+ if job_def['next_run_time'] is not None and job_def['next_run_time'] <= timestamp]
+ return jobs
+
+ def get_next_run_time(self):
+ next_runs = [job_def['next_run_time'] for job_def in self._get_jobs()
+ if job_def['next_run_time'] is not None]
+ return utc_timestamp_to_datetime(min(next_runs)) if len(next_runs) > 0 else None
+
+ def get_all_jobs(self):
+ jobs = [job_def['job'] for job_def in self._get_jobs()]
+ self._fix_paused_jobs_sorting(jobs)
+ return jobs
+
+ def add_job(self, job):
+ self._ensure_paths()
+ node_path = os.path.join(self.path, str(job.id))
+ value = {
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': job.__getstate__()
+ }
+ data = pickle.dumps(value, self.pickle_protocol)
+ try:
+ self.client.create(node_path, value=data)
+ except NodeExistsError:
+ raise ConflictingIdError(job.id)
+
+ def update_job(self, job):
+ self._ensure_paths()
+ node_path = os.path.join(self.path, str(job.id))
+ changes = {
+ 'next_run_time': datetime_to_utc_timestamp(job.next_run_time),
+ 'job_state': job.__getstate__()
+ }
+ data = pickle.dumps(changes, self.pickle_protocol)
+ try:
+ self.client.set(node_path, value=data)
+ except NoNodeError:
+ raise JobLookupError(job.id)
+
+ def remove_job(self, job_id):
+ self._ensure_paths()
+ node_path = os.path.join(self.path, str(job_id))
+ try:
+ self.client.delete(node_path)
+ except NoNodeError:
+ raise JobLookupError(job_id)
+
+ def remove_all_jobs(self):
+ try:
+ self.client.delete(self.path, recursive=True)
+ except NoNodeError:
+ pass
+ self._ensured_path = False
+
+ def shutdown(self):
+ if self.close_connection_on_exit:
+ self.client.stop()
+ self.client.close()
+
+ def _reconstitute_job(self, job_state):
+ job_state = job_state
+ job = Job.__new__(Job)
+ job.__setstate__(job_state)
+ job._scheduler = self._scheduler
+ job._jobstore_alias = self._alias
+ return job
+
+ def _get_jobs(self):
+ self._ensure_paths()
+ jobs = []
+ failed_job_ids = []
+ all_ids = self.client.get_children(self.path)
+ for node_name in all_ids:
+ try:
+ node_path = os.path.join(self.path, node_name)
+ content, _ = self.client.get(node_path)
+ doc = pickle.loads(content)
+ job_def = {
+ 'job_id': node_name,
+ 'next_run_time': doc['next_run_time'] if doc['next_run_time'] else None,
+ 'job_state': doc['job_state'],
+ 'job': self._reconstitute_job(doc['job_state']),
+ 'creation_time': _.ctime
+ }
+ jobs.append(job_def)
+ except BaseException:
+ self._logger.exception('Unable to restore job "%s" -- removing it' % node_name)
+ failed_job_ids.append(node_name)
+
+ # Remove all the jobs we failed to restore
+ if failed_job_ids:
+ for failed_id in failed_job_ids:
+ self.remove_job(failed_id)
+ paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
+ return sorted(jobs, key=lambda job_def: (job_def['job'].next_run_time or paused_sort_key,
+ job_def['creation_time']))
+
+ def __repr__(self):
+ self._logger.exception('<%s (client=%s)>' % (self.__class__.__name__, self.client))
+ return '<%s (client=%s)>' % (self.__class__.__name__, self.client)
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/__init__.py b/venv/Lib/site-packages/apscheduler/schedulers/__init__.py
new file mode 100644
index 00000000..bd8a7900
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/__init__.py
@@ -0,0 +1,12 @@
+class SchedulerAlreadyRunningError(Exception):
+ """Raised when attempting to start or configure the scheduler when it's already running."""
+
+ def __str__(self):
+ return 'Scheduler is already running'
+
+
+class SchedulerNotRunningError(Exception):
+ """Raised when attempting to shutdown the scheduler when it's not running."""
+
+ def __str__(self):
+ return 'Scheduler is not running'
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/asyncio.py b/venv/Lib/site-packages/apscheduler/schedulers/asyncio.py
new file mode 100644
index 00000000..289ef13f
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/asyncio.py
@@ -0,0 +1,68 @@
+from __future__ import absolute_import
+from functools import wraps, partial
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+try:
+ import asyncio
+except ImportError: # pragma: nocover
+ try:
+ import trollius as asyncio
+ except ImportError:
+ raise ImportError(
+ 'AsyncIOScheduler requires either Python 3.4 or the asyncio package installed')
+
+
+def run_in_event_loop(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ wrapped = partial(func, self, *args, **kwargs)
+ self._eventloop.call_soon_threadsafe(wrapped)
+ return wrapper
+
+
+class AsyncIOScheduler(BaseScheduler):
+ """
+ A scheduler that runs on an asyncio (:pep:`3156`) event loop.
+
+ The default executor can run jobs based on native coroutines (``async def``).
+
+ Extra options:
+
+ ============== =============================================================
+ ``event_loop`` AsyncIO event loop to use (defaults to the global event loop)
+ ============== =============================================================
+ """
+
+ _eventloop = None
+ _timeout = None
+
+ @run_in_event_loop
+ def shutdown(self, wait=True):
+ super(AsyncIOScheduler, self).shutdown(wait)
+ self._stop_timer()
+
+ def _configure(self, config):
+ self._eventloop = maybe_ref(config.pop('event_loop', None)) or asyncio.get_event_loop()
+ super(AsyncIOScheduler, self)._configure(config)
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._timeout = self._eventloop.call_later(wait_seconds, self.wakeup)
+
+ def _stop_timer(self):
+ if self._timeout:
+ self._timeout.cancel()
+ del self._timeout
+
+ @run_in_event_loop
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
+
+ def _create_default_executor(self):
+ from apscheduler.executors.asyncio import AsyncIOExecutor
+ return AsyncIOExecutor()
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/background.py b/venv/Lib/site-packages/apscheduler/schedulers/background.py
new file mode 100644
index 00000000..03f29822
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/background.py
@@ -0,0 +1,41 @@
+from __future__ import absolute_import
+
+from threading import Thread, Event
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.util import asbool
+
+
+class BackgroundScheduler(BlockingScheduler):
+ """
+ A scheduler that runs in the background using a separate thread
+ (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will return immediately).
+
+ Extra options:
+
+ ========== =============================================================================
+ ``daemon`` Set the ``daemon`` option in the background thread (defaults to ``True``, see
+ `the documentation
+ `_
+ for further details)
+ ========== =============================================================================
+ """
+
+ _thread = None
+
+ def _configure(self, config):
+ self._daemon = asbool(config.pop('daemon', True))
+ super(BackgroundScheduler, self)._configure(config)
+
+ def start(self, *args, **kwargs):
+ self._event = Event()
+ BaseScheduler.start(self, *args, **kwargs)
+ self._thread = Thread(target=self._main_loop, name='APScheduler')
+ self._thread.daemon = self._daemon
+ self._thread.start()
+
+ def shutdown(self, *args, **kwargs):
+ super(BackgroundScheduler, self).shutdown(*args, **kwargs)
+ self._thread.join()
+ del self._thread
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/base.py b/venv/Lib/site-packages/apscheduler/schedulers/base.py
new file mode 100644
index 00000000..8e711549
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/base.py
@@ -0,0 +1,1022 @@
+from __future__ import print_function
+
+from abc import ABCMeta, abstractmethod
+from threading import RLock
+from datetime import datetime, timedelta
+from logging import getLogger
+import warnings
+import sys
+
+from pkg_resources import iter_entry_points
+from tzlocal import get_localzone
+import six
+
+from apscheduler.schedulers import SchedulerAlreadyRunningError, SchedulerNotRunningError
+from apscheduler.executors.base import MaxInstancesReachedError, BaseExecutor
+from apscheduler.executors.pool import ThreadPoolExecutor
+from apscheduler.jobstores.base import ConflictingIdError, JobLookupError, BaseJobStore
+from apscheduler.jobstores.memory import MemoryJobStore
+from apscheduler.job import Job
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import (
+ asbool, asint, astimezone, maybe_ref, timedelta_seconds, undefined, TIMEOUT_MAX)
+from apscheduler.events import (
+ SchedulerEvent, JobEvent, JobSubmissionEvent, EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN,
+ EVENT_JOBSTORE_ADDED, EVENT_JOBSTORE_REMOVED, EVENT_ALL, EVENT_JOB_MODIFIED, EVENT_JOB_REMOVED,
+ EVENT_JOB_ADDED, EVENT_EXECUTOR_ADDED, EVENT_EXECUTOR_REMOVED, EVENT_ALL_JOBS_REMOVED,
+ EVENT_JOB_SUBMITTED, EVENT_JOB_MAX_INSTANCES, EVENT_SCHEDULER_RESUMED, EVENT_SCHEDULER_PAUSED)
+
+try:
+ from collections.abc import MutableMapping
+except ImportError:
+ from collections import MutableMapping
+
+#: constant indicating a scheduler's stopped state
+STATE_STOPPED = 0
+#: constant indicating a scheduler's running state (started and processing jobs)
+STATE_RUNNING = 1
+#: constant indicating a scheduler's paused state (started but not processing jobs)
+STATE_PAUSED = 2
+
+
+class BaseScheduler(six.with_metaclass(ABCMeta)):
+ """
+ Abstract base class for all schedulers.
+
+ Takes the following keyword arguments:
+
+ :param str|logging.Logger logger: logger to use for the scheduler's logging (defaults to
+ apscheduler.scheduler)
+ :param str|datetime.tzinfo timezone: the default time zone (defaults to the local timezone)
+ :param int|float jobstore_retry_interval: the minimum number of seconds to wait between
+ retries in the scheduler's main loop if the job store raises an exception when getting
+ the list of due jobs
+ :param dict job_defaults: default values for newly added jobs
+ :param dict jobstores: a dictionary of job store alias -> job store instance or configuration
+ dict
+ :param dict executors: a dictionary of executor alias -> executor instance or configuration
+ dict
+
+ :ivar int state: current running state of the scheduler (one of the following constants from
+ ``apscheduler.schedulers.base``: ``STATE_STOPPED``, ``STATE_RUNNING``, ``STATE_PAUSED``)
+
+ .. seealso:: :ref:`scheduler-config`
+ """
+
+ _trigger_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.triggers'))
+ _trigger_classes = {}
+ _executor_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.executors'))
+ _executor_classes = {}
+ _jobstore_plugins = dict((ep.name, ep) for ep in iter_entry_points('apscheduler.jobstores'))
+ _jobstore_classes = {}
+
+ #
+ # Public API
+ #
+
+ def __init__(self, gconfig={}, **options):
+ super(BaseScheduler, self).__init__()
+ self._executors = {}
+ self._executors_lock = self._create_lock()
+ self._jobstores = {}
+ self._jobstores_lock = self._create_lock()
+ self._listeners = []
+ self._listeners_lock = self._create_lock()
+ self._pending_jobs = []
+ self.state = STATE_STOPPED
+ self.configure(gconfig, **options)
+
+ def configure(self, gconfig={}, prefix='apscheduler.', **options):
+ """
+ Reconfigures the scheduler with the given options.
+
+ Can only be done when the scheduler isn't running.
+
+ :param dict gconfig: a "global" configuration dictionary whose values can be overridden by
+ keyword arguments to this method
+ :param str|unicode prefix: pick only those keys from ``gconfig`` that are prefixed with
+ this string (pass an empty string or ``None`` to use all keys)
+ :raises SchedulerAlreadyRunningError: if the scheduler is already running
+
+ """
+ if self.state != STATE_STOPPED:
+ raise SchedulerAlreadyRunningError
+
+ # If a non-empty prefix was given, strip it from the keys in the
+ # global configuration dict
+ if prefix:
+ prefixlen = len(prefix)
+ gconfig = dict((key[prefixlen:], value) for key, value in six.iteritems(gconfig)
+ if key.startswith(prefix))
+
+ # Create a structure from the dotted options
+ # (e.g. "a.b.c = d" -> {'a': {'b': {'c': 'd'}}})
+ config = {}
+ for key, value in six.iteritems(gconfig):
+ parts = key.split('.')
+ parent = config
+ key = parts.pop(0)
+ while parts:
+ parent = parent.setdefault(key, {})
+ key = parts.pop(0)
+ parent[key] = value
+
+ # Override any options with explicit keyword arguments
+ config.update(options)
+ self._configure(config)
+
+ def start(self, paused=False):
+ """
+ Start the configured executors and job stores and begin processing scheduled jobs.
+
+ :param bool paused: if ``True``, don't start job processing until :meth:`resume` is called
+ :raises SchedulerAlreadyRunningError: if the scheduler is already running
+ :raises RuntimeError: if running under uWSGI with threads disabled
+
+ """
+ if self.state != STATE_STOPPED:
+ raise SchedulerAlreadyRunningError
+
+ self._check_uwsgi()
+
+ with self._executors_lock:
+ # Create a default executor if nothing else is configured
+ if 'default' not in self._executors:
+ self.add_executor(self._create_default_executor(), 'default')
+
+ # Start all the executors
+ for alias, executor in six.iteritems(self._executors):
+ executor.start(self, alias)
+
+ with self._jobstores_lock:
+ # Create a default job store if nothing else is configured
+ if 'default' not in self._jobstores:
+ self.add_jobstore(self._create_default_jobstore(), 'default')
+
+ # Start all the job stores
+ for alias, store in six.iteritems(self._jobstores):
+ store.start(self, alias)
+
+ # Schedule all pending jobs
+ for job, jobstore_alias, replace_existing in self._pending_jobs:
+ self._real_add_job(job, jobstore_alias, replace_existing)
+ del self._pending_jobs[:]
+
+ self.state = STATE_PAUSED if paused else STATE_RUNNING
+ self._logger.info('Scheduler started')
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_START))
+
+ if not paused:
+ self.wakeup()
+
+ @abstractmethod
+ def shutdown(self, wait=True):
+ """
+ Shuts down the scheduler, along with its executors and job stores.
+
+ Does not interrupt any currently running jobs.
+
+ :param bool wait: ``True`` to wait until all currently executing jobs have finished
+ :raises SchedulerNotRunningError: if the scheduler has not been started yet
+
+ """
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+
+ self.state = STATE_STOPPED
+
+ # Shut down all executors
+ with self._executors_lock:
+ for executor in six.itervalues(self._executors):
+ executor.shutdown(wait)
+
+ # Shut down all job stores
+ with self._jobstores_lock:
+ for jobstore in six.itervalues(self._jobstores):
+ jobstore.shutdown()
+
+ self._logger.info('Scheduler has been shut down')
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
+
+ def pause(self):
+ """
+ Pause job processing in the scheduler.
+
+ This will prevent the scheduler from waking up to do job processing until :meth:`resume`
+ is called. It will not however stop any already running job processing.
+
+ """
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+ elif self.state == STATE_RUNNING:
+ self.state = STATE_PAUSED
+ self._logger.info('Paused scheduler job processing')
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_PAUSED))
+
+ def resume(self):
+ """Resume job processing in the scheduler."""
+ if self.state == STATE_STOPPED:
+ raise SchedulerNotRunningError
+ elif self.state == STATE_PAUSED:
+ self.state = STATE_RUNNING
+ self._logger.info('Resumed scheduler job processing')
+ self._dispatch_event(SchedulerEvent(EVENT_SCHEDULER_RESUMED))
+ self.wakeup()
+
+ @property
+ def running(self):
+ """
+ Return ``True`` if the scheduler has been started.
+
+ This is a shortcut for ``scheduler.state != STATE_STOPPED``.
+
+ """
+ return self.state != STATE_STOPPED
+
+ def add_executor(self, executor, alias='default', **executor_opts):
+ """
+ Adds an executor to this scheduler.
+
+ Any extra keyword arguments will be passed to the executor plugin's constructor, assuming
+ that the first argument is the name of an executor plugin.
+
+ :param str|unicode|apscheduler.executors.base.BaseExecutor executor: either an executor
+ instance or the name of an executor plugin
+ :param str|unicode alias: alias for the scheduler
+ :raises ValueError: if there is already an executor by the given alias
+
+ """
+ with self._executors_lock:
+ if alias in self._executors:
+ raise ValueError('This scheduler already has an executor by the alias of "%s"' %
+ alias)
+
+ if isinstance(executor, BaseExecutor):
+ self._executors[alias] = executor
+ elif isinstance(executor, six.string_types):
+ self._executors[alias] = executor = self._create_plugin_instance(
+ 'executor', executor, executor_opts)
+ else:
+ raise TypeError('Expected an executor instance or a string, got %s instead' %
+ executor.__class__.__name__)
+
+ # Start the executor right away if the scheduler is running
+ if self.state != STATE_STOPPED:
+ executor.start(self, alias)
+
+ self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_ADDED, alias))
+
+ def remove_executor(self, alias, shutdown=True):
+ """
+ Removes the executor by the given alias from this scheduler.
+
+ :param str|unicode alias: alias of the executor
+ :param bool shutdown: ``True`` to shut down the executor after
+ removing it
+
+ """
+ with self._executors_lock:
+ executor = self._lookup_executor(alias)
+ del self._executors[alias]
+
+ if shutdown:
+ executor.shutdown()
+
+ self._dispatch_event(SchedulerEvent(EVENT_EXECUTOR_REMOVED, alias))
+
+ def add_jobstore(self, jobstore, alias='default', **jobstore_opts):
+ """
+ Adds a job store to this scheduler.
+
+ Any extra keyword arguments will be passed to the job store plugin's constructor, assuming
+ that the first argument is the name of a job store plugin.
+
+ :param str|unicode|apscheduler.jobstores.base.BaseJobStore jobstore: job store to be added
+ :param str|unicode alias: alias for the job store
+ :raises ValueError: if there is already a job store by the given alias
+
+ """
+ with self._jobstores_lock:
+ if alias in self._jobstores:
+ raise ValueError('This scheduler already has a job store by the alias of "%s"' %
+ alias)
+
+ if isinstance(jobstore, BaseJobStore):
+ self._jobstores[alias] = jobstore
+ elif isinstance(jobstore, six.string_types):
+ self._jobstores[alias] = jobstore = self._create_plugin_instance(
+ 'jobstore', jobstore, jobstore_opts)
+ else:
+ raise TypeError('Expected a job store instance or a string, got %s instead' %
+ jobstore.__class__.__name__)
+
+ # Start the job store right away if the scheduler isn't stopped
+ if self.state != STATE_STOPPED:
+ jobstore.start(self, alias)
+
+ # Notify listeners that a new job store has been added
+ self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_ADDED, alias))
+
+ # Notify the scheduler so it can scan the new job store for jobs
+ if self.state != STATE_STOPPED:
+ self.wakeup()
+
+ def remove_jobstore(self, alias, shutdown=True):
+ """
+ Removes the job store by the given alias from this scheduler.
+
+ :param str|unicode alias: alias of the job store
+ :param bool shutdown: ``True`` to shut down the job store after removing it
+
+ """
+ with self._jobstores_lock:
+ jobstore = self._lookup_jobstore(alias)
+ del self._jobstores[alias]
+
+ if shutdown:
+ jobstore.shutdown()
+
+ self._dispatch_event(SchedulerEvent(EVENT_JOBSTORE_REMOVED, alias))
+
+ def add_listener(self, callback, mask=EVENT_ALL):
+ """
+ add_listener(callback, mask=EVENT_ALL)
+
+ Adds a listener for scheduler events.
+
+ When a matching event occurs, ``callback`` is executed with the event object as its
+ sole argument. If the ``mask`` parameter is not provided, the callback will receive events
+ of all types.
+
+ :param callback: any callable that takes one argument
+ :param int mask: bitmask that indicates which events should be
+ listened to
+
+ .. seealso:: :mod:`apscheduler.events`
+ .. seealso:: :ref:`scheduler-events`
+
+ """
+ with self._listeners_lock:
+ self._listeners.append((callback, mask))
+
+ def remove_listener(self, callback):
+ """Removes a previously added event listener."""
+
+ with self._listeners_lock:
+ for i, (cb, _) in enumerate(self._listeners):
+ if callback == cb:
+ del self._listeners[i]
+
+ def add_job(self, func, trigger=None, args=None, kwargs=None, id=None, name=None,
+ misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
+ next_run_time=undefined, jobstore='default', executor='default',
+ replace_existing=False, **trigger_args):
+ """
+ add_job(func, trigger=None, args=None, kwargs=None, id=None, \
+ name=None, misfire_grace_time=undefined, coalesce=undefined, \
+ max_instances=undefined, next_run_time=undefined, \
+ jobstore='default', executor='default', \
+ replace_existing=False, **trigger_args)
+
+ Adds the given job to the job list and wakes up the scheduler if it's already running.
+
+ Any option that defaults to ``undefined`` will be replaced with the corresponding default
+ value when the job is scheduled (which happens when the scheduler is started, or
+ immediately if the scheduler is already running).
+
+ The ``func`` argument can be given either as a callable object or a textual reference in
+ the ``package.module:some.object`` format, where the first half (separated by ``:``) is an
+ importable module and the second half is a reference to the callable object, relative to
+ the module.
+
+ The ``trigger`` argument can either be:
+ #. the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``), in which case
+ any extra keyword arguments to this method are passed on to the trigger's constructor
+ #. an instance of a trigger class
+
+ :param func: callable (or a textual reference to one) to run at the given time
+ :param str|apscheduler.triggers.base.BaseTrigger trigger: trigger that determines when
+ ``func`` is called
+ :param list|tuple args: list of positional arguments to call func with
+ :param dict kwargs: dict of keyword arguments to call func with
+ :param str|unicode id: explicit identifier for the job (for modifying it later)
+ :param str|unicode name: textual description of the job
+ :param int misfire_grace_time: seconds after the designated runtime that the job is still
+ allowed to be run
+ :param bool coalesce: run once instead of many times if the scheduler determines that the
+ job should be run more than once in succession
+ :param int max_instances: maximum number of concurrently running instances allowed for this
+ job
+ :param datetime next_run_time: when to first run the job, regardless of the trigger (pass
+ ``None`` to add the job as paused)
+ :param str|unicode jobstore: alias of the job store to store the job in
+ :param str|unicode executor: alias of the executor to run the job with
+ :param bool replace_existing: ``True`` to replace an existing job with the same ``id``
+ (but retain the number of runs from the existing one)
+ :rtype: Job
+
+ """
+ job_kwargs = {
+ 'trigger': self._create_trigger(trigger, trigger_args),
+ 'executor': executor,
+ 'func': func,
+ 'args': tuple(args) if args is not None else (),
+ 'kwargs': dict(kwargs) if kwargs is not None else {},
+ 'id': id,
+ 'name': name,
+ 'misfire_grace_time': misfire_grace_time,
+ 'coalesce': coalesce,
+ 'max_instances': max_instances,
+ 'next_run_time': next_run_time
+ }
+ job_kwargs = dict((key, value) for key, value in six.iteritems(job_kwargs) if
+ value is not undefined)
+ job = Job(self, **job_kwargs)
+
+ # Don't really add jobs to job stores before the scheduler is up and running
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ self._pending_jobs.append((job, jobstore, replace_existing))
+ self._logger.info('Adding job tentatively -- it will be properly scheduled when '
+ 'the scheduler starts')
+ else:
+ self._real_add_job(job, jobstore, replace_existing)
+
+ return job
+
+    def scheduled_job(self, trigger, args=None, kwargs=None, id=None, name=None,
+                      misfire_grace_time=undefined, coalesce=undefined, max_instances=undefined,
+                      next_run_time=undefined, jobstore='default', executor='default',
+                      **trigger_args):
+        """
+        scheduled_job(trigger, args=None, kwargs=None, id=None, \
+            name=None, misfire_grace_time=undefined, \
+            coalesce=undefined, max_instances=undefined, \
+            next_run_time=undefined, jobstore='default', \
+            executor='default', **trigger_args)
+
+        A decorator version of :meth:`add_job`, except that ``replace_existing`` is always
+        ``True``.
+
+        .. important:: The ``id`` argument must be given if scheduling a job in a persistent job
+            store. The scheduler cannot, however, enforce this requirement.
+
+        """
+        def inner(func):
+            self.add_job(func, trigger, args, kwargs, id, name, misfire_grace_time, coalesce,
+                         max_instances, next_run_time, jobstore, executor, True, **trigger_args)
+            return func
+        return inner
+
+ def modify_job(self, job_id, jobstore=None, **changes):
+ """
+ Modifies the properties of a single job.
+
+ Modifications are passed to this method as extra keyword arguments.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job: the relevant job instance
+
+ """
+ with self._jobstores_lock:
+ job, jobstore = self._lookup_job(job_id, jobstore)
+ job._modify(**changes)
+ if jobstore:
+ self._lookup_jobstore(jobstore).update_job(job)
+
+ self._dispatch_event(JobEvent(EVENT_JOB_MODIFIED, job_id, jobstore))
+
+ # Wake up the scheduler since the job's next run time may have been changed
+ if self.state == STATE_RUNNING:
+ self.wakeup()
+
+ return job
+
+ def reschedule_job(self, job_id, jobstore=None, trigger=None, **trigger_args):
+ """
+ Constructs a new trigger for a job and updates its next run time.
+
+ Extra keyword arguments are passed directly to the trigger's constructor.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :param trigger: alias of the trigger type or a trigger instance
+ :return Job: the relevant job instance
+
+ """
+ trigger = self._create_trigger(trigger, trigger_args)
+ now = datetime.now(self.timezone)
+ next_run_time = trigger.get_next_fire_time(None, now)
+ return self.modify_job(job_id, jobstore, trigger=trigger, next_run_time=next_run_time)
+
+ def pause_job(self, job_id, jobstore=None):
+ """
+ Causes the given job not to be executed until it is explicitly resumed.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job: the relevant job instance
+
+ """
+ return self.modify_job(job_id, jobstore, next_run_time=None)
+
+ def resume_job(self, job_id, jobstore=None):
+ """
+ Resumes the schedule of the given job, or removes the job if its schedule is finished.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :return Job|None: the relevant job instance if the job was rescheduled, or ``None`` if no
+ next run time could be calculated and the job was removed
+
+ """
+ with self._jobstores_lock:
+ job, jobstore = self._lookup_job(job_id, jobstore)
+ now = datetime.now(self.timezone)
+ next_run_time = job.trigger.get_next_fire_time(None, now)
+ if next_run_time:
+ return self.modify_job(job_id, jobstore, next_run_time=next_run_time)
+ else:
+ self.remove_job(job.id, jobstore)
+
+ def get_jobs(self, jobstore=None, pending=None):
+ """
+ Returns a list of pending jobs (if the scheduler hasn't been started yet) and scheduled
+ jobs, either from a specific job store or from all of them.
+
+ If the scheduler has not been started yet, only pending jobs can be returned because the
+ job stores haven't been started yet either.
+
+ :param str|unicode jobstore: alias of the job store
+ :param bool pending: **DEPRECATED**
+ :rtype: list[Job]
+
+ """
+ if pending is not None:
+ warnings.warn('The "pending" option is deprecated -- get_jobs() always returns '
+ 'scheduled jobs if the scheduler has been started and pending jobs '
+ 'otherwise', DeprecationWarning)
+
+ with self._jobstores_lock:
+ jobs = []
+ if self.state == STATE_STOPPED:
+ for job, alias, replace_existing in self._pending_jobs:
+ if jobstore is None or alias == jobstore:
+ jobs.append(job)
+ else:
+ for alias, store in six.iteritems(self._jobstores):
+ if jobstore is None or alias == jobstore:
+ jobs.extend(store.get_all_jobs())
+
+ return jobs
+
+ def get_job(self, job_id, jobstore=None):
+ """
+ Returns the Job that matches the given ``job_id``.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that most likely contains the job
+ :return: the Job by the given ID, or ``None`` if it wasn't found
+ :rtype: Job
+
+ """
+ with self._jobstores_lock:
+ try:
+ return self._lookup_job(job_id, jobstore)[0]
+ except JobLookupError:
+ return
+
+ def remove_job(self, job_id, jobstore=None):
+ """
+ Removes a job, preventing it from being run any more.
+
+ :param str|unicode job_id: the identifier of the job
+ :param str|unicode jobstore: alias of the job store that contains the job
+ :raises JobLookupError: if the job was not found
+
+ """
+ jobstore_alias = None
+ with self._jobstores_lock:
+ # Check if the job is among the pending jobs
+ if self.state == STATE_STOPPED:
+ for i, (job, alias, replace_existing) in enumerate(self._pending_jobs):
+ if job.id == job_id and jobstore in (None, alias):
+ del self._pending_jobs[i]
+ jobstore_alias = alias
+ break
+ else:
+ # Otherwise, try to remove it from each store until it succeeds or we run out of
+ # stores to check
+ for alias, store in six.iteritems(self._jobstores):
+ if jobstore in (None, alias):
+ try:
+ store.remove_job(job_id)
+ jobstore_alias = alias
+ break
+ except JobLookupError:
+ continue
+
+ if jobstore_alias is None:
+ raise JobLookupError(job_id)
+
+ # Notify listeners that a job has been removed
+ event = JobEvent(EVENT_JOB_REMOVED, job_id, jobstore_alias)
+ self._dispatch_event(event)
+
+ self._logger.info('Removed job %s', job_id)
+
+ def remove_all_jobs(self, jobstore=None):
+ """
+ Removes all jobs from the specified job store, or all job stores if none is given.
+
+ :param str|unicode jobstore: alias of the job store
+
+ """
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ if jobstore:
+ self._pending_jobs = [pending for pending in self._pending_jobs if
+ pending[1] != jobstore]
+ else:
+ self._pending_jobs = []
+ else:
+ for alias, store in six.iteritems(self._jobstores):
+ if jobstore in (None, alias):
+ store.remove_all_jobs()
+
+ self._dispatch_event(SchedulerEvent(EVENT_ALL_JOBS_REMOVED, jobstore))
+
+ def print_jobs(self, jobstore=None, out=None):
+ """
+ print_jobs(jobstore=None, out=sys.stdout)
+
+ Prints out a textual listing of all jobs currently scheduled on either all job stores or
+ just a specific one.
+
+ :param str|unicode jobstore: alias of the job store, ``None`` to list jobs from all stores
+ :param file out: a file-like object to print to (defaults to **sys.stdout** if nothing is
+ given)
+
+ """
+ out = out or sys.stdout
+ with self._jobstores_lock:
+ if self.state == STATE_STOPPED:
+ print(u'Pending jobs:', file=out)
+ if self._pending_jobs:
+ for job, jobstore_alias, replace_existing in self._pending_jobs:
+ if jobstore in (None, jobstore_alias):
+ print(u' %s' % job, file=out)
+ else:
+ print(u' No pending jobs', file=out)
+ else:
+ for alias, store in sorted(six.iteritems(self._jobstores)):
+ if jobstore in (None, alias):
+ print(u'Jobstore %s:' % alias, file=out)
+ jobs = store.get_all_jobs()
+ if jobs:
+ for job in jobs:
+ print(u' %s' % job, file=out)
+ else:
+ print(u' No scheduled jobs', file=out)
+
+ @abstractmethod
+ def wakeup(self):
+ """
+ Notifies the scheduler that there may be jobs due for execution.
+ Triggers :meth:`_process_jobs` to be run in an implementation specific manner.
+ """
+
+ #
+ # Private API
+ #
+
+ def _configure(self, config):
+ # Set general options
+ self._logger = maybe_ref(config.pop('logger', None)) or getLogger('apscheduler.scheduler')
+ self.timezone = astimezone(config.pop('timezone', None)) or get_localzone()
+ self.jobstore_retry_interval = float(config.pop('jobstore_retry_interval', 10))
+
+ # Set the job defaults
+ job_defaults = config.get('job_defaults', {})
+ self._job_defaults = {
+ 'misfire_grace_time': asint(job_defaults.get('misfire_grace_time', 1)),
+ 'coalesce': asbool(job_defaults.get('coalesce', True)),
+ 'max_instances': asint(job_defaults.get('max_instances', 1))
+ }
+
+ # Configure executors
+ self._executors.clear()
+ for alias, value in six.iteritems(config.get('executors', {})):
+ if isinstance(value, BaseExecutor):
+ self.add_executor(value, alias)
+ elif isinstance(value, MutableMapping):
+ executor_class = value.pop('class', None)
+ plugin = value.pop('type', None)
+ if plugin:
+ executor = self._create_plugin_instance('executor', plugin, value)
+ elif executor_class:
+ cls = maybe_ref(executor_class)
+ executor = cls(**value)
+ else:
+ raise ValueError(
+ 'Cannot create executor "%s" -- either "type" or "class" must be defined' %
+ alias)
+
+ self.add_executor(executor, alias)
+ else:
+ raise TypeError(
+ "Expected executor instance or dict for executors['%s'], got %s instead" %
+ (alias, value.__class__.__name__))
+
+ # Configure job stores
+ self._jobstores.clear()
+ for alias, value in six.iteritems(config.get('jobstores', {})):
+ if isinstance(value, BaseJobStore):
+ self.add_jobstore(value, alias)
+ elif isinstance(value, MutableMapping):
+ jobstore_class = value.pop('class', None)
+ plugin = value.pop('type', None)
+ if plugin:
+ jobstore = self._create_plugin_instance('jobstore', plugin, value)
+ elif jobstore_class:
+ cls = maybe_ref(jobstore_class)
+ jobstore = cls(**value)
+ else:
+ raise ValueError(
+ 'Cannot create job store "%s" -- either "type" or "class" must be '
+ 'defined' % alias)
+
+ self.add_jobstore(jobstore, alias)
+ else:
+ raise TypeError(
+ "Expected job store instance or dict for jobstores['%s'], got %s instead" %
+ (alias, value.__class__.__name__))
+
+    def _create_default_executor(self):
+        """Creates a default executor, specific to the particular scheduler type."""
+        return ThreadPoolExecutor()
+
+ def _create_default_jobstore(self):
+ """Creates a default job store, specific to the particular scheduler type."""
+ return MemoryJobStore()
+
+    def _lookup_executor(self, alias):
+        """
+        Returns the executor instance by the given name from the list of executors that were added
+        to this scheduler.
+
+        :type alias: str
+        :raises KeyError: if no executor by the given alias is found
+
+        """
+        try:
+            return self._executors[alias]
+        except KeyError:
+            raise KeyError('No such executor: %s' % alias)
+
+    def _lookup_jobstore(self, alias):
+        """
+        Returns the job store instance by the given name from the list of job stores that were
+        added to this scheduler.
+
+        :type alias: str
+        :raises KeyError: if no job store by the given alias is found
+
+        """
+        try:
+            return self._jobstores[alias]
+        except KeyError:
+            raise KeyError('No such job store: %s' % alias)
+
+ def _lookup_job(self, job_id, jobstore_alias):
+ """
+ Finds a job by its ID.
+
+ :type job_id: str
+ :param str jobstore_alias: alias of a job store to look in
+ :return tuple[Job, str]: a tuple of job, jobstore alias (jobstore alias is None in case of
+ a pending job)
+ :raises JobLookupError: if no job by the given ID is found.
+
+ """
+ if self.state == STATE_STOPPED:
+ # Check if the job is among the pending jobs
+ for job, alias, replace_existing in self._pending_jobs:
+ if job.id == job_id:
+ return job, None
+ else:
+ # Look in all job stores
+ for alias, store in six.iteritems(self._jobstores):
+ if jobstore_alias in (None, alias):
+ job = store.lookup_job(job_id)
+ if job is not None:
+ return job, alias
+
+ raise JobLookupError(job_id)
+
+ def _dispatch_event(self, event):
+ """
+ Dispatches the given event to interested listeners.
+
+ :param SchedulerEvent event: the event to send
+
+ """
+ with self._listeners_lock:
+ listeners = tuple(self._listeners)
+
+ for cb, mask in listeners:
+ if event.code & mask:
+ try:
+ cb(event)
+ except BaseException:
+ self._logger.exception('Error notifying listener')
+
+ def _check_uwsgi(self):
+ """Check if we're running under uWSGI with threads disabled."""
+ uwsgi_module = sys.modules.get('uwsgi')
+ if not getattr(uwsgi_module, 'has_threads', True):
+ raise RuntimeError('The scheduler seems to be running under uWSGI, but threads have '
+ 'been disabled. You must run uWSGI with the --enable-threads '
+ 'option for the scheduler to work.')
+
+ def _real_add_job(self, job, jobstore_alias, replace_existing):
+ """
+ :param Job job: the job to add
+ :param bool replace_existing: ``True`` to use update_job() in case the job already exists
+ in the store
+
+ """
+ # Fill in undefined values with defaults
+ replacements = {}
+ for key, value in six.iteritems(self._job_defaults):
+ if not hasattr(job, key):
+ replacements[key] = value
+
+ # Calculate the next run time if there is none defined
+ if not hasattr(job, 'next_run_time'):
+ now = datetime.now(self.timezone)
+ replacements['next_run_time'] = job.trigger.get_next_fire_time(None, now)
+
+ # Apply any replacements
+ job._modify(**replacements)
+
+ # Add the job to the given job store
+ store = self._lookup_jobstore(jobstore_alias)
+ try:
+ store.add_job(job)
+ except ConflictingIdError:
+ if replace_existing:
+ store.update_job(job)
+ else:
+ raise
+
+ # Mark the job as no longer pending
+ job._jobstore_alias = jobstore_alias
+
+ # Notify listeners that a new job has been added
+ event = JobEvent(EVENT_JOB_ADDED, job.id, jobstore_alias)
+ self._dispatch_event(event)
+
+ self._logger.info('Added job "%s" to job store "%s"', job.name, jobstore_alias)
+
+ # Notify the scheduler about the new job
+ if self.state == STATE_RUNNING:
+ self.wakeup()
+
+ def _create_plugin_instance(self, type_, alias, constructor_kwargs):
+ """Creates an instance of the given plugin type, loading the plugin first if necessary."""
+ plugin_container, class_container, base_class = {
+ 'trigger': (self._trigger_plugins, self._trigger_classes, BaseTrigger),
+ 'jobstore': (self._jobstore_plugins, self._jobstore_classes, BaseJobStore),
+ 'executor': (self._executor_plugins, self._executor_classes, BaseExecutor)
+ }[type_]
+
+ try:
+ plugin_cls = class_container[alias]
+ except KeyError:
+ if alias in plugin_container:
+ plugin_cls = class_container[alias] = plugin_container[alias].load()
+ if not issubclass(plugin_cls, base_class):
+ raise TypeError('The {0} entry point does not point to a {0} class'.
+ format(type_))
+ else:
+ raise LookupError('No {0} by the name "{1}" was found'.format(type_, alias))
+
+ return plugin_cls(**constructor_kwargs)
+
+ def _create_trigger(self, trigger, trigger_args):
+ if isinstance(trigger, BaseTrigger):
+ return trigger
+ elif trigger is None:
+ trigger = 'date'
+ elif not isinstance(trigger, six.string_types):
+ raise TypeError('Expected a trigger instance or string, got %s instead' %
+ trigger.__class__.__name__)
+
+ # Use the scheduler's time zone if nothing else is specified
+ trigger_args.setdefault('timezone', self.timezone)
+
+ # Instantiate the trigger class
+ return self._create_plugin_instance('trigger', trigger, trigger_args)
+
+ def _create_lock(self):
+ """Creates a reentrant lock object."""
+ return RLock()
+
+ def _process_jobs(self):
+ """
+ Iterates through jobs in every jobstore, starts jobs that are due and figures out how long
+ to wait for the next round.
+
+ If the ``get_due_jobs()`` call raises an exception, a new wakeup is scheduled in at least
+ ``jobstore_retry_interval`` seconds.
+
+ """
+ if self.state == STATE_PAUSED:
+ self._logger.debug('Scheduler is paused -- not processing jobs')
+ return None
+
+ self._logger.debug('Looking for jobs to run')
+ now = datetime.now(self.timezone)
+ next_wakeup_time = None
+ events = []
+
+ with self._jobstores_lock:
+ for jobstore_alias, jobstore in six.iteritems(self._jobstores):
+ try:
+ due_jobs = jobstore.get_due_jobs(now)
+ except Exception as e:
+ # Schedule a wakeup at least in jobstore_retry_interval seconds
+ self._logger.warning('Error getting due jobs from job store %r: %s',
+ jobstore_alias, e)
+ retry_wakeup_time = now + timedelta(seconds=self.jobstore_retry_interval)
+ if not next_wakeup_time or next_wakeup_time > retry_wakeup_time:
+ next_wakeup_time = retry_wakeup_time
+
+ continue
+
+ for job in due_jobs:
+ # Look up the job's executor
+ try:
+ executor = self._lookup_executor(job.executor)
+ except BaseException:
+ self._logger.error(
+ 'Executor lookup ("%s") failed for job "%s" -- removing it from the '
+ 'job store', job.executor, job)
+ self.remove_job(job.id, jobstore_alias)
+ continue
+
+ run_times = job._get_run_times(now)
+ run_times = run_times[-1:] if run_times and job.coalesce else run_times
+ if run_times:
+ try:
+ executor.submit_job(job, run_times)
+ except MaxInstancesReachedError:
+ self._logger.warning(
+ 'Execution of job "%s" skipped: maximum number of running '
+ 'instances reached (%d)', job, job.max_instances)
+ event = JobSubmissionEvent(EVENT_JOB_MAX_INSTANCES, job.id,
+ jobstore_alias, run_times)
+ events.append(event)
+ except BaseException:
+ self._logger.exception('Error submitting job "%s" to executor "%s"',
+ job, job.executor)
+ else:
+ event = JobSubmissionEvent(EVENT_JOB_SUBMITTED, job.id, jobstore_alias,
+ run_times)
+ events.append(event)
+
+ # Update the job if it has a next execution time.
+ # Otherwise remove it from the job store.
+ job_next_run = job.trigger.get_next_fire_time(run_times[-1], now)
+ if job_next_run:
+ job._modify(next_run_time=job_next_run)
+ jobstore.update_job(job)
+ else:
+ self.remove_job(job.id, jobstore_alias)
+
+ # Set a new next wakeup time if there isn't one yet or
+ # the jobstore has an even earlier one
+ jobstore_next_run_time = jobstore.get_next_run_time()
+ if jobstore_next_run_time and (next_wakeup_time is None or
+ jobstore_next_run_time < next_wakeup_time):
+ next_wakeup_time = jobstore_next_run_time.astimezone(self.timezone)
+
+ # Dispatch collected events
+ for event in events:
+ self._dispatch_event(event)
+
+ # Determine the delay until this method should be called again
+ if self.state == STATE_PAUSED:
+ wait_seconds = None
+ self._logger.debug('Scheduler is paused; waiting until resume() is called')
+ elif next_wakeup_time is None:
+ wait_seconds = None
+ self._logger.debug('No jobs; waiting until a job is added')
+ else:
+ wait_seconds = min(max(timedelta_seconds(next_wakeup_time - now), 0), TIMEOUT_MAX)
+ self._logger.debug('Next wakeup is due at %s (in %f seconds)', next_wakeup_time,
+ wait_seconds)
+
+ return wait_seconds
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/blocking.py b/venv/Lib/site-packages/apscheduler/schedulers/blocking.py
new file mode 100644
index 00000000..e6171575
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/blocking.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+
+from threading import Event
+
+from apscheduler.schedulers.base import BaseScheduler, STATE_STOPPED
+from apscheduler.util import TIMEOUT_MAX
+
+
+class BlockingScheduler(BaseScheduler):
+ """
+ A scheduler that runs in the foreground
+ (:meth:`~apscheduler.schedulers.base.BaseScheduler.start` will block).
+ """
+ _event = None
+
+ def start(self, *args, **kwargs):
+ self._event = Event()
+ super(BlockingScheduler, self).start(*args, **kwargs)
+ self._main_loop()
+
+ def shutdown(self, wait=True):
+ super(BlockingScheduler, self).shutdown(wait)
+ self._event.set()
+
+ def _main_loop(self):
+ wait_seconds = TIMEOUT_MAX
+ while self.state != STATE_STOPPED:
+ self._event.wait(wait_seconds)
+ self._event.clear()
+ wait_seconds = self._process_jobs()
+
+ def wakeup(self):
+ self._event.set()
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/gevent.py b/venv/Lib/site-packages/apscheduler/schedulers/gevent.py
new file mode 100644
index 00000000..d48ed74a
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/gevent.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+
+from apscheduler.schedulers.blocking import BlockingScheduler
+from apscheduler.schedulers.base import BaseScheduler
+
+try:
+ from gevent.event import Event
+ from gevent.lock import RLock
+ import gevent
+except ImportError: # pragma: nocover
+ raise ImportError('GeventScheduler requires gevent installed')
+
+
+class GeventScheduler(BlockingScheduler):
+ """A scheduler that runs as a Gevent greenlet."""
+
+ _greenlet = None
+
+ def start(self, *args, **kwargs):
+ self._event = Event()
+ BaseScheduler.start(self, *args, **kwargs)
+ self._greenlet = gevent.spawn(self._main_loop)
+ return self._greenlet
+
+ def shutdown(self, *args, **kwargs):
+ super(GeventScheduler, self).shutdown(*args, **kwargs)
+ self._greenlet.join()
+ del self._greenlet
+
+ def _create_lock(self):
+ return RLock()
+
+ def _create_default_executor(self):
+ from apscheduler.executors.gevent import GeventExecutor
+ return GeventExecutor()
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/qt.py b/venv/Lib/site-packages/apscheduler/schedulers/qt.py
new file mode 100644
index 00000000..0329a000
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/qt.py
@@ -0,0 +1,43 @@
+from __future__ import absolute_import
+
+from apscheduler.schedulers.base import BaseScheduler
+
+try:
+ from PyQt5.QtCore import QObject, QTimer
+except (ImportError, RuntimeError): # pragma: nocover
+ try:
+ from PyQt4.QtCore import QObject, QTimer
+ except ImportError:
+ try:
+ from PySide.QtCore import QObject, QTimer # noqa
+ except ImportError:
+ raise ImportError('QtScheduler requires either PyQt5, PyQt4 or PySide installed')
+
+
+class QtScheduler(BaseScheduler):
+ """A scheduler that runs in a Qt event loop."""
+
+ _timer = None
+
+ def shutdown(self, *args, **kwargs):
+ super(QtScheduler, self).shutdown(*args, **kwargs)
+ self._stop_timer()
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ wait_time = min(wait_seconds * 1000, 2147483647)
+ self._timer = QTimer.singleShot(wait_time, self._process_jobs)
+
+ def _stop_timer(self):
+ if self._timer:
+ if self._timer.isActive():
+ self._timer.stop()
+ del self._timer
+
+ def wakeup(self):
+ self._start_timer(0)
+
+ def _process_jobs(self):
+ wait_seconds = super(QtScheduler, self)._process_jobs()
+ self._start_timer(wait_seconds)
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/tornado.py b/venv/Lib/site-packages/apscheduler/schedulers/tornado.py
new file mode 100644
index 00000000..0a9171f2
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/tornado.py
@@ -0,0 +1,63 @@
+from __future__ import absolute_import
+
+from datetime import timedelta
+from functools import wraps
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+try:
+ from tornado.ioloop import IOLoop
+except ImportError: # pragma: nocover
+ raise ImportError('TornadoScheduler requires tornado installed')
+
+
+def run_in_ioloop(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ self._ioloop.add_callback(func, self, *args, **kwargs)
+ return wrapper
+
+
+class TornadoScheduler(BaseScheduler):
+ """
+ A scheduler that runs on a Tornado IOLoop.
+
+ The default executor can run jobs based on native coroutines (``async def``).
+
+ =========== ===============================================================
+ ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop)
+ =========== ===============================================================
+ """
+
+ _ioloop = None
+ _timeout = None
+
+ @run_in_ioloop
+ def shutdown(self, wait=True):
+ super(TornadoScheduler, self).shutdown(wait)
+ self._stop_timer()
+
+ def _configure(self, config):
+ self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current()
+ super(TornadoScheduler, self)._configure(config)
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup)
+
+ def _stop_timer(self):
+ if self._timeout:
+ self._ioloop.remove_timeout(self._timeout)
+ del self._timeout
+
+ def _create_default_executor(self):
+ from apscheduler.executors.tornado import TornadoExecutor
+ return TornadoExecutor()
+
+ @run_in_ioloop
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
diff --git a/venv/Lib/site-packages/apscheduler/schedulers/twisted.py b/venv/Lib/site-packages/apscheduler/schedulers/twisted.py
new file mode 100644
index 00000000..6b43a84b
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/schedulers/twisted.py
@@ -0,0 +1,62 @@
+from __future__ import absolute_import
+
+from functools import wraps
+
+from apscheduler.schedulers.base import BaseScheduler
+from apscheduler.util import maybe_ref
+
+try:
+ from twisted.internet import reactor as default_reactor
+except ImportError: # pragma: nocover
+ raise ImportError('TwistedScheduler requires Twisted installed')
+
+
+def run_in_reactor(func):
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+ self._reactor.callFromThread(func, self, *args, **kwargs)
+ return wrapper
+
+
+class TwistedScheduler(BaseScheduler):
+ """
+ A scheduler that runs on a Twisted reactor.
+
+ Extra options:
+
+ =========== ========================================================
+ ``reactor`` Reactor instance to use (defaults to the global reactor)
+ =========== ========================================================
+ """
+
+ _reactor = None
+ _delayedcall = None
+
+ def _configure(self, config):
+ self._reactor = maybe_ref(config.pop('reactor', default_reactor))
+ super(TwistedScheduler, self)._configure(config)
+
+ @run_in_reactor
+ def shutdown(self, wait=True):
+ super(TwistedScheduler, self).shutdown(wait)
+ self._stop_timer()
+
+ def _start_timer(self, wait_seconds):
+ self._stop_timer()
+ if wait_seconds is not None:
+ self._delayedcall = self._reactor.callLater(wait_seconds, self.wakeup)
+
+ def _stop_timer(self):
+ if self._delayedcall and self._delayedcall.active():
+ self._delayedcall.cancel()
+ del self._delayedcall
+
+ @run_in_reactor
+ def wakeup(self):
+ self._stop_timer()
+ wait_seconds = self._process_jobs()
+ self._start_timer(wait_seconds)
+
+ def _create_default_executor(self):
+ from apscheduler.executors.twisted import TwistedExecutor
+ return TwistedExecutor()
diff --git a/venv/Lib/site-packages/apscheduler/triggers/__init__.py b/venv/Lib/site-packages/apscheduler/triggers/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/venv/Lib/site-packages/apscheduler/triggers/base.py b/venv/Lib/site-packages/apscheduler/triggers/base.py
new file mode 100644
index 00000000..ce2526a8
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/base.py
@@ -0,0 +1,48 @@
+from abc import ABCMeta, abstractmethod
+from datetime import timedelta
+import random
+
+import six
+
+
+class BaseTrigger(six.with_metaclass(ABCMeta)):
+ """Abstract base class that defines the interface that every trigger must implement."""
+
+ __slots__ = ()
+
+ @abstractmethod
+ def get_next_fire_time(self, previous_fire_time, now):
+ """
+ Returns the next datetime to fire on. If no such datetime can be calculated, returns
+ ``None``.
+
+ :param datetime.datetime previous_fire_time: the previous time the trigger was fired
+ :param datetime.datetime now: current datetime
+ """
+
+ def _apply_jitter(self, next_fire_time, jitter, now):
+ """
+ Randomize ``next_fire_time`` by adding or subtracting a random value (the jitter). If the
+ resulting datetime is in the past, returns the initial ``next_fire_time`` without jitter.
+
+ ``next_fire_time - jitter <= result <= next_fire_time + jitter``
+
+ :param datetime.datetime|None next_fire_time: next fire time without jitter applied. If
+ ``None``, returns ``None``.
+ :param int|None jitter: maximum number of seconds to add or subtract to
+ ``next_fire_time``. If ``None`` or ``0``, returns ``next_fire_time``
+ :param datetime.datetime now: current datetime
+ :return datetime.datetime|None: next fire time with a jitter.
+ """
+ if next_fire_time is None or not jitter:
+ return next_fire_time
+
+ next_fire_time_with_jitter = next_fire_time + timedelta(
+ seconds=random.uniform(-jitter, jitter))
+
+ if next_fire_time_with_jitter < now:
+ # Next fire time with jitter is in the past.
+ # Ignore jitter to avoid false misfire.
+ return next_fire_time
+
+ return next_fire_time_with_jitter
diff --git a/venv/Lib/site-packages/apscheduler/triggers/combining.py b/venv/Lib/site-packages/apscheduler/triggers/combining.py
new file mode 100644
index 00000000..64f83011
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/combining.py
@@ -0,0 +1,95 @@
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import obj_to_ref, ref_to_obj
+
+
+class BaseCombiningTrigger(BaseTrigger):
+ __slots__ = ('triggers', 'jitter')
+
+ def __init__(self, triggers, jitter=None):
+ self.triggers = triggers
+ self.jitter = jitter
+
+ def __getstate__(self):
+ return {
+ 'version': 1,
+ 'triggers': [(obj_to_ref(trigger.__class__), trigger.__getstate__())
+ for trigger in self.triggers],
+ 'jitter': self.jitter
+ }
+
+ def __setstate__(self, state):
+ if state.get('version', 1) > 1:
+ raise ValueError(
+ 'Got serialized data for version %s of %s, but only versions up to 1 can be '
+ 'handled' % (state['version'], self.__class__.__name__))
+
+ self.jitter = state['jitter']
+ self.triggers = []
+ for clsref, state in state['triggers']:
+ cls = ref_to_obj(clsref)
+ trigger = cls.__new__(cls)
+ trigger.__setstate__(state)
+ self.triggers.append(trigger)
+
+ def __repr__(self):
+ return '<{}({}{})>'.format(self.__class__.__name__, self.triggers,
+ ', jitter={}'.format(self.jitter) if self.jitter else '')
+
+
+class AndTrigger(BaseCombiningTrigger):
+ """
+ Always returns the earliest next fire time that all the given triggers can agree on.
+ The trigger is considered to be finished when any of the given triggers has finished its
+ schedule.
+
+ Trigger alias: ``and``
+
+ :param list triggers: triggers to combine
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+ """
+
+ __slots__ = ()
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ while True:
+ fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
+ for trigger in self.triggers]
+ if None in fire_times:
+ return None
+ elif min(fire_times) == max(fire_times):
+ return self._apply_jitter(fire_times[0], self.jitter, now)
+ else:
+ now = max(fire_times)
+
+ def __str__(self):
+ return 'and[{}]'.format(', '.join(str(trigger) for trigger in self.triggers))
+
+
+class OrTrigger(BaseCombiningTrigger):
+ """
+ Always returns the earliest next fire time produced by any of the given triggers.
+ The trigger is considered finished when all the given triggers have finished their schedules.
+
+ Trigger alias: ``or``
+
+ :param list triggers: triggers to combine
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+
+ .. note:: Triggers that depend on the previous fire time, such as the interval trigger, may
+ seem to behave strangely since they are always passed the previous fire time produced by
+ any of the given triggers.
+ """
+
+ __slots__ = ()
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ fire_times = [trigger.get_next_fire_time(previous_fire_time, now)
+ for trigger in self.triggers]
+ fire_times = [fire_time for fire_time in fire_times if fire_time is not None]
+ if fire_times:
+ return self._apply_jitter(min(fire_times), self.jitter, now)
+ else:
+ return None
+
+ def __str__(self):
+ return 'or[{}]'.format(', '.join(str(trigger) for trigger in self.triggers))
diff --git a/venv/Lib/site-packages/apscheduler/triggers/cron/__init__.py b/venv/Lib/site-packages/apscheduler/triggers/cron/__init__.py
new file mode 100644
index 00000000..ce675dd9
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/cron/__init__.py
@@ -0,0 +1,238 @@
+from datetime import datetime, timedelta
+
+from tzlocal import get_localzone
+import six
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.triggers.cron.fields import (
+ BaseField, MonthField, WeekField, DayOfMonthField, DayOfWeekField, DEFAULT_VALUES)
+from apscheduler.util import datetime_ceil, convert_to_datetime, datetime_repr, astimezone
+
+
+class CronTrigger(BaseTrigger):
+ """
+ Triggers when current time matches all specified time constraints,
+ similarly to how the UNIX cron scheduler works.
+
+ :param int|str year: 4-digit year
+ :param int|str month: month (1-12)
+ :param int|str day: day of month (1-31)
+ :param int|str week: ISO week (1-53)
+ :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
+ :param int|str hour: hour (0-23)
+ :param int|str minute: minute (0-59)
+ :param int|str second: second (0-59)
+ :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
+ :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (defaults
+ to scheduler timezone)
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+
+ .. note:: The first weekday is always **monday**.
+ """
+
+ FIELD_NAMES = ('year', 'month', 'day', 'week', 'day_of_week', 'hour', 'minute', 'second')
+ FIELDS_MAP = {
+ 'year': BaseField,
+ 'month': MonthField,
+ 'week': WeekField,
+ 'day': DayOfMonthField,
+ 'day_of_week': DayOfWeekField,
+ 'hour': BaseField,
+ 'minute': BaseField,
+ 'second': BaseField
+ }
+
+ __slots__ = 'timezone', 'start_date', 'end_date', 'fields', 'jitter'
+
+ def __init__(self, year=None, month=None, day=None, week=None, day_of_week=None, hour=None,
+ minute=None, second=None, start_date=None, end_date=None, timezone=None,
+ jitter=None):
+ if timezone:
+ self.timezone = astimezone(timezone)
+ elif isinstance(start_date, datetime) and start_date.tzinfo:
+ self.timezone = start_date.tzinfo
+ elif isinstance(end_date, datetime) and end_date.tzinfo:
+ self.timezone = end_date.tzinfo
+ else:
+ self.timezone = get_localzone()
+
+ self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
+ self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
+
+ self.jitter = jitter
+
+ values = dict((key, value) for (key, value) in six.iteritems(locals())
+ if key in self.FIELD_NAMES and value is not None)
+ self.fields = []
+ assign_defaults = False
+ for field_name in self.FIELD_NAMES:
+ if field_name in values:
+ exprs = values.pop(field_name)
+ is_default = False
+ assign_defaults = not values
+ elif assign_defaults:
+ exprs = DEFAULT_VALUES[field_name]
+ is_default = True
+ else:
+ exprs = '*'
+ is_default = True
+
+ field_class = self.FIELDS_MAP[field_name]
+ field = field_class(field_name, exprs, is_default)
+ self.fields.append(field)
+
+ @classmethod
+ def from_crontab(cls, expr, timezone=None):
+ """
+ Create a :class:`~CronTrigger` from a standard crontab expression.
+
+ See https://en.wikipedia.org/wiki/Cron for more information on the format accepted here.
+
+ :param expr: minute, hour, day of month, month, day of week
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations (
+ defaults to scheduler timezone)
+ :return: a :class:`~CronTrigger` instance
+
+ """
+ values = expr.split()
+ if len(values) != 5:
+ raise ValueError('Wrong number of fields; got {}, expected 5'.format(len(values)))
+
+ return cls(minute=values[0], hour=values[1], day=values[2], month=values[3],
+ day_of_week=values[4], timezone=timezone)
+
+ def _increment_field_value(self, dateval, fieldnum):
+ """
+ Increments the designated field and resets all less significant fields to their minimum
+ values.
+
+ :type dateval: datetime
+ :type fieldnum: int
+ :return: a tuple containing the new date, and the number of the field that was actually
+ incremented
+ :rtype: tuple
+ """
+
+ values = {}
+ i = 0
+ while i < len(self.fields):
+ field = self.fields[i]
+ if not field.REAL:
+ if i == fieldnum:
+ fieldnum -= 1
+ i -= 1
+ else:
+ i += 1
+ continue
+
+ if i < fieldnum:
+ values[field.name] = field.get_value(dateval)
+ i += 1
+ elif i > fieldnum:
+ values[field.name] = field.get_min(dateval)
+ i += 1
+ else:
+ value = field.get_value(dateval)
+ maxval = field.get_max(dateval)
+ if value == maxval:
+ fieldnum -= 1
+ i -= 1
+ else:
+ values[field.name] = value + 1
+ i += 1
+
+ difference = datetime(**values) - dateval.replace(tzinfo=None)
+ return self.timezone.normalize(dateval + difference), fieldnum
+
+ def _set_field_value(self, dateval, fieldnum, new_value):
+ values = {}
+ for i, field in enumerate(self.fields):
+ if field.REAL:
+ if i < fieldnum:
+ values[field.name] = field.get_value(dateval)
+ elif i > fieldnum:
+ values[field.name] = field.get_min(dateval)
+ else:
+ values[field.name] = new_value
+
+ return self.timezone.localize(datetime(**values))
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ if previous_fire_time:
+ start_date = min(now, previous_fire_time + timedelta(microseconds=1))
+ if start_date == previous_fire_time:
+ start_date += timedelta(microseconds=1)
+ else:
+ start_date = max(now, self.start_date) if self.start_date else now
+
+ fieldnum = 0
+ next_date = datetime_ceil(start_date).astimezone(self.timezone)
+ while 0 <= fieldnum < len(self.fields):
+ field = self.fields[fieldnum]
+ curr_value = field.get_value(next_date)
+ next_value = field.get_next_value(next_date)
+
+ if next_value is None:
+ # No valid value was found
+ next_date, fieldnum = self._increment_field_value(next_date, fieldnum - 1)
+ elif next_value > curr_value:
+ # A valid, but higher than the starting value, was found
+ if field.REAL:
+ next_date = self._set_field_value(next_date, fieldnum, next_value)
+ fieldnum += 1
+ else:
+ next_date, fieldnum = self._increment_field_value(next_date, fieldnum)
+ else:
+ # A valid value was found, no changes necessary
+ fieldnum += 1
+
+ # Return if the date has rolled past the end date
+ if self.end_date and next_date > self.end_date:
+ return None
+
+ if fieldnum >= 0:
+ next_date = self._apply_jitter(next_date, self.jitter, now)
+ return min(next_date, self.end_date) if self.end_date else next_date
+
+ def __getstate__(self):
+ return {
+ 'version': 2,
+ 'timezone': self.timezone,
+ 'start_date': self.start_date,
+ 'end_date': self.end_date,
+ 'fields': self.fields,
+ 'jitter': self.jitter,
+ }
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get('version', 1) > 2:
+ raise ValueError(
+ 'Got serialized data for version %s of %s, but only versions up to 2 can be '
+ 'handled' % (state['version'], self.__class__.__name__))
+
+ self.timezone = state['timezone']
+ self.start_date = state['start_date']
+ self.end_date = state['end_date']
+ self.fields = state['fields']
+ self.jitter = state.get('jitter')
+
+ def __str__(self):
+ options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
+ return 'cron[%s]' % (', '.join(options))
+
+ def __repr__(self):
+ options = ["%s='%s'" % (f.name, f) for f in self.fields if not f.is_default]
+ if self.start_date:
+ options.append("start_date=%r" % datetime_repr(self.start_date))
+ if self.end_date:
+ options.append("end_date=%r" % datetime_repr(self.end_date))
+ if self.jitter:
+ options.append('jitter=%s' % self.jitter)
+
+ return "<%s (%s, timezone='%s')>" % (
+ self.__class__.__name__, ', '.join(options), self.timezone)
diff --git a/venv/Lib/site-packages/apscheduler/triggers/cron/expressions.py b/venv/Lib/site-packages/apscheduler/triggers/cron/expressions.py
new file mode 100644
index 00000000..55a37167
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/cron/expressions.py
@@ -0,0 +1,251 @@
+"""This module contains the expressions applicable for CronTrigger's fields."""
+
+from calendar import monthrange
+import re
+
+from apscheduler.util import asint
+
+__all__ = ('AllExpression', 'RangeExpression', 'WeekdayRangeExpression',
+ 'WeekdayPositionExpression', 'LastDayOfMonthExpression')
+
+
+WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+MONTHS = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
+
+
+class AllExpression(object):
+ value_re = re.compile(r'\*(?:/(?P<step>\d+))?$')
+
+ def __init__(self, step=None):
+ self.step = asint(step)
+ if self.step == 0:
+ raise ValueError('Increment must be higher than 0')
+
+ def validate_range(self, field_name):
+ from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
+
+ value_range = MAX_VALUES[field_name] - MIN_VALUES[field_name]
+ if self.step and self.step > value_range:
+ raise ValueError('the step value ({}) is higher than the total range of the '
+ 'expression ({})'.format(self.step, value_range))
+
+ def get_next_value(self, date, field):
+ start = field.get_value(date)
+ minval = field.get_min(date)
+ maxval = field.get_max(date)
+ start = max(start, minval)
+
+ if not self.step:
+ next = start
+ else:
+ distance_to_next = (self.step - (start - minval)) % self.step
+ next = start + distance_to_next
+
+ if next <= maxval:
+ return next
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.step == other.step
+
+ def __str__(self):
+ if self.step:
+ return '*/%d' % self.step
+ return '*'
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, self.step)
+
+
+class RangeExpression(AllExpression):
+ value_re = re.compile(
+ r'(?P<first>\d+)(?:-(?P<last>\d+))?(?:/(?P<step>\d+))?$')
+
+ def __init__(self, first, last=None, step=None):
+ super(RangeExpression, self).__init__(step)
+ first = asint(first)
+ last = asint(last)
+ if last is None and step is None:
+ last = first
+ if last is not None and first > last:
+ raise ValueError('The minimum value in a range must not be higher than the maximum')
+ self.first = first
+ self.last = last
+
+ def validate_range(self, field_name):
+ from apscheduler.triggers.cron.fields import MIN_VALUES, MAX_VALUES
+
+ super(RangeExpression, self).validate_range(field_name)
+ if self.first < MIN_VALUES[field_name]:
+ raise ValueError('the first value ({}) is lower than the minimum value ({})'
+ .format(self.first, MIN_VALUES[field_name]))
+ if self.last is not None and self.last > MAX_VALUES[field_name]:
+ raise ValueError('the last value ({}) is higher than the maximum value ({})'
+ .format(self.last, MAX_VALUES[field_name]))
+ value_range = (self.last or MAX_VALUES[field_name]) - self.first
+ if self.step and self.step > value_range:
+ raise ValueError('the step value ({}) is higher than the total range of the '
+ 'expression ({})'.format(self.step, value_range))
+
+ def get_next_value(self, date, field):
+ startval = field.get_value(date)
+ minval = field.get_min(date)
+ maxval = field.get_max(date)
+
+ # Apply range limits
+ minval = max(minval, self.first)
+ maxval = min(maxval, self.last) if self.last is not None else maxval
+ nextval = max(minval, startval)
+
+ # Apply the step if defined
+ if self.step:
+ distance_to_next = (self.step - (nextval - minval)) % self.step
+ nextval += distance_to_next
+
+ return nextval if nextval <= maxval else None
+
+ def __eq__(self, other):
+ return (isinstance(other, self.__class__) and self.first == other.first and
+ self.last == other.last)
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ range = '%d-%d' % (self.first, self.last)
+ else:
+ range = str(self.first)
+
+ if self.step:
+ return '%s/%d' % (range, self.step)
+ return range
+
+ def __repr__(self):
+ args = [str(self.first)]
+ if self.last != self.first and self.last is not None or self.step:
+ args.append(str(self.last))
+ if self.step:
+ args.append(str(self.step))
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
+
+
+class MonthRangeExpression(RangeExpression):
+ value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
+
+ def __init__(self, first, last=None):
+ try:
+ first_num = MONTHS.index(first.lower()) + 1
+ except ValueError:
+ raise ValueError('Invalid month name "%s"' % first)
+
+ if last:
+ try:
+ last_num = MONTHS.index(last.lower()) + 1
+ except ValueError:
+ raise ValueError('Invalid month name "%s"' % last)
+ else:
+ last_num = None
+
+ super(MonthRangeExpression, self).__init__(first_num, last_num)
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ return '%s-%s' % (MONTHS[self.first - 1], MONTHS[self.last - 1])
+ return MONTHS[self.first - 1]
+
+ def __repr__(self):
+ args = ["'%s'" % MONTHS[self.first]]
+ if self.last != self.first and self.last is not None:
+ args.append("'%s'" % MONTHS[self.last - 1])
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
+
+
+class WeekdayRangeExpression(RangeExpression):
+ value_re = re.compile(r'(?P<first>[a-z]+)(?:-(?P<last>[a-z]+))?', re.IGNORECASE)
+
+ def __init__(self, first, last=None):
+ try:
+ first_num = WEEKDAYS.index(first.lower())
+ except ValueError:
+ raise ValueError('Invalid weekday name "%s"' % first)
+
+ if last:
+ try:
+ last_num = WEEKDAYS.index(last.lower())
+ except ValueError:
+ raise ValueError('Invalid weekday name "%s"' % last)
+ else:
+ last_num = None
+
+ super(WeekdayRangeExpression, self).__init__(first_num, last_num)
+
+ def __str__(self):
+ if self.last != self.first and self.last is not None:
+ return '%s-%s' % (WEEKDAYS[self.first], WEEKDAYS[self.last])
+ return WEEKDAYS[self.first]
+
+ def __repr__(self):
+ args = ["'%s'" % WEEKDAYS[self.first]]
+ if self.last != self.first and self.last is not None:
+ args.append("'%s'" % WEEKDAYS[self.last])
+ return "%s(%s)" % (self.__class__.__name__, ', '.join(args))
+
+
+class WeekdayPositionExpression(AllExpression):
+ options = ['1st', '2nd', '3rd', '4th', '5th', 'last']
+ value_re = re.compile(r'(?P<option_name>%s) +(?P<weekday_name>(?:\d+|\w+))' %
+ '|'.join(options), re.IGNORECASE)
+
+ def __init__(self, option_name, weekday_name):
+ super(WeekdayPositionExpression, self).__init__(None)
+ try:
+ self.option_num = self.options.index(option_name.lower())
+ except ValueError:
+ raise ValueError('Invalid weekday position "%s"' % option_name)
+
+ try:
+ self.weekday = WEEKDAYS.index(weekday_name.lower())
+ except ValueError:
+ raise ValueError('Invalid weekday name "%s"' % weekday_name)
+
+ def get_next_value(self, date, field):
+ # Figure out the weekday of the month's first day and the number of days in that month
+ first_day_wday, last_day = monthrange(date.year, date.month)
+
+ # Calculate which day of the month is the first of the target weekdays
+ first_hit_day = self.weekday - first_day_wday + 1
+ if first_hit_day <= 0:
+ first_hit_day += 7
+
+ # Calculate what day of the month the target weekday would be
+ if self.option_num < 5:
+ target_day = first_hit_day + self.option_num * 7
+ else:
+ target_day = first_hit_day + ((last_day - first_hit_day) // 7) * 7
+
+ if target_day <= last_day and target_day >= date.day:
+ return target_day
+
+ def __eq__(self, other):
+ return (super(WeekdayPositionExpression, self).__eq__(other) and
+ self.option_num == other.option_num and self.weekday == other.weekday)
+
+ def __str__(self):
+ return '%s %s' % (self.options[self.option_num], WEEKDAYS[self.weekday])
+
+ def __repr__(self):
+ return "%s('%s', '%s')" % (self.__class__.__name__, self.options[self.option_num],
+ WEEKDAYS[self.weekday])
+
+
+class LastDayOfMonthExpression(AllExpression):
+ value_re = re.compile(r'last', re.IGNORECASE)
+
+ def __init__(self):
+ super(LastDayOfMonthExpression, self).__init__(None)
+
+ def get_next_value(self, date, field):
+ return monthrange(date.year, date.month)[1]
+
+ def __str__(self):
+ return 'last'
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
diff --git a/venv/Lib/site-packages/apscheduler/triggers/cron/fields.py b/venv/Lib/site-packages/apscheduler/triggers/cron/fields.py
new file mode 100644
index 00000000..86d620c4
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/cron/fields.py
@@ -0,0 +1,111 @@
+"""Fields represent CronTrigger options which map to :class:`~datetime.datetime` fields."""
+
+from calendar import monthrange
+import re
+
+import six
+
+from apscheduler.triggers.cron.expressions import (
+ AllExpression, RangeExpression, WeekdayPositionExpression, LastDayOfMonthExpression,
+ WeekdayRangeExpression, MonthRangeExpression)
+
+
+__all__ = ('MIN_VALUES', 'MAX_VALUES', 'DEFAULT_VALUES', 'BaseField', 'WeekField',
+ 'DayOfMonthField', 'DayOfWeekField')
+
+
+MIN_VALUES = {'year': 1970, 'month': 1, 'day': 1, 'week': 1, 'day_of_week': 0, 'hour': 0,
+ 'minute': 0, 'second': 0}
+MAX_VALUES = {'year': 9999, 'month': 12, 'day': 31, 'week': 53, 'day_of_week': 6, 'hour': 23,
+ 'minute': 59, 'second': 59}
+DEFAULT_VALUES = {'year': '*', 'month': 1, 'day': 1, 'week': '*', 'day_of_week': '*', 'hour': 0,
+ 'minute': 0, 'second': 0}
+SEPARATOR = re.compile(' *, *')
+
+
+class BaseField(object):
+ REAL = True
+ COMPILERS = [AllExpression, RangeExpression]
+
+ def __init__(self, name, exprs, is_default=False):
+ self.name = name
+ self.is_default = is_default
+ self.compile_expressions(exprs)
+
+ def get_min(self, dateval):
+ return MIN_VALUES[self.name]
+
+ def get_max(self, dateval):
+ return MAX_VALUES[self.name]
+
+ def get_value(self, dateval):
+ return getattr(dateval, self.name)
+
+ def get_next_value(self, dateval):
+ smallest = None
+ for expr in self.expressions:
+ value = expr.get_next_value(dateval, self)
+ if smallest is None or (value is not None and value < smallest):
+ smallest = value
+
+ return smallest
+
+ def compile_expressions(self, exprs):
+ self.expressions = []
+
+ # Split a comma-separated expression list, if any
+ for expr in SEPARATOR.split(str(exprs).strip()):
+ self.compile_expression(expr)
+
+ def compile_expression(self, expr):
+ for compiler in self.COMPILERS:
+ match = compiler.value_re.match(expr)
+ if match:
+ compiled_expr = compiler(**match.groupdict())
+
+ try:
+ compiled_expr.validate_range(self.name)
+ except ValueError as e:
+ exc = ValueError('Error validating expression {!r}: {}'.format(expr, e))
+ six.raise_from(exc, None)
+
+ self.expressions.append(compiled_expr)
+ return
+
+ raise ValueError('Unrecognized expression "%s" for field "%s"' % (expr, self.name))
+
+ def __eq__(self, other):
+ return isinstance(self, self.__class__) and self.expressions == other.expressions
+
+ def __str__(self):
+ expr_strings = (str(e) for e in self.expressions)
+ return ','.join(expr_strings)
+
+ def __repr__(self):
+ return "%s('%s', '%s')" % (self.__class__.__name__, self.name, self)
+
+
+class WeekField(BaseField):
+ REAL = False
+
+ def get_value(self, dateval):
+ return dateval.isocalendar()[1]
+
+
+class DayOfMonthField(BaseField):
+ COMPILERS = BaseField.COMPILERS + [WeekdayPositionExpression, LastDayOfMonthExpression]
+
+ def get_max(self, dateval):
+ return monthrange(dateval.year, dateval.month)[1]
+
+
+class DayOfWeekField(BaseField):
+ REAL = False
+ COMPILERS = BaseField.COMPILERS + [WeekdayRangeExpression]
+
+ def get_value(self, dateval):
+ return dateval.weekday()
+
+
+class MonthField(BaseField):
+ COMPILERS = BaseField.COMPILERS + [MonthRangeExpression]
diff --git a/venv/Lib/site-packages/apscheduler/triggers/date.py b/venv/Lib/site-packages/apscheduler/triggers/date.py
new file mode 100644
index 00000000..07681008
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/date.py
@@ -0,0 +1,51 @@
+from datetime import datetime
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import convert_to_datetime, datetime_repr, astimezone
+
+
+class DateTrigger(BaseTrigger):
+ """
+ Triggers once on the given datetime. If ``run_date`` is left empty, current time is used.
+
+ :param datetime|str run_date: the date/time to run the job at
+ :param datetime.tzinfo|str timezone: time zone for ``run_date`` if it doesn't have one already
+ """
+
+ __slots__ = 'run_date'
+
+ def __init__(self, run_date=None, timezone=None):
+ timezone = astimezone(timezone) or get_localzone()
+ if run_date is not None:
+ self.run_date = convert_to_datetime(run_date, timezone, 'run_date')
+ else:
+ self.run_date = datetime.now(timezone)
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ return self.run_date if previous_fire_time is None else None
+
+ def __getstate__(self):
+ return {
+ 'version': 1,
+ 'run_date': self.run_date
+ }
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get('version', 1) > 1:
+ raise ValueError(
+ 'Got serialized data for version %s of %s, but only version 1 can be handled' %
+ (state['version'], self.__class__.__name__))
+
+ self.run_date = state['run_date']
+
+ def __str__(self):
+ return 'date[%s]' % datetime_repr(self.run_date)
+
+ def __repr__(self):
+ return "<%s (run_date='%s')>" % (self.__class__.__name__, datetime_repr(self.run_date))
diff --git a/venv/Lib/site-packages/apscheduler/triggers/interval.py b/venv/Lib/site-packages/apscheduler/triggers/interval.py
new file mode 100644
index 00000000..831ba383
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/triggers/interval.py
@@ -0,0 +1,106 @@
+from datetime import timedelta, datetime
+from math import ceil
+
+from tzlocal import get_localzone
+
+from apscheduler.triggers.base import BaseTrigger
+from apscheduler.util import convert_to_datetime, timedelta_seconds, datetime_repr, astimezone
+
+
+class IntervalTrigger(BaseTrigger):
+ """
+ Triggers on specified intervals, starting on ``start_date`` if specified, ``datetime.now()`` +
+ interval otherwise.
+
+ :param int weeks: number of weeks to wait
+ :param int days: number of days to wait
+ :param int hours: number of hours to wait
+ :param int minutes: number of minutes to wait
+ :param int seconds: number of seconds to wait
+ :param datetime|str start_date: starting point for the interval calculation
+ :param datetime|str end_date: latest possible date/time to trigger on
+ :param datetime.tzinfo|str timezone: time zone to use for the date/time calculations
+ :param int|None jitter: advance or delay the job execution by ``jitter`` seconds at most.
+ """
+
+ __slots__ = 'timezone', 'start_date', 'end_date', 'interval', 'interval_length', 'jitter'
+
+ def __init__(self, weeks=0, days=0, hours=0, minutes=0, seconds=0, start_date=None,
+ end_date=None, timezone=None, jitter=None):
+ self.interval = timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes,
+ seconds=seconds)
+ self.interval_length = timedelta_seconds(self.interval)
+ if self.interval_length == 0:
+ self.interval = timedelta(seconds=1)
+ self.interval_length = 1
+
+ if timezone:
+ self.timezone = astimezone(timezone)
+ elif isinstance(start_date, datetime) and start_date.tzinfo:
+ self.timezone = start_date.tzinfo
+ elif isinstance(end_date, datetime) and end_date.tzinfo:
+ self.timezone = end_date.tzinfo
+ else:
+ self.timezone = get_localzone()
+
+ start_date = start_date or (datetime.now(self.timezone) + self.interval)
+ self.start_date = convert_to_datetime(start_date, self.timezone, 'start_date')
+ self.end_date = convert_to_datetime(end_date, self.timezone, 'end_date')
+
+ self.jitter = jitter
+
+ def get_next_fire_time(self, previous_fire_time, now):
+ if previous_fire_time:
+ next_fire_time = previous_fire_time + self.interval
+ elif self.start_date > now:
+ next_fire_time = self.start_date
+ else:
+ timediff_seconds = timedelta_seconds(now - self.start_date)
+ next_interval_num = int(ceil(timediff_seconds / self.interval_length))
+ next_fire_time = self.start_date + self.interval * next_interval_num
+
+ if self.jitter is not None:
+ next_fire_time = self._apply_jitter(next_fire_time, self.jitter, now)
+
+ if not self.end_date or next_fire_time <= self.end_date:
+ return self.timezone.normalize(next_fire_time)
+
+ def __getstate__(self):
+ return {
+ 'version': 2,
+ 'timezone': self.timezone,
+ 'start_date': self.start_date,
+ 'end_date': self.end_date,
+ 'interval': self.interval,
+ 'jitter': self.jitter,
+ }
+
+ def __setstate__(self, state):
+ # This is for compatibility with APScheduler 3.0.x
+ if isinstance(state, tuple):
+ state = state[1]
+
+ if state.get('version', 1) > 2:
+ raise ValueError(
+ 'Got serialized data for version %s of %s, but only versions up to 2 can be '
+ 'handled' % (state['version'], self.__class__.__name__))
+
+ self.timezone = state['timezone']
+ self.start_date = state['start_date']
+ self.end_date = state['end_date']
+ self.interval = state['interval']
+ self.interval_length = timedelta_seconds(self.interval)
+ self.jitter = state.get('jitter')
+
+ def __str__(self):
+ return 'interval[%s]' % str(self.interval)
+
+ def __repr__(self):
+ options = ['interval=%r' % self.interval, 'start_date=%r' % datetime_repr(self.start_date)]
+ if self.end_date:
+ options.append("end_date=%r" % datetime_repr(self.end_date))
+ if self.jitter:
+ options.append('jitter=%s' % self.jitter)
+
+ return "<%s (%s, timezone='%s')>" % (
+ self.__class__.__name__, ', '.join(options), self.timezone)
diff --git a/venv/Lib/site-packages/apscheduler/util.py b/venv/Lib/site-packages/apscheduler/util.py
new file mode 100644
index 00000000..8b7b3f5e
--- /dev/null
+++ b/venv/Lib/site-packages/apscheduler/util.py
@@ -0,0 +1,429 @@
+"""This module contains several handy functions primarily meant for internal use."""
+
+from __future__ import division
+
+from datetime import date, datetime, time, timedelta, tzinfo
+from calendar import timegm
+from functools import partial
+from inspect import isclass, ismethod
+import re
+
+from pytz import timezone, utc, FixedOffset
+import six
+
+try:
+ from inspect import signature
+except ImportError: # pragma: nocover
+ from funcsigs import signature
+
+try:
+ from threading import TIMEOUT_MAX
+except ImportError:
+ TIMEOUT_MAX = 4294967 # Maximum value accepted by Event.wait() on Windows
+
+try:
+ from asyncio import iscoroutinefunction
+except ImportError:
+ try:
+ from trollius import iscoroutinefunction
+ except ImportError:
+ def iscoroutinefunction(func):
+ return False
+
+__all__ = ('asint', 'asbool', 'astimezone', 'convert_to_datetime', 'datetime_to_utc_timestamp',
+ 'utc_timestamp_to_datetime', 'timedelta_seconds', 'datetime_ceil', 'get_callable_name',
+ 'obj_to_ref', 'ref_to_obj', 'maybe_ref', 'repr_escape', 'check_callable_args',
+ 'TIMEOUT_MAX')
+
+
+class _Undefined(object):
+ def __nonzero__(self):
+ return False
+
+ def __bool__(self):
+ return False
+
+ def __repr__(self):
+ return ''
+
+
+undefined = _Undefined() #: a unique object that only signifies that no value is defined
+
+
+def asint(text):
+ """
+ Safely converts a string to an integer, returning ``None`` if the string is ``None``.
+
+ :type text: str
+ :rtype: int
+
+ """
+ if text is not None:
+ return int(text)
+
+
+def asbool(obj):
+ """
+ Interprets an object as a boolean value.
+
+ :rtype: bool
+
+ """
+ if isinstance(obj, str):
+ obj = obj.strip().lower()
+ if obj in ('true', 'yes', 'on', 'y', 't', '1'):
+ return True
+ if obj in ('false', 'no', 'off', 'n', 'f', '0'):
+ return False
+ raise ValueError('Unable to interpret value "%s" as boolean' % obj)
+ return bool(obj)
+
+
+def astimezone(obj):
+ """
+ Interprets an object as a timezone.
+
+ :rtype: tzinfo
+
+ """
+ if isinstance(obj, six.string_types):
+ return timezone(obj)
+ if isinstance(obj, tzinfo):
+ if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'):
+ raise TypeError('Only timezones from the pytz library are supported')
+ if obj.zone == 'local':
+ raise ValueError(
+ 'Unable to determine the name of the local timezone -- you must explicitly '
+ 'specify the name of the local timezone. Please refrain from using timezones like '
+ 'EST to prevent problems with daylight saving time. Instead, use a locale based '
+ 'timezone name (such as Europe/Helsinki).')
+ return obj
+ if obj is not None:
+ raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)
+
+
+_DATE_REGEX = re.compile(
+ r'(?P\d{4})-(?P\d{1,2})-(?P\d{1,2})'
+ r'(?:[ T](?P\d{1,2}):(?P\d{1,2}):(?P\d{1,2})'
+ r'(?:\.(?P\d{1,6}))?'
+ r'(?PZ|[+-]\d\d:\d\d)?)?$')
+
+
+def convert_to_datetime(input, tz, arg_name):
+ """
+ Converts the given object to a timezone aware datetime object.
+
+ If a timezone aware datetime object is passed, it is returned unmodified.
+ If a native datetime object is passed, it is given the specified timezone.
+ If the input is a string, it is parsed as a datetime with the given timezone.
+
+ Date strings are accepted in three different forms: date only (Y-m-d), date with time
+ (Y-m-d H:M:S) or with date+time with microseconds (Y-m-d H:M:S.micro). Additionally you can
+ override the time zone by giving a specific offset in the format specified by ISO 8601:
+ Z (UTC), +HH:MM or -HH:MM.
+
+ :param str|datetime input: the datetime or string to convert to a timezone aware datetime
+ :param datetime.tzinfo tz: timezone to interpret ``input`` in
+ :param str arg_name: the name of the argument (used in an error message)
+ :rtype: datetime
+
+ """
+ if input is None:
+ return
+ elif isinstance(input, datetime):
+ datetime_ = input
+ elif isinstance(input, date):
+ datetime_ = datetime.combine(input, time())
+ elif isinstance(input, six.string_types):
+ m = _DATE_REGEX.match(input)
+ if not m:
+ raise ValueError('Invalid date string')
+
+ values = m.groupdict()
+ tzname = values.pop('timezone')
+ if tzname == 'Z':
+ tz = utc
+ elif tzname:
+ hours, minutes = (int(x) for x in tzname[1:].split(':'))
+ sign = 1 if tzname[0] == '+' else -1
+ tz = FixedOffset(sign * (hours * 60 + minutes))
+
+ values = {k: int(v or 0) for k, v in values.items()}
+ datetime_ = datetime(**values)
+ else:
+ raise TypeError('Unsupported type for %s: %s' % (arg_name, input.__class__.__name__))
+
+ if datetime_.tzinfo is not None:
+ return datetime_
+ if tz is None:
+ raise ValueError(
+ 'The "tz" argument must be specified if %s has no timezone information' % arg_name)
+ if isinstance(tz, six.string_types):
+ tz = timezone(tz)
+
+ try:
+ return tz.localize(datetime_, is_dst=None)
+ except AttributeError:
+ raise TypeError(
+ 'Only pytz timezones are supported (need the localize() and normalize() methods)')
+
+
+def datetime_to_utc_timestamp(timeval):
+ """
+ Converts a datetime instance to a timestamp.
+
+ :type timeval: datetime
+ :rtype: float
+
+ """
+ if timeval is not None:
+ return timegm(timeval.utctimetuple()) + timeval.microsecond / 1000000
+
+
+def utc_timestamp_to_datetime(timestamp):
+ """
+ Converts the given timestamp to a datetime instance.
+
+ :type timestamp: float
+ :rtype: datetime
+
+ """
+ if timestamp is not None:
+ return datetime.fromtimestamp(timestamp, utc)
+
+
+def timedelta_seconds(delta):
+ """
+ Converts the given timedelta to seconds.
+
+ :type delta: timedelta
+ :rtype: float
+
+ """
+ return delta.days * 24 * 60 * 60 + delta.seconds + \
+ delta.microseconds / 1000000.0
+
+
+def datetime_ceil(dateval):
+ """
+ Rounds the given datetime object upwards.
+
+ :type dateval: datetime
+
+ """
+ if dateval.microsecond > 0:
+ return dateval + timedelta(seconds=1, microseconds=-dateval.microsecond)
+ return dateval
+
+
+def datetime_repr(dateval):
+ return dateval.strftime('%Y-%m-%d %H:%M:%S %Z') if dateval else 'None'
+
+
+def get_callable_name(func):
+ """
+ Returns the best available display name for the given function/callable.
+
+ :rtype: str
+
+ """
+ # the easy case (on Python 3.3+)
+ if hasattr(func, '__qualname__'):
+ return func.__qualname__
+
+ # class methods, bound and unbound methods
+ f_self = getattr(func, '__self__', None) or getattr(func, 'im_self', None)
+ if f_self and hasattr(func, '__name__'):
+ f_class = f_self if isclass(f_self) else f_self.__class__
+ else:
+ f_class = getattr(func, 'im_class', None)
+
+ if f_class and hasattr(func, '__name__'):
+ return '%s.%s' % (f_class.__name__, func.__name__)
+
+ # class or class instance
+ if hasattr(func, '__call__'):
+ # class
+ if hasattr(func, '__name__'):
+ return func.__name__
+
+ # instance of a class with a __call__ method
+ return func.__class__.__name__
+
+ raise TypeError('Unable to determine a name for %r -- maybe it is not a callable?' % func)
+
+
+def obj_to_ref(obj):
+ """
+ Returns the path to the given callable.
+
+ :rtype: str
+ :raises TypeError: if the given object is not callable
+ :raises ValueError: if the given object is a :class:`~functools.partial`, lambda or a nested
+ function
+
+ """
+ if isinstance(obj, partial):
+ raise ValueError('Cannot create a reference to a partial()')
+
+ name = get_callable_name(obj)
+ if '' in name:
+ raise ValueError('Cannot create a reference to a lambda')
+ if '' in name:
+ raise ValueError('Cannot create a reference to a nested function')
+
+ if ismethod(obj):
+ if hasattr(obj, 'im_self') and obj.im_self:
+ # bound method
+ module = obj.im_self.__module__
+ elif hasattr(obj, 'im_class') and obj.im_class:
+ # unbound method
+ module = obj.im_class.__module__
+ else:
+ module = obj.__module__
+ else:
+ module = obj.__module__
+ return '%s:%s' % (module, name)
+
+
+def ref_to_obj(ref):
+ """
+ Returns the object pointed to by ``ref``.
+
+ :type ref: str
+
+ """
+ if not isinstance(ref, six.string_types):
+ raise TypeError('References must be strings')
+ if ':' not in ref:
+ raise ValueError('Invalid reference')
+
+ modulename, rest = ref.split(':', 1)
+ try:
+ obj = __import__(modulename, fromlist=[rest])
+ except ImportError:
+ raise LookupError('Error resolving reference %s: could not import module' % ref)
+
+ try:
+ for name in rest.split('.'):
+ obj = getattr(obj, name)
+ return obj
+ except Exception:
+ raise LookupError('Error resolving reference %s: error looking up object' % ref)
+
+
+def maybe_ref(ref):
+ """
+ Returns the object that the given reference points to, if it is indeed a reference.
+ If it is not a reference, the object is returned as-is.
+
+ """
+ if not isinstance(ref, str):
+ return ref
+ return ref_to_obj(ref)
+
+
+if six.PY2:
+ def repr_escape(string):
+ if isinstance(string, six.text_type):
+ return string.encode('ascii', 'backslashreplace')
+ return string
+else:
+ def repr_escape(string):
+ return string
+
+
+def check_callable_args(func, args, kwargs):
+ """
+ Ensures that the given callable can be called with the given arguments.
+
+ :type args: tuple
+ :type kwargs: dict
+
+ """
+ pos_kwargs_conflicts = [] # parameters that have a match in both args and kwargs
+ positional_only_kwargs = [] # positional-only parameters that have a match in kwargs
+ unsatisfied_args = [] # parameters in signature that don't have a match in args or kwargs
+ unsatisfied_kwargs = [] # keyword-only arguments that don't have a match in kwargs
+ unmatched_args = list(args) # args that didn't match any of the parameters in the signature
+ # kwargs that didn't match any of the parameters in the signature
+ unmatched_kwargs = list(kwargs)
+ # indicates if the signature defines *args and **kwargs respectively
+ has_varargs = has_var_kwargs = False
+
+ try:
+ sig = signature(func)
+ except ValueError:
+ # signature() doesn't work against every kind of callable
+ return
+
+ for param in six.itervalues(sig.parameters):
+ if param.kind == param.POSITIONAL_OR_KEYWORD:
+ if param.name in unmatched_kwargs and unmatched_args:
+ pos_kwargs_conflicts.append(param.name)
+ elif unmatched_args:
+ del unmatched_args[0]
+ elif param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ elif param.default is param.empty:
+ unsatisfied_args.append(param.name)
+ elif param.kind == param.POSITIONAL_ONLY:
+ if unmatched_args:
+ del unmatched_args[0]
+ elif param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ positional_only_kwargs.append(param.name)
+ elif param.default is param.empty:
+ unsatisfied_args.append(param.name)
+ elif param.kind == param.KEYWORD_ONLY:
+ if param.name in unmatched_kwargs:
+ unmatched_kwargs.remove(param.name)
+ elif param.default is param.empty:
+ unsatisfied_kwargs.append(param.name)
+ elif param.kind == param.VAR_POSITIONAL:
+ has_varargs = True
+ elif param.kind == param.VAR_KEYWORD:
+ has_var_kwargs = True
+
+ # Make sure there are no conflicts between args and kwargs
+ if pos_kwargs_conflicts:
+ raise ValueError('The following arguments are supplied in both args and kwargs: %s' %
+ ', '.join(pos_kwargs_conflicts))
+
+ # Check if keyword arguments are being fed to positional-only parameters
+ if positional_only_kwargs:
+ raise ValueError('The following arguments cannot be given as keyword arguments: %s' %
+ ', '.join(positional_only_kwargs))
+
+ # Check that the number of positional arguments minus the number of matched kwargs matches the
+ # argspec
+ if unsatisfied_args:
+ raise ValueError('The following arguments have not been supplied: %s' %
+ ', '.join(unsatisfied_args))
+
+ # Check that all keyword-only arguments have been supplied
+ if unsatisfied_kwargs:
+ raise ValueError(
+ 'The following keyword-only arguments have not been supplied in kwargs: %s' %
+ ', '.join(unsatisfied_kwargs))
+
+ # Check that the callable can accept the given number of positional arguments
+ if not has_varargs and unmatched_args:
+ raise ValueError(
+ 'The list of positional arguments is longer than the target callable can handle '
+ '(allowed: %d, given in args: %d)' % (len(args) - len(unmatched_args), len(args)))
+
+ # Check that the callable can accept the given keyword arguments
+ if not has_var_kwargs and unmatched_kwargs:
+ raise ValueError(
+ 'The target callable does not accept the following keyword arguments: %s' %
+ ', '.join(unmatched_kwargs))
+
+
+def iscoroutinefunction_partial(f):
+ while isinstance(f, partial):
+ f = f.func
+
+ # The asyncio version of iscoroutinefunction includes testing for @coroutine
+ # decorations vs. the inspect version which does not.
+ return iscoroutinefunction(f)
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/INSTALLER b/venv/Lib/site-packages/tzlocal-2.1.dist-info/INSTALLER
new file mode 100644
index 00000000..a1b589e3
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/LICENSE.txt b/venv/Lib/site-packages/tzlocal-2.1.dist-info/LICENSE.txt
new file mode 100644
index 00000000..9be1d2fe
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright 2011-2017 Lennart Regebro
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/METADATA b/venv/Lib/site-packages/tzlocal-2.1.dist-info/METADATA
new file mode 100644
index 00000000..7bafd427
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/METADATA
@@ -0,0 +1,326 @@
+Metadata-Version: 2.1
+Name: tzlocal
+Version: 2.1
+Summary: tzinfo object for the local timezone
+Home-page: https://github.com/regebro/tzlocal
+Author: Lennart Regebro
+Author-email: regebro@gmail.com
+License: MIT
+Keywords: timezone pytz
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: Unix
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Requires-Dist: pytz
+
+tzlocal
+=======
+
+This Python module returns a ``tzinfo`` object with the local timezone information under Unix and Win-32.
+It requires ``pytz``, and returns ``pytz`` ``tzinfo`` objects.
+
+This module attempts to fix a glaring hole in ``pytz``, that there is no way to
+get the local timezone information, unless you know the zoneinfo name, and
+under several Linux distros that's hard or impossible to figure out.
+
+Also, with Windows different timezone system using pytz isn't of much use
+unless you separately configure the zoneinfo timezone name.
+
+With ``tzlocal`` you only need to call ``get_localzone()`` and you will get a
+``tzinfo`` object with the local time zone info. On some Unices you will still
+not get to know what the timezone name is, but you don't need that when you
+have the tzinfo file. However, if the timezone name is readily available it
+will be used.
+
+
+Supported systems
+-----------------
+
+These are the systems that are in theory supported:
+
+ * Windows 2000 and later
+
+ * Any unix-like system with a ``/etc/localtime`` or ``/usr/local/etc/localtime``
+
+If you have one of the above systems and it does not work, it's a bug.
+Please report it.
+
+Please note that if you getting a time zone called ``local``, this is not a bug, it's
+actually the main feature of ``tzlocal``, that even if your system does NOT have a configuration file
+with the zoneinfo name of your time zone, it will still work.
+
+You can also use ``tzlocal`` to get the name of your local timezone, but only if your system is
+configured to make that possible. ``tzlocal`` looks for the timezone name in ``/etc/timezone``, ``/var/db/zoneinfo``,
+``/etc/sysconfig/clock`` and ``/etc/conf.d/clock``. If your ``/etc/localtime`` is a symlink it can also extract the
+name from that symlink.
+
+If you need the name of your local time zone, then please make sure your system is properly configured to allow that.
+If it isn't configured, tzlocal will default to UTC.
+
+Usage
+-----
+
+Load the local timezone:
+
+ >>> from tzlocal import get_localzone
+ >>> tz = get_localzone()
+ >>> tz
+ <DstTzInfo 'Europe/Warsaw' WMT+1:24:00 STD>
+
+Create a local datetime:
+
+ >>> from datetime import datetime
+ >>> dt = tz.localize(datetime(2015, 4, 10, 7, 22))
+ >>> dt
+ datetime.datetime(2015, 4, 10, 7, 22, tzinfo=<DstTzInfo 'Europe/Warsaw' CEST+2:00:00 DST>)
+
+Lookup another timezone with `pytz`:
+
+ >>> import pytz
+ >>> eastern = pytz.timezone('US/Eastern')
+
+Convert the datetime:
+
+ >>> dt.astimezone(eastern)
+ datetime.datetime(2015, 4, 10, 1, 22, tzinfo=<DstTzInfo 'US/Eastern' EDT-1 day, 20:00:00 DST>)
+
+
+Maintainer
+----------
+
+* Lennart Regebro, regebro@gmail.com
+
+Contributors
+------------
+
+* Marc Van Olmen
+* Benjamen Meyer
+* Manuel Ebert
+* Xiaokun Zhu
+* Cameris
+* Edward Betts
+* McK KIM
+* Cris Ewing
+* Ayala Shachar
+* Lev Maximov
+* Jakub Wilk
+* John Quarles
+* Preston Landers
+* Victor Torres
+* Jean Jordaan
+* Zackary Welch
+* Mickaël Schoentgen
+* Gabriel Corona
+
+(Sorry if I forgot someone)
+
+License
+-------
+
+* MIT https://opensource.org/licenses/MIT
+
+
+Changes
+=======
+
+2.1 (2020-05-08)
+----------------
+
+- No changes.
+
+
+2.1b1 (2020-02-08)
+------------------
+
+- The is_dst flag is wrong for Europe/Dublin on some Unix releases.
+ I changed to another way of determining if DST is in effect or not.
+
+- Added support for Python 3.7 and 3.8. Dropped 3.5 although it still works.
+
+
+2.0.0 (2019-07-23)
+------------------
+
+- No differences since 2.0.0b3
+
+Major differences since 1.5.1
+.............................
+
+- When no time zone configuration can be find, tzlocal now return UTC.
+ This is a major difference from 1.x, where an exception would be raised.
+ This change is because Docker images often have no configuration at all,
+ and the unix utilities will then default to UTC, so we follow that.
+
+- If tzlocal on Unix finds a timezone name in a /etc config file, then
+ tzlocal now verifies that the timezone it fouds has the same offset as
+ the local computer is configured with. If it doesn't, something is
+ configured incorrectly. (Victor Torres, regebro)
+
+- Get timezone via Termux `getprop` wrapper on Android. It's not officially
+ supported because we can't test it, but at least we make an effort.
+ (Jean Jordaan)
+
+Minor differences and bug fixes
+...............................
+
+- Skip comment lines when parsing /etc/timezone. (Edward Betts)
+
+- Don't load timezone from current directory. (Gabriel Corona)
+
+- Now verifies that the config files actually contain something before
+ reading them. (Zackary Welch, regebro)
+
+- Got rid of a BytesWarning (Mickaël Schoentgen)
+
+- Now handles if config file paths exists, but are directories.
+
+- Moved tests out from distributions
+
+- Support wheels
+
+
+1.5.1 (2017-12-01)
+------------------
+
+- 1.5 had a bug that slipped through testing, fixed that,
+ increased test coverage.
+
+
+1.5 (2017-11-30)
+----------------
+
+- No longer treats macOS as special, but as a unix.
+
+- get_windows_info.py is renamed to update_windows_mappings.py
+
+- Windows mappings now also contain mappings from deprecated zoneinfo names.
+ (Preston-Landers, regebro)
+
+
+1.4 (2017-04-18)
+----------------
+
+- I use MIT on my other projects, so relicensing.
+
+
+1.4b1 (2017-04-14)
+------------------
+
+- Dropping support for Python versions nobody uses (2.5, 3.1, 3.2), adding 3.6
+ Python 3.1 and 3.2 still works, 2.5 has been broken for some time.
+
+- Ayalash's OS X fix didn't work on Python 2.7, fixed that.
+
+
+1.3.2 (2017-04-12)
+------------------
+
+- Ensure closing of subprocess on OS X (ayalash)
+
+- Removed unused imports (jwilk)
+
+- Closes stdout and stderr to get rid of ResourceWarnings (johnwquarles)
+
+- Updated Windows timezones (axil)
+
+
+1.3 (2016-10-15)
+----------------
+
+- #34: Added support for /var/db/zoneinfo
+
+
+1.2.2 (2016-03-02)
+------------------
+
+- #30: Fixed a bug on OS X.
+
+
+1.2.1 (2016-02-28)
+------------------
+
+- Tests failed if TZ was set in the environment. (EdwardBetts)
+
+- Replaces os.popen() with subprocess.Popen() for OS X to
+ handle when systemsetup doesn't exist. (mckabi, cewing)
+
+
+1.2 (2015-06-14)
+----------------
+
+- Systemd stores no time zone name, forcing us to look at the name of the file
+ that localtime symlinks to. (cameris)
+
+
+1.1.2 (2014-10-18)
+------------------
+
+- Timezones that has 3 items did not work on Mac OS X.
+ (Marc Van Olmen)
+
+- Now doesn't fail if the TZ environment variable isn't an Olsen time zone.
+
+- Some timezones on Windows can apparently be empty (perhaps the are deleted).
+ Now these are ignored.
+ (Xiaokun Zhu)
+
+
+1.1.1 (2014-01-29)
+------------------
+
+- I forgot to add Etc/UTC as an alias for Etc/GMT.
+
+
+1.1 (2014-01-28)
+----------------
+
+- Adding better support for OS X.
+
+- Added support to map from tzdata/Olsen names to Windows names.
+ (Thanks to Benjamen Meyer).
+
+
+1.0 (2013-05-29)
+----------------
+
+- Fixed some more cases where spaces needs replacing with underscores.
+
+- Better handling of misconfigured /etc/timezone.
+
+- Better error message on Windows if we can't find a timezone at all.
+
+
+0.3 (2012-09-13)
+----------------
+
+- Windows 7 support.
+
+- Python 2.5 supported; because it only needed a __future__ import.
+
+- Python 3.3 tested, it worked.
+
+- Got rid of relative imports, because I don't actually like them,
+ so I don't know why I used them in the first place.
+
+- For each Windows zone, use the default zoneinfo zone, not the last one.
+
+
+0.2 (2012-09-12)
+----------------
+
+- Python 3 support.
+
+
+0.1 (2012-09-11)
+----------------
+
+- Initial release.
+
+
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/RECORD b/venv/Lib/site-packages/tzlocal-2.1.dist-info/RECORD
new file mode 100644
index 00000000..b697c754
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/RECORD
@@ -0,0 +1,17 @@
+tzlocal-2.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+tzlocal-2.1.dist-info/LICENSE.txt,sha256=2ZqyCa6xaq0sJckP_YPBqYHikP__dqQgoqsD4D8EG4w,1060
+tzlocal-2.1.dist-info/METADATA,sha256=CFvLexLJNXCk-hBmflVJxv7P2Izms0iDeVubwshTF1g,8227
+tzlocal-2.1.dist-info/RECORD,,
+tzlocal-2.1.dist-info/WHEEL,sha256=aSdOKpzTGLLkKenfdFGiq92od_Dmr98YfEe8iw7iZoo,110
+tzlocal-2.1.dist-info/top_level.txt,sha256=QR6vZWP520waETnkotApPQPyVh9VnjoYPoAVHLK1DrE,8
+tzlocal-2.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+tzlocal/__init__.py,sha256=zOXBN5IP3Nc1gNiL8aVwHAhXAYTFfkTSDJ6VdjmifCQ,168
+tzlocal/__pycache__/__init__.cpython-36.pyc,,
+tzlocal/__pycache__/unix.cpython-36.pyc,,
+tzlocal/__pycache__/utils.cpython-36.pyc,,
+tzlocal/__pycache__/win32.cpython-36.pyc,,
+tzlocal/__pycache__/windows_tz.cpython-36.pyc,,
+tzlocal/unix.py,sha256=7dFkjHfqNz4k9F_-PseJaKHCy8uHLKYckbIydpMGXo0,6062
+tzlocal/utils.py,sha256=FYqtaomESB2nQWR8cJalSLoQ9uq7QE0Sx0Hhud1kpTM,1692
+tzlocal/win32.py,sha256=GlvUX_yS1OGEkGmHvW_A3GR5Arxr6lrn0DetVcRanKg,3265
+tzlocal/windows_tz.py,sha256=J-5L3_TUGPiyg69GhqxFnAHhiHsjrZKfkgR1WbofaLk,31441
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/WHEEL b/venv/Lib/site-packages/tzlocal-2.1.dist-info/WHEEL
new file mode 100644
index 00000000..131c7a86
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/top_level.txt b/venv/Lib/site-packages/tzlocal-2.1.dist-info/top_level.txt
new file mode 100644
index 00000000..cd5e9b12
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+tzlocal
diff --git a/venv/Lib/site-packages/tzlocal-2.1.dist-info/zip-safe b/venv/Lib/site-packages/tzlocal-2.1.dist-info/zip-safe
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal-2.1.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/venv/Lib/site-packages/tzlocal/__init__.py b/venv/Lib/site-packages/tzlocal/__init__.py
new file mode 100644
index 00000000..c8196d66
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal/__init__.py
@@ -0,0 +1,5 @@
+import sys
+if sys.platform == 'win32':
+ from tzlocal.win32 import get_localzone, reload_localzone
+else:
+ from tzlocal.unix import get_localzone, reload_localzone
diff --git a/venv/Lib/site-packages/tzlocal/unix.py b/venv/Lib/site-packages/tzlocal/unix.py
new file mode 100644
index 00000000..8574965a
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal/unix.py
@@ -0,0 +1,174 @@
+import os
+import pytz
+import re
+import warnings
+
+from tzlocal import utils
+
+_cache_tz = None
+
+
+def _tz_from_env(tzenv):
+ if tzenv[0] == ':':
+ tzenv = tzenv[1:]
+
+ # TZ specifies a file
+ if os.path.isabs(tzenv) and os.path.exists(tzenv):
+ with open(tzenv, 'rb') as tzfile:
+ return pytz.tzfile.build_tzinfo('local', tzfile)
+
+ # TZ specifies a zoneinfo zone.
+ try:
+ tz = pytz.timezone(tzenv)
+ # That worked, so we return this:
+ return tz
+ except pytz.UnknownTimeZoneError:
+ raise pytz.UnknownTimeZoneError(
+ "tzlocal() does not support non-zoneinfo timezones like %s. \n"
+ "Please use a timezone in the form of Continent/City")
+
+
+def _try_tz_from_env():
+ tzenv = os.environ.get('TZ')
+ if tzenv:
+ try:
+ return _tz_from_env(tzenv)
+ except pytz.UnknownTimeZoneError:
+ pass
+
+
+def _get_localzone(_root='/'):
+ """Tries to find the local timezone configuration.
+
+ This method prefers finding the timezone name and passing that to pytz,
+ over passing in the localtime file, as in the later case the zoneinfo
+ name is unknown.
+
+ The parameter _root makes the function look for files like /etc/localtime
+ beneath the _root directory. This is primarily used by the tests.
+ In normal usage you call the function without parameters."""
+
+ tzenv = _try_tz_from_env()
+ if tzenv:
+ return tzenv
+
+ # Are we under Termux on Android?
+ if os.path.exists('/system/bin/getprop'):
+ import subprocess
+ androidtz = subprocess.check_output(['getprop', 'persist.sys.timezone']).strip().decode()
+ return pytz.timezone(androidtz)
+
+ # Now look for distribution specific configuration files
+ # that contain the timezone name.
+ for configfile in ('etc/timezone', 'var/db/zoneinfo'):
+ tzpath = os.path.join(_root, configfile)
+ try:
+ with open(tzpath, 'rb') as tzfile:
+ data = tzfile.read()
+
+ # Issue #3 was that /etc/timezone was a zoneinfo file.
+ # That's a misconfiguration, but we need to handle it gracefully:
+ if data[:5] == b'TZif2':
+ continue
+
+ etctz = data.strip().decode()
+ if not etctz:
+ # Empty file, skip
+ continue
+ for etctz in data.decode().splitlines():
+ # Get rid of host definitions and comments:
+ if ' ' in etctz:
+ etctz, dummy = etctz.split(' ', 1)
+ if '#' in etctz:
+ etctz, dummy = etctz.split('#', 1)
+ if not etctz:
+ continue
+ tz = pytz.timezone(etctz.replace(' ', '_'))
+ if _root == '/':
+ # We are using a file in etc to name the timezone.
+ # Verify that the timezone specified there is actually used:
+ utils.assert_tz_offset(tz)
+ return tz
+
+ except IOError:
+ # File doesn't exist or is a directory
+ continue
+
+ # CentOS has a ZONE setting in /etc/sysconfig/clock,
+ # OpenSUSE has a TIMEZONE setting in /etc/sysconfig/clock and
+ # Gentoo has a TIMEZONE setting in /etc/conf.d/clock
+ # We look through these files for a timezone:
+
+ zone_re = re.compile(r'\s*ZONE\s*=\s*\"')
+ timezone_re = re.compile(r'\s*TIMEZONE\s*=\s*\"')
+ end_re = re.compile('\"')
+
+ for filename in ('etc/sysconfig/clock', 'etc/conf.d/clock'):
+ tzpath = os.path.join(_root, filename)
+ try:
+ with open(tzpath, 'rt') as tzfile:
+ data = tzfile.readlines()
+
+ for line in data:
+ # Look for the ZONE= setting.
+ match = zone_re.match(line)
+ if match is None:
+ # No ZONE= setting. Look for the TIMEZONE= setting.
+ match = timezone_re.match(line)
+ if match is not None:
+ # Some setting existed
+ line = line[match.end():]
+ etctz = line[:end_re.search(line).start()]
+
+ # We found a timezone
+ tz = pytz.timezone(etctz.replace(' ', '_'))
+ if _root == '/':
+ # We are using a file in etc to name the timezone.
+ # Verify that the timezone specified there is actually used:
+ utils.assert_tz_offset(tz)
+ return tz
+
+ except IOError:
+ # File doesn't exist or is a directory
+ continue
+
+ # systemd distributions use symlinks that include the zone name,
+ # see manpage of localtime(5) and timedatectl(1)
+ tzpath = os.path.join(_root, 'etc/localtime')
+ if os.path.exists(tzpath) and os.path.islink(tzpath):
+ tzpath = os.path.realpath(tzpath)
+ start = tzpath.find("/")+1
+ while start != 0:
+ tzpath = tzpath[start:]
+ try:
+ return pytz.timezone(tzpath)
+ except pytz.UnknownTimeZoneError:
+ pass
+ start = tzpath.find("/")+1
+
+ # No explicit setting existed. Use localtime
+ for filename in ('etc/localtime', 'usr/local/etc/localtime'):
+ tzpath = os.path.join(_root, filename)
+
+ if not os.path.exists(tzpath):
+ continue
+ with open(tzpath, 'rb') as tzfile:
+ return pytz.tzfile.build_tzinfo('local', tzfile)
+
+ warnings.warn('Can not find any timezone configuration, defaulting to UTC.')
+ return pytz.utc
+
+def get_localzone():
+ """Get the computers configured local timezone, if any."""
+ global _cache_tz
+ if _cache_tz is None:
+ _cache_tz = _get_localzone()
+
+ return _cache_tz
+
+
+def reload_localzone():
+ """Reload the cached localzone. You need to call this if the timezone has changed."""
+ global _cache_tz
+ _cache_tz = _get_localzone()
+ return _cache_tz
diff --git a/venv/Lib/site-packages/tzlocal/utils.py b/venv/Lib/site-packages/tzlocal/utils.py
new file mode 100644
index 00000000..5a677990
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal/utils.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+import time
+import datetime
+import calendar
+
+
+def get_system_offset():
+ """Get system's timezone offset using built-in library time.
+
+ For the Timezone constants (altzone, daylight, timezone, and tzname), the
+ value is determined by the timezone rules in effect at module load time or
+ the last time tzset() is called and may be incorrect for times in the past.
+
+ To keep compatibility with Windows, we're always importing time module here.
+ """
+
+ localtime = calendar.timegm(time.localtime())
+ gmtime = calendar.timegm(time.gmtime())
+ offset = gmtime - localtime
+ # We could get the localtime and gmtime on either side of a second switch
+ # so we check that the difference is less than one minute, because nobody
+ # has that small DST differences.
+ if abs(offset - time.altzone) < 60:
+ return -time.altzone
+ else:
+ return -time.timezone
+
+
+def get_tz_offset(tz):
+ """Get timezone's offset using built-in function datetime.utcoffset()."""
+ return int(datetime.datetime.now(tz).utcoffset().total_seconds())
+
+
+def assert_tz_offset(tz):
+ """Assert that system's timezone offset equals to the timezone offset found.
+
+ If they don't match, we probably have a misconfiguration, for example, an
+ incorrect timezone set in /etc/timezone file in systemd distributions."""
+ tz_offset = get_tz_offset(tz)
+ system_offset = get_system_offset()
+ if tz_offset != system_offset:
+ msg = ('Timezone offset does not match system offset: {0} != {1}. '
+ 'Please, check your config files.').format(
+ tz_offset, system_offset
+ )
+ raise ValueError(msg)
diff --git a/venv/Lib/site-packages/tzlocal/win32.py b/venv/Lib/site-packages/tzlocal/win32.py
new file mode 100644
index 00000000..fcc42a23
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal/win32.py
@@ -0,0 +1,104 @@
+try:
+ import _winreg as winreg
+except ImportError:
+ import winreg
+
+import pytz
+
+from tzlocal.windows_tz import win_tz
+from tzlocal import utils
+
+_cache_tz = None
+
+
def valuestodict(key):
    """Convert an open registry key's values to a {name: data} dictionary.

    Args:
        key: an open winreg key handle (e.g. from winreg.OpenKey).

    Returns:
        dict mapping each value name stored under the key to its data.
    """
    # Fix: the original bound a local named `dict`, shadowing the built-in.
    values = {}
    # QueryInfoKey returns (num_subkeys, num_values, last_modified);
    # index 1 is the number of values stored directly under this key.
    num_values = winreg.QueryInfoKey(key)[1]
    for i in range(num_values):
        # EnumValue returns (name, data, type); the type is not needed here.
        name, data = winreg.EnumValue(key, i)[:2]
        values[name] = data
    return values
+
+
def get_localzone_name():
    """Return the Olson/IANA timezone name matching the Windows-configured zone.

    Reads the configured timezone from the Windows registry and translates it
    to an Olson name via the win_tz mapping from tzlocal.windows_tz.

    Raises:
        LookupError: when no Windows timezone key name can be determined
            from the registry.
        pytz.UnknownTimeZoneError: when the Windows key name has no entry in
            the win_tz mapping (even after appending " Standard Time").
    """
    # Windows is special. It has unique time zone names (in several
    # meanings of the word) available, but unfortunately, they can be
    # translated to the language of the operating system, so we need to
    # do a backwards lookup, by going through all time zones and see which
    # one matches.
    handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)

    TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
    localtz = winreg.OpenKey(handle, TZLOCALKEYNAME)
    keyvalues = valuestodict(localtz)
    localtz.Close()

    if 'TimeZoneKeyName' in keyvalues:
        # Windows 7 (and Vista?)

        # For some reason this returns a string with loads of NUL bytes at
        # least on some systems. I don't know if this is a bug somewhere, I
        # just work around it.
        tzkeyname = keyvalues['TimeZoneKeyName'].split('\x00', 1)[0]
    else:
        # Windows 2000 or XP

        # This is the localized name:
        tzwin = keyvalues['StandardName']

        # Open the list of timezones to look up the real name:
        TZKEYNAME = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
        tzkey = winreg.OpenKey(handle, TZKEYNAME)

        # Now, match this value to Time Zone information
        tzkeyname = None
        for i in range(winreg.QueryInfoKey(tzkey)[0]):
            subkey = winreg.EnumKey(tzkey, i)
            sub = winreg.OpenKey(tzkey, subkey)
            data = valuestodict(sub)
            sub.Close()
            try:
                # 'Std' holds the localized standard-time name for this zone;
                # the first subkey whose name matches wins.
                if data['Std'] == tzwin:
                    tzkeyname = subkey
                    break
            except KeyError:
                # This timezone didn't have proper configuration.
                # Ignore it.
                pass

        tzkey.Close()
    handle.Close()

    if tzkeyname is None:
        raise LookupError('Can not find Windows timezone configuration')

    timezone = win_tz.get(tzkeyname)
    if timezone is None:
        # Nope, that didn't work. Try adding "Standard Time",
        # it seems to work a lot of times:
        timezone = win_tz.get(tzkeyname + " Standard Time")

    # Return what we have.
    if timezone is None:
        raise pytz.UnknownTimeZoneError('Can not find timezone ' + tzkeyname)

    return timezone
+
+
def get_localzone():
    """Returns the zoneinfo-based tzinfo object that matches the Windows-configured timezone."""
    global _cache_tz
    # Populate the module-level cache on first use only.
    if _cache_tz is None:
        _cache_tz = pytz.timezone(get_localzone_name())
    # Sanity-check the cached zone against the system offset on every call.
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
+
+
def reload_localzone():
    """Reload the cached localzone. You need to call this if the timezone has changed."""
    global _cache_tz
    # Unconditionally refresh the cache from the registry, then re-validate
    # it against the system offset (the cache is updated even if the
    # validation below raises, matching the original behavior).
    _cache_tz = pytz.timezone(get_localzone_name())
    utils.assert_tz_offset(_cache_tz)
    return _cache_tz
diff --git a/venv/Lib/site-packages/tzlocal/windows_tz.py b/venv/Lib/site-packages/tzlocal/windows_tz.py
new file mode 100644
index 00000000..86ba807d
--- /dev/null
+++ b/venv/Lib/site-packages/tzlocal/windows_tz.py
@@ -0,0 +1,697 @@
# This file is autogenerated by the update_windows_mapping.py script
# Do not edit.
# win_tz maps Windows registry timezone key names to Olson/IANA zone names;
# it is the lookup table used by tzlocal.win32.get_localzone_name().
win_tz = {'AUS Central Standard Time': 'Australia/Darwin',
          'AUS Eastern Standard Time': 'Australia/Sydney',
          'Afghanistan Standard Time': 'Asia/Kabul',
          'Alaskan Standard Time': 'America/Anchorage',
          'Aleutian Standard Time': 'America/Adak',
          'Altai Standard Time': 'Asia/Barnaul',
          'Arab Standard Time': 'Asia/Riyadh',
          'Arabian Standard Time': 'Asia/Dubai',
          'Arabic Standard Time': 'Asia/Baghdad',
          'Argentina Standard Time': 'America/Buenos_Aires',
          'Astrakhan Standard Time': 'Europe/Astrakhan',
          'Atlantic Standard Time': 'America/Halifax',
          'Aus Central W. Standard Time': 'Australia/Eucla',
          'Azerbaijan Standard Time': 'Asia/Baku',
          'Azores Standard Time': 'Atlantic/Azores',
          'Bahia Standard Time': 'America/Bahia',
          'Bangladesh Standard Time': 'Asia/Dhaka',
          'Belarus Standard Time': 'Europe/Minsk',
          'Bougainville Standard Time': 'Pacific/Bougainville',
          'Canada Central Standard Time': 'America/Regina',
          'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
          'Caucasus Standard Time': 'Asia/Yerevan',
          'Cen. Australia Standard Time': 'Australia/Adelaide',
          'Central America Standard Time': 'America/Guatemala',
          'Central Asia Standard Time': 'Asia/Almaty',
          'Central Brazilian Standard Time': 'America/Cuiaba',
          'Central Europe Standard Time': 'Europe/Budapest',
          'Central European Standard Time': 'Europe/Warsaw',
          'Central Pacific Standard Time': 'Pacific/Guadalcanal',
          'Central Standard Time': 'America/Chicago',
          'Central Standard Time (Mexico)': 'America/Mexico_City',
          'Chatham Islands Standard Time': 'Pacific/Chatham',
          'China Standard Time': 'Asia/Shanghai',
          'Cuba Standard Time': 'America/Havana',
          'Dateline Standard Time': 'Etc/GMT+12',
          'E. Africa Standard Time': 'Africa/Nairobi',
          'E. Australia Standard Time': 'Australia/Brisbane',
          'E. Europe Standard Time': 'Europe/Chisinau',
          'E. South America Standard Time': 'America/Sao_Paulo',
          'Easter Island Standard Time': 'Pacific/Easter',
          'Eastern Standard Time': 'America/New_York',
          'Eastern Standard Time (Mexico)': 'America/Cancun',
          'Egypt Standard Time': 'Africa/Cairo',
          'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
          'FLE Standard Time': 'Europe/Kiev',
          'Fiji Standard Time': 'Pacific/Fiji',
          'GMT Standard Time': 'Europe/London',
          'GTB Standard Time': 'Europe/Bucharest',
          'Georgian Standard Time': 'Asia/Tbilisi',
          'Greenland Standard Time': 'America/Godthab',
          'Greenwich Standard Time': 'Atlantic/Reykjavik',
          'Haiti Standard Time': 'America/Port-au-Prince',
          'Hawaiian Standard Time': 'Pacific/Honolulu',
          'India Standard Time': 'Asia/Calcutta',
          'Iran Standard Time': 'Asia/Tehran',
          'Israel Standard Time': 'Asia/Jerusalem',
          'Jordan Standard Time': 'Asia/Amman',
          'Kaliningrad Standard Time': 'Europe/Kaliningrad',
          'Korea Standard Time': 'Asia/Seoul',
          'Libya Standard Time': 'Africa/Tripoli',
          'Line Islands Standard Time': 'Pacific/Kiritimati',
          'Lord Howe Standard Time': 'Australia/Lord_Howe',
          'Magadan Standard Time': 'Asia/Magadan',
          'Magallanes Standard Time': 'America/Punta_Arenas',
          'Marquesas Standard Time': 'Pacific/Marquesas',
          'Mauritius Standard Time': 'Indian/Mauritius',
          'Middle East Standard Time': 'Asia/Beirut',
          'Montevideo Standard Time': 'America/Montevideo',
          'Morocco Standard Time': 'Africa/Casablanca',
          'Mountain Standard Time': 'America/Denver',
          'Mountain Standard Time (Mexico)': 'America/Chihuahua',
          'Myanmar Standard Time': 'Asia/Rangoon',
          'N. Central Asia Standard Time': 'Asia/Novosibirsk',
          'Namibia Standard Time': 'Africa/Windhoek',
          'Nepal Standard Time': 'Asia/Katmandu',
          'New Zealand Standard Time': 'Pacific/Auckland',
          'Newfoundland Standard Time': 'America/St_Johns',
          'Norfolk Standard Time': 'Pacific/Norfolk',
          'North Asia East Standard Time': 'Asia/Irkutsk',
          'North Asia Standard Time': 'Asia/Krasnoyarsk',
          'North Korea Standard Time': 'Asia/Pyongyang',
          'Omsk Standard Time': 'Asia/Omsk',
          'Pacific SA Standard Time': 'America/Santiago',
          'Pacific Standard Time': 'America/Los_Angeles',
          'Pacific Standard Time (Mexico)': 'America/Tijuana',
          'Pakistan Standard Time': 'Asia/Karachi',
          'Paraguay Standard Time': 'America/Asuncion',
          'Qyzylorda Standard Time': 'Asia/Qyzylorda',
          'Romance Standard Time': 'Europe/Paris',
          'Russia Time Zone 10': 'Asia/Srednekolymsk',
          'Russia Time Zone 11': 'Asia/Kamchatka',
          'Russia Time Zone 3': 'Europe/Samara',
          'Russian Standard Time': 'Europe/Moscow',
          'SA Eastern Standard Time': 'America/Cayenne',
          'SA Pacific Standard Time': 'America/Bogota',
          'SA Western Standard Time': 'America/La_Paz',
          'SE Asia Standard Time': 'Asia/Bangkok',
          'Saint Pierre Standard Time': 'America/Miquelon',
          'Sakhalin Standard Time': 'Asia/Sakhalin',
          'Samoa Standard Time': 'Pacific/Apia',
          'Sao Tome Standard Time': 'Africa/Sao_Tome',
          'Saratov Standard Time': 'Europe/Saratov',
          'Singapore Standard Time': 'Asia/Singapore',
          'South Africa Standard Time': 'Africa/Johannesburg',
          'Sri Lanka Standard Time': 'Asia/Colombo',
          'Sudan Standard Time': 'Africa/Khartoum',
          'Syria Standard Time': 'Asia/Damascus',
          'Taipei Standard Time': 'Asia/Taipei',
          'Tasmania Standard Time': 'Australia/Hobart',
          'Tocantins Standard Time': 'America/Araguaina',
          'Tokyo Standard Time': 'Asia/Tokyo',
          'Tomsk Standard Time': 'Asia/Tomsk',
          'Tonga Standard Time': 'Pacific/Tongatapu',
          'Transbaikal Standard Time': 'Asia/Chita',
          'Turkey Standard Time': 'Europe/Istanbul',
          'Turks And Caicos Standard Time': 'America/Grand_Turk',
          'US Eastern Standard Time': 'America/Indianapolis',
          'US Mountain Standard Time': 'America/Phoenix',
          'UTC': 'Etc/GMT',
          'UTC+12': 'Etc/GMT-12',
          'UTC+13': 'Etc/GMT-13',
          'UTC-02': 'Etc/GMT+2',
          'UTC-08': 'Etc/GMT+8',
          'UTC-09': 'Etc/GMT+9',
          'UTC-11': 'Etc/GMT+11',
          'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
          'Venezuela Standard Time': 'America/Caracas',
          'Vladivostok Standard Time': 'Asia/Vladivostok',
          'Volgograd Standard Time': 'Europe/Volgograd',
          'W. Australia Standard Time': 'Australia/Perth',
          'W. Central Africa Standard Time': 'Africa/Lagos',
          'W. Europe Standard Time': 'Europe/Berlin',
          'W. Mongolia Standard Time': 'Asia/Hovd',
          'West Asia Standard Time': 'Asia/Tashkent',
          'West Bank Standard Time': 'Asia/Hebron',
          'West Pacific Standard Time': 'Pacific/Port_Moresby',
          'Yakutsk Standard Time': 'Asia/Yakutsk'}

# Old name for the win_tz variable:
tz_names = win_tz
+
+tz_win = {'Africa/Abidjan': 'Greenwich Standard Time',
+ 'Africa/Accra': 'Greenwich Standard Time',
+ 'Africa/Addis_Ababa': 'E. Africa Standard Time',
+ 'Africa/Algiers': 'W. Central Africa Standard Time',
+ 'Africa/Asmera': 'E. Africa Standard Time',
+ 'Africa/Bamako': 'Greenwich Standard Time',
+ 'Africa/Bangui': 'W. Central Africa Standard Time',
+ 'Africa/Banjul': 'Greenwich Standard Time',
+ 'Africa/Bissau': 'Greenwich Standard Time',
+ 'Africa/Blantyre': 'South Africa Standard Time',
+ 'Africa/Brazzaville': 'W. Central Africa Standard Time',
+ 'Africa/Bujumbura': 'South Africa Standard Time',
+ 'Africa/Cairo': 'Egypt Standard Time',
+ 'Africa/Casablanca': 'Morocco Standard Time',
+ 'Africa/Ceuta': 'Romance Standard Time',
+ 'Africa/Conakry': 'Greenwich Standard Time',
+ 'Africa/Dakar': 'Greenwich Standard Time',
+ 'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
+ 'Africa/Djibouti': 'E. Africa Standard Time',
+ 'Africa/Douala': 'W. Central Africa Standard Time',
+ 'Africa/El_Aaiun': 'Morocco Standard Time',
+ 'Africa/Freetown': 'Greenwich Standard Time',
+ 'Africa/Gaborone': 'South Africa Standard Time',
+ 'Africa/Harare': 'South Africa Standard Time',
+ 'Africa/Johannesburg': 'South Africa Standard Time',
+ 'Africa/Juba': 'E. Africa Standard Time',
+ 'Africa/Kampala': 'E. Africa Standard Time',
+ 'Africa/Khartoum': 'Sudan Standard Time',
+ 'Africa/Kigali': 'South Africa Standard Time',
+ 'Africa/Kinshasa': 'W. Central Africa Standard Time',
+ 'Africa/Lagos': 'W. Central Africa Standard Time',
+ 'Africa/Libreville': 'W. Central Africa Standard Time',
+ 'Africa/Lome': 'Greenwich Standard Time',
+ 'Africa/Luanda': 'W. Central Africa Standard Time',
+ 'Africa/Lubumbashi': 'South Africa Standard Time',
+ 'Africa/Lusaka': 'South Africa Standard Time',
+ 'Africa/Malabo': 'W. Central Africa Standard Time',
+ 'Africa/Maputo': 'South Africa Standard Time',
+ 'Africa/Maseru': 'South Africa Standard Time',
+ 'Africa/Mbabane': 'South Africa Standard Time',
+ 'Africa/Mogadishu': 'E. Africa Standard Time',
+ 'Africa/Monrovia': 'Greenwich Standard Time',
+ 'Africa/Nairobi': 'E. Africa Standard Time',
+ 'Africa/Ndjamena': 'W. Central Africa Standard Time',
+ 'Africa/Niamey': 'W. Central Africa Standard Time',
+ 'Africa/Nouakchott': 'Greenwich Standard Time',
+ 'Africa/Ouagadougou': 'Greenwich Standard Time',
+ 'Africa/Porto-Novo': 'W. Central Africa Standard Time',
+ 'Africa/Sao_Tome': 'Sao Tome Standard Time',
+ 'Africa/Timbuktu': 'Greenwich Standard Time',
+ 'Africa/Tripoli': 'Libya Standard Time',
+ 'Africa/Tunis': 'W. Central Africa Standard Time',
+ 'Africa/Windhoek': 'Namibia Standard Time',
+ 'America/Adak': 'Aleutian Standard Time',
+ 'America/Anchorage': 'Alaskan Standard Time',
+ 'America/Anguilla': 'SA Western Standard Time',
+ 'America/Antigua': 'SA Western Standard Time',
+ 'America/Araguaina': 'Tocantins Standard Time',
+ 'America/Argentina/La_Rioja': 'Argentina Standard Time',
+ 'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
+ 'America/Argentina/Salta': 'Argentina Standard Time',
+ 'America/Argentina/San_Juan': 'Argentina Standard Time',
+ 'America/Argentina/San_Luis': 'Argentina Standard Time',
+ 'America/Argentina/Tucuman': 'Argentina Standard Time',
+ 'America/Argentina/Ushuaia': 'Argentina Standard Time',
+ 'America/Aruba': 'SA Western Standard Time',
+ 'America/Asuncion': 'Paraguay Standard Time',
+ 'America/Atka': 'Aleutian Standard Time',
+ 'America/Bahia': 'Bahia Standard Time',
+ 'America/Bahia_Banderas': 'Central Standard Time (Mexico)',
+ 'America/Barbados': 'SA Western Standard Time',
+ 'America/Belem': 'SA Eastern Standard Time',
+ 'America/Belize': 'Central America Standard Time',
+ 'America/Blanc-Sablon': 'SA Western Standard Time',
+ 'America/Boa_Vista': 'SA Western Standard Time',
+ 'America/Bogota': 'SA Pacific Standard Time',
+ 'America/Boise': 'Mountain Standard Time',
+ 'America/Buenos_Aires': 'Argentina Standard Time',
+ 'America/Cambridge_Bay': 'Mountain Standard Time',
+ 'America/Campo_Grande': 'Central Brazilian Standard Time',
+ 'America/Cancun': 'Eastern Standard Time (Mexico)',
+ 'America/Caracas': 'Venezuela Standard Time',
+ 'America/Catamarca': 'Argentina Standard Time',
+ 'America/Cayenne': 'SA Eastern Standard Time',
+ 'America/Cayman': 'SA Pacific Standard Time',
+ 'America/Chicago': 'Central Standard Time',
+ 'America/Chihuahua': 'Mountain Standard Time (Mexico)',
+ 'America/Coral_Harbour': 'SA Pacific Standard Time',
+ 'America/Cordoba': 'Argentina Standard Time',
+ 'America/Costa_Rica': 'Central America Standard Time',
+ 'America/Creston': 'US Mountain Standard Time',
+ 'America/Cuiaba': 'Central Brazilian Standard Time',
+ 'America/Curacao': 'SA Western Standard Time',
+ 'America/Danmarkshavn': 'UTC',
+ 'America/Dawson': 'Pacific Standard Time',
+ 'America/Dawson_Creek': 'US Mountain Standard Time',
+ 'America/Denver': 'Mountain Standard Time',
+ 'America/Detroit': 'Eastern Standard Time',
+ 'America/Dominica': 'SA Western Standard Time',
+ 'America/Edmonton': 'Mountain Standard Time',
+ 'America/Eirunepe': 'SA Pacific Standard Time',
+ 'America/El_Salvador': 'Central America Standard Time',
+ 'America/Ensenada': 'Pacific Standard Time (Mexico)',
+ 'America/Fort_Nelson': 'US Mountain Standard Time',
+ 'America/Fortaleza': 'SA Eastern Standard Time',
+ 'America/Glace_Bay': 'Atlantic Standard Time',
+ 'America/Godthab': 'Greenland Standard Time',
+ 'America/Goose_Bay': 'Atlantic Standard Time',
+ 'America/Grand_Turk': 'Turks And Caicos Standard Time',
+ 'America/Grenada': 'SA Western Standard Time',
+ 'America/Guadeloupe': 'SA Western Standard Time',
+ 'America/Guatemala': 'Central America Standard Time',
+ 'America/Guayaquil': 'SA Pacific Standard Time',
+ 'America/Guyana': 'SA Western Standard Time',
+ 'America/Halifax': 'Atlantic Standard Time',
+ 'America/Havana': 'Cuba Standard Time',
+ 'America/Hermosillo': 'US Mountain Standard Time',
+ 'America/Indiana/Knox': 'Central Standard Time',
+ 'America/Indiana/Marengo': 'US Eastern Standard Time',
+ 'America/Indiana/Petersburg': 'Eastern Standard Time',
+ 'America/Indiana/Tell_City': 'Central Standard Time',
+ 'America/Indiana/Vevay': 'US Eastern Standard Time',
+ 'America/Indiana/Vincennes': 'Eastern Standard Time',
+ 'America/Indiana/Winamac': 'Eastern Standard Time',
+ 'America/Indianapolis': 'US Eastern Standard Time',
+ 'America/Inuvik': 'Mountain Standard Time',
+ 'America/Iqaluit': 'Eastern Standard Time',
+ 'America/Jamaica': 'SA Pacific Standard Time',
+ 'America/Jujuy': 'Argentina Standard Time',
+ 'America/Juneau': 'Alaskan Standard Time',
+ 'America/Kentucky/Monticello': 'Eastern Standard Time',
+ 'America/Knox_IN': 'Central Standard Time',
+ 'America/Kralendijk': 'SA Western Standard Time',
+ 'America/La_Paz': 'SA Western Standard Time',
+ 'America/Lima': 'SA Pacific Standard Time',
+ 'America/Los_Angeles': 'Pacific Standard Time',
+ 'America/Louisville': 'Eastern Standard Time',
+ 'America/Lower_Princes': 'SA Western Standard Time',
+ 'America/Maceio': 'SA Eastern Standard Time',
+ 'America/Managua': 'Central America Standard Time',
+ 'America/Manaus': 'SA Western Standard Time',
+ 'America/Marigot': 'SA Western Standard Time',
+ 'America/Martinique': 'SA Western Standard Time',
+ 'America/Matamoros': 'Central Standard Time',
+ 'America/Mazatlan': 'Mountain Standard Time (Mexico)',
+ 'America/Mendoza': 'Argentina Standard Time',
+ 'America/Menominee': 'Central Standard Time',
+ 'America/Merida': 'Central Standard Time (Mexico)',
+ 'America/Metlakatla': 'Alaskan Standard Time',
+ 'America/Mexico_City': 'Central Standard Time (Mexico)',
+ 'America/Miquelon': 'Saint Pierre Standard Time',
+ 'America/Moncton': 'Atlantic Standard Time',
+ 'America/Monterrey': 'Central Standard Time (Mexico)',
+ 'America/Montevideo': 'Montevideo Standard Time',
+ 'America/Montreal': 'Eastern Standard Time',
+ 'America/Montserrat': 'SA Western Standard Time',
+ 'America/Nassau': 'Eastern Standard Time',
+ 'America/New_York': 'Eastern Standard Time',
+ 'America/Nipigon': 'Eastern Standard Time',
+ 'America/Nome': 'Alaskan Standard Time',
+ 'America/Noronha': 'UTC-02',
+ 'America/North_Dakota/Beulah': 'Central Standard Time',
+ 'America/North_Dakota/Center': 'Central Standard Time',
+ 'America/North_Dakota/New_Salem': 'Central Standard Time',
+ 'America/Ojinaga': 'Mountain Standard Time',
+ 'America/Panama': 'SA Pacific Standard Time',
+ 'America/Pangnirtung': 'Eastern Standard Time',
+ 'America/Paramaribo': 'SA Eastern Standard Time',
+ 'America/Phoenix': 'US Mountain Standard Time',
+ 'America/Port-au-Prince': 'Haiti Standard Time',
+ 'America/Port_of_Spain': 'SA Western Standard Time',
+ 'America/Porto_Acre': 'SA Pacific Standard Time',
+ 'America/Porto_Velho': 'SA Western Standard Time',
+ 'America/Puerto_Rico': 'SA Western Standard Time',
+ 'America/Punta_Arenas': 'Magallanes Standard Time',
+ 'America/Rainy_River': 'Central Standard Time',
+ 'America/Rankin_Inlet': 'Central Standard Time',
+ 'America/Recife': 'SA Eastern Standard Time',
+ 'America/Regina': 'Canada Central Standard Time',
+ 'America/Resolute': 'Central Standard Time',
+ 'America/Rio_Branco': 'SA Pacific Standard Time',
+ 'America/Santa_Isabel': 'Pacific Standard Time (Mexico)',
+ 'America/Santarem': 'SA Eastern Standard Time',
+ 'America/Santiago': 'Pacific SA Standard Time',
+ 'America/Santo_Domingo': 'SA Western Standard Time',
+ 'America/Sao_Paulo': 'E. South America Standard Time',
+ 'America/Scoresbysund': 'Azores Standard Time',
+ 'America/Shiprock': 'Mountain Standard Time',
+ 'America/Sitka': 'Alaskan Standard Time',
+ 'America/St_Barthelemy': 'SA Western Standard Time',
+ 'America/St_Johns': 'Newfoundland Standard Time',
+ 'America/St_Kitts': 'SA Western Standard Time',
+ 'America/St_Lucia': 'SA Western Standard Time',
+ 'America/St_Thomas': 'SA Western Standard Time',
+ 'America/St_Vincent': 'SA Western Standard Time',
+ 'America/Swift_Current': 'Canada Central Standard Time',
+ 'America/Tegucigalpa': 'Central America Standard Time',
+ 'America/Thule': 'Atlantic Standard Time',
+ 'America/Thunder_Bay': 'Eastern Standard Time',
+ 'America/Tijuana': 'Pacific Standard Time (Mexico)',
+ 'America/Toronto': 'Eastern Standard Time',
+ 'America/Tortola': 'SA Western Standard Time',
+ 'America/Vancouver': 'Pacific Standard Time',
+ 'America/Virgin': 'SA Western Standard Time',
+ 'America/Whitehorse': 'Pacific Standard Time',
+ 'America/Winnipeg': 'Central Standard Time',
+ 'America/Yakutat': 'Alaskan Standard Time',
+ 'America/Yellowknife': 'Mountain Standard Time',
+ 'Antarctica/Casey': 'Singapore Standard Time',
+ 'Antarctica/Davis': 'SE Asia Standard Time',
+ 'Antarctica/DumontDUrville': 'West Pacific Standard Time',
+ 'Antarctica/Macquarie': 'Central Pacific Standard Time',
+ 'Antarctica/Mawson': 'West Asia Standard Time',
+ 'Antarctica/McMurdo': 'New Zealand Standard Time',
+ 'Antarctica/Palmer': 'SA Eastern Standard Time',
+ 'Antarctica/Rothera': 'SA Eastern Standard Time',
+ 'Antarctica/South_Pole': 'New Zealand Standard Time',
+ 'Antarctica/Syowa': 'E. Africa Standard Time',
+ 'Antarctica/Vostok': 'Central Asia Standard Time',
+ 'Arctic/Longyearbyen': 'W. Europe Standard Time',
+ 'Asia/Aden': 'Arab Standard Time',
+ 'Asia/Almaty': 'Central Asia Standard Time',
+ 'Asia/Amman': 'Jordan Standard Time',
+ 'Asia/Anadyr': 'Russia Time Zone 11',
+ 'Asia/Aqtau': 'West Asia Standard Time',
+ 'Asia/Aqtobe': 'West Asia Standard Time',
+ 'Asia/Ashgabat': 'West Asia Standard Time',
+ 'Asia/Ashkhabad': 'West Asia Standard Time',
+ 'Asia/Atyrau': 'West Asia Standard Time',
+ 'Asia/Baghdad': 'Arabic Standard Time',
+ 'Asia/Bahrain': 'Arab Standard Time',
+ 'Asia/Baku': 'Azerbaijan Standard Time',
+ 'Asia/Bangkok': 'SE Asia Standard Time',
+ 'Asia/Barnaul': 'Altai Standard Time',
+ 'Asia/Beirut': 'Middle East Standard Time',
+ 'Asia/Bishkek': 'Central Asia Standard Time',
+ 'Asia/Brunei': 'Singapore Standard Time',
+ 'Asia/Calcutta': 'India Standard Time',
+ 'Asia/Chita': 'Transbaikal Standard Time',
+ 'Asia/Choibalsan': 'Ulaanbaatar Standard Time',
+ 'Asia/Chongqing': 'China Standard Time',
+ 'Asia/Chungking': 'China Standard Time',
+ 'Asia/Colombo': 'Sri Lanka Standard Time',
+ 'Asia/Dacca': 'Bangladesh Standard Time',
+ 'Asia/Damascus': 'Syria Standard Time',
+ 'Asia/Dhaka': 'Bangladesh Standard Time',
+ 'Asia/Dili': 'Tokyo Standard Time',
+ 'Asia/Dubai': 'Arabian Standard Time',
+ 'Asia/Dushanbe': 'West Asia Standard Time',
+ 'Asia/Famagusta': 'GTB Standard Time',
+ 'Asia/Gaza': 'West Bank Standard Time',
+ 'Asia/Harbin': 'China Standard Time',
+ 'Asia/Hebron': 'West Bank Standard Time',
+ 'Asia/Hong_Kong': 'China Standard Time',
+ 'Asia/Hovd': 'W. Mongolia Standard Time',
+ 'Asia/Irkutsk': 'North Asia East Standard Time',
+ 'Asia/Jakarta': 'SE Asia Standard Time',
+ 'Asia/Jayapura': 'Tokyo Standard Time',
+ 'Asia/Jerusalem': 'Israel Standard Time',
+ 'Asia/Kabul': 'Afghanistan Standard Time',
+ 'Asia/Kamchatka': 'Russia Time Zone 11',
+ 'Asia/Karachi': 'Pakistan Standard Time',
+ 'Asia/Kashgar': 'Central Asia Standard Time',
+ 'Asia/Katmandu': 'Nepal Standard Time',
+ 'Asia/Khandyga': 'Yakutsk Standard Time',
+ 'Asia/Krasnoyarsk': 'North Asia Standard Time',
+ 'Asia/Kuala_Lumpur': 'Singapore Standard Time',
+ 'Asia/Kuching': 'Singapore Standard Time',
+ 'Asia/Kuwait': 'Arab Standard Time',
+ 'Asia/Macao': 'China Standard Time',
+ 'Asia/Macau': 'China Standard Time',
+ 'Asia/Magadan': 'Magadan Standard Time',
+ 'Asia/Makassar': 'Singapore Standard Time',
+ 'Asia/Manila': 'Singapore Standard Time',
+ 'Asia/Muscat': 'Arabian Standard Time',
+ 'Asia/Nicosia': 'GTB Standard Time',
+ 'Asia/Novokuznetsk': 'North Asia Standard Time',
+ 'Asia/Novosibirsk': 'N. Central Asia Standard Time',
+ 'Asia/Omsk': 'Omsk Standard Time',
+ 'Asia/Oral': 'West Asia Standard Time',
+ 'Asia/Phnom_Penh': 'SE Asia Standard Time',
+ 'Asia/Pontianak': 'SE Asia Standard Time',
+ 'Asia/Pyongyang': 'North Korea Standard Time',
+ 'Asia/Qatar': 'Arab Standard Time',
+ 'Asia/Qostanay': 'Central Asia Standard Time',
+ 'Asia/Qyzylorda': 'Qyzylorda Standard Time',
+ 'Asia/Rangoon': 'Myanmar Standard Time',
+ 'Asia/Riyadh': 'Arab Standard Time',
+ 'Asia/Saigon': 'SE Asia Standard Time',
+ 'Asia/Sakhalin': 'Sakhalin Standard Time',
+ 'Asia/Samarkand': 'West Asia Standard Time',
+ 'Asia/Seoul': 'Korea Standard Time',
+ 'Asia/Shanghai': 'China Standard Time',
+ 'Asia/Singapore': 'Singapore Standard Time',
+ 'Asia/Srednekolymsk': 'Russia Time Zone 10',
+ 'Asia/Taipei': 'Taipei Standard Time',
+ 'Asia/Tashkent': 'West Asia Standard Time',
+ 'Asia/Tbilisi': 'Georgian Standard Time',
+ 'Asia/Tehran': 'Iran Standard Time',
+ 'Asia/Tel_Aviv': 'Israel Standard Time',
+ 'Asia/Thimbu': 'Bangladesh Standard Time',
+ 'Asia/Thimphu': 'Bangladesh Standard Time',
+ 'Asia/Tokyo': 'Tokyo Standard Time',
+ 'Asia/Tomsk': 'Tomsk Standard Time',
+ 'Asia/Ujung_Pandang': 'Singapore Standard Time',
+ 'Asia/Ulaanbaatar': 'Ulaanbaatar Standard Time',
+ 'Asia/Ulan_Bator': 'Ulaanbaatar Standard Time',
+ 'Asia/Urumqi': 'Central Asia Standard Time',
+ 'Asia/Ust-Nera': 'Vladivostok Standard Time',
+ 'Asia/Vientiane': 'SE Asia Standard Time',
+ 'Asia/Vladivostok': 'Vladivostok Standard Time',
+ 'Asia/Yakutsk': 'Yakutsk Standard Time',
+ 'Asia/Yekaterinburg': 'Ekaterinburg Standard Time',
+ 'Asia/Yerevan': 'Caucasus Standard Time',
+ 'Atlantic/Azores': 'Azores Standard Time',
+ 'Atlantic/Bermuda': 'Atlantic Standard Time',
+ 'Atlantic/Canary': 'GMT Standard Time',
+ 'Atlantic/Cape_Verde': 'Cape Verde Standard Time',
+ 'Atlantic/Faeroe': 'GMT Standard Time',
+ 'Atlantic/Jan_Mayen': 'W. Europe Standard Time',
+ 'Atlantic/Madeira': 'GMT Standard Time',
+ 'Atlantic/Reykjavik': 'Greenwich Standard Time',
+ 'Atlantic/South_Georgia': 'UTC-02',
+ 'Atlantic/St_Helena': 'Greenwich Standard Time',
+ 'Atlantic/Stanley': 'SA Eastern Standard Time',
+ 'Australia/ACT': 'AUS Eastern Standard Time',
+ 'Australia/Adelaide': 'Cen. Australia Standard Time',
+ 'Australia/Brisbane': 'E. Australia Standard Time',
+ 'Australia/Broken_Hill': 'Cen. Australia Standard Time',
+ 'Australia/Canberra': 'AUS Eastern Standard Time',
+ 'Australia/Currie': 'Tasmania Standard Time',
+ 'Australia/Darwin': 'AUS Central Standard Time',
+ 'Australia/Eucla': 'Aus Central W. Standard Time',
+ 'Australia/Hobart': 'Tasmania Standard Time',
+ 'Australia/LHI': 'Lord Howe Standard Time',
+ 'Australia/Lindeman': 'E. Australia Standard Time',
+ 'Australia/Lord_Howe': 'Lord Howe Standard Time',
+ 'Australia/Melbourne': 'AUS Eastern Standard Time',
+ 'Australia/NSW': 'AUS Eastern Standard Time',
+ 'Australia/North': 'AUS Central Standard Time',
+ 'Australia/Perth': 'W. Australia Standard Time',
+ 'Australia/Queensland': 'E. Australia Standard Time',
+ 'Australia/South': 'Cen. Australia Standard Time',
+ 'Australia/Sydney': 'AUS Eastern Standard Time',
+ 'Australia/Tasmania': 'Tasmania Standard Time',
+ 'Australia/Victoria': 'AUS Eastern Standard Time',
+ 'Australia/West': 'W. Australia Standard Time',
+ 'Australia/Yancowinna': 'Cen. Australia Standard Time',
+ 'Brazil/Acre': 'SA Pacific Standard Time',
+ 'Brazil/DeNoronha': 'UTC-02',
+ 'Brazil/East': 'E. South America Standard Time',
+ 'Brazil/West': 'SA Western Standard Time',
+ 'CST6CDT': 'Central Standard Time',
+ 'Canada/Atlantic': 'Atlantic Standard Time',
+ 'Canada/Central': 'Central Standard Time',
+ 'Canada/Eastern': 'Eastern Standard Time',
+ 'Canada/Mountain': 'Mountain Standard Time',
+ 'Canada/Newfoundland': 'Newfoundland Standard Time',
+ 'Canada/Pacific': 'Pacific Standard Time',
+ 'Canada/Saskatchewan': 'Canada Central Standard Time',
+ 'Canada/Yukon': 'Pacific Standard Time',
+ 'Chile/Continental': 'Pacific SA Standard Time',
+ 'Chile/EasterIsland': 'Easter Island Standard Time',
+ 'Cuba': 'Cuba Standard Time',
+ 'EST5EDT': 'Eastern Standard Time',
+ 'Egypt': 'Egypt Standard Time',
+ 'Eire': 'GMT Standard Time',
+ 'Etc/GMT': 'UTC',
+ 'Etc/GMT+1': 'Cape Verde Standard Time',
+ 'Etc/GMT+10': 'Hawaiian Standard Time',
+ 'Etc/GMT+11': 'UTC-11',
+ 'Etc/GMT+12': 'Dateline Standard Time',
+ 'Etc/GMT+2': 'UTC-02',
+ 'Etc/GMT+3': 'SA Eastern Standard Time',
+ 'Etc/GMT+4': 'SA Western Standard Time',
+ 'Etc/GMT+5': 'SA Pacific Standard Time',
+ 'Etc/GMT+6': 'Central America Standard Time',
+ 'Etc/GMT+7': 'US Mountain Standard Time',
+ 'Etc/GMT+8': 'UTC-08',
+ 'Etc/GMT+9': 'UTC-09',
+ 'Etc/GMT-1': 'W. Central Africa Standard Time',
+ 'Etc/GMT-10': 'West Pacific Standard Time',
+ 'Etc/GMT-11': 'Central Pacific Standard Time',
+ 'Etc/GMT-12': 'UTC+12',
+ 'Etc/GMT-13': 'UTC+13',
+ 'Etc/GMT-14': 'Line Islands Standard Time',
+ 'Etc/GMT-2': 'South Africa Standard Time',
+ 'Etc/GMT-3': 'E. Africa Standard Time',
+ 'Etc/GMT-4': 'Arabian Standard Time',
+ 'Etc/GMT-5': 'West Asia Standard Time',
+ 'Etc/GMT-6': 'Central Asia Standard Time',
+ 'Etc/GMT-7': 'SE Asia Standard Time',
+ 'Etc/GMT-8': 'Singapore Standard Time',
+ 'Etc/GMT-9': 'Tokyo Standard Time',
+ 'Etc/UCT': 'UTC',
+ 'Etc/UTC': 'UTC',
+ 'Europe/Amsterdam': 'W. Europe Standard Time',
+ 'Europe/Andorra': 'W. Europe Standard Time',
+ 'Europe/Astrakhan': 'Astrakhan Standard Time',
+ 'Europe/Athens': 'GTB Standard Time',
+ 'Europe/Belfast': 'GMT Standard Time',
+ 'Europe/Belgrade': 'Central Europe Standard Time',
+ 'Europe/Berlin': 'W. Europe Standard Time',
+ 'Europe/Bratislava': 'Central Europe Standard Time',
+ 'Europe/Brussels': 'Romance Standard Time',
+ 'Europe/Bucharest': 'GTB Standard Time',
+ 'Europe/Budapest': 'Central Europe Standard Time',
+ 'Europe/Busingen': 'W. Europe Standard Time',
+ 'Europe/Chisinau': 'E. Europe Standard Time',
+ 'Europe/Copenhagen': 'Romance Standard Time',
+ 'Europe/Dublin': 'GMT Standard Time',
+ 'Europe/Gibraltar': 'W. Europe Standard Time',
+ 'Europe/Guernsey': 'GMT Standard Time',
+ 'Europe/Helsinki': 'FLE Standard Time',
+ 'Europe/Isle_of_Man': 'GMT Standard Time',
+ 'Europe/Istanbul': 'Turkey Standard Time',
+ 'Europe/Jersey': 'GMT Standard Time',
+ 'Europe/Kaliningrad': 'Kaliningrad Standard Time',
+ 'Europe/Kiev': 'FLE Standard Time',
+ 'Europe/Kirov': 'Russian Standard Time',
+ 'Europe/Lisbon': 'GMT Standard Time',
+ 'Europe/Ljubljana': 'Central Europe Standard Time',
+ 'Europe/London': 'GMT Standard Time',
+ 'Europe/Luxembourg': 'W. Europe Standard Time',
+ 'Europe/Madrid': 'Romance Standard Time',
+ 'Europe/Malta': 'W. Europe Standard Time',
+ 'Europe/Mariehamn': 'FLE Standard Time',
+ 'Europe/Minsk': 'Belarus Standard Time',
+ 'Europe/Monaco': 'W. Europe Standard Time',
+ 'Europe/Moscow': 'Russian Standard Time',
+ 'Europe/Oslo': 'W. Europe Standard Time',
+ 'Europe/Paris': 'Romance Standard Time',
+ 'Europe/Podgorica': 'Central Europe Standard Time',
+ 'Europe/Prague': 'Central Europe Standard Time',
+ 'Europe/Riga': 'FLE Standard Time',
+ 'Europe/Rome': 'W. Europe Standard Time',
+ 'Europe/Samara': 'Russia Time Zone 3',
+ 'Europe/San_Marino': 'W. Europe Standard Time',
+ 'Europe/Sarajevo': 'Central European Standard Time',
+ 'Europe/Saratov': 'Saratov Standard Time',
+ 'Europe/Simferopol': 'Russian Standard Time',
+ 'Europe/Skopje': 'Central European Standard Time',
+ 'Europe/Sofia': 'FLE Standard Time',
+ 'Europe/Stockholm': 'W. Europe Standard Time',
+ 'Europe/Tallinn': 'FLE Standard Time',
+ 'Europe/Tirane': 'Central Europe Standard Time',
+ 'Europe/Tiraspol': 'E. Europe Standard Time',
+ 'Europe/Ulyanovsk': 'Astrakhan Standard Time',
+ 'Europe/Uzhgorod': 'FLE Standard Time',
+ 'Europe/Vaduz': 'W. Europe Standard Time',
+ 'Europe/Vatican': 'W. Europe Standard Time',
+ 'Europe/Vienna': 'W. Europe Standard Time',
+ 'Europe/Vilnius': 'FLE Standard Time',
+ 'Europe/Volgograd': 'Volgograd Standard Time',
+ 'Europe/Warsaw': 'Central European Standard Time',
+ 'Europe/Zagreb': 'Central European Standard Time',
+ 'Europe/Zaporozhye': 'FLE Standard Time',
+ 'Europe/Zurich': 'W. Europe Standard Time',
+ 'GB': 'GMT Standard Time',
+ 'GB-Eire': 'GMT Standard Time',
+ 'GMT+0': 'UTC',
+ 'GMT-0': 'UTC',
+ 'GMT0': 'UTC',
+ 'Greenwich': 'UTC',
+ 'Hongkong': 'China Standard Time',
+ 'Iceland': 'Greenwich Standard Time',
+ 'Indian/Antananarivo': 'E. Africa Standard Time',
+ 'Indian/Chagos': 'Central Asia Standard Time',
+ 'Indian/Christmas': 'SE Asia Standard Time',
+ 'Indian/Cocos': 'Myanmar Standard Time',
+ 'Indian/Comoro': 'E. Africa Standard Time',
+ 'Indian/Kerguelen': 'West Asia Standard Time',
+ 'Indian/Mahe': 'Mauritius Standard Time',
+ 'Indian/Maldives': 'West Asia Standard Time',
+ 'Indian/Mauritius': 'Mauritius Standard Time',
+ 'Indian/Mayotte': 'E. Africa Standard Time',
+ 'Indian/Reunion': 'Mauritius Standard Time',
+ 'Iran': 'Iran Standard Time',
+ 'Israel': 'Israel Standard Time',
+ 'Jamaica': 'SA Pacific Standard Time',
+ 'Japan': 'Tokyo Standard Time',
+ 'Kwajalein': 'UTC+12',
+ 'Libya': 'Libya Standard Time',
+ 'MST7MDT': 'Mountain Standard Time',
+ 'Mexico/BajaNorte': 'Pacific Standard Time (Mexico)',
+ 'Mexico/BajaSur': 'Mountain Standard Time (Mexico)',
+ 'Mexico/General': 'Central Standard Time (Mexico)',
+ 'NZ': 'New Zealand Standard Time',
+ 'NZ-CHAT': 'Chatham Islands Standard Time',
+ 'Navajo': 'Mountain Standard Time',
+ 'PRC': 'China Standard Time',
+ 'PST8PDT': 'Pacific Standard Time',
+ 'Pacific/Apia': 'Samoa Standard Time',
+ 'Pacific/Auckland': 'New Zealand Standard Time',
+ 'Pacific/Bougainville': 'Bougainville Standard Time',
+ 'Pacific/Chatham': 'Chatham Islands Standard Time',
+ 'Pacific/Easter': 'Easter Island Standard Time',
+ 'Pacific/Efate': 'Central Pacific Standard Time',
+ 'Pacific/Enderbury': 'UTC+13',
+ 'Pacific/Fakaofo': 'UTC+13',
+ 'Pacific/Fiji': 'Fiji Standard Time',
+ 'Pacific/Funafuti': 'UTC+12',
+ 'Pacific/Galapagos': 'Central America Standard Time',
+ 'Pacific/Gambier': 'UTC-09',
+ 'Pacific/Guadalcanal': 'Central Pacific Standard Time',
+ 'Pacific/Guam': 'West Pacific Standard Time',
+ 'Pacific/Honolulu': 'Hawaiian Standard Time',
+ 'Pacific/Johnston': 'Hawaiian Standard Time',
+ 'Pacific/Kiritimati': 'Line Islands Standard Time',
+ 'Pacific/Kosrae': 'Central Pacific Standard Time',
+ 'Pacific/Kwajalein': 'UTC+12',
+ 'Pacific/Majuro': 'UTC+12',
+ 'Pacific/Marquesas': 'Marquesas Standard Time',
+ 'Pacific/Midway': 'UTC-11',
+ 'Pacific/Nauru': 'UTC+12',
+ 'Pacific/Niue': 'UTC-11',
+ 'Pacific/Norfolk': 'Norfolk Standard Time',
+ 'Pacific/Noumea': 'Central Pacific Standard Time',
+ 'Pacific/Pago_Pago': 'UTC-11',
+ 'Pacific/Palau': 'Tokyo Standard Time',
+ 'Pacific/Pitcairn': 'UTC-08',
+ 'Pacific/Ponape': 'Central Pacific Standard Time',
+ 'Pacific/Port_Moresby': 'West Pacific Standard Time',
+ 'Pacific/Rarotonga': 'Hawaiian Standard Time',
+ 'Pacific/Saipan': 'West Pacific Standard Time',
+ 'Pacific/Samoa': 'UTC-11',
+ 'Pacific/Tahiti': 'Hawaiian Standard Time',
+ 'Pacific/Tarawa': 'UTC+12',
+ 'Pacific/Tongatapu': 'Tonga Standard Time',
+ 'Pacific/Truk': 'West Pacific Standard Time',
+ 'Pacific/Wake': 'UTC+12',
+ 'Pacific/Wallis': 'UTC+12',
+ 'Poland': 'Central European Standard Time',
+ 'Portugal': 'GMT Standard Time',
+ 'ROC': 'Taipei Standard Time',
+ 'ROK': 'Korea Standard Time',
+ 'Singapore': 'Singapore Standard Time',
+ 'Turkey': 'Turkey Standard Time',
+ 'UCT': 'UTC',
+ 'US/Alaska': 'Alaskan Standard Time',
+ 'US/Aleutian': 'Aleutian Standard Time',
+ 'US/Arizona': 'US Mountain Standard Time',
+ 'US/Central': 'Central Standard Time',
+ 'US/Eastern': 'Eastern Standard Time',
+ 'US/Hawaii': 'Hawaiian Standard Time',
+ 'US/Indiana-Starke': 'Central Standard Time',
+ 'US/Michigan': 'Eastern Standard Time',
+ 'US/Mountain': 'Mountain Standard Time',
+ 'US/Pacific': 'Pacific Standard Time',
+ 'US/Samoa': 'UTC-11',
+ 'UTC': 'UTC',
+ 'Universal': 'UTC',
+ 'W-SU': 'Russian Standard Time',
+ 'Zulu': 'UTC'}