migrate to Python3 (from alejeune)
parent 1655977e64
commit bc4b9811ed
.gitignore (vendored, 9 changes)
@@ -6,4 +6,11 @@
 /tasks.sqlite-wal
 /srvinstallation
 /tasks.sqlite-shm
 .idea
+/deb/builddir
+/deb/*.deb
+/lib
+/rpm/*.rpm
+/rpm/RPMS
+/rpm/BUILD
+/rpm/__VERSION__
.vscode/launch.json (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+    // Use IntelliSense to learn about possible attributes.
+    // Hover to view descriptions of existing attributes.
+    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+    "version": "0.2.0",
+    "configurations": [
+        {
+            "name": "Python: Current File",
+            "type": "python",
+            "request": "launch",
+            "program": "${file}",
+            // "args": [
+            //     "-ldebug",
+            //     "backup"
+            // ],
+            "args": [
+                "register_existing"
+            ],
+            "console": "integratedTerminal"
+        }
+    ]
+}
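For reference, the launch configuration above simply runs the file open in the editor with one positional argument; outside VS Code the equivalent invocation would be roughly the following (a sketch, assuming the file being debugged is tisbackup.py at the repository root):

    # run the same entry point that the debugger launches
    python3 tisbackup.py register_existing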
@@ -1,9 +1,9 @@
 Package: tis-tisbackup
-Version: 1:__VERSION__
+Version: 1-__VERSION__
 Section: base
 Priority: optional
 Architecture: all
-Depends: unzip, ssh, rsync, python-paramiko, python-pyvmomi, python-pexpect, python-flask,python-simplejson
+Depends: unzip, ssh, rsync, python3-paramiko, python3-pyvmomi, python3-pexpect, python3-flask,python3-simplejson, python3-pip
 Maintainer: Tranquil-IT <technique@tranquil.it>
 Description: TISBackup backup management
 Homepage: https://www.tranquil.it
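Besides the python3 dependency switch, the Version separator changed from `1:` to `1-`, which lines up with the new `dpkg-deb --build builddir tis-tisbackup-1-${VERSION}.deb` name later in this commit; a colon is valid inside a Debian Version field but awkward in file names. To check what a built package actually declares (a sketch, using the package name that appears in the docs below):

    # print the Version field recorded in the package metadata
    dpkg-deb --field tis-tisbackup-1-2.0.163-deb11.deb Version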
@@ -1,8 +1,9 @@
 #!/usr/bin/env bash
 
+VERSION_DEB=$(cat /etc/debian_version | cut -d "." -f 1)
 VERSION_SHORT=$(cat ../tisbackup.py | grep "__version__" | cut -d "=" -f 2 | sed 's/"//g')
 GIT_COUNT=`git rev-list HEAD --count`
-VERSION="${VERSION_SHORT}.${GIT_COUNT}"
+VERSION="${VERSION_SHORT}.${GIT_COUNT}-deb${VERSION_DEB}"
 
 rm -f *.deb
 rm -Rf builddir
@@ -10,23 +11,26 @@ mkdir builddir
 mkdir builddir/DEBIAN
 cp ./control ./builddir/DEBIAN
 cp ./postinst ./builddir/DEBIAN
+cp ./prerm ./builddir/DEBIAN
+cp ./postrm ./builddir/DEBIAN
 
 sed "s/__VERSION__/$VERSION/" -i ./builddir/DEBIAN/control
 
-mkdir -p builddir/opt/tisbackup/
+mkdir -p ./builddir/opt/tisbackup/
 mkdir -p ./builddir/usr/lib/systemd/system/
 mkdir -p ./builddir/etc/tis
 mkdir -p ./builddir/etc/cron.d/
 
+pip3 install -r ../requirements.txt -t ./builddir/opt/tisbackup/lib
 
 rsync -aP --exclude "deb/" --exclude "doc/" --exclude "rpm/" --exclude ".git" ../ ./builddir/opt/tisbackup
 rsync -aP ../scripts/tisbackup_gui.service ./builddir/usr/lib/systemd/system/
 rsync -aP ../scripts/tisbackup_huey.service ./builddir/usr/lib/systemd/system/
 rsync -aP ../samples/tisbackup_gui.ini ./builddir/etc/tis
 rsync -aP ../samples/tisbackup-config.ini.sample ./builddir/etc/tis/tisbackup-config.ini.sample
-rsync -aP ../lib/huey/bin/huey_consumer.py ./builddir/opt/tisbackup/
 
-chmod 755 /opt/tisbackup/tisbackup.py
+chmod 755 ./builddir/opt/tisbackup/tisbackup.py
 
-dpkg-deb --build builddir tis-tisbackup-1:${VERSION}.deb
+dpkg-deb --build builddir tis-tisbackup-1-${VERSION}.deb
 
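Taken together, the script now stamps the Debian release into the version string and vendors the Python dependencies into the package tree with pip3. A typical run would look roughly like this (a sketch; the script's own file name is not shown in this diff, so build.sh is an assumption):

    cd deb
    ./build.sh                 # hypothetical script name
    ls tis-tisbackup-*.deb     # e.g. tis-tisbackup-1-2.0.163-deb11.deb on Debian 11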
@@ -10,3 +10,5 @@ if [ ! -f /etc/cron.d/tisbackup ]; then
 cp /opt/tisbackup/samples/tisbackup.cron /etc/cron.d/tisbackup
 fi
 
+find /opt/tisbackup -name "*.pyc" -exec rm -rf {} \;
+python3 -m compileall /opt/tisbackup/
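The two added postinst lines clear any Python 2 bytecode left from a previous install and byte-compile the tree with Python 3, which writes the new .pyc files into __pycache__ directories instead of next to the sources. A quick post-install check (a sketch):

    # recompile quietly, then confirm Python 3 style bytecode locations
    python3 -m compileall -q /opt/tisbackup/
    find /opt/tisbackup -type d -name "__pycache__" | head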
deb/postrm (new file, 2 lines)
@@ -0,0 +1,2 @@
+#!/bin/bash
+rm -rf /opt/tisbackup
deb/prerm (new file, 3 lines)
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+find /opt/tisbackup/ -name *.pyo -exec rm -f {} \;
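One caveat about the new prerm: the unquoted glob only behaves as intended while no *.pyo file sits in the script's working directory, because the shell would otherwise expand it before find runs. A safer form of the same cleanup (an assumption about the intent, quoting the pattern so find receives it literally):

    find /opt/tisbackup/ -name "*.pyo" -exec rm -f {} \;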
@@ -21,9 +21,9 @@ Installing and configuring TISBackup on Debian
 Setting up the GNU/Linux Debian server
 --------------------------------------
 
-In order to install a fresh Debian Linux 10 *Buster* (physical or virtual)
+In order to install a fresh Debian Linux 11 *Bullseye* (physical or virtual)
 without graphical interface, please refer to the
-`Debian GNU/Linux Installation Guide <https://www.debian.org/releases/buster/amd64/>`_.
+`Debian GNU/Linux Installation Guide <https://www.debian.org/releases/bullseye/amd64/>`_.
 
 Configuring network parameters
 ++++++++++++++++++++++++++++++
@@ -138,14 +138,51 @@ and :ref:`install TISBackup on your Debian<install_tisbackup_debian>`.
 
 .. _install_tisbackup_debian:
 
-Installing the TISBackup server on Debian Linux
-+++++++++++++++++++++++++++++++++++++++++++++++
+Installing the TISBackup server
++++++++++++++++++++++++++++++++
 
+From Tranquil IT's repository
+"""""""""""""""""""""""""""""
+
+The easiest way is to install the package from Tranquil IT repository :
+
+.. tabs::
+
+   .. code-tab:: bash On CentOS8 and derivate
+
+      wget https://srvinstallation.tranquil.it/tisbackup/tis-tisbackup-162-1.el8.x86_64.rpm -O tis-tisbackup.rpm
+      yum install -y tis-tisbackup.rpm
+
+   .. code-tab:: bash On CentOS7
+
+      wget https://srvinstallation.tranquil.it/tisbackup/tis-tisbackup-162-1.el7.x86_64.rpm -O tis-tisbackup.rpm
+      yum install -y tis-tisbackup.rpm
+
+   .. code-tab:: bash On Debian 11
+
+      wget https://srvinstallation.tranquil.it/tisbackup/tis-tisbackup-1-2.0.163-deb11.deb -O tis-tisbackup.deb
+      apt install unzip python3-paramiko python3-pyvmomi python3-pexpect python3-flask python3-simplejson python3-pip
+      dpkg -i tis-tisbackup.deb
+
+From sources
+""""""""""""
+
 * install the required dependencies:
 
-.. code-block:: bash
+.. tabs::
 
-   apt-get install unzip ssh rsync python-paramiko python-pyvmomi python-pexpect
+   .. code-tab:: bash On CentOS8 and derivate
+
+      unzip, ssh, rsync, python3-paramiko, python3-pyvmomi, python3-pexpect, python3-flask,python3-simplejson, python3-pip
+
+   .. code-tab:: bash On CentOS7 and derivate
+
+      unzip rsync python3-paramiko python3-pyvmomi nfs-utils python3-flask python3-simplejson autofs python3-pexpect
+
+   .. code-tab:: bash on Debian 11
+
+      unzip rsync python36-paramiko python3-pyvmomi nfs-utils python3-flask python3-simplejson autofs pexpect
+
 * retrieve the git sources from https://github.com/tranquilit/TISbackup
   and place them in the :file:`/opt` folder on your server:
@@ -156,6 +193,7 @@ Installing the TISBackup server on Debian Linux
    wget --no-check-certificate https://github.com/tranquilit/TISbackup/archive/master.zip
    unzip master.zip
    mv TISbackup-master tisbackup
+   pip3 install huey iniparse -t /opt/tisbackup/lib
   chmod 755 /opt/tisbackup/tisbackup.py
   ln -sb /opt/tisbackup/tisbackup.py /usr/local/bin/tisbackup
 
@@ -227,11 +265,6 @@ and :ref:`configure the backup jobs for your TISBackup<configuring_backup_jobs>`
 Setting up the graphical user interface for the TISBackup server
 ----------------------------------------------------------------
 
-.. code-block:: bash
-
-   apt-get install python2.7 python-simplejson python-flask python-setuptools sudo
-   python /usr/lib/python2.7/dist-packages/easy_install.py "huey<=0.4.9"
-
 .. code-block:: bash
 
    cp /opt/tisbackup/samples/tisbackup_gui.ini /etc/tis/
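A caveat for readers of the new "From sources" tabs: the three dependency tabs list package names only (the CentOS8 tab even keeps them comma separated), so none of those lines is a runnable command as written. Working equivalents would presumably be (an assumption built from the package lists above):

    # CentOS 8, package names taken from the tab above
    yum install -y unzip ssh rsync python3-paramiko python3-pyvmomi python3-pexpect python3-flask python3-simplejson python3-pip
    # Debian 11
    apt install unzip rsync python3-paramiko python3-pyvmomi nfs-utils python3-flask python3-simplejson autofs python3-pexpect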
@@ -1,62 +0,0 @@
-__author__ = 'Charles Leifer'
-__license__ = 'MIT'
-__version__ = '0.4.9'
-
-from huey.api import Huey, crontab
-
-try:
-    import redis
-    from huey.backends.redis_backend import RedisBlockingQueue
-    from huey.backends.redis_backend import RedisDataStore
-    from huey.backends.redis_backend import RedisEventEmitter
-    from huey.backends.redis_backend import RedisSchedule
-
-    class RedisHuey(Huey):
-        def __init__(self, name='huey', store_none=False, always_eager=False,
-                     read_timeout=None, **conn_kwargs):
-            queue = RedisBlockingQueue(
-                name,
-                read_timeout=read_timeout,
-                **conn_kwargs)
-            result_store = RedisDataStore(name, **conn_kwargs)
-            schedule = RedisSchedule(name, **conn_kwargs)
-            events = RedisEventEmitter(name, **conn_kwargs)
-            super(RedisHuey, self).__init__(
-                queue=queue,
-                result_store=result_store,
-                schedule=schedule,
-                events=events,
-                store_none=store_none,
-                always_eager=always_eager)
-
-except ImportError:
-    class RedisHuey(object):
-        def __init__(self, *args, **kwargs):
-            raise RuntimeError('Error, "redis" is not installed. Install '
-                               'using pip: "pip install redis"')
-
-try:
-    from huey.backends.sqlite_backend import SqliteQueue
-    from huey.backends.sqlite_backend import SqliteDataStore
-    from huey.backends.sqlite_backend import SqliteSchedule
-
-    class SqliteHuey(Huey):
-        def __init__(self, name='huey', store_none=False, always_eager=False,
-                     location=None):
-            if location is None:
-                raise ValueError("Please specify a database file with the "
-                                 "'location' parameter")
-            queue = SqliteQueue(name, location)
-            result_store = SqliteDataStore(name, location)
-            schedule = SqliteSchedule(name, location)
-            super(SqliteHuey, self).__init__(
-                queue=queue,
-                result_store=result_store,
-                schedule=schedule,
-                events=None,
-                store_none=store_none,
-                always_eager=always_eager)
-except ImportError:
-    class SqliteHuey(object):
-        def __init__(self, *args, **kwargs):
-            raise RuntimeError('Error, "sqlite" is not installed.')
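With the vendored huey 0.4.9 tree deleted, the library now has to come from pip, which matches the `pip3 install huey iniparse -t /opt/tisbackup/lib` step added to the documentation and the `pip3 install -r ../requirements.txt` step in the packaging script. A quick sanity check after such an install (a sketch; the version printed is whatever pip resolves, not the removed 0.4.9):

    pip3 install huey iniparse -t /opt/tisbackup/lib
    # confirm the module is importable from the vendored lib directory
    python3 -c "import sys; sys.path.insert(0, '/opt/tisbackup/lib'); import huey; print(huey.__version__)"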
lib/huey/api.py (513 deletions)
@@ -1,513 +0,0 @@
-import datetime
-import json
-import pickle
-import re
-import time
-import traceback
-import uuid
-from functools import wraps
-
-from huey.backends.dummy import DummySchedule
-from huey.exceptions import DataStoreGetException
-from huey.exceptions import DataStorePutException
-from huey.exceptions import DataStoreTimeout
-from huey.exceptions import QueueException
-from huey.exceptions import QueueReadException
-from huey.exceptions import QueueRemoveException
-from huey.exceptions import QueueWriteException
-from huey.exceptions import ScheduleAddException
-from huey.exceptions import ScheduleReadException
-from huey.registry import registry
-from huey.utils import EmptyData
-from huey.utils import local_to_utc
-from huey.utils import wrap_exception
-
-
-class Huey(object):
-    """
-    Huey executes tasks by exposing function decorators that cause the function
-    call to be enqueued for execution by the consumer.
-
-    Typically your application will only need one Huey instance, but you can
-    have as many as you like -- the only caveat is that one consumer process
-    must be executed for each Huey instance.
-
-    :param queue: a queue instance, e.g. ``RedisQueue()``
-    :param result_store: a place to store results, e.g. ``RedisResultStore()``
-    :param schedule: a place to store pending tasks, e.g. ``RedisSchedule()``
-    :param events: channel to send events on, e.g. ``RedisEventEmitter()``
-    :param store_none: Flag to indicate whether tasks that return ``None``
-        should store their results in the result store.
-    :param always_eager: Useful for testing, this will execute all tasks
-        immediately, without enqueueing them.
-
-    Example usage::
-
-        from huey.api import Huey, crontab
-        from huey.backends.redis_backend import RedisQueue, RedisDataStore, RedisSchedule
-
-        queue = RedisQueue('my-app')
-        result_store = RedisDataStore('my-app')
-        schedule = RedisSchedule('my-app')
-        huey = Huey(queue, result_store, schedule)
-
-        # This is equivalent to the previous 4 lines:
-        # huey = RedisHuey('my-app', {'host': 'localhost', 'port': 6379})
-
-        @huey.task()
-        def slow_function(some_arg):
-            # ... do something ...
-            return some_arg
-
-        @huey.periodic_task(crontab(minute='0', hour='3'))
-        def backup():
-            # do a backup every day at 3am
-            return
-    """
-    def __init__(self, queue, result_store=None, schedule=None, events=None,
-                 store_none=False, always_eager=False):
-        self.queue = queue
-        self.result_store = result_store
-        self.schedule = schedule or DummySchedule(self.queue.name)
-        self.events = events
-        self.blocking = self.queue.blocking
-        self.store_none = store_none
-        self.always_eager = always_eager
-
-    def task(self, retries=0, retry_delay=0, retries_as_argument=False,
-             include_task=False, name=None):
-        def decorator(func):
-            """
-            Decorator to execute a function out-of-band via the consumer.
-            """
-            klass = create_task(
-                QueueTask,
-                func,
-                retries_as_argument,
-                name,
-                include_task)
-
-            def schedule(args=None, kwargs=None, eta=None, delay=None,
-                         convert_utc=True, task_id=None):
-                if delay and eta:
-                    raise ValueError('Both a delay and an eta cannot be '
-                                     'specified at the same time')
-                if delay:
-                    eta = (datetime.datetime.now() +
-                           datetime.timedelta(seconds=delay))
-                if convert_utc and eta:
-                    eta = local_to_utc(eta)
-                cmd = klass(
-                    (args or (), kwargs or {}),
-                    execute_time=eta,
-                    retries=retries,
-                    retry_delay=retry_delay,
-                    task_id=task_id)
-                return self.enqueue(cmd)
-
-            func.schedule = schedule
-            func.task_class = klass
-
-            @wraps(func)
-            def inner_run(*args, **kwargs):
-                cmd = klass(
-                    (args, kwargs),
-                    retries=retries,
-                    retry_delay=retry_delay)
-                return self.enqueue(cmd)
-
-            inner_run.call_local = func
-            return inner_run
-        return decorator
-
-    def periodic_task(self, validate_datetime, name=None):
-        """
-        Decorator to execute a function on a specific schedule.
-        """
-        def decorator(func):
-            def method_validate(self, dt):
-                return validate_datetime(dt)
-
-            klass = create_task(
-                PeriodicQueueTask,
-                func,
-                task_name=name,
-                validate_datetime=method_validate,
-            )
-
-            func.task_class = klass
-
-            def _revoke(revoke_until=None, revoke_once=False):
-                self.revoke(klass(), revoke_until, revoke_once)
-            func.revoke = _revoke
-
-            def _is_revoked(dt=None, peek=True):
-                return self.is_revoked(klass(), dt, peek)
-            func.is_revoked = _is_revoked
-
-            def _restore():
-                return self.restore(klass())
-            func.restore = _restore
-
-            return func
-        return decorator
-
-    def _wrapped_operation(exc_class):
-        def decorator(fn):
-            def inner(*args, **kwargs):
-                try:
-                    return fn(*args, **kwargs)
-                except:
-                    wrap_exception(exc_class)
-            return inner
-        return decorator
-
-    @_wrapped_operation(QueueWriteException)
-    def _write(self, msg):
-        self.queue.write(msg)
-
-    @_wrapped_operation(QueueReadException)
-    def _read(self):
-        return self.queue.read()
-
-    @_wrapped_operation(QueueRemoveException)
-    def _remove(self, msg):
-        return self.queue.remove(msg)
-
-    @_wrapped_operation(DataStoreGetException)
-    def _get(self, key, peek=False):
-        if peek:
-            return self.result_store.peek(key)
-        else:
-            return self.result_store.get(key)
-
-    @_wrapped_operation(DataStorePutException)
-    def _put(self, key, value):
-        return self.result_store.put(key, value)
-
-    @_wrapped_operation(ScheduleAddException)
-    def _add_schedule(self, data, ts):
-        if self.schedule is None:
-            raise AttributeError('Schedule not specified.')
-        self.schedule.add(data, ts)
-
-    @_wrapped_operation(ScheduleReadException)
-    def _read_schedule(self, ts):
-        if self.schedule is None:
-            raise AttributeError('Schedule not specified.')
-        return self.schedule.read(ts)
-
-    def emit(self, message):
-        """Events should always fail silently."""
-        try:
-            self.events.emit(message)
-        except:
-            pass
-
-    def enqueue(self, task):
-        if self.always_eager:
-            return task.execute()
-
-        self._write(registry.get_message_for_task(task))
-
-        if self.result_store:
-            return AsyncData(self, task)
-
-    def dequeue(self):
-        message = self._read()
-        if message:
-            return registry.get_task_for_message(message)
-
-    def _format_time(self, dt):
-        if dt is None:
-            return None
-        return time.mktime(dt.timetuple())
-
-    def emit_task(self, status, task, error=False):
-        if self.events:
-            message_data = {'status': status}
-            message_data.update({
-                'id': task.task_id,
-                'task': type(task).__name__,
-                'retries': task.retries,
-                'retry_delay': task.retry_delay,
-                'execute_time': self._format_time(task.execute_time),
-                'error': error})
-            if error:
-                message_data['traceback'] = traceback.format_exc()
-            self.emit(json.dumps(message_data))
-
-    def execute(self, task):
-        if not isinstance(task, QueueTask):
-            raise TypeError('Unknown object: %s' % task)
-
-        result = task.execute()
-
-        if result is None and not self.store_none:
-            return
-
-        if self.result_store and not isinstance(task, PeriodicQueueTask):
-            self._put(task.task_id, pickle.dumps(result))
-
-        return result
-
-    def revoke(self, task, revoke_until=None, revoke_once=False):
-        if not self.result_store:
-            raise QueueException('A DataStore is required to revoke task')
-
-        serialized = pickle.dumps((revoke_until, revoke_once))
-        self._put(task.revoke_id, serialized)
-
-    def restore(self, task):
-        self._get(task.revoke_id)  # simply get and delete if there
-
-    def is_revoked(self, task, dt=None, peek=True):
-        if not self.result_store:
-            return False
-        res = self._get(task.revoke_id, peek=True)
-        if res is EmptyData:
-            return False
-        revoke_until, revoke_once = pickle.loads(res)
-        if revoke_once:
-            # This task *was* revoked for one run, but now it should be
-            # restored to normal execution.
-            if not peek:
-                self.restore(task)
-            return True
-        return revoke_until is None or revoke_until > dt
-
-    def add_schedule(self, task):
-        msg = registry.get_message_for_task(task)
-        ex_time = task.execute_time or datetime.datetime.fromtimestamp(0)
-        self._add_schedule(msg, ex_time)
-
-    def read_schedule(self, ts):
-        return [
-            registry.get_task_for_message(m) for m in self._read_schedule(ts)]
-
-    def flush(self):
-        self.queue.flush()
-
-    def ready_to_run(self, cmd, dt=None):
-        dt = dt or datetime.datetime.utcnow()
-        return cmd.execute_time is None or cmd.execute_time <= dt
-
-
-class AsyncData(object):
-    def __init__(self, huey, task):
-        self.huey = huey
-        self.task = task
-
-        self._result = EmptyData
-
-    def _get(self):
-        task_id = self.task.task_id
-        if self._result is EmptyData:
-            res = self.huey._get(task_id)
-
-            if res is not EmptyData:
-                self._result = pickle.loads(res)
-                return self._result
-            else:
-                return res
-        else:
-            return self._result
-
-    def get(self, blocking=False, timeout=None, backoff=1.15, max_delay=1.0,
-            revoke_on_timeout=False):
-        if not blocking:
-            res = self._get()
-            if res is not EmptyData:
-                return res
-        else:
-            start = time.time()
-            delay = .1
-            while self._result is EmptyData:
-                if timeout and time.time() - start >= timeout:
-                    if revoke_on_timeout:
-                        self.revoke()
-                    raise DataStoreTimeout
-                if delay > max_delay:
-                    delay = max_delay
-                if self._get() is EmptyData:
-                    time.sleep(delay)
-                    delay *= backoff
-
-            return self._result
-
-    def revoke(self):
-        self.huey.revoke(self.task)
-
-    def restore(self):
-        self.huey.restore(self.task)
-
-
-def with_metaclass(meta, base=object):
-    return meta("NewBase", (base,), {})
-
-
-class QueueTaskMetaClass(type):
-    def __init__(cls, name, bases, attrs):
-        """
-        Metaclass to ensure that all task classes are registered
-        """
-        registry.register(cls)
-
-
-class QueueTask(with_metaclass(QueueTaskMetaClass)):
-    """
-    A class that encapsulates the logic necessary to 'do something' given some
-    arbitrary data. When enqueued with the :class:`Huey`, it will be
-    stored in a queue for out-of-band execution via the consumer. See also
-    the :meth:`task` decorator, which can be used to automatically
-    execute any function out-of-band.
-
-    Example::
-
-        class SendEmailTask(QueueTask):
-            def execute(self):
-                data = self.get_data()
-                send_email(data['recipient'], data['subject'], data['body'])
-
-        huey.enqueue(
-            SendEmailTask({
-                'recipient': 'somebody@spam.com',
-                'subject': 'look at this awesome website',
-                'body': 'http://youtube.com'
-            })
-        )
-    """
-
-    def __init__(self, data=None, task_id=None, execute_time=None, retries=0,
-                 retry_delay=0):
-        self.set_data(data)
-        self.task_id = task_id or self.create_id()
-        self.revoke_id = 'r:%s' % self.task_id
-        self.execute_time = execute_time
-        self.retries = retries
-        self.retry_delay = retry_delay
-
-    def create_id(self):
-        return str(uuid.uuid4())
-
-    def get_data(self):
-        return self.data
-
-    def set_data(self, data):
-        self.data = data
-
-    def execute(self):
-        """Execute any arbitary code here"""
-        raise NotImplementedError
-
-    def __eq__(self, rhs):
-        return (
-            self.task_id == rhs.task_id and
-            self.execute_time == rhs.execute_time and
-            type(self) == type(rhs))
-
-
-class PeriodicQueueTask(QueueTask):
-    def create_id(self):
-        return registry.task_to_string(type(self))
-
-    def validate_datetime(self, dt):
-        """Validate that the task should execute at the given datetime"""
-        return False
-
-
-def create_task(task_class, func, retries_as_argument=False, task_name=None,
-                include_task=False, **kwargs):
-    def execute(self):
-        args, kwargs = self.data or ((), {})
-        if retries_as_argument:
-            kwargs['retries'] = self.retries
-        if include_task:
-            kwargs['task'] = self
-        return func(*args, **kwargs)
-
-    attrs = {
-        'execute': execute,
-        '__module__': func.__module__,
-        '__doc__': func.__doc__
-    }
-    attrs.update(kwargs)
-
-    klass = type(
-        task_name or 'queuecmd_%s' % (func.__name__),
-        (task_class,),
-        attrs
-    )
-
-    return klass
-
-
-dash_re = re.compile('(\d+)-(\d+)')
-every_re = re.compile('\*\/(\d+)')
-
-
-def crontab(month='*', day='*', day_of_week='*', hour='*', minute='*'):
-    """
-    Convert a "crontab"-style set of parameters into a test function that will
-    return True when the given datetime matches the parameters set forth in
-    the crontab.
-
-    Acceptable inputs:
-    * = every distinct value
-    */n = run every "n" times, i.e. hours='*/4' == 0, 4, 8, 12, 16, 20
-    m-n = run every time m..n
-    m,n = run on m and n
-    """
-    validation = (
-        ('m', month, range(1, 13)),
-        ('d', day, range(1, 32)),
-        ('w', day_of_week, range(7)),
-        ('H', hour, range(24)),
-        ('M', minute, range(60))
-    )
-    cron_settings = []
-
-    for (date_str, value, acceptable) in validation:
-        settings = set([])
-
-        if isinstance(value, int):
-            value = str(value)
-
-        for piece in value.split(','):
-            if piece == '*':
-                settings.update(acceptable)
-                continue
-
-            if piece.isdigit():
-                piece = int(piece)
-                if piece not in acceptable:
-                    raise ValueError('%d is not a valid input' % piece)
-                settings.add(piece)
-
-            else:
-                dash_match = dash_re.match(piece)
-                if dash_match:
-                    lhs, rhs = map(int, dash_match.groups())
-                    if lhs not in acceptable or rhs not in acceptable:
-                        raise ValueError('%s is not a valid input' % piece)
-                    settings.update(range(lhs, rhs+1))
-                    continue
-
-                every_match = every_re.match(piece)
-                if every_match:
-                    interval = int(every_match.groups()[0])
-                    settings.update(acceptable[::interval])
-
-        cron_settings.append(sorted(list(settings)))
-
-    def validate_date(dt):
-        _, m, d, H, M, _, w, _, _ = dt.timetuple()
-
-        # fix the weekday to be sunday=0
-        w = (w + 1) % 7
-
-        for (date_piece, selection) in zip([m, d, w, H, M], cron_settings):
-            if date_piece not in selection:
-                return False
-
-        return True
-
-    return validate_date
@@ -1,113 +0,0 @@
-class BaseQueue(object):
-    """
-    Base implementation for a Queue, all backends should subclass
-    """
-
-    # whether this backend blocks while waiting for new results or should be
-    # polled by the consumer
-    blocking = False
-
-    def __init__(self, name, **connection):
-        """
-        Initialize the Queue - this happens once when the module is loaded
-
-        :param name: A string representation of the name for this queue
-        :param connection: Connection parameters for the queue
-        """
-        self.name = name
-        self.connection = connection
-
-    def write(self, data):
-        """
-        Push 'data' onto the queue
-        """
-        raise NotImplementedError
-
-    def read(self):
-        """
-        Pop 'data' from the queue, returning None if no data is available --
-        an empty queue should not raise an Exception!
-        """
-        raise NotImplementedError
-
-    def remove(self, data):
-        """
-        Remove the given data from the queue
-        """
-        raise NotImplementedError
-
-    def flush(self):
-        """
-        Delete everything from the queue
-        """
-        raise NotImplementedError
-
-    def __len__(self):
-        """
-        Used primarily in tests, but return the number of items in the queue
-        """
-        raise NotImplementedError
-
-
-class BaseSchedule(object):
-    def __init__(self, name, **connection):
-        """
-        Initialize the Queue - this happens once when the module is loaded
-
-        :param name: A string representation of the name for this queue
-        :param connection: Connection parameters for the queue
-        """
-        self.name = name
-        self.connection = connection
-
-    def add(self, data, ts):
-        """
-        Add the timestamped data to the task schedule.
-        """
-        raise NotImplementedError
-
-    def read(self, ts):
-        """
-        Read scheduled items for the given timestamp
-        """
-        raise NotImplementedError
-
-    def flush(self):
-        """Delete all items in schedule."""
-        raise NotImplementedError
-
-
-class BaseDataStore(object):
-    """
-    Base implementation for a data store
-    """
-    def __init__(self, name, **connection):
-        """
-        Initialize the data store
-        """
-        self.name = name
-        self.connection = connection
-
-    def put(self, key, value):
-        raise NotImplementedError
-
-    def peek(self, key):
-        raise NotImplementedError
-
-    def get(self, key):
-        raise NotImplementedError
-
-    def flush(self):
-        raise NotImplementedError
-
-
-class BaseEventEmitter(object):
-    def __init__(self, channel, **connection):
-        self.channel = channel
-        self.connection = connection
-
-    def emit(self, message):
-        raise NotImplementedError
-
-
-Components = (BaseQueue, BaseDataStore, BaseSchedule, BaseEventEmitter)
@@ -1,103 +0,0 @@
-"""
-Test-only implementations of Queue and DataStore. These will not work for
-real applications because they only store tasks/results in memory.
-"""
-from collections import deque
-import heapq
-
-from huey.backends.base import BaseDataStore
-from huey.backends.base import BaseEventEmitter
-from huey.backends.base import BaseQueue
-from huey.backends.base import BaseSchedule
-from huey.utils import EmptyData
-
-
-class DummyQueue(BaseQueue):
-    def __init__(self, *args, **kwargs):
-        super(DummyQueue, self).__init__(*args, **kwargs)
-        self._queue = []
-
-    def write(self, data):
-        self._queue.insert(0, data)
-
-    def read(self):
-        try:
-            return self._queue.pop()
-        except IndexError:
-            return None
-
-    def flush(self):
-        self._queue = []
-
-    def remove(self, data):
-        clone = []
-        ct = 0
-        for elem in self._queue:
-            if elem == data:
-                ct += 1
-            else:
-                clone.append(elem)
-        self._queue = clone
-        return ct
-
-    def __len__(self):
-        return len(self._queue)
-
-
-class DummySchedule(BaseSchedule):
-    def __init__(self, *args, **kwargs):
-        super(DummySchedule, self).__init__(*args, **kwargs)
-        self._schedule = []
-
-    def add(self, data, ts):
-        heapq.heappush(self._schedule, (ts, data))
-
-    def read(self, ts):
-        res = []
-        while len(self._schedule):
-            sts, data = heapq.heappop(self._schedule)
-            if sts <= ts:
-                res.append(data)
-            else:
-                self.add(data, sts)
-                break
-        return res
-
-    def flush(self):
-        self._schedule = []
-
-
-class DummyDataStore(BaseDataStore):
-    def __init__(self, *args, **kwargs):
-        super(DummyDataStore, self).__init__(*args, **kwargs)
-        self._results = {}
-
-    def put(self, key, value):
-        self._results[key] = value
-
-    def peek(self, key):
-        return self._results.get(key, EmptyData)
-
-    def get(self, key):
-        return self._results.pop(key, EmptyData)
-
-    def flush(self):
-        self._results = {}
-
-
-class DummyEventEmitter(BaseEventEmitter):
-    def __init__(self, *args, **kwargs):
-        super(DummyEventEmitter, self).__init__(*args, **kwargs)
-        self._events = deque()
-        self.__size = 100
-
-    def emit(self, message):
-        self._events.appendleft(message)
-        num_events = len(self._events)
-        if num_events > self.__size * 1.5:
-            while num_events > self.__size:
-                self._events.popright()
-                num_events -= 1
-
-
-Components = (DummyQueue, DummyDataStore, DummySchedule, DummyEventEmitter)
@@ -1,153 +0,0 @@
-# -*- coding: utf-8 -*-
-__author__ = 'deathowl'
-
-import datetime
-import re
-import time
-import pika
-from pika.exceptions import AMQPConnectionError
-
-from huey.backends.base import BaseEventEmitter
-from huey.backends.base import BaseQueue
-
-
-def clean_name(name):
-    return re.sub('[^a-z0-9]', '', name)
-
-
-class RabbitQueue(BaseQueue):
-    """
-    A simple Queue that uses the rabbit to store messages
-    """
-    def __init__(self, name, **connection):
-        """
-        connection = {
-            'host': 'localhost',
-            'port': 5672,
-            'username': 'guest',
-            'password': 'guest',
-            'vhost': '/',
-            'ssl': False
-        }
-        """
-        super(RabbitQueue, self).__init__(name, **connection)
-
-        self.queue_name = 'huey.rabbit.%s' % clean_name(name)
-        credentials = pika.PlainCredentials(
-            connection.get('username', 'guest'),
-            connection.get('password', 'guest'))
-        connection_params = pika.ConnectionParameters(
-            host=connection.get('host', 'localhost'),
-            port=connection.get('port', 5672),
-            credentials=credentials,
-            virtual_host=connection.get('vhost', '/'),
-            ssl=connection.get('ssl', False))
-
-        self.conn = pika.BlockingConnection(connection_params)
-        self.channel = self.conn.channel()
-        self.channel.queue_declare(self.queue_name, durable=True)
-
-    def write(self, data):
-        self.channel.basic_publish(
-            exchange='',
-            routing_key=self.queue_name,
-            body=data)
-
-    def read(self):
-        return self.get_data_from_queue(self.queue_name)
-
-    def remove(self, data):
-        # This is not something you usually do in rabbit, this is the only
-        # operation, which is not atomic, but this "hack" should do the trick.
-        amount = 0
-        idx = 0
-        qlen = len(self)
-
-        for method_frame, _, body in self.channel.consume(self.queue_name):
-            idx += 1
-            if body == data:
-                self.channel.basic_ack(method_frame.delivery_tag)
-                amount += 1
-            else:
-                self.channel.basic_nack(
-                    method_frame.delivery_tag,
-                    requeue=True)
-
-            if idx >= qlen:
-                break
-
-        self.channel.cancel()
-        return amount
-
-    def flush(self):
-        self.channel.queue_purge(queue=self.queue_name)
-        return True
-
-    def __len__(self):
-        queue = self.channel.queue_declare(self.queue_name, durable=True)
-        return queue.method.message_count
-
-    def get_data_from_queue(self, queue):
-        data = None
-        if len(self) == 0:
-            return None
-
-        for method_frame, _, body in self.channel.consume(queue):
-            data = body
-            self.channel.basic_ack(method_frame.delivery_tag)
-            break
-
-        self.channel.cancel()
-        return data
-
-
-class RabbitBlockingQueue(RabbitQueue):
-    """
-    Use the blocking right pop, should result in messages getting
-    executed close to immediately by the consumer as opposed to
-    being polled for
-    """
-    blocking = True
-
-    def read(self):
-        try:
-            return self.get_data_from_queue(self.queue_name)
-        except AMQPConnectionError:
-            return None
-
-
-class RabbitEventEmitter(BaseEventEmitter):
-    def __init__(self, channel, **connection):
-        super(RabbitEventEmitter, self).__init__(channel, **connection)
-        credentials = pika.PlainCredentials(
-            connection.get('username', 'guest'),
-            connection.get('password', 'guest'))
-        connection_params = pika.ConnectionParameters(
-            host=connection.get('host', 'localhost'),
-            port=connection.get('port', 5672),
-            credentials=credentials,
-            virtual_host=connection.get('vhost', '/'),
-            ssl=connection.get('ssl', False))
-
-        self.conn = pika.BlockingConnection(connection_params)
-        self.channel = self.conn.channel()
-        self.exchange_name = 'huey.events'
-        self.channel.exchange_declare(
-            exchange=self.exchange_name,
-            type='fanout',
-            auto_delete=False,
-            durable=True)
-
-    def emit(self, message):
-        properties = pika.BasicProperties(
-            content_type="text/plain",
-            delivery_mode=2)
-
-        self.channel.basic_publish(
-            exchange=self.exchange_name,
-            routing_key='',
-            body=message,
-            properties=properties)
-
-
-Components = (RabbitBlockingQueue, None, None, RabbitEventEmitter)
@@ -1,153 +0,0 @@
-import re
-import time
-
-import redis
-from redis.exceptions import ConnectionError
-
-from huey.backends.base import BaseDataStore
-from huey.backends.base import BaseEventEmitter
-from huey.backends.base import BaseQueue
-from huey.backends.base import BaseSchedule
-from huey.utils import EmptyData
-
-
-def clean_name(name):
-    return re.sub('[^a-z0-9]', '', name)
-
-
-class RedisQueue(BaseQueue):
-    """
-    A simple Queue that uses the redis to store messages
-    """
-    def __init__(self, name, **connection):
-        """
-        connection = {
-            'host': 'localhost',
-            'port': 6379,
-            'db': 0,
-        }
-        """
-        super(RedisQueue, self).__init__(name, **connection)
-
-        self.queue_name = 'huey.redis.%s' % clean_name(name)
-        self.conn = redis.Redis(**connection)
-
-    def write(self, data):
-        self.conn.lpush(self.queue_name, data)
-
-    def read(self):
-        return self.conn.rpop(self.queue_name)
-
-    def remove(self, data):
-        return self.conn.lrem(self.queue_name, data)
-
-    def flush(self):
-        self.conn.delete(self.queue_name)
-
-    def __len__(self):
-        return self.conn.llen(self.queue_name)
-
-
-class RedisBlockingQueue(RedisQueue):
-    """
-    Use the blocking right pop, should result in messages getting
-    executed close to immediately by the consumer as opposed to
-    being polled for
-    """
-    blocking = True
-
-    def __init__(self, name, read_timeout=None, **connection):
-        """
-        connection = {
-            'host': 'localhost',
-            'port': 6379,
-            'db': 0,
-        }
-        """
-        super(RedisBlockingQueue, self).__init__(name, **connection)
-        self.read_timeout = read_timeout
-
-    def read(self):
-        try:
-            return self.conn.brpop(
-                self.queue_name,
-                timeout=self.read_timeout)[1]
-        except (ConnectionError, TypeError, IndexError):
-            # unfortunately, there is no way to differentiate a socket timing
-            # out and a host being unreachable
-            return None
-
-
-# a custom lua script to pass to redis that will read tasks from the schedule
-# and atomically pop them from the sorted set and return them.
-# it won't return anything if it isn't able to remove the items it reads.
-SCHEDULE_POP_LUA = """
-local key = KEYS[1]
-local unix_ts = ARGV[1]
-local res = redis.call('zrangebyscore', key, '-inf', unix_ts)
-if #res and redis.call('zremrangebyscore', key, '-inf', unix_ts) == #res then
-    return res
-end
-"""
-
-
-class RedisSchedule(BaseSchedule):
-    def __init__(self, name, **connection):
-        super(RedisSchedule, self).__init__(name, **connection)
-
-        self.key = 'huey.schedule.%s' % clean_name(name)
-        self.conn = redis.Redis(**connection)
-        self._pop = self.conn.register_script(SCHEDULE_POP_LUA)
-
-    def convert_ts(self, ts):
-        return time.mktime(ts.timetuple())
-
-    def add(self, data, ts):
-        self.conn.zadd(self.key, data, self.convert_ts(ts))
-
-    def read(self, ts):
-        unix_ts = self.convert_ts(ts)
-        # invoke the redis lua script that will atomically pop off
-        # all the tasks older than the given timestamp
-        tasks = self._pop(keys=[self.key], args=[unix_ts])
-        return [] if tasks is None else tasks
-
-    def flush(self):
-        self.conn.delete(self.key)
-
-
-class RedisDataStore(BaseDataStore):
-    def __init__(self, name, **connection):
-        super(RedisDataStore, self).__init__(name, **connection)
-
-        self.storage_name = 'huey.results.%s' % clean_name(name)
-        self.conn = redis.Redis(**connection)
-
-    def put(self, key, value):
-        self.conn.hset(self.storage_name, key, value)
-
-    def peek(self, key):
-        if self.conn.hexists(self.storage_name, key):
-            return self.conn.hget(self.storage_name, key)
-        return EmptyData
-
-    def get(self, key):
-        val = self.peek(key)
-        if val is not EmptyData:
-            self.conn.hdel(self.storage_name, key)
-        return val
-
-    def flush(self):
-        self.conn.delete(self.storage_name)
-
-
-class RedisEventEmitter(BaseEventEmitter):
-    def __init__(self, channel, **connection):
-        super(RedisEventEmitter, self).__init__(channel, **connection)
-        self.conn = redis.Redis(**connection)
-
-    def emit(self, message):
-        self.conn.publish(self.channel, message)
-
-
-Components = (RedisBlockingQueue, RedisDataStore, RedisSchedule,
-              RedisEventEmitter)
@@ -1,205 +0,0 @@
""" SQLite backend for Huey.

Inspired from a snippet by Thiago Arruda [1]

[1] http://flask.pocoo.org/snippets/88/
"""
import json
import sqlite3
import time
try:
    from thread import get_ident
except ImportError:  # Python 3
    try:
        from threading import get_ident
    except ImportError:
        from _thread import get_ident
    buffer = memoryview

from huey.backends.base import BaseDataStore
from huey.backends.base import BaseEventEmitter
from huey.backends.base import BaseQueue
from huey.backends.base import BaseSchedule
from huey.utils import EmptyData


class _SqliteDatabase(object):
    def __init__(self, location):
        if location == ':memory:':
            raise ValueError("Database location has to be a file path, "
                             "in-memory databases are not supported.")
        self.location = location
        self._conn_cache = {}
        with self.get_connection() as conn:
            # Enable write-ahead logging
            conn.execute("PRAGMA journal_mode=WAL;")
            # Hand over syncing responsibility to OS
            conn.execute("PRAGMA synchronous=OFF;")
            # Store temporary tables and indices in memory
            conn.execute("PRAGMA temp_store=MEMORY;")

    def get_connection(self, immediate=False):
        """ Obtain a sqlite3.Connection instance for the database.

        Connections are cached on a by-thread basis, i.e. every calling thread
        will always get the same Connection object back.
        """
        if immediate:
            return sqlite3.Connection(self.location, timeout=60,
                                      isolation_level="IMMEDIATE")
        id = get_ident()
        if id not in self._conn_cache:
            self._conn_cache[id] = sqlite3.Connection(
                self.location, timeout=60)
        return self._conn_cache[id]


class SqliteQueue(BaseQueue):
    """
    A simple Queue that uses SQLite to store messages
    """
    _create = """
        CREATE TABLE IF NOT EXISTS {0}
        (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            item BLOB
        )
    """
    _count = "SELECT COUNT(*) FROM {0}"
    _append = "INSERT INTO {0} (item) VALUES (?)"
    _get = "SELECT id, item FROM {0} ORDER BY id LIMIT 1"
    _remove_by_value = "DELETE FROM {0} WHERE item = ?"
    _remove_by_id = "DELETE FROM {0} WHERE id = ?"
    _flush = "DELETE FROM {0}"

    def __init__(self, name, location):
        super(SqliteQueue, self).__init__(name, location=location)
        self.queue_name = 'huey_queue_{0}'.format(name)
        self._db = _SqliteDatabase(location)
        with self._db.get_connection() as conn:
            conn.execute(self._create.format(self.queue_name))

    def write(self, data):
        with self._db.get_connection() as conn:
            conn.execute(self._append.format(self.queue_name), (data,))

    def read(self):
        with self._db.get_connection(immediate=True) as conn:
            cursor = conn.execute(self._get.format(self.queue_name))
            try:
                id, data = next(cursor)
            except StopIteration:
                return None
            if id:
                conn.execute(self._remove_by_id.format(self.queue_name), (id,))
            return data

    def remove(self, data):
        with self._db.get_connection() as conn:
            return conn.execute(self._remove_by_value.format(self.queue_name),
                                (data,)).rowcount

    def flush(self):
        with self._db.get_connection() as conn:
            conn.execute(self._flush.format(self.queue_name))

    def __len__(self):
        with self._db.get_connection() as conn:
            return next(conn.execute(self._count.format(self.queue_name)))[0]


class SqliteSchedule(BaseSchedule):
    _create = """
        CREATE TABLE IF NOT EXISTS {0}
        (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            item BLOB,
            timestamp INTEGER
        )
    """
    _read_items = """
        SELECT item, timestamp FROM {0} WHERE timestamp <= ?
        ORDER BY timestamp
    """
    _delete_items = "DELETE FROM {0} WHERE timestamp <= ?"
    _add_item = "INSERT INTO {0} (item, timestamp) VALUES (?, ?)"
    _flush = "DELETE FROM {0}"

    def __init__(self, name, location):
        super(SqliteSchedule, self).__init__(name, location=location)
        self._db = _SqliteDatabase(location)
        self.name = 'huey_schedule_{0}'.format(name)
        with self._db.get_connection() as conn:
            conn.execute(self._create.format(self.name))

    def convert_ts(self, ts):
        return time.mktime(ts.timetuple())

    def add(self, data, ts):
        with self._db.get_connection() as conn:
            conn.execute(self._add_item.format(self.name),
                         (data, self.convert_ts(ts)))

    def read(self, ts):
        with self._db.get_connection() as conn:
            results = conn.execute(self._read_items.format(self.name),
                                   (self.convert_ts(ts),)).fetchall()
            conn.execute(self._delete_items.format(self.name),
                         (self.convert_ts(ts),))
            return [data for data, _ in results]

    def flush(self):
        with self._db.get_connection() as conn:
            conn.execute(self._flush.format(self.name))


class SqliteDataStore(BaseDataStore):
    _create = """
        CREATE TABLE IF NOT EXISTS {0}
        (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            key TEXT,
            result BLOB
        )
    """
    _put = "INSERT INTO {0} (key, result) VALUES (?, ?)"
    _peek = "SELECT result FROM {0} WHERE key = ?"
    _remove = "DELETE FROM {0} WHERE key = ?"
    _flush = "DELETE FROM {0}"

    def __init__(self, name, location):
        super(SqliteDataStore, self).__init__(name, location=location)
        self._db = _SqliteDatabase(location)
        self.name = 'huey_results_{0}'.format(name)
        with self._db.get_connection() as conn:
            conn.execute(self._create.format(self.name))

    def put(self, key, value):
        with self._db.get_connection() as conn:
            conn.execute(self._remove.format(self.name), (key,))
            conn.execute(self._put.format(self.name), (key, value))

    def peek(self, key):
        with self._db.get_connection() as conn:
            try:
                return next(conn.execute(self._peek.format(self.name),
                                         (key,)))[0]
            except StopIteration:
                return EmptyData

    def get(self, key):
        with self._db.get_connection() as conn:
            try:
                data = next(conn.execute(self._peek.format(self.name),
                                         (key,)))[0]
                conn.execute(self._remove.format(self.name), (key,))
                return data
            except StopIteration:
                return EmptyData

    def flush(self):
        with self._db.get_connection() as conn:
            conn.execute(self._flush.format(self.name))


Components = (SqliteQueue, SqliteDataStore, SqliteSchedule, None)
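
[Editor's note: illustration only, not part of this commit. The Components tuple above is what huey's dynamic backend loader imports (see dynamic_import in the Django integration further down). A minimal sketch of wiring the removed SQLite components into a Huey instance, mirroring how the test suite in this diff constructs its instances; the database path is hypothetical, and ':memory:' would be rejected by _SqliteDatabase.]

from huey.api import Huey
from huey.backends.sqlite_backend import SqliteDataStore
from huey.backends.sqlite_backend import SqliteQueue
from huey.backends.sqlite_backend import SqliteSchedule

location = '/tmp/huey-tasks.db'  # hypothetical file path; must not be ':memory:'
queue = SqliteQueue('tasks', location)
results = SqliteDataStore('tasks', location)
schedule = SqliteSchedule('tasks', location)
huey = Huey(queue, results, schedule=schedule)
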
@@ -1,121 +0,0 @@
#!/usr/bin/env python

import logging
import optparse
import os
import sys
from logging.handlers import RotatingFileHandler

from huey.consumer import Consumer
from huey.utils import load_class


def err(s):
    sys.stderr.write('\033[91m%s\033[0m\n' % s)


def get_loglevel(verbose=None):
    if verbose is None:
        return logging.INFO
    elif verbose:
        return logging.DEBUG
    return logging.ERROR


def setup_logger(loglevel, logfile):
    log_format = ('%(threadName)s %(asctime)s %(name)s '
                  '%(levelname)s %(message)s')
    logging.basicConfig(level=loglevel, format=log_format)

    if logfile:
        handler = RotatingFileHandler(
            logfile, maxBytes=1024*1024, backupCount=3)
        handler.setFormatter(logging.Formatter(log_format))
        logging.getLogger().addHandler(handler)


def get_option_parser():
    parser = optparse.OptionParser(
        'Usage: %prog [options] path.to.huey_instance')
    parser.add_option('-l', '--logfile', dest='logfile',
                      help='write logs to FILE', metavar='FILE')
    parser.add_option('-v', '--verbose', dest='verbose',
                      help='verbose logging', action='store_true')
    parser.add_option('-q', '--quiet', dest='verbose',
                      help='log exceptions only', action='store_false')
    parser.add_option('-w', '--workers', dest='workers', type='int',
                      help='worker threads (default=1)', default=1)
    parser.add_option('-t', '--threads', dest='workers', type='int',
                      help='same as "workers"', default=1)
    parser.add_option('-p', '--periodic', dest='periodic', default=True,
                      help='execute periodic tasks (default=True)',
                      action='store_true')
    parser.add_option('-n', '--no-periodic', dest='periodic',
                      help='do NOT execute periodic tasks',
                      action='store_false')
    parser.add_option('-d', '--delay', dest='initial_delay', type='float',
                      help='initial delay in seconds (default=0.1)',
                      default=0.1)
    parser.add_option('-m', '--max-delay', dest='max_delay', type='float',
                      help='maximum time to wait between polling the queue '
                           '(default=10)',
                      default=10)
    parser.add_option('-b', '--backoff', dest='backoff', type='float',
                      help='amount to backoff delay when no results present '
                           '(default=1.15)',
                      default=1.15)
    parser.add_option('-P', '--periodic-task-interval',
                      dest='periodic_task_interval',
                      type='float', help='Granularity of periodic tasks.',
                      default=60.0)
    parser.add_option('-S', '--scheduler-interval', dest='scheduler_interval',
                      type='float', help='Granularity of scheduler.',
                      default=1.0)
    parser.add_option('-u', '--utc', dest='utc', action='store_true',
                      help='use UTC time for all tasks (default=True)',
                      default=True)
    parser.add_option('--localtime', dest='utc', action='store_false',
                      help='use local time for all tasks')
    return parser


def load_huey(path):
    try:
        return load_class(path)
    except:
        cur_dir = os.getcwd()
        if cur_dir not in sys.path:
            sys.path.insert(0, cur_dir)
            return load_huey(path)
        err('Error importing %s' % path)
        raise


def consumer_main():
    parser = get_option_parser()
    options, args = parser.parse_args()

    setup_logger(get_loglevel(options.verbose), options.logfile)

    if len(args) == 0:
        err('Error: missing import path to `Huey` instance')
        err('Example: huey_consumer.py app.queue.huey_instance')
        sys.exit(1)

    huey_instance = load_huey(args[0])

    consumer = Consumer(
        huey_instance,
        options.workers,
        options.periodic,
        options.initial_delay,
        options.backoff,
        options.max_delay,
        options.utc,
        options.scheduler_interval,
        options.periodic_task_interval)
    consumer.run()


if __name__ == '__main__':
    consumer_main()
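
[Editor's note: illustration only. The script above boils down to loading a Huey instance by dotted path and handing it to Consumer with the parsed options; a minimal programmatic equivalent follows, where 'app.queue.huey_instance' is a hypothetical import path.]

from huey.consumer import Consumer
from huey.utils import load_class

huey_instance = load_class('app.queue.huey_instance')  # hypothetical path
consumer = Consumer(huey_instance, workers=2, periodic=True)
consumer.run()
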
@@ -1,279 +0,0 @@
import datetime
import logging
import signal
import threading
import time

from huey.exceptions import DataStoreGetException
from huey.exceptions import QueueException
from huey.exceptions import QueueReadException
from huey.exceptions import DataStorePutException
from huey.exceptions import QueueWriteException
from huey.exceptions import ScheduleAddException
from huey.exceptions import ScheduleReadException
from huey.registry import registry


class ConsumerThread(threading.Thread):
    def __init__(self, huey, utc, shutdown, interval=60):
        self.huey = huey
        self.utc = utc
        self.shutdown = shutdown
        self.interval = interval
        self._logger = logging.getLogger('huey.consumer.ConsumerThread')
        super(ConsumerThread, self).__init__()

    def get_now(self):
        if self.utc:
            return datetime.datetime.utcnow()
        return datetime.datetime.now()

    def on_shutdown(self):
        pass

    def loop(self, now):
        raise NotImplementedError

    def run(self):
        while not self.shutdown.is_set():
            self.loop()
        self._logger.debug('Thread shutting down')
        self.on_shutdown()

    def enqueue(self, task):
        try:
            self.huey.enqueue(task)
            self.huey.emit_task('enqueued', task)
        except QueueWriteException:
            self._logger.error('Error enqueueing task: %s' % task)

    def add_schedule(self, task):
        try:
            self.huey.add_schedule(task)
            self.huey.emit_task('scheduled', task)
        except ScheduleAddException:
            self._logger.error('Error adding task to schedule: %s' % task)

    def is_revoked(self, task, ts):
        try:
            if self.huey.is_revoked(task, ts, peek=False):
                self.huey.emit_task('revoked', task)
                return True
            return False
        except DataStoreGetException:
            self._logger.error('Error checking if task is revoked: %s' % task)
            return True

    def sleep_for_interval(self, start_ts):
        delta = time.time() - start_ts
        if delta < self.interval:
            time.sleep(self.interval - (time.time() - start_ts))


class PeriodicTaskThread(ConsumerThread):
    def loop(self, now=None):
        now = now or self.get_now()
        self._logger.debug('Checking periodic command registry')
        start = time.time()
        for task in registry.get_periodic_tasks():
            if task.validate_datetime(now):
                self._logger.info('Scheduling %s for execution' % task)
                self.enqueue(task)

        self.sleep_for_interval(start)


class SchedulerThread(ConsumerThread):
    def read_schedule(self, ts):
        try:
            return self.huey.read_schedule(ts)
        except ScheduleReadException:
            self._logger.error('Error reading schedule', exc_info=1)
            return []

    def loop(self, now=None):
        now = now or self.get_now()
        start = time.time()

        for task in self.read_schedule(now):
            self._logger.info('Scheduling %s for execution' % task)
            self.enqueue(task)

        self.sleep_for_interval(start)


class WorkerThread(ConsumerThread):
    def __init__(self, huey, default_delay, max_delay, backoff, utc,
                 shutdown):
        self.delay = self.default_delay = default_delay
        self.max_delay = max_delay
        self.backoff = backoff
        self._logger = logging.getLogger('huey.consumer.WorkerThread')
        super(WorkerThread, self).__init__(huey, utc, shutdown)

    def loop(self):
        self.check_message()

    def check_message(self):
        self._logger.debug('Checking for message')
        task = exc_raised = None
        try:
            task = self.huey.dequeue()
        except QueueReadException:
            self._logger.error('Error reading from queue', exc_info=1)
            exc_raised = True
        except QueueException:
            self._logger.error('Queue exception', exc_info=1)
            exc_raised = True
        except:
            self._logger.error('Unknown exception', exc_info=1)
            exc_raised = True

        if task:
            self.delay = self.default_delay
            self.handle_task(task, self.get_now())
        elif exc_raised or not self.huey.blocking:
            self.sleep()

    def sleep(self):
        if self.delay > self.max_delay:
            self.delay = self.max_delay

        self._logger.debug('No messages, sleeping for: %s' % self.delay)
        time.sleep(self.delay)
        self.delay *= self.backoff

    def handle_task(self, task, ts):
        if not self.huey.ready_to_run(task, ts):
            self._logger.info('Adding %s to schedule' % task)
            self.add_schedule(task)
        elif not self.is_revoked(task, ts):
            self.process_task(task, ts)

    def process_task(self, task, ts):
        try:
            self._logger.info('Executing %s' % task)
            self.huey.emit_task('started', task)
            self.huey.execute(task)
            self.huey.emit_task('finished', task)
        except DataStorePutException:
            self._logger.warn('Error storing result', exc_info=1)
        except:
            self._logger.error('Unhandled exception in worker thread',
                               exc_info=1)
            self.huey.emit_task('error', task, error=True)
            if task.retries:
                self.huey.emit_task('retrying', task)
                self.requeue_task(task, self.get_now())

    def requeue_task(self, task, ts):
        task.retries -= 1
        self._logger.info('Re-enqueueing task %s, %s tries left' %
                          (task.task_id, task.retries))
        if task.retry_delay:
            delay = datetime.timedelta(seconds=task.retry_delay)
            task.execute_time = ts + delay
            self._logger.debug('Execute %s at: %s' % (task, task.execute_time))
            self.add_schedule(task)
        else:
            self.enqueue(task)


class Consumer(object):
    def __init__(self, huey, workers=1, periodic=True, initial_delay=0.1,
                 backoff=1.15, max_delay=10.0, utc=True, scheduler_interval=1,
                 periodic_task_interval=60):

        self._logger = logging.getLogger('huey.consumer.ConsumerThread')
        self.huey = huey
        self.workers = workers
        self.periodic = periodic
        self.default_delay = initial_delay
        self.backoff = backoff
        self.max_delay = max_delay
        self.utc = utc
        self.scheduler_interval = scheduler_interval
        self.periodic_task_interval = periodic_task_interval

        self.delay = self.default_delay

        self._shutdown = threading.Event()

    def run(self):
        try:
            self.start()
            # it seems that calling self._shutdown.wait() here prevents the
            # signal handler from executing
            while not self._shutdown.is_set():
                self._shutdown.wait(.1)
        except:
            self._logger.error('Error', exc_info=1)
            self.shutdown()

    def start(self):
        self._logger.info('%d worker threads' % self.workers)

        self._set_signal_handler()
        self._log_registered_commands()
        self._create_threads()

        self._logger.info('Starting scheduler thread')
        self.scheduler_t.start()

        self._logger.info('Starting worker threads')
        for worker in self.worker_threads:
            worker.start()

        if self.periodic:
            self._logger.info('Starting periodic task scheduler thread')
            self.periodic_t.start()

    def shutdown(self):
        self._logger.info('Shutdown initiated')
        self._shutdown.set()

    def _handle_signal(self, sig_num, frame):
        self._logger.info('Received SIGTERM')
        self.shutdown()

    def _set_signal_handler(self):
        self._logger.info('Setting signal handler')
        signal.signal(signal.SIGTERM, self._handle_signal)

    def _log_registered_commands(self):
        msg = ['Huey consumer initialized with following commands']
        for command in registry._registry:
            msg.append('+ %s' % command.replace('queuecmd_', ''))
        self._logger.info('\n'.join(msg))

    def _create_threads(self):
        self.scheduler_t = SchedulerThread(
            self.huey,
            self.utc,
            self._shutdown,
            self.scheduler_interval)
        self.scheduler_t.name = 'Scheduler'

        self.worker_threads = []
        for i in range(self.workers):
            worker_t = WorkerThread(
                self.huey,
                self.default_delay,
                self.max_delay,
                self.backoff,
                self.utc,
                self._shutdown)
            worker_t.daemon = True
            worker_t.name = 'Worker %d' % (i + 1)
            self.worker_threads.append(worker_t)

        if self.periodic:
            self.periodic_t = PeriodicTaskThread(
                self.huey,
                self.utc,
                self._shutdown,
                self.periodic_task_interval)
            self.periodic_t.daemon = True
            self.periodic_t.name = 'Periodic Task'
        else:
            self.periodic_t = None
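
[Editor's note: illustration only. WorkerThread.sleep() implements exponential backoff on idle polls: the delay is clamped to max_delay before sleeping, then multiplied by the backoff factor. With the defaults above (initial_delay=0.1, backoff=1.15, max_delay=10.0), the sequence can be reproduced like this.]

delay, backoff, max_delay = 0.1, 1.15, 10.0
for poll in range(5):
    delay = min(delay, max_delay)          # clamp, as in sleep()
    print('poll %d: sleeping %.3fs' % (poll, delay))
    delay *= backoff                       # back off for the next idle poll
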
@@ -1,119 +0,0 @@
"""
This module contains a lot of cruft to handle instantiating a "Huey" object
using Django settings. Unlike more flexible python apps, the huey django
integration consists of a single global Huey instance configured via the
settings module.
"""
from functools import wraps
import sys

from django.conf import settings
from django.db import connection

from huey import crontab
from huey import Huey
from huey.utils import load_class


configuration_message = """
Configuring Huey for use with Django
====================================

Huey was designed to be simple to configure in the general case. For that
reason, huey will "just work" with no configuration at all provided you have
Redis installed and running locally.

On the other hand, you can configure huey manually using the following
setting structure. The following example uses Redis on localhost:

Simply point to a backend:

HUEY = {
    'backend': 'huey.backends.redis_backend',
    'name': 'unique name',
    'connection': {'host': 'localhost', 'port': 6379}

    'consumer_options': {'workers': 4},
}

If you would like to configure Huey's logger using Django's integrated logging
settings, the logger used by consumer is named "huey.consumer".

For more granular control, you can assign HUEY programmatically:

HUEY = Huey(RedisBlockingQueue('my-queue'))
"""

def default_queue_name():
    try:
        return settings.DATABASE_NAME
    except AttributeError:
        return settings.DATABASES['default']['NAME']
    except KeyError:
        return 'huey'

def config_error(msg):
    print(configuration_message)
    print('\n\n')
    print(msg)
    sys.exit(1)

def dynamic_import(obj, key, required=False):
    try:
        path = obj[key]
    except KeyError:
        if required:
            config_error('Missing required configuration: "%s"' % key)
        return None
    try:
        return load_class(path + '.Components')
    except ImportError:
        config_error('Unable to import %s: "%s"' % (key, path))

try:
    HUEY = getattr(settings, 'HUEY', None)
except:
    config_error('Error encountered reading settings.HUEY')

if HUEY is None:
    try:
        from huey import RedisHuey
    except ImportError:
        config_error('Error: Huey could not import the redis backend. '
                     'Install `redis-py`.')
    HUEY = RedisHuey(default_queue_name())

if not isinstance(HUEY, Huey):
    Queue, DataStore, Schedule, Events = dynamic_import(HUEY, 'backend')
    name = HUEY.get('name') or default_queue_name()
    conn = HUEY.get('connection', {})
    always_eager = HUEY.get('always_eager', False)
    HUEY = Huey(
        Queue(name, **conn),
        DataStore and DataStore(name, **conn) or None,
        Schedule and Schedule(name, **conn) or None,
        Events and Events(name, **conn) or None,
        always_eager=always_eager)

task = HUEY.task
periodic_task = HUEY.periodic_task

def close_db(fn):
    """Decorator to be used with tasks that may operate on the database."""
    @wraps(fn)
    def inner(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        finally:
            connection.close()
    return inner

def db_task(*args, **kwargs):
    def decorator(fn):
        return task(*args, **kwargs)(close_db(fn))
    return decorator

def db_periodic_task(*args, **kwargs):
    def decorator(fn):
        return periodic_task(*args, **kwargs)(close_db(fn))
    return decorator
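
[Editor's note: hedged example based on the configuration_message and dynamic_import logic above. The module loads '<backend>.Components' and instantiates each component as Component(name, **connection). The backend module and connection values below are illustrative; this is how the dict form of the setting might look in a Django settings.py.]

HUEY = {
    'backend': 'huey.backends.sqlite_backend',        # module exposing Components
    'name': 'my-app',                                 # hypothetical queue name
    'connection': {'location': '/var/lib/huey/tasks.db'},  # kwargs for each component
    'consumer_options': {'workers': 4},
}
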
@@ -1,126 +0,0 @@
import imp
import sys
from optparse import make_option

from django.conf import settings
from django.core.management.base import BaseCommand
try:
    from importlib import import_module
except ImportError:
    from django.utils.importlib import import_module

try:
    from django.apps import apps as django_apps
    HAS_DJANGO_APPS = True
except ImportError:
    # Django 1.6
    HAS_DJANGO_APPS = False

from huey.consumer import Consumer
from huey.bin.huey_consumer import get_loglevel
from huey.bin.huey_consumer import setup_logger


class Command(BaseCommand):
    """
    Queue consumer. Example usage::

    To start the consumer (note you must export the settings module):

    django-admin.py run_huey
    """
    help = "Run the queue consumer"

    option_list = BaseCommand.option_list + (
        make_option('--periodic', '-p',
            dest='periodic',
            action='store_true',
            help='Enqueue periodic commands'
        ),
        make_option('--no-periodic', '-n',
            dest='periodic',
            action='store_false',
            help='Do not enqueue periodic commands'
        ),
        make_option('--workers', '-w',
            dest='workers',
            type='int',
            help='Number of worker threads'
        ),
        make_option('--delay', '-d',
            dest='initial_delay',
            type='float',
            help='Delay between polling requests'
        ),
        make_option('--max_delay', '-m',
            dest='max_delay',
            type='float',
            help='Maximum delay between polling requests'
        ),
    )

    def autodiscover_appconfigs(self):
        """Use Django app registry to pull out potential apps with tasks.py module."""
        module_name = 'tasks'
        for config in django_apps.get_app_configs():
            app_path = config.module.__path__
            try:
                fp, path, description = imp.find_module(module_name, app_path)
            except ImportError:
                continue
            else:
                import_path = '%s.%s' % (config.name, module_name)
                imp.load_module(import_path, fp, path, description)

    def autodiscover_old(self):
        # this is to find modules named <commands.py> in a django project's
        # installed apps directories
        module_name = 'tasks'

        for app in settings.INSTALLED_APPS:
            try:
                import_module(app)
                app_path = sys.modules[app].__path__
            except AttributeError:
                continue
            try:
                imp.find_module(module_name, app_path)
            except ImportError:
                continue
            import_module('%s.%s' % (app, module_name))
            app_path = sys.modules['%s.%s' % (app, module_name)]

    def autodiscover(self):
        """Switch between Django 1.7 style and old style app importing."""
        if HAS_DJANGO_APPS:
            self.autodiscover_appconfigs()
        else:
            self.autodiscover_old()

    def handle(self, *args, **options):
        from huey.djhuey import HUEY
        try:
            consumer_options = settings.HUEY['consumer_options']
        except:
            consumer_options = {}

        if options['workers'] is not None:
            consumer_options['workers'] = options['workers']

        if options['periodic'] is not None:
            consumer_options['periodic'] = options['periodic']

        if options['initial_delay'] is not None:
            consumer_options['initial_delay'] = options['initial_delay']

        if options['max_delay'] is not None:
            consumer_options['max_delay'] = options['max_delay']

        self.autodiscover()

        loglevel = get_loglevel(consumer_options.pop('loglevel', None))
        logfile = consumer_options.pop('logfile', None)
        setup_logger(loglevel, logfile)

        consumer = Consumer(HUEY, **consumer_options)
        consumer.run()
@@ -1,26 +0,0 @@
class QueueException(Exception):
    pass

class QueueWriteException(QueueException):
    pass

class QueueReadException(QueueException):
    pass

class QueueRemoveException(QueueException):
    pass

class DataStoreGetException(QueueException):
    pass

class DataStorePutException(QueueException):
    pass

class DataStoreTimeout(QueueException):
    pass

class ScheduleAddException(QueueException):
    pass

class ScheduleReadException(QueueException):
    pass
@@ -1,20 +0,0 @@
from functools import wraps


def _transaction(db, fn):
    @wraps(fn)
    def inner(*args, **kwargs):
        # Execute function in its own connection, in a transaction.
        with db.execution_context(with_transaction=True):
            return fn(*args, **kwargs)
    return inner

def db_task(huey, db, *args, **kwargs):
    def decorator(fn):
        return huey.task(*args, **kwargs)(_transaction(db, fn))
    return decorator

def db_periodic_task(huey, db, *args, **kwargs):
    def decorator(fn):
        return huey.periodic_task(*args, **kwargs)(_transaction(db, fn))
    return decorator
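
[Editor's note: illustration only, mirroring the peewee test module further down in this diff. A Huey instance plus a peewee database, with a task whose body runs inside its own connection and transaction via db.execution_context(); the database path and names are hypothetical.]

from huey import Huey
from huey.backends.dummy import DummyQueue
from huey.peewee_helpers import db_task
from peewee import SqliteDatabase, Model, CharField

db = SqliteDatabase('/tmp/example.db')  # hypothetical path

class Value(Model):
    data = CharField()

    class Meta:
        database = db

huey = Huey(DummyQueue('example'))

@db_task(huey, db)
def save_value(val):
    # Runs in its own connection/transaction when executed by the consumer.
    return Value.create(data=val)
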
@@ -1,77 +0,0 @@
import pickle

from huey.exceptions import QueueException


class TaskRegistry(object):
    """
    A simple Registry used to track subclasses of :class:`QueueTask` - the
    purpose of this registry is to allow translation from queue messages to
    task classes, and vice-versa.
    """
    _ignore = ['QueueTask', 'PeriodicQueueTask']

    _registry = {}
    _periodic_tasks = []

    def task_to_string(self, task):
        return '%s' % (task.__name__)

    def register(self, task_class):
        klass_str = self.task_to_string(task_class)
        if klass_str in self._ignore:
            return

        if klass_str not in self._registry:
            self._registry[klass_str] = task_class

            # store an instance in a separate list of periodic tasks
            if hasattr(task_class, 'validate_datetime'):
                self._periodic_tasks.append(task_class())

    def unregister(self, task_class):
        klass_str = self.task_to_string(task_class)

        if klass_str in self._registry:
            del(self._registry[klass_str])

            for task in self._periodic_tasks:
                if isinstance(task, task_class):
                    self._periodic_tasks.remove(task)

    def __contains__(self, klass_str):
        return klass_str in self._registry

    def get_message_for_task(self, task):
        """Convert a task object to a message for storage in the queue"""
        return pickle.dumps((
            task.task_id,
            self.task_to_string(type(task)),
            task.execute_time,
            task.retries,
            task.retry_delay,
            task.get_data(),
        ))

    def get_task_class(self, klass_str):
        klass = self._registry.get(klass_str)

        if not klass:
            raise QueueException('%s not found in TaskRegistry' % klass_str)

        return klass

    def get_task_for_message(self, msg):
        """Convert a message from the queue into a task"""
        # parse out the pieces from the enqueued message
        raw = pickle.loads(msg)
        task_id, klass_str, execute_time, retries, delay, data = raw

        klass = self.get_task_class(klass_str)
        return klass(data, task_id, execute_time, retries, delay)

    def get_periodic_tasks(self):
        return self._periodic_tasks


registry = TaskRegistry()
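
[Editor's note: illustration only. The registry's docstring describes translation from queue messages to task classes and back; assuming a registered QueueTask subclass instance 'my_task' (hypothetical), the round-trip looks like this.]

msg = registry.get_message_for_task(my_task)     # task -> pickled tuple
same_task = registry.get_task_for_message(msg)   # message -> task instance
assert same_task.task_id == my_task.task_id
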
@@ -1,10 +0,0 @@
from huey.tests.backends import *
from huey.tests.consumer import *
from huey.tests.crontab import *
from huey.tests.queue import *
from huey.tests.utils import *
try:
    import peewee
    from huey.tests.peewee_tests import *
except ImportError:
    pass
@@ -1,170 +0,0 @@
from collections import deque
import datetime
import os
import sys
import tempfile
import unittest

from huey.api import Huey
from huey.backends.dummy import DummyDataStore
from huey.backends.dummy import DummyEventEmitter
from huey.backends.dummy import DummyQueue
from huey.backends.dummy import DummySchedule
from huey.utils import EmptyData
from huey.backends.sqlite_backend import SqliteDataStore
from huey.backends.sqlite_backend import SqliteQueue
from huey.backends.sqlite_backend import SqliteSchedule
try:
    from huey.backends.redis_backend import RedisDataStore
    from huey.backends.redis_backend import RedisEventEmitter
    from huey.backends.redis_backend import RedisQueue
    from huey.backends.redis_backend import RedisSchedule
except ImportError:
    RedisQueue = RedisDataStore = RedisSchedule = RedisEventEmitter = None

try:
    from huey.backends.rabbitmq_backend import RabbitQueue, RabbitEventEmitter
except ImportError:
    RabbitQueue = RabbitEventEmitter = None


if sys.version_info[0] == 2:
    redis_kwargs = {}
else:
    redis_kwargs = {'decode_responses': True}


QUEUES = (DummyQueue, RedisQueue, SqliteQueue, RabbitQueue)
DATA_STORES = (DummyDataStore, RedisDataStore, SqliteDataStore, None)
SCHEDULES = (DummySchedule, RedisSchedule, SqliteSchedule, None)
EVENTS = (DummyEventEmitter, RedisEventEmitter, None, RabbitEventEmitter)


class HueyBackendTestCase(unittest.TestCase):
    def setUp(self):
        self.sqlite_location = tempfile.mkstemp(prefix='hueytest.')[1]

    def tearDown(self):
        os.unlink(self.sqlite_location)

    def test_queues(self):
        result_store = DummyDataStore('dummy')
        for q in QUEUES:
            if not q:
                continue
            if issubclass(q, SqliteQueue):
                queue = q('test', location=self.sqlite_location)
            elif issubclass(q, RedisQueue):
                queue = q('test', **redis_kwargs)
            else:
                queue = q('test')
            queue.flush()
            queue.write('a')
            queue.write('b')
            self.assertEqual(len(queue), 2)
            self.assertEqual(queue.read(), 'a')
            self.assertEqual(queue.read(), 'b')
            self.assertEqual(queue.read(), None)

            queue.write('c')
            queue.write('d')
            queue.write('c')
            queue.write('x')
            queue.write('d')
            self.assertEqual(len(queue), 5)
            self.assertEqual(queue.remove('c'), 2)
            self.assertEqual(len(queue), 3)
            self.assertEqual(queue.read(), 'd')
            self.assertEqual(queue.read(), 'x')
            self.assertEqual(queue.read(), 'd')

            queue.flush()
            test_huey = Huey(queue, result_store)

            @test_huey.task()
            def test_queues_add(k, v):
                return k + v

            res = test_queues_add('k', 'v')
            self.assertEqual(len(queue), 1)
            task = test_huey.dequeue()
            test_huey.execute(task)
            self.assertEqual(res.get(), 'kv')

            res = test_queues_add('\xce', '\xcf')
            task = test_huey.dequeue()
            test_huey.execute(task)
            self.assertEqual(res.get(), '\xce\xcf')

    def test_data_stores(self):
        for d in DATA_STORES:
            if not d:
                continue
            if issubclass(d, SqliteDataStore):
                data_store = d('test', location=self.sqlite_location)
            elif issubclass(d, RedisDataStore):
                data_store = d('test', **redis_kwargs)
            else:
                data_store = d('test')
            data_store.put('k1', 'v1')
            data_store.put('k2', 'v2')
            data_store.put('k3', 'v3')
            self.assertEqual(data_store.peek('k2'), 'v2')
            self.assertEqual(data_store.get('k2'), 'v2')
            self.assertEqual(data_store.peek('k2'), EmptyData)
            self.assertEqual(data_store.get('k2'), EmptyData)

            self.assertEqual(data_store.peek('k3'), 'v3')
            data_store.put('k3', 'v3-2')
            self.assertEqual(data_store.peek('k3'), 'v3-2')

    def test_schedules(self):
        for s in SCHEDULES:
            if not s:
                continue
            if issubclass(s, SqliteSchedule):
                schedule = s('test', location=self.sqlite_location)
            elif issubclass(s, RedisSchedule):
                schedule = s('test', **redis_kwargs)
            else:
                schedule = s('test')
            dt1 = datetime.datetime(2013, 1, 1, 0, 0)
            dt2 = datetime.datetime(2013, 1, 2, 0, 0)
            dt3 = datetime.datetime(2013, 1, 3, 0, 0)
            dt4 = datetime.datetime(2013, 1, 4, 0, 0)

            # Add to schedule out-of-order to ensure sorting is performed by
            # the schedule.
            schedule.add('s2', dt2)
            schedule.add('s1', dt1)
            schedule.add('s4', dt4)
            schedule.add('s3', dt3)

            # Ensure that asking for a timestamp previous to any item in the
            # schedule returns empty list.
            self.assertEqual(
                schedule.read(dt1 - datetime.timedelta(days=1)),
                [])

            # Ensure the upper boundary is inclusive of whatever timestamp
            # is passed in.
            self.assertEqual(schedule.read(dt3), ['s1', 's2', 's3'])
            self.assertEqual(schedule.read(dt3), [])

            # Ensure the schedule is flushed and an empty schedule returns an
            # empty list.
            self.assertEqual(schedule.read(dt4), ['s4'])
            self.assertEqual(schedule.read(dt4), [])

    def test_events(self):
        for e in EVENTS:
            if not e:
                continue
            e = e('test')

            messages = ['a', 'b', 'c', 'd']
            for message in messages:
                e.emit(message)

            if hasattr(e, '_events'):
                self.assertEqual(e._events, deque(['d', 'c', 'b', 'a']))
@@ -1,441 +0,0 @@
from collections import deque
import datetime
import json
import logging
import threading
import time
import unittest

from huey import crontab
from huey import Huey
from huey.backends.dummy import DummyDataStore
from huey.backends.dummy import DummyEventEmitter
from huey.backends.dummy import DummyQueue
from huey.backends.dummy import DummySchedule
from huey.consumer import Consumer
from huey.consumer import WorkerThread
from huey.registry import registry

# Logger used by the consumer.
logger = logging.getLogger('huey.consumer')

# Store some global state.
state = {}

# Create a queue, result store, schedule and event emitter, then attach them
# to a test-only Huey instance.
test_queue = DummyQueue('test-queue')
test_result_store = DummyDataStore('test-queue')
test_schedule = DummySchedule('test-queue')
test_events = DummyEventEmitter('test-queue')
test_huey = Huey(test_queue, test_result_store, test_schedule, test_events)

# Create some test tasks.
@test_huey.task()
def modify_state(k, v):
    state[k] = v
    return v

@test_huey.task()
def blow_up():
    raise Exception('blowed up')

@test_huey.task(retries=3)
def retry_command(k, always_fail=True):
    if k not in state:
        if not always_fail:
            state[k] = 'fixed'
        raise Exception('fappsk')
    return state[k]

@test_huey.task(retries=3, retry_delay=10)
def retry_command_slow(k, always_fail=True):
    if k not in state:
        if not always_fail:
            state[k] = 'fixed'
        raise Exception('fappsk')
    return state[k]

@test_huey.periodic_task(crontab(minute='0'))
def every_hour():
    state['p'] = 'y'


# Create a log handler that will track messages generated by the consumer.
class TestLogHandler(logging.Handler):
    def __init__(self, *args, **kwargs):
        self.messages = []
        logging.Handler.__init__(self, *args, **kwargs)

    def emit(self, record):
        self.messages.append(record.getMessage())


class ConsumerTestCase(unittest.TestCase):
    def setUp(self):
        global state
        state = {}

        self.orig_pc = registry._periodic_tasks
        registry._periodic_commands = [every_hour.task_class()]

        self.orig_sleep = time.sleep
        time.sleep = lambda x: None

        test_huey.queue.flush()
        test_huey.result_store.flush()
        test_huey.schedule.flush()
        test_events._events = deque()

        self.consumer = Consumer(test_huey, workers=2)
        self.consumer._create_threads()

        self.handler = TestLogHandler()
        logger.addHandler(self.handler)
        logger.setLevel(logging.INFO)

    def tearDown(self):
        self.consumer.shutdown()
        logger.removeHandler(self.handler)
        registry._periodic_tasks = self.orig_pc
        time.sleep = self.orig_sleep

    def assertStatusTask(self, status_task):
        parsed = []
        i = 0
        while i < len(status_task):
            event = json.loads(test_events._events[i])
            status, task, extra = status_task[i]
            self.assertEqual(event['status'], status)
            self.assertEqual(event['id'], task.task_id)
            for k, v in extra.items():
                self.assertEqual(event[k], v)
            i += 1

    def spawn(self, func, *args, **kwargs):
        t = threading.Thread(target=func, args=args, kwargs=kwargs)
        t.start()
        return t

    def run_worker(self, task, ts=None):
        worker_t = WorkerThread(
            test_huey,
            self.consumer.default_delay,
            self.consumer.max_delay,
            self.consumer.backoff,
            self.consumer.utc,
            self.consumer._shutdown)
        ts = ts or datetime.datetime.utcnow()
        worker_t.handle_task(task, ts)

    def test_message_processing(self):
        self.consumer.worker_threads[0].start()

        self.assertFalse('k' in state)

        res = modify_state('k', 'v')
        res.get(blocking=True)

        self.assertTrue('k' in state)
        self.assertEqual(res.get(), 'v')

        self.assertEqual(len(test_events._events), 2)
        self.assertStatusTask([
            ('finished', res.task, {}),
            ('started', res.task, {}),
        ])

    def test_worker(self):
        modify_state('k', 'w')
        task = test_huey.dequeue()
        self.run_worker(task)
        self.assertEqual(state, {'k': 'w'})

    def test_worker_exception(self):
        blow_up()
        task = test_huey.dequeue()

        self.run_worker(task)
        self.assertTrue(
            'Unhandled exception in worker thread' in self.handler.messages)

        self.assertEqual(len(test_events._events), 2)
        self.assertStatusTask([
            ('error', task, {'error': True}),
            ('started', task, {}),
        ])

    def test_retries_and_logging(self):
        # this will continually fail
        retry_command('blampf')

        for i in reversed(range(4)):
            task = test_huey.dequeue()
            self.assertEqual(task.retries, i)
            self.run_worker(task)
            if i > 0:
                self.assertEqual(
                    self.handler.messages[-1],
                    'Re-enqueueing task %s, %s tries left' % (
                        task.task_id, i - 1))
                self.assertStatusTask([
                    ('enqueued', task, {}),
                    ('retrying', task, {}),
                    ('error', task, {}),
                    ('started', task, {}),
                ])
                last_idx = -2
            else:
                self.assertStatusTask([
                    ('error', task, {}),
                    ('started', task, {}),
                ])
                last_idx = -1
            self.assertEqual(self.handler.messages[last_idx],
                             'Unhandled exception in worker thread')

        self.assertEqual(test_huey.dequeue(), None)

    def test_retries_with_success(self):
        # this will fail once, then succeed
        retry_command('blampf', False)
        self.assertFalse('blampf' in state)

        task = test_huey.dequeue()
        self.run_worker(task)
        self.assertEqual(self.handler.messages, [
            'Executing %s' % task,
            'Unhandled exception in worker thread',
            'Re-enqueueing task %s, 2 tries left' % task.task_id])

        task = test_huey.dequeue()
        self.assertEqual(task.retries, 2)
        self.run_worker(task)

        self.assertEqual(state['blampf'], 'fixed')
        self.assertEqual(test_huey.dequeue(), None)

        self.assertStatusTask([
            ('finished', task, {}),
            ('started', task, {}),
            ('enqueued', task, {'retries': 2}),
            ('retrying', task, {'retries': 3}),
            ('error', task, {'error': True}),
            ('started', task, {}),
        ])

    def test_scheduling(self):
        dt = datetime.datetime(2011, 1, 1, 0, 0)
        dt2 = datetime.datetime(2037, 1, 1, 0, 0)
        ad1 = modify_state.schedule(args=('k', 'v'), eta=dt, convert_utc=False)
        ad2 = modify_state.schedule(args=('k2', 'v2'), eta=dt2, convert_utc=False)

        # dequeue the past-timestamped task and run it.
        worker = self.consumer.worker_threads[0]
        worker.check_message()

        self.assertTrue('k' in state)

        # dequeue the future-timestamped task.
        worker.check_message()

        # verify the task got stored in the schedule instead of executing
        self.assertFalse('k2' in state)

        self.assertStatusTask([
            ('scheduled', ad2.task, {}),
            ('finished', ad1.task, {}),
            ('started', ad1.task, {}),
        ])

        # run through an iteration of the scheduler
        self.consumer.scheduler_t.loop(dt)

        # our command was not enqueued and no events were emitted.
        self.assertEqual(len(test_queue._queue), 0)
        self.assertEqual(len(test_events._events), 3)

        # run through an iteration of the scheduler
        self.consumer.scheduler_t.loop(dt2)

        # our command was enqueued
        self.assertEqual(len(test_queue._queue), 1)
        self.assertEqual(len(test_events._events), 4)
        self.assertStatusTask([
            ('enqueued', ad2.task, {}),
        ])

    def test_retry_scheduling(self):
        # this will continually fail
        retry_command_slow('blampf')
        cur_time = datetime.datetime.utcnow()

        task = test_huey.dequeue()
        self.run_worker(task, ts=cur_time)
        self.assertEqual(self.handler.messages, [
            'Executing %s' % task,
            'Unhandled exception in worker thread',
            'Re-enqueueing task %s, 2 tries left' % task.task_id,
        ])

        in_11 = cur_time + datetime.timedelta(seconds=11)
        tasks_from_sched = test_huey.read_schedule(in_11)
        self.assertEqual(tasks_from_sched, [task])

        task = tasks_from_sched[0]
        self.assertEqual(task.retries, 2)
        exec_time = task.execute_time

        self.assertEqual((exec_time - cur_time).seconds, 10)
        self.assertStatusTask([
            ('scheduled', task, {
                'retries': 2,
                'retry_delay': 10,
                'execute_time': time.mktime(exec_time.timetuple())}),
            ('retrying', task, {
                'retries': 3,
                'retry_delay': 10,
                'execute_time': None}),
            ('error', task, {}),
            ('started', task, {}),
        ])

    def test_revoking_normal(self):
        # enqueue 2 normal commands
        r1 = modify_state('k', 'v')
        r2 = modify_state('k2', 'v2')

        # revoke the first *before it has been checked*
        r1.revoke()
        self.assertTrue(test_huey.is_revoked(r1.task))
        self.assertFalse(test_huey.is_revoked(r2.task))

        # dequeue a *single* message (r1)
        task = test_huey.dequeue()
        self.run_worker(task)

        self.assertEqual(len(test_events._events), 1)
        self.assertStatusTask([
            ('revoked', r1.task, {}),
        ])

        # no changes and the task was not added to the schedule
        self.assertFalse('k' in state)

        # dequeue a *single* message
        task = test_huey.dequeue()
        self.run_worker(task)

        self.assertTrue('k2' in state)

    def test_revoking_schedule(self):
        global state
        dt = datetime.datetime(2011, 1, 1)
        dt2 = datetime.datetime(2037, 1, 1)

        r1 = modify_state.schedule(args=('k', 'v'), eta=dt, convert_utc=False)
        r2 = modify_state.schedule(args=('k2', 'v2'), eta=dt, convert_utc=False)
        r3 = modify_state.schedule(args=('k3', 'v3'), eta=dt2, convert_utc=False)
        r4 = modify_state.schedule(args=('k4', 'v4'), eta=dt2, convert_utc=False)

        # revoke r1 and r3
        r1.revoke()
        r3.revoke()
        self.assertTrue(test_huey.is_revoked(r1.task))
        self.assertFalse(test_huey.is_revoked(r2.task))
        self.assertTrue(test_huey.is_revoked(r3.task))
        self.assertFalse(test_huey.is_revoked(r4.task))

        expected = [
            # state, schedule
            ({}, 0),
            ({'k2': 'v2'}, 0),
            ({'k2': 'v2'}, 1),
            ({'k2': 'v2'}, 2),
        ]

        for i in range(4):
            estate, esc = expected[i]

            # dequeue a *single* message
            task = test_huey.dequeue()
            self.run_worker(task)

            self.assertEqual(state, estate)
            self.assertEqual(len(test_huey.schedule._schedule), esc)

        # lets pretend its 2037
        future = dt2 + datetime.timedelta(seconds=1)
        self.consumer.scheduler_t.loop(future)
        self.assertEqual(len(test_huey.schedule._schedule), 0)

        # There are two tasks in the queue now (r3 and r4) -- process both.
        for i in range(2):
            task = test_huey.dequeue()
            self.run_worker(task, future)

        self.assertEqual(state, {'k2': 'v2', 'k4': 'v4'})

    def test_revoking_periodic(self):
        global state
        def loop_periodic(ts):
            self.consumer.periodic_t.loop(ts)
            for i in range(len(test_queue._queue)):
                task = test_huey.dequeue()
                self.run_worker(task, ts)

        # revoke the command once
        every_hour.revoke(revoke_once=True)
        self.assertTrue(every_hour.is_revoked())

        # it will be skipped the first go-round
        dt = datetime.datetime(2011, 1, 1, 0, 0)
        loop_periodic(dt)

        # it has not been run
        self.assertEqual(state, {})

        # the next go-round it will be enqueued
        loop_periodic(dt)

        # our command was run
        self.assertEqual(state, {'p': 'y'})

        # reset state
        state = {}

        # revoke the command
        every_hour.revoke()
        self.assertTrue(every_hour.is_revoked())

        # it will no longer be enqueued
        loop_periodic(dt)
        loop_periodic(dt)
        self.assertEqual(state, {})

        # restore
        every_hour.restore()
        self.assertFalse(every_hour.is_revoked())

        # it will now be enqueued
        loop_periodic(dt)
        self.assertEqual(state, {'p': 'y'})

        # reset
        state = {}

        # revoke for an hour
        td = datetime.timedelta(seconds=3600)
        every_hour.revoke(revoke_until=dt + td)

        loop_periodic(dt)
        self.assertEqual(state, {})

        # after an hour it is back
        loop_periodic(dt + td)
        self.assertEqual(state, {'p': 'y'})

        # our data store should reflect the delay
        task_obj = every_hour.task_class()
        self.assertEqual(len(test_huey.result_store._results), 1)
        self.assertTrue(task_obj.revoke_id in test_huey.result_store._results)
@@ -1,91 +0,0 @@
import datetime
import unittest

from huey import crontab


class CrontabTestCase(unittest.TestCase):
    def test_crontab_month(self):
        # validates the following months, 1, 4, 7, 8, 9
        valids = [1, 4, 7, 8, 9]
        validate_m = crontab(month='1,4,*/6,8-9')

        for x in range(1, 13):
            res = validate_m(datetime.datetime(2011, x, 1))
            self.assertEqual(res, x in valids)

    def test_crontab_day(self):
        # validates the following days
        valids = [1, 4, 7, 8, 9, 13, 19, 25, 31]
        validate_d = crontab(day='*/6,1,4,8-9')

        for x in range(1, 32):
            res = validate_d(datetime.datetime(2011, 1, x))
            self.assertEqual(res, x in valids)

    def test_crontab_hour(self):
        # validates the following hours
        valids = [0, 1, 4, 6, 8, 9, 12, 18]
        validate_h = crontab(hour='8-9,*/6,1,4')

        for x in range(24):
            res = validate_h(datetime.datetime(2011, 1, 1, x))
            self.assertEqual(res, x in valids)

        edge = crontab(hour=0)
        self.assertTrue(edge(datetime.datetime(2011, 1, 1, 0, 0)))
        self.assertFalse(edge(datetime.datetime(2011, 1, 1, 12, 0)))

    def test_crontab_minute(self):
        # validates the following minutes
        valids = [0, 1, 4, 6, 8, 9, 12, 18, 24, 30, 36, 42, 48, 54]
        validate_m = crontab(minute='4,8-9,*/6,1')

        for x in range(60):
            res = validate_m(datetime.datetime(2011, 1, 1, 1, x))
            self.assertEqual(res, x in valids)

    def test_crontab_day_of_week(self):
        # validates the following days of week
        # jan, 1, 2011 is a saturday
        valids = [2, 4, 9, 11, 16, 18, 23, 25, 30]
        validate_dow = crontab(day_of_week='0,2')

        for x in range(1, 32):
            res = validate_dow(datetime.datetime(2011, 1, x))
            self.assertEqual(res, x in valids)

    def test_crontab_all_together(self):
        # jan 1, 2011 is a saturday
        # may 1, 2011 is a sunday
        validate = crontab(
            month='1,5',
            day='1,4,7',
            day_of_week='0,6',
            hour='*/4',
            minute='1-5,10-15,50'
        )

        self.assertTrue(validate(datetime.datetime(2011, 5, 1, 4, 11)))
        self.assertTrue(validate(datetime.datetime(2011, 5, 7, 20, 50)))
        self.assertTrue(validate(datetime.datetime(2011, 1, 1, 0, 1)))

        # fails validation on month
        self.assertFalse(validate(datetime.datetime(2011, 6, 4, 4, 11)))

        # fails validation on day
        self.assertFalse(validate(datetime.datetime(2011, 1, 6, 4, 11)))

        # fails validation on day_of_week
        self.assertFalse(validate(datetime.datetime(2011, 1, 4, 4, 11)))

        # fails validation on hour
        self.assertFalse(validate(datetime.datetime(2011, 1, 1, 1, 11)))

        # fails validation on minute
        self.assertFalse(validate(datetime.datetime(2011, 1, 1, 4, 6)))

    def test_invalid_crontabs(self):
        # check invalid configurations are detected and reported
        self.assertRaises(ValueError, crontab, minute='61')
        self.assertRaises(ValueError, crontab, minute='0-61')
@ -1,62 +0,0 @@
from contextlib import contextmanager
import unittest

from huey import Huey
from huey.backends.dummy import DummyDataStore
from huey.backends.dummy import DummyQueue
from huey.backends.dummy import DummySchedule
from huey.peewee_helpers import db_periodic_task
from huey.peewee_helpers import db_task
from peewee import *


queue = DummyQueue('test-queue')
schedule = DummySchedule('test-queue')
data_store = DummyDataStore('test-queue')
huey = Huey(queue, data_store, schedule=schedule)

STATE = []

class MockSqliteDatabase(SqliteDatabase):
    def record_call(fn):
        def inner(*args, **kwargs):
            STATE.append(fn.__name__)
            return fn(*args, **kwargs)
        return inner
    connect = record_call(SqliteDatabase.connect)
    _close = record_call(SqliteDatabase._close)
    transaction = record_call(SqliteDatabase.transaction)

db = MockSqliteDatabase('test.huey.db')

class Value(Model):
    data = CharField()

    class Meta:
        database = db

    @classmethod
    def create(cls, *args, **kwargs):
        STATE.append('create')
        return super(Value, cls).create(*args, **kwargs)

@db_task(huey, db)
def test_db_task(val):
    return Value.create(data=val)

class TestPeeweeHelpers(unittest.TestCase):
    def setUp(self):
        global STATE
        STATE = []
        queue.flush()
        data_store.flush()
        schedule.flush()
        Value.drop_table(True)
        Value.create_table()

    def test_helper(self):
        test_db_task('foo')
        self.assertEqual(STATE, ['connect'])
        huey.execute(huey.dequeue())
        self.assertEqual(STATE, ['connect', 'transaction', 'create', '_close'])
        self.assertEqual(Value.select().count(), 1)
@ -1,438 +0,0 @@
import datetime
import unittest

from huey import crontab
from huey import exceptions as huey_exceptions
from huey import Huey
from huey.api import QueueTask
from huey.backends.dummy import DummyDataStore
from huey.backends.dummy import DummyQueue
from huey.backends.dummy import DummySchedule
from huey.registry import registry
from huey.utils import EmptyData
from huey.utils import local_to_utc


queue_name = 'test-queue'
queue = DummyQueue(queue_name)
schedule = DummySchedule(queue_name)
huey = Huey(queue, schedule=schedule)

res_queue_name = 'test-queue-2'
res_queue = DummyQueue(res_queue_name)
res_store = DummyDataStore(res_queue_name)

res_huey = Huey(res_queue, res_store, schedule)
res_huey_nones = Huey(res_queue, res_store, store_none=True)

# store some global state
state = {}
last_executed_task_class = []

# create a decorated queue command
@huey.task()
def add(key, value):
    state[key] = value

@huey.task(include_task=True)
def self_aware(key, value, task=None):
    last_executed_task_class.append(task.__class__.__name__)

# create a periodic queue command
@huey.periodic_task(crontab(minute='0'))
def add_on_the_hour():
    state['periodic'] = 'x'

# define a command using the class
class AddTask(QueueTask):
    def execute(self):
        k, v = self.data
        state[k] = v

# create a command that raises an exception
class BampfException(Exception):
    pass

@huey.task()
def throw_error():
    raise BampfException('bampf')

@res_huey.task()
def add2(a, b):
    return a + b

@res_huey.periodic_task(crontab(minute='0'))
def add_on_the_hour2():
    state['periodic'] = 'x'

@res_huey.task()
def returns_none():
    return None

@res_huey_nones.task()
def returns_none2():
    return None


class HueyTestCase(unittest.TestCase):
    def setUp(self):
        global state
        global last_executed_task_class
        queue.flush()
        res_queue.flush()
        schedule.flush()
        state = {}
        last_executed_task_class = []

    def test_registration(self):
        self.assertTrue('queuecmd_add' in registry)
        self.assertTrue('queuecmd_add_on_the_hour' in registry)
        self.assertTrue('AddTask' in registry)

    def test_enqueue(self):
        # sanity check
        self.assertEqual(len(queue), 0)

        # initializing the command does not enqueue it
        ac = AddTask(('k', 'v'))
        self.assertEqual(len(queue), 0)

        # ok, enqueue it, then check that it was enqueued
        huey.enqueue(ac)
        self.assertEqual(len(queue), 1)

        # it can be enqueued multiple times
        huey.enqueue(ac)
        self.assertEqual(len(queue), 2)

        # no changes to state
        self.assertFalse('k' in state)

    def test_enqueue_decorator(self):
        # sanity check
        self.assertEqual(len(queue), 0)

        add('k', 'v')
        self.assertEqual(len(queue), 1)

        add('k', 'v')
        self.assertEqual(len(queue), 2)

        # no changes to state
        self.assertFalse('k' in state)

    def test_schedule(self):
        dt = datetime.datetime(2011, 1, 1, 0, 1)
        add('k', 'v')
        self.assertEqual(len(queue), 1)

        task = huey.dequeue()
        self.assertEqual(task.execute_time, None)

        add.schedule(args=('k2', 'v2'), eta=dt)
        self.assertEqual(len(queue), 1)
        task = huey.dequeue()
        self.assertEqual(task.execute_time, local_to_utc(dt))

        add.schedule(args=('k3', 'v3'), eta=dt, convert_utc=False)
        self.assertEqual(len(queue), 1)
        task = huey.dequeue()
        self.assertEqual(task.execute_time, dt)

    def test_error_raised(self):
        throw_error()

        # no error
        task = huey.dequeue()

        # error
        self.assertRaises(BampfException, huey.execute, task)

    def test_internal_error(self):
        """
        Verify that exceptions are wrapped with the special "huey"
        exception classes.
        """
        class SpecialException(Exception):
            pass

        class BrokenQueue(DummyQueue):
            def read(self):
                raise SpecialException('read error')

            def write(self, data):
                raise SpecialException('write error')

        class BrokenDataStore(DummyDataStore):
            def get(self, key):
                raise SpecialException('get error')

            def put(self, key, value):
                raise SpecialException('put error')

        class BrokenSchedule(DummySchedule):
            def add(self, data, ts):
                raise SpecialException('add error')

            def read(self, ts):
                raise SpecialException('read error')

        task = AddTask()
        huey = Huey(
            BrokenQueue('q'),
            BrokenDataStore('q'),
            BrokenSchedule('q'))

        self.assertRaises(
            huey_exceptions.QueueWriteException,
            huey.enqueue,
            AddTask())
        self.assertRaises(
            huey_exceptions.QueueReadException,
            huey.dequeue)
        self.assertRaises(
            huey_exceptions.DataStorePutException,
            huey.revoke,
            task)
        self.assertRaises(
            huey_exceptions.DataStoreGetException,
            huey.restore,
            task)
        self.assertRaises(
            huey_exceptions.ScheduleAddException,
            huey.add_schedule,
            task)
        self.assertRaises(
            huey_exceptions.ScheduleReadException,
            huey.read_schedule,
            1)

    def test_dequeueing(self):
        res = huey.dequeue() # no error raised if queue is empty
        self.assertEqual(res, None)

        add('k', 'v')
        task = huey.dequeue()

        self.assertTrue(isinstance(task, QueueTask))
        self.assertEqual(task.get_data(), (('k', 'v'), {}))

    def test_execution(self):
        self.assertFalse('k' in state)
        add('k', 'v')

        task = huey.dequeue()
        self.assertFalse('k' in state)

        huey.execute(task)
        self.assertEqual(state['k'], 'v')

        add('k', 'X')
        self.assertEqual(state['k'], 'v')

        huey.execute(huey.dequeue())
        self.assertEqual(state['k'], 'X')

        self.assertRaises(TypeError, huey.execute, huey.dequeue())

    def test_self_awareness(self):
        self_aware('k', 'v')
        task = huey.dequeue()
        huey.execute(task)
        self.assertEqual(last_executed_task_class.pop(), "queuecmd_self_aware")

        self_aware('k', 'v')
        huey.execute(huey.dequeue())
        self.assertEqual(last_executed_task_class.pop(), "queuecmd_self_aware")

        add('k', 'x')
        huey.execute(huey.dequeue())
        self.assertEqual(len(last_executed_task_class), 0)

    def test_call_local(self):
        self.assertEqual(len(queue), 0)
        self.assertEqual(state, {})
        add.call_local('nugget', 'green')

        self.assertEqual(len(queue), 0)
        self.assertEqual(state['nugget'], 'green')

    def test_revoke(self):
        ac = AddTask(('k', 'v'))
        ac2 = AddTask(('k2', 'v2'))
        ac3 = AddTask(('k3', 'v3'))

        res_huey.enqueue(ac)
        res_huey.enqueue(ac2)
        res_huey.enqueue(ac3)
        res_huey.enqueue(ac2)
        res_huey.enqueue(ac)

        self.assertEqual(len(res_queue), 5)
        res_huey.revoke(ac2)

        while res_queue:
            task = res_huey.dequeue()
            if not res_huey.is_revoked(task):
                res_huey.execute(task)

        self.assertEqual(state, {'k': 'v', 'k3': 'v3'})

    def test_revoke_periodic(self):
        add_on_the_hour2.revoke()
        self.assertTrue(add_on_the_hour2.is_revoked())

        # it is still revoked
        self.assertTrue(add_on_the_hour2.is_revoked())

        add_on_the_hour2.restore()
        self.assertFalse(add_on_the_hour2.is_revoked())

        add_on_the_hour2.revoke(revoke_once=True)
        self.assertTrue(add_on_the_hour2.is_revoked()) # it is revoked once, but we are preserving that state
        self.assertTrue(add_on_the_hour2.is_revoked(peek=False)) # is revoked once, but clear state
        self.assertFalse(add_on_the_hour2.is_revoked()) # no longer revoked

        d = datetime.datetime
        add_on_the_hour2.revoke(revoke_until=d(2011, 1, 1, 11, 0))
        self.assertTrue(add_on_the_hour2.is_revoked(dt=d(2011, 1, 1, 10, 0)))
        self.assertTrue(add_on_the_hour2.is_revoked(dt=d(2011, 1, 1, 10, 59)))
        self.assertFalse(add_on_the_hour2.is_revoked(dt=d(2011, 1, 1, 11, 0)))

        add_on_the_hour2.restore()
        self.assertFalse(add_on_the_hour2.is_revoked())

    def test_result_store(self):
        res = add2(1, 2)
        res2 = add2(4, 5)
        res3 = add2(0, 0)

        # none have been executed as yet
        self.assertEqual(res.get(), None)
        self.assertEqual(res2.get(), None)
        self.assertEqual(res3.get(), None)

        # execute the first task
        res_huey.execute(res_huey.dequeue())
        self.assertEqual(res.get(), 3)
        self.assertEqual(res2.get(), None)
        self.assertEqual(res3.get(), None)

        # execute the second task
        res_huey.execute(res_huey.dequeue())
        self.assertEqual(res.get(), 3)
        self.assertEqual(res2.get(), 9)
        self.assertEqual(res3.get(), None)

        # execute the 3rd, which returns a zero value
        res_huey.execute(res_huey.dequeue())
        self.assertEqual(res.get(), 3)
        self.assertEqual(res2.get(), 9)
        self.assertEqual(res3.get(), 0)

        # check that it returns None when nothing is present
        res = returns_none()
        self.assertEqual(res.get(), None)

        # execute, it will still return None, but underneath it is an EmptyResult
        # indicating its actual result was not persisted
        res_huey.execute(res_huey.dequeue())
        self.assertEqual(res.get(), None)
        self.assertEqual(res._result, EmptyData)

        # execute again, this time note that we're pointing at the invoker
        # that *does* accept None as a store-able result
        res = returns_none2()
        self.assertEqual(res.get(), None)

        # it stores None
        res_huey_nones.execute(res_huey_nones.dequeue())
        self.assertEqual(res.get(), None)
        self.assertEqual(res._result, None)

    def test_task_store(self):
        dt1 = datetime.datetime(2011, 1, 1, 0, 0)
        dt2 = datetime.datetime(2035, 1, 1, 0, 0)

        add2.schedule(args=('k', 'v'), eta=dt1, convert_utc=False)
        task1 = res_huey.dequeue()

        add2.schedule(args=('k2', 'v2'), eta=dt2, convert_utc=False)
        task2 = res_huey.dequeue()

        add2('k3', 'v3')
        task3 = res_huey.dequeue()

        # add the command to the schedule
        res_huey.add_schedule(task1)
        self.assertEqual(len(res_huey.schedule._schedule), 1)

        # add a future-dated command
        res_huey.add_schedule(task2)
        self.assertEqual(len(res_huey.schedule._schedule), 2)

        res_huey.add_schedule(task3)

        tasks = res_huey.read_schedule(dt1)
        self.assertEqual(tasks, [task3, task1])

        tasks = res_huey.read_schedule(dt1)
        self.assertEqual(tasks, [])

        tasks = res_huey.read_schedule(dt2)
        self.assertEqual(tasks, [task2])

    def test_ready_to_run_method(self):
        dt1 = datetime.datetime(2011, 1, 1, 0, 0)
        dt2 = datetime.datetime(2035, 1, 1, 0, 0)

        add2.schedule(args=('k', 'v'), eta=dt1)
        task1 = res_huey.dequeue()

        add2.schedule(args=('k2', 'v2'), eta=dt2)
        task2 = res_huey.dequeue()

        add2('k3', 'v3')
        task3 = res_huey.dequeue()

        add2.schedule(args=('k4', 'v4'), task_id='test_task_id')
        task4 = res_huey.dequeue()

        # sanity check what should be run
        self.assertTrue(res_huey.ready_to_run(task1))
        self.assertFalse(res_huey.ready_to_run(task2))
        self.assertTrue(res_huey.ready_to_run(task3))
        self.assertTrue(res_huey.ready_to_run(task4))
        self.assertEqual('test_task_id', task4.task_id)

    def test_task_delay(self):
        curr = datetime.datetime.utcnow()
        curr50 = curr + datetime.timedelta(seconds=50)
        curr70 = curr + datetime.timedelta(seconds=70)

        add2.schedule(args=('k', 'v'), delay=60)
        task1 = res_huey.dequeue()

        add2.schedule(args=('k2', 'v2'), delay=600)
        task2 = res_huey.dequeue()

        add2('k3', 'v3')
        task3 = res_huey.dequeue()

        # add the command to the schedule
        res_huey.add_schedule(task1)
        res_huey.add_schedule(task2)
        res_huey.add_schedule(task3)

        # sanity check what should be run
        self.assertFalse(res_huey.ready_to_run(task1))
        self.assertFalse(res_huey.ready_to_run(task2))
        self.assertTrue(res_huey.ready_to_run(task3))

        self.assertFalse(res_huey.ready_to_run(task1, curr50))
        self.assertFalse(res_huey.ready_to_run(task2, curr50))
        self.assertTrue(res_huey.ready_to_run(task3, curr50))

        self.assertTrue(res_huey.ready_to_run(task1, curr70))
        self.assertFalse(res_huey.ready_to_run(task2, curr70))
        self.assertTrue(res_huey.ready_to_run(task3, curr70))
@ -1,24 +0,0 @@
import unittest

from huey.utils import wrap_exception


class MyException(Exception):
    pass


class TestWrapException(unittest.TestCase):
    def test_wrap_exception(self):
        def raise_keyerror():
            try:
                {}['huey']
            except KeyError as exc:
                raise wrap_exception(MyException)

        self.assertRaises(MyException, raise_keyerror)
        try:
            raise_keyerror()
        except MyException as exc:
            self.assertEqual(str(exc), "KeyError: 'huey'")
        else:
            assert False
@ -1,21 +0,0 @@
import datetime
import sys
import time


class EmptyData(object):
    pass


def load_class(s):
    path, klass = s.rsplit('.', 1)
    __import__(path)
    mod = sys.modules[path]
    return getattr(mod, klass)

def wrap_exception(new_exc_class):
    exc_class, exc, tb = sys.exc_info()
    raise new_exc_class('%s: %s' % (exc_class.__name__, exc))

def local_to_utc(dt):
    return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6])
@ -1,20 +1,30 @@
-#============================================================================
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of version 2.1 of the GNU Lesser General Public
-# License as published by the Free Software Foundation.
+# Copyright (c) Citrix Systems, Inc.
+# All rights reserved.
 #
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# Lesser General Public License for more details.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
 #
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#============================================================================
-# Copyright (C) 2006-2007 XenSource Inc.
-#============================================================================
+# 1) Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
 #
+# 2) Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in
+#    the documentation and/or other materials provided with the
+#    distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# --------------------------------------------------------------------
 # Parts of this file are based upon xmlrpclib.py, the XML-RPC client
 # interface included in the Python distribution.
 #
@ -45,27 +55,16 @@
 # --------------------------------------------------------------------

 import gettext
-import xmlrpclib
-import httplib
+import six.moves.xmlrpc_client as xmlrpclib
+import six.moves.http_client as httplib
 import socket
+import sys

 translation = gettext.translation('xen-xm', fallback = True)

 API_VERSION_1_1 = '1.1'
 API_VERSION_1_2 = '1.2'

-#
-# Methods that have different parameters between API versions 1.1 and 1.2, and
-# the number of parameters in 1.1.
-#
-COMPATIBILITY_METHODS_1_1 = [
-    ('SR.create' , 8),
-    ('SR.introduce' , 6),
-    ('SR.make' , 7),
-    ('VDI.snapshot' , 1),
-    ('VDI.clone' , 1),
-    ]
-
 class Failure(Exception):
     def __init__(self, details):
         self.details = details
@ -73,17 +72,18 @@ class Failure(Exception):
     def __str__(self):
         try:
             return str(self.details)
-        except Exception, exn:
-            import sys
-            print >>sys.stderr, exn
-            return "Xen-API failure: %s" % str(self.details)
+        except Exception as exn:
+            msg = "Xen-API failure: %s" % exn
+            sys.stderr.write(msg)
+            return msg

     def _details_map(self):
         return dict([(str(i), self.details[i])
                      for i in range(len(self.details))])


-_RECONNECT_AND_RETRY = (lambda _ : ())
+# Just a "constant" that we use to decide whether to retry the RPC
+_RECONNECT_AND_RETRY = object()

 class UDSHTTPConnection(httplib.HTTPConnection):
     """HTTPConnection subclass to allow HTTP over Unix domain sockets. """
@ -92,12 +92,26 @@ class UDSHTTPConnection(httplib.HTTPConnection):
         self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
         self.sock.connect(path)

-class UDSHTTP(httplib.HTTP):
+class UDSHTTP(httplib.HTTPConnection):
     _connection_class = UDSHTTPConnection

 class UDSTransport(xmlrpclib.Transport):
+    def __init__(self, use_datetime=0):
+        self._use_datetime = use_datetime
+        self._extra_headers=[]
+        self._connection = (None, None)
+    def add_extra_header(self, key, value):
+        self._extra_headers += [ (key,value) ]
     def make_connection(self, host):
-        return UDSHTTP(host)
+        # Python 2.4 compatibility
+        if sys.version_info[0] <= 2 and sys.version_info[1] < 7:
+            return UDSHTTP(host)
+        else:
+            return UDSHTTPConnection(host)
+    def send_request(self, connection, handler, request_body):
+        connection.putrequest("POST", handler)
+        for key, value in self._extra_headers:
+            connection.putheader(key, value)

 class Session(xmlrpclib.ServerProxy):
     """A server proxy and session manager for communicating with xapi using
@ -106,15 +120,26 @@ class Session(xmlrpclib.ServerProxy):
     Example:

     session = Session('http://localhost/')
-    session.login_with_password('me', 'mypassword')
+    session.login_with_password('me', 'mypassword', '1.0', 'xen-api-scripts-xenapi.py')
     session.xenapi.VM.start(vm_uuid)
     session.xenapi.session.logout()
     """

     def __init__(self, uri, transport=None, encoding=None, verbose=0,
-                 allow_none=1):
-        xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
-                                       verbose, allow_none)
+                 allow_none=1, ignore_ssl=False):
+        # Fix for CA-172901 (+ Python 2.4 compatibility)
+        # Fix for context=ctx ( < Python 2.7.9 compatibility)
+        if not (sys.version_info[0] <= 2 and sys.version_info[1] <= 7 and sys.version_info[2] <= 9 ) \
+           and ignore_ssl:
+            import ssl
+            ctx = ssl._create_unverified_context()
+            xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
+                                           verbose, allow_none, context=ctx)
+        else:
+            xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding,
+                                           verbose, allow_none)
+        self.transport = transport
         self._session = None
         self.last_login_method = None
         self.last_login_params = None
@ -125,7 +150,7 @@ class Session(xmlrpclib.ServerProxy):
         if methodname.startswith('login'):
             self._login(methodname, params)
             return None
-        elif methodname == 'logout':
+        elif methodname == 'logout' or methodname == 'session.logout':
             self._logout()
             return None
         else:
@ -133,7 +158,7 @@ class Session(xmlrpclib.ServerProxy):
             while retry_count < 3:
                 full_params = (self._session,) + params
                 result = _parse_result(getattr(self, methodname)(*full_params))
-                if result == _RECONNECT_AND_RETRY:
+                if result is _RECONNECT_AND_RETRY:
                     retry_count += 1
                     if self.last_login_method:
                         self._login(self.last_login_method,
@ -145,21 +170,24 @@ class Session(xmlrpclib.ServerProxy):
             raise xmlrpclib.Fault(
                 500, 'Tried 3 times to get a valid session, but failed')

     def _login(self, method, params):
-        result = _parse_result(getattr(self, 'session.%s' % method)(*params))
-        if result == _RECONNECT_AND_RETRY:
-            raise xmlrpclib.Fault(
-                500, 'Received SESSION_INVALID when logging in')
-        self._session = result
-        self.last_login_method = method
-        self.last_login_params = params
-        if method.startswith("slave_local"):
-            self.API_version = API_VERSION_1_2
-        else:
-            self.API_version = self._get_api_version()
+        try:
+            result = _parse_result(
+                getattr(self, 'session.%s' % method)(*params))
+            if result is _RECONNECT_AND_RETRY:
+                raise xmlrpclib.Fault(
+                    500, 'Received SESSION_INVALID when logging in')
+            self._session = result
+            self.last_login_method = method
+            self.last_login_params = params
+            self.API_version = self._get_api_version()
+        except socket.error as e:
+            if e.errno == socket.errno.ETIMEDOUT:
+                raise xmlrpclib.Fault(504, 'The connection timed out')
+            else:
+                raise e

-    def logout(self):
+    def _logout(self):
         try:
             if self.last_login_method.startswith("slave_local"):
                 return _parse_result(self.session.local_logout(self._session))
@ -174,11 +202,9 @@ class Session(xmlrpclib.ServerProxy):
     def _get_api_version(self):
         pool = self.xenapi.pool.get_all()[0]
         host = self.xenapi.pool.get_master(pool)
-        if (self.xenapi.host.get_API_version_major(host) == "1" and
-            self.xenapi.host.get_API_version_minor(host) == "2"):
-            return API_VERSION_1_2
-        else:
-            return API_VERSION_1_1
+        major = self.xenapi.host.get_API_version_major(host)
+        minor = self.xenapi.host.get_API_version_minor(host)
+        return "%s.%s"%(major,minor)

     def __getattr__(self, name):
         if name == 'handle':
@ -187,11 +213,13 @@ class Session(xmlrpclib.ServerProxy):
             return _Dispatcher(self.API_version, self.xenapi_request, None)
         elif name.startswith('login') or name.startswith('slave_local'):
             return lambda *params: self._login(name, params)
+        elif name == 'logout':
+            return _Dispatcher(self.API_version, self.xenapi_request, "logout")
         else:
             return xmlrpclib.ServerProxy.__getattr__(self, name)

 def xapi_local():
-    return Session("http://_var_xapi_xapi/", transport=UDSTransport())
+    return Session("http://_var_lib_xcp_xapi/", transport=UDSTransport())

 def _parse_result(result):
     if type(result) != dict or 'Status' not in result:
@ -233,10 +261,4 @@ class _Dispatcher:
         return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name))

     def __call__(self, *args):
-        if self.__API_version == API_VERSION_1_1:
-            for m in COMPATIBILITY_METHODS_1_1:
-                if self.__name == m[0]:
-                    return self.__send(self.__name, args[0:m[1]])
-
         return self.__send(self.__name, args)
@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@ -24,13 +24,13 @@ import sys
 try:
     sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
     import paramiko
-except ImportError,e:
-    print "Error : can not load paramiko library %s" % e
+except ImportError as e:
+    print(("Error : can not load paramiko library %s" % e))
     raise

 sys.stderr = sys.__stderr__

-from common import *
+from libtisbackup.common import *

 class backup_mysql(backup_generic):
     """Backup a mysql database as gzipped sql file through ssh"""
@ -52,7 +52,7 @@ class backup_mysql(backup_generic):
             if not self.dry_run:
                 os.makedirs(self.dest_dir)
             else:
-                print 'mkdir "%s"' % self.dest_dir
+                print(('mkdir "%s"' % self.dest_dir))
         else:
             raise Exception('backup destination directory already exists : %s' % self.dest_dir)

@ -101,7 +101,7 @@ class backup_mysql(backup_generic):
         self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
         if not self.dry_run:
             (error_code,output) = ssh_exec(cmd,ssh=self.ssh)
-            print output
+            print(output)
             self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
             if error_code:
                 raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
@ -176,4 +176,4 @@ class backup_mysql(backup_generic):
             self.logger.info('Skipping %s, already registered',dir_name)


 register_driver(backup_mysql)
@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@ -20,7 +20,7 @@

 import os
 import datetime
-from common import *
+from .common import *


 class backup_null(backup_generic):
@ -21,8 +21,8 @@ import sys
 try:
     sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
     import paramiko
-except ImportError,e:
-    print "Error : can not load paramiko library %s" % e
+except ImportError as e:
+    print(("Error : can not load paramiko library %s" % e))
     raise

 sys.stderr = sys.__stderr__
@ -30,7 +30,7 @@ sys.stderr = sys.__stderr__
 import datetime
 import base64
 import os
-from common import *
+from libtisbackup.common import *
 import re

 class backup_oracle(backup_generic):
@ -49,8 +49,8 @@ class backup_oracle(backup_generic):
         try:
             mykey = paramiko.RSAKey.from_private_key_file(self.private_key)
         except paramiko.SSHException:
             #mykey = paramiko.DSSKey.from_private_key_file(self.private_key)
             mykey = paramiko.Ed25519Key.from_private_key_file(self.private_key)

         self.ssh = paramiko.SSHClient()
         self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@ -66,7 +66,7 @@ class backup_oracle(backup_generic):
             if not self.dry_run:
                 os.makedirs(self.dest_dir)
             else:
-                print 'mkdir "%s"' % self.dest_dir
+                print(('mkdir "%s"' % self.dest_dir))
         else:
             raise Exception('backup destination directory already exists : %s' % self.dest_dir)
         # dump db
@ -171,4 +171,4 @@ class backup_oracle(backup_generic):
             if error_code:
                 raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))

 register_driver(backup_oracle)
@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@ -21,13 +21,13 @@ import sys
 try:
     sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
     import paramiko
-except ImportError,e:
-    print "Error : can not load paramiko library %s" % e
+except ImportError as e:
+    print(("Error : can not load paramiko library %s" % e))
     raise

 sys.stderr = sys.__stderr__

-from common import *
+from .common import *

 class backup_pgsql(backup_generic):
     """Backup a postgresql database as gzipped sql file through ssh"""
@ -46,7 +46,7 @@ class backup_pgsql(backup_generic):
             if not self.dry_run:
                 os.makedirs(self.dest_dir)
             else:
-                print 'mkdir "%s"' % self.dest_dir
+                print(('mkdir "%s"' % self.dest_dir))
         else:
             raise Exception('backup destination directory already exists : %s' % self.dest_dir)

@ -1,127 +0,0 @@
# -----------------------------------------------------------------------
# This file is part of TISBackup
#
# TISBackup is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TISBackup is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
#
# -----------------------------------------------------------------------


import os
import datetime
from common import *
import time

class backup_rdiff:
    backup_dir=''
    backup_start_date=None
    backup_name=''
    server_name=''
    exclude_list=''
    ssh_port='22'
    remote_user='root'
    remote_dir=''
    dest_dir=''
    verbose = False
    dry_run=False



    def __init__(self, backup_name, backup_base_dir):
        self.backup_dir = backup_base_dir + '/' + backup_name

        if os.path.isdir(self.backup_dir )==False:
            os.makedirs(self.backup_dir)

        self.backup_name = backup_name
        t = datetime.datetime.now()
        self.backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')

    def get_latest_backup(self):
        filelist = os.listdir(self.backup_dir)
        if len(filelist) == 0:
            return ''

        filelist.sort()

        return filelist[-1]

    def cleanup_backup(self):
        filelist = os.listdir(self.backup_dir)
        if len(filelist) == 0:
            return ''

        filelist.sort()
        for backup_date in filelist:
            today = time.time()
            print backup_date
            datestring = backup_date[0:8]
            c = time.strptime(datestring,"%Y%m%d")
            # TODO: improve
            if today - c < 60 * 60 * 24* 30:
                print time.strftime("%Y%m%d",c) + " is to be deleted"


    def copy_latest_to_new(self):
        # TODO check that latest exist
        # TODO check that new does not exist

        last_backup = self.get_latest_backup()
        if last_backup=='':
            print "*********************************"
            print "*first backup for " + self.backup_name
        else:
            latest_backup_path = self.backup_dir + '/' + last_backup
            new_backup_path = self.backup_dir + '/' + self.backup_start_date
            print "#cp -al starting"
            cmd = 'cp -al ' + latest_backup_path + ' ' + new_backup_path
            print cmd
            if self.dry_run==False:
                call_external_process(cmd)
            print "#cp -al finished"


    def rsync_to_new(self):

        self.dest_dir = self.backup_dir + '/' + self.backup_start_date + '/'
        src_server = self.remote_user + '@' + self.server_name + ':"' + self.remote_dir.strip() + '/"'

        print "#starting rsync"
        verbose_arg=""
        if self.verbose==True:
            verbose_arg = "-P "

        cmd = "rdiff-backup " + verbose_arg + ' --compress-level=9 --numeric-ids -az --partial -e "ssh -o StrictHostKeyChecking=no -p ' + self.ssh_port + ' -i ' + self.private_key + '" --stats --delete-after ' + self.exclude_list + ' ' + src_server + ' ' + self.dest_dir
        print cmd

        ## deal with exit code 24 (file vanished)
        if self.dry_run==False:
            p = subprocess.call(cmd, shell=True)
            if (p ==24):
                print "Note: some files vanished before transfer"
            if (p != 0 and p != 24 ):
                raise Exception('shell program exited with error code ' + str(p), cmd)


        print "#finished rsync"

    def process_backup(self):
        print ""
        print "#========Starting backup item ========="
        self.copy_latest_to_new()

        self.rsync_to_new()
        print "#========Backup item finished=========="
@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@ -20,7 +20,7 @@

 import os
 import datetime
-from common import *
+from libtisbackup.common import *
 import time
 import logging
 import re
@ -69,7 +69,7 @@ class backup_rsync(backup_generic):
             if not self.dry_run:
                 os.makedirs(dest_dir)
             else:
-                print 'mkdir "%s"' % dest_dir
+                print(('mkdir "%s"' % dest_dir))
         else:
             raise Exception('backup destination directory already exists : %s' % dest_dir)

@ -80,7 +80,7 @@ class backup_rsync(backup_generic):
         if self.dry_run:
             options.append('-d')

-        if self.overload_args <> None:
+        if self.overload_args != None:
             options.append(self.overload_args)
         elif not "cygdrive" in self.remote_dir:
             # we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful
@ -118,7 +118,7 @@ class backup_rsync(backup_generic):
             try:
                 # newsettings with exclude_list='too','titi', parsed as a str python list content
                 excludes = eval('[%s]' % self.exclude_list)
-            except Exception,e:
+            except Exception as e:
                 raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e))
             options.extend(['--exclude="%s"' % x for x in excludes])

@ -146,13 +146,13 @@ class backup_rsync(backup_generic):
                 ssh_params.append('-i %s' % self.private_key)
             if self.cipher_spec:
                 ssh_params.append('-c %s' % self.cipher_spec)
-            if self.ssh_port <> 22:
+            if self.ssh_port != 22:
                 ssh_params.append('-p %i' % self.ssh_port)
             options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params)))
             backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir)

         # ensure there is a slash at end
-        if backup_source[-1] <> '/':
+        if backup_source[-1] != '/':
             backup_source += '/'

         options_params = " ".join(options)
@ -165,7 +165,7 @@ class backup_rsync(backup_generic):
             process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
             def ondata(data,context):
                 if context.verbose:
-                    print data
+                    print(data)
                 context.logger.debug(data)

             log = monitor_stdout(process,ondata,self)
@ -195,7 +195,7 @@ class backup_rsync(backup_generic):
                 self.logger.error("[" + self.backup_name + "] shell program exited with error code " + str(returncode))
                 raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:])
         else:
-            print cmd
+            print(cmd)

         #we suppress the .rsync suffix if everything went well
         finaldest = os.path.join(self.backup_dir,self.backup_start_date)
@ -203,14 +203,14 @@ class backup_rsync(backup_generic):
             if not self.dry_run:
                 os.rename(dest_dir, finaldest)
                 self.logger.debug("[%s] touching datetime of target directory %s" ,self.backup_name,finaldest)
-                print os.popen('touch "%s"' % finaldest).read()
+                print((os.popen('touch "%s"' % finaldest).read()))
             else:
-                print "mv" ,dest_dir,finaldest
+                print(("mv" ,dest_dir,finaldest))
             stats['backup_location'] = finaldest
             stats['status']='OK'
             stats['log']='ssh+rsync backup from %s OK, %d bytes written for %d changed files' % (backup_source,stats['written_bytes'],stats['written_files_count'])

-        except BaseException , e:
+        except BaseException as e:
             stats['status']='ERROR'
             stats['log']=str(e)
             raise
@ -340,5 +340,5 @@ if __name__=='__main__':
     b = backup_rsync('htouvet','/backup/data/htouvet',dbstat)
     b.read_config(cp)
     b.process_backup()
-    print b.checknagios()
+    print((b.checknagios()))

@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python3
|
||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
# -----------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
# This file is part of TISBackup
|
# This file is part of TISBackup
|
||||||
@ -20,13 +20,13 @@
|
|||||||
|
|
||||||
import os
|
import os
|
||||||
import datetime
|
import datetime
|
||||||
from common import *
|
from .common import *
|
||||||
import time
|
import time
|
||||||
import logging
|
import logging
|
||||||
import re
|
import re
|
||||||
import os.path
|
import os.path
|
||||||
import datetime
|
import datetime
|
||||||
from common import *
|
from .common import *
|
||||||
|
|
||||||
|
|
||||||
class backup_rsync_btrfs(backup_generic):
|
class backup_rsync_btrfs(backup_generic):
|
||||||
@ -78,7 +78,7 @@ class backup_rsync_btrfs(backup_generic):
|
|||||||
else:
|
else:
|
||||||
self.logger.info("[" + self.backup_name + "] create btrs volume: %s"%dest_dir)
|
self.logger.info("[" + self.backup_name + "] create btrs volume: %s"%dest_dir)
|
||||||
else:
|
else:
|
||||||
print 'btrfs subvolume create "%s"' %dest_dir
|
print(('btrfs subvolume create "%s"' %dest_dir))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -89,7 +89,7 @@ class backup_rsync_btrfs(backup_generic):
|
|||||||
if self.dry_run:
|
if self.dry_run:
|
||||||
options.append('-d')
|
options.append('-d')
|
||||||
|
|
||||||
if self.overload_args <> None:
|
if self.overload_args != None:
|
||||||
options.append(self.overload_args)
|
options.append(self.overload_args)
|
||||||
elif not "cygdrive" in self.remote_dir:
|
elif not "cygdrive" in self.remote_dir:
|
||||||
# we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful
|
# we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful
|
||||||
@ -128,7 +128,7 @@ class backup_rsync_btrfs(backup_generic):
|
|||||||
try:
|
try:
|
||||||
# newsettings with exclude_list='too','titi', parsed as a str python list content
|
# newsettings with exclude_list='too','titi', parsed as a str python list content
|
||||||
excludes = eval('[%s]' % self.exclude_list)
|
excludes = eval('[%s]' % self.exclude_list)
|
||||||
except Exception,e:
|
except Exception as e:
|
||||||
raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e))
|
raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e))
|
||||||
options.extend(['--exclude="%s"' % x for x in excludes])
|
options.extend(['--exclude="%s"' % x for x in excludes])
|
||||||
|
|
||||||
@ -154,13 +154,13 @@ class backup_rsync_btrfs(backup_generic):
|
|||||||
ssh_params.append('-i %s' % self.private_key)
|
ssh_params.append('-i %s' % self.private_key)
|
||||||
if self.cipher_spec:
|
if self.cipher_spec:
|
||||||
ssh_params.append('-c %s' % self.cipher_spec)
|
ssh_params.append('-c %s' % self.cipher_spec)
|
||||||
if self.ssh_port <> 22:
|
if self.ssh_port != 22:
|
||||||
ssh_params.append('-p %i' % self.ssh_port)
|
ssh_params.append('-p %i' % self.ssh_port)
|
||||||
options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params)))
|
options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params)))
|
||||||
backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir)
|
backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir)
|
||||||
|
|
||||||
# ensure there is a slash at end
|
# ensure there is a slash at end
|
||||||
if backup_source[-1] <> '/':
|
if backup_source[-1] != '/':
|
||||||
backup_source += '/'
|
backup_source += '/'
|
||||||
|
|
||||||
options_params = " ".join(options)
|
options_params = " ".join(options)
|
||||||
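Two of the hunks above also swap `<>` for `!=`: the `<>` inequality operator was dropped outright in Python 3. Illustrative sketch (the values are made up):

    ssh_port = 2222
    if ssh_port != 22:          # Python 2 also accepted "if ssh_port <> 22:"
        print('-p %i' % ssh_port)

Note the commit keeps `self.overload_args != None` as-is; `is not None` would be the more idiomatic Python 3 spelling, but that is a style question, not a migration requirement.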
@@ -173,7 +173,7 @@ class backup_rsync_btrfs(backup_generic):
 process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
 def ondata(data,context):
 if context.verbose:
-print data
+print(data)
 context.logger.debug(data)

 log = monitor_stdout(process,ondata,self)

@@ -203,7 +203,7 @@ class backup_rsync_btrfs(backup_generic):
 self.logger.error("[" + self.backup_name + "] shell program exited with error code ", str(returncode))
 raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd, log[-512:])
 else:
-print cmd
+print(cmd)

 #we take a snapshot of last_backup if everything went well
 finaldest = os.path.join(self.backup_dir,self.backup_start_date)

@@ -220,16 +220,16 @@ class backup_rsync_btrfs(backup_generic):
 else:
 self.logger.info("[" + self.backup_name + "] snapshot directory created %s"%finaldest)
 else:
-print "btrfs snapshot of %s to %s"%(dest_dir,finaldest)
+print(("btrfs snapshot of %s to %s"%(dest_dir,finaldest)))
 else:
 raise Exception('snapshot directory already exists : %s' %finaldest)
 self.logger.debug("[%s] touching datetime of target directory %s" ,self.backup_name,finaldest)
-print os.popen('touch "%s"' % finaldest).read()
+print((os.popen('touch "%s"' % finaldest).read()))
 stats['backup_location'] = finaldest
 stats['status']='OK'
 stats['log']='ssh+rsync+btrfs backup from %s OK, %d bytes written for %d changed files' % (backup_source,stats['written_bytes'],stats['written_files_count'])

-except BaseException , e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 raise

@@ -358,5 +358,5 @@ if __name__=='__main__':
 b = backup_rsync('htouvet','/backup/data/htouvet',dbstat)
 b.read_config(cp)
 b.process_backup()
-print b.checknagios()
+print((b.checknagios()))
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup

@@ -24,13 +24,13 @@ import sys
 try:
 sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
 import paramiko
-except ImportError,e:
+except ImportError as e:
-print "Error : can not load paramiko library %s" % e
+print("Error : can not load paramiko library %s" % e)
 raise

 sys.stderr = sys.__stderr__

-from common import *
+from .common import *

 class backup_samba4(backup_generic):
 """Backup a samba4 databases as gzipped tdbs file through ssh"""

@@ -47,7 +47,7 @@ class backup_samba4(backup_generic):
 if not self.dry_run:
 os.makedirs(self.dest_dir)
 else:
-print 'mkdir "%s"' % self.dest_dir
+print('mkdir "%s"' % self.dest_dir)
 else:
 raise Exception('backup destination directory already exists : %s' % self.dest_dir)

@@ -88,7 +88,7 @@ class backup_samba4(backup_generic):
 self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd)
 if not self.dry_run:
 (error_code,output) = ssh_exec(cmd,ssh=self.ssh)
-print output
+print(output)
 self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
 if error_code:
 raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
@@ -24,8 +24,8 @@ import sys
 try:
 sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
 import paramiko
-except ImportError,e:
+except ImportError as e:
-print "Error : can not load paramiko library %s" % e
+print("Error : can not load paramiko library %s" % e)
 raise

 sys.stderr = sys.__stderr__

@@ -33,7 +33,7 @@ sys.stderr = sys.__stderr__
 import datetime
 import base64
 import os
-from common import *
+from .common import *

 class backup_sqlserver(backup_generic):
 """Backup a SQLSERVER database as gzipped sql file through ssh"""

@@ -67,18 +67,17 @@ class backup_sqlserver(backup_generic):
 backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S')

 backup_file = self.remote_backup_dir + '/' + self.db_name + '-' + backup_start_date + '.bak'
 if not self.db_user == '':
 self.userdb = '-U %s -P %s' % ( self.db_user, self.db_password )

 # dump db
 stats['status']='Dumping'
 if self.sqlserver_before_2005:
 cmd = """osql -E -Q "BACKUP DATABASE [%s]
 TO DISK='%s'
 WITH FORMAT" """ % ( self.db_name, backup_file )
-
 else:
 cmd = """sqlcmd %s -S "%s" -d master -Q "BACKUP DATABASE [%s]
 TO DISK = N'%s'
 WITH INIT, NOUNLOAD ,
 NAME = N'Backup %s', NOSKIP ,STATS = 10, NOFORMAT" """ % (self.userdb, self.db_server_name, self.db_name, backup_file ,self.db_name )
@@ -20,15 +20,15 @@

 import os
 import datetime
-from common import *
+from .common import *
-import XenAPI
+from . import XenAPI
 import time
 import logging
 import re
 import os.path
 import datetime
 import select
-import urllib2, urllib
+import urllib.request, urllib.error, urllib.parse, urllib.request, urllib.parse, urllib.error
 import base64
 import socket
 import requests
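Python 3 split Python 2's `urllib`/`urllib2` into `urllib.request`, `urllib.parse` and `urllib.error`, which is what this import hunk reflects (the duplicated module names on the replacement line are harmless 2to3 output, since repeated imports are no-ops). A hand-written equivalent would be:

    import urllib.request   # urllib2.urlopen / urllib2.Request live here now
    import urllib.parse     # urllib.quote / urllib.urlencode live here now
    import urllib.error     # urllib2.HTTPError / urllib2.URLError live here now

    # placeholder host and parameters, for illustration only
    url = "https://host.example/export?" + urllib.parse.urlencode({"uuid": "1234"})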
@@ -149,8 +149,9 @@ class backup_switch(backup_generic):
 else:
 child.sendline(self.switch_user)
 child.expect(".*#")
+
 child.sendline( "terminal datadump")
 child.expect("#")
 child.sendline( "show startup-config")
 child.expect("#")
 lines = child.before

@@ -237,7 +238,7 @@ class backup_switch(backup_generic):
 stats['log']='Switch backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes'])


-except BaseException , e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 raise
@@ -17,8 +17,8 @@
 # along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
 #
 # -----------------------------------------------------------------------
-from __future__ import with_statement
-from common import *
+
+from .common import *
 import pyVmomi
 from pyVmomi import vim
 from pyVmomi import vmodl

@@ -101,7 +101,7 @@ class backup_vmdk(backup_generic):
 ovfDescParams = vim.OvfManager.CreateDescriptorParams()
 ovf = si.content.ovfManager.CreateDescriptor(vm, ovfDescParams)
 root = ET.fromstring(ovf.ovfDescriptor)
-new_id = root[0][1].attrib.values()[0][1:3]
+new_id = list(root[0][1].attrib.values())[0][1:3]
 ovfFiles = []
 for vmdk in vmdks:
 old_id = vmdk['id'][1:3]

@@ -211,7 +211,7 @@ class backup_vmdk(backup_generic):
 if not self.dry_run:
 os.makedirs(dest_dir)
 else:
-print 'mkdir "%s"' % dest_dir
+print('mkdir "%s"' % dest_dir)
 else:
 raise Exception('backup destination directory already exists : %s' % dest_dir)
 os.chdir(dest_dir)

@@ -271,7 +271,7 @@ class backup_vmdk(backup_generic):
 stats['status']='OK'


-except BaseException , e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 raise
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup

@@ -20,7 +20,7 @@



-from common import *
+from .common import *
 import paramiko

 class backup_xcp_metadata(backup_generic):
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup

@@ -17,20 +17,21 @@
 # along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
 #
 # -----------------------------------------------------------------------
-from __future__ import with_statement
+
 import logging
 import re
 import os
 import datetime
-import urllib
+import urllib.request, urllib.parse, urllib.error
 import socket
 import tarfile
 import hashlib
 from stat import *
 import ssl
+import requests

-from common import *
+from .common import *
-import XenAPI
+from . import XenAPI

 if hasattr(ssl, '_create_unverified_context'):
 ssl._create_default_https_context = ssl._create_unverified_context

@@ -72,7 +73,7 @@ class backup_xva(backup_generic):
 session = XenAPI.Session('https://'+self.xcphost)
 try:
 session.login_with_password(user_xen,password_xen)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 msg,ip = error.details

 if msg == 'HOST_IS_SLAVE':

@@ -117,7 +118,7 @@ class backup_xva(backup_generic):
 if not 'NULL' in vdi:
 session.xenapi.VDI.destroy(vdi)
 session.xenapi.VM.destroy(old_snapshot)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 return("error when destroy snapshot %s"%(error))

 now = datetime.datetime.now()

@@ -125,7 +126,7 @@ class backup_xva(backup_generic):
 try:
 snapshot = session.xenapi.VM.snapshot(vm,"tisbackup-%s"%(vdi_name))
 self.logger.debug("[%s] got snapshot %s", vdi_name, snapshot)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 return("error when snapshot %s"%(error))
 #get snapshot opaqueRef
 vm = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vdi_name))[0]

@@ -135,7 +136,7 @@ class backup_xva(backup_generic):
 if status_vm == "Running":
 self.logger.debug("[%s] Shudown in progress",self.backup_name)
 if dry_run:
-print "session.xenapi.VM.clean_shutdown(vm)"
+print("session.xenapi.VM.clean_shutdown(vm)")
 else:
 session.xenapi.VM.clean_shutdown(vm)
 try:
@@ -150,8 +151,13 @@ class backup_xva(backup_generic):
 scheme = "https://"
 url = scheme+user_xen+":"+password_xen+"@"+self.xcphost+"/export?use_compression="+self.use_compression+"&uuid="+session.xenapi.VM.get_uuid(vm)

-urllib.urlretrieve(url, filename_temp)
-urllib.urlcleanup()
+top_level_url = scheme+self.xcphost+"/export?use_compression="+self.use_compression+"&uuid="+session.xenapi.VM.get_uuid(vm)
+r = requests.get(top_level_url, auth=(user_xen, password_xen))
+open(filename_temp, 'wb').write(r.content)

 except Exception as e:
 self.logger.error("[%s] error when fetching snap: %s", "tisbackup-%s"%(vdi_name), e)
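The hunk above replaces `urllib.urlretrieve` (which no longer exists under that name in Python 3) with a requests download using HTTP basic auth, so the credentials also move out of the URL. A standalone sketch of the same pattern, where host, credentials, uuid and target path are all placeholders:

    import requests

    url = "https://xcp.example.net/export?use_compression=true&uuid=1234"
    r = requests.get(url, auth=("root", "secret"))
    r.raise_for_status()
    with open("/tmp/export.xva", "wb") as f:   # context manager also closes the file,
        f.write(r.content)                     # unlike the bare open(...).write(...) in the diff

One caveat: `r.content` buffers the whole XVA in memory; for large VM exports, `requests.get(..., stream=True)` with `iter_content()` chunks would be gentler. That is a possible refinement, not what this commit does.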
@@ -171,13 +177,13 @@ class backup_xva(backup_generic):
 if not 'NULL' in vdi:
 session.xenapi.VDI.destroy(vdi)
 session.xenapi.VM.destroy(snapshot)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 return("error when destroy snapshot %s"%(error))

 elif status_vm == "Running":
 self.logger.debug("[%s] Starting in progress",self.backup_name)
 if dry_run:
-print "session.xenapi.Async.VM.start(vm,False,True)"
+print("session.xenapi.Async.VM.start(vm,False,True)")
 else:
 session.xenapi.Async.VM.start(vm,False,True)

@@ -219,7 +225,7 @@ class backup_xva(backup_generic):
 else:
 raise Exception(cmd)

-except BaseException , e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 raise

@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@@ -18,6 +18,7 @@
 #
 # -----------------------------------------------------------------------

+from abc import ABC, abstractmethod
 import os
 import subprocess
 import re

@@ -34,8 +35,8 @@ import sys
 try:
 sys.stderr = open('/dev/null') # Silence silly warnings from paramiko
 import paramiko
-except ImportError,e:
+except ImportError as e:
-print "Error : can not load paramiko library %s" % e
+print(("Error : can not load paramiko library %s" % e))
 raise

 sys.stderr = sys.__stderr__

@@ -121,7 +122,7 @@ def check_string(test_string):
 pattern = r'[^\.A-Za-z0-9\-_]'
 if re.search(pattern, test_string):
 #Character other then . a-z 0-9 was found
-print 'Invalid : %r' % (test_string,)
+print(('Invalid : %r' % (test_string,)))

 def convert_bytes(bytes):
 if bytes is None:

@@ -207,7 +208,7 @@ def html_table(cur,callback=None):
 yield dict((cur.description[idx][0], value)
 for idx, value in enumerate(row))

-head=u"<tr>"+"".join(["<th>"+c[0]+"</th>" for c in cur.description])+"</tr>"
+head="<tr>"+"".join(["<th>"+c[0]+"</th>" for c in cur.description])+"</tr>"
 lines=""
 if callback:
 for r in itermap(cur):

@@ -237,7 +238,7 @@ def monitor_stdout(aprocess, onoutputdata,context):
 while read_set:
 try:
 rlist, wlist, xlist = select.select(read_set, [], [])
-except select.error, e:
+except select.error as e:
 if e.args[0] == errno.EINTR:
 continue
 raise
@@ -245,12 +246,14 @@ def monitor_stdout(aprocess, onoutputdata,context):
 # Reads one line from stdout
 if aprocess.stdout in rlist:
 data = os.read(aprocess.stdout.fileno(), 1)
+data = data.decode('utf-8')
 if data == "":
 aprocess.stdout.close()
 read_set.remove(aprocess.stdout)
 while data and not data in ('\n','\r'):
 line += data
 data = os.read(aprocess.stdout.fileno(), 1)
+data = data.decode('utf-8')
 if line or data in ('\n','\r'):
 stdout.append(line)
 if onoutputdata:

@@ -260,12 +263,14 @@ def monitor_stdout(aprocess, onoutputdata,context):
 # Reads one line from stderr
 if aprocess.stderr in rlist:
 data = os.read(aprocess.stderr.fileno(), 1)
+data = data.decode('utf-8')
 if data == "":
 aprocess.stderr.close()
 read_set.remove(aprocess.stderr)
 while data and not data in ('\n','\r'):
 line += data
 data = os.read(aprocess.stderr.fileno(), 1)
+data = data.decode('utf-8')
 if line or data in ('\n','\r'):
 stdout.append(line)
 if onoutputdata:
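On Python 3, `os.read` on a pipe returns `bytes`, while this line-assembly loop compares against `str` literals like `'\n'` and concatenates onto a `str`; the added `decode('utf-8')` converts at each read site. A sketch of the situation, with an illustrative command:

    import os, subprocess

    p = subprocess.Popen("echo hello", shell=True, stdout=subprocess.PIPE)
    data = os.read(p.stdout.fileno(), 1)  # bytes on Python 3, e.g. b'h'
    data = data.decode('utf-8')           # now a one-character str, comparable with '\n'

One caveat the commit does not address: decoding one byte at a time breaks on multi-byte UTF-8 sequences (a lone continuation byte raises UnicodeDecodeError); `codecs.getincrementaldecoder('utf-8')()` would handle that, assuming backup output can ever contain non-ASCII.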
@@ -442,7 +447,7 @@ CREATE INDEX idx_stats_backup_name_start on stats(backup_name,backup_start);""")
 return value

 #for r in self.query('select * from stats where backup_name=? order by backup_end desc limit ?',(backup_name,count)):
-print pp(cur,None,1,fcb)
+print((pp(cur,None,1,fcb)))


 def fcb(self,fields,fieldname,value):

@@ -492,12 +497,13 @@ def ssh_exec(command,ssh=None,server_name='',remote_user='',private_key='',ssh_p

 chan.exec_command(command)
 stdout.flush()
-output = stdout.read()
+output_base = stdout.read()
+output = output_base.decode(encoding='UTF-8').replace("'","")
 exit_code = chan.recv_exit_status()
 return (exit_code,output)


-class backup_generic:
+class backup_generic(ABC):
 """Generic ancestor class for backups, not registered"""
 type = 'generic'
 required_params = ['type','backup_name','backup_dir','server_name','backup_retention_time','maximum_backup_age']
@@ -696,7 +702,7 @@ class backup_generic:
 self.logger.info('[%s] ######### Backup finished : %s',self.backup_name,stats['log'])
 return stats

-except BaseException, e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 endtime = time.time()

@@ -798,7 +804,7 @@ class backup_generic:
 if not self.dry_run:
 self.dbstat.db.execute('update stats set TYPE="CLEAN" where backup_name=? and backup_location=?',(self.backup_name,oldbackup_location))
 self.dbstat.db.commit()
-except BaseException,e:
+except BaseException as e:
 self.logger.error('cleanup_backup : Unable to remove directory/file "%s". Error %s', oldbackup_location,e)
 removed.append((self.backup_name,oldbackup_location))
 else:

@@ -809,10 +815,12 @@ class backup_generic:
 self.logger.info('[%s] Cleanup finished : removed : %s' , self.backup_name,','.join([('[%s]-"%s"') % r for r in removed]) or 'Nothing')
 return removed

+@abstractmethod
 def register_existingbackups(self):
-"""scan existing backups and insert stats in database"""
-registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',[self.backup_name])]
-raise Exception('Abstract method')
+pass
+# """scan existing backups and insert stats in database"""
+# registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',[self.backup_name])]
+# raise Exception('Abstract method')

 def export_latestbackup(self,destdir):
 """Copy (rsync) latest OK backup to external storage located at locally mounted "destdir"
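`backup_generic` becomes an `abc.ABC` and `register_existingbackups` an `@abstractmethod`, replacing the old runtime `raise Exception('Abstract method')` convention: a driver that forgets to implement it now fails at instantiation instead of mid-backup. Minimal illustration with made-up class names:

    from abc import ABC, abstractmethod

    class backup_base(ABC):              # stand-in for backup_generic
        @abstractmethod
        def register_existingbackups(self):
            ...

    class dummy_driver(backup_base):
        def register_existingbackups(self):
            return []

    dummy_driver()       # fine
    # backup_base()      # TypeError: can't instantiate abstract class

This is also why the copy_vm_xcp diff further down gains a concrete, empty `register_existingbackups` of its own.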
@@ -846,9 +854,9 @@ class backup_generic:
 raise Exception('Backup source %s doesn\'t exists' % backup_source)

 # ensure there is a slash at end
-if os.path.isdir(backup_source) and backup_source[-1] <> '/':
+if os.path.isdir(backup_source) and backup_source[-1] != '/':
 backup_source += '/'
-if backup_dest[-1] <> '/':
+if backup_dest[-1] != '/':
 backup_dest += '/'

 if not os.path.isdir(backup_dest):

@@ -874,7 +882,7 @@ class backup_generic:
 process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
 def ondata(data,context):
 if context.verbose:
-print data
+print(data)
 context.logger.debug(data)

 log = monitor_stdout(process,ondata,self)

@@ -898,7 +906,7 @@ class backup_generic:
 self.logger.error("[" + self.backup_name + "] shell program exited with error code ")
 raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd)
 else:
-print cmd
+print(cmd)

 stats['status']='OK'
 self.logger.info('export backup from %s to %s OK, %d bytes written for %d changed files' % (backup_source,backup_dest,stats['written_bytes'],stats['written_files_count']))
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup

@@ -20,8 +20,8 @@

 import os
 import datetime
-from common import *
+from .common import *
-import XenAPI
+from . import XenAPI
 import time
 import logging
 import re

@@ -29,7 +29,7 @@ import os.path
 import os
 import datetime
 import select
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 import base64
 import socket
 from stat import *

@@ -66,7 +66,7 @@ class copy_vm_xcp(backup_generic):
 session = XenAPI.Session('https://'+self.server_name)
 try:
 session.login_with_password(user_xen,password_xen)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 msg,ip = error.details

 if msg == 'HOST_IS_SLAVE':

@@ -81,7 +81,7 @@ class copy_vm_xcp(backup_generic):
 #get storage opaqueRef
 try:
 storage = session.xenapi.SR.get_by_name_label(storage_name)[0]
-except IndexError,error:
+except IndexError as error:
 result = (1,"error get SR opaqueref %s"%(error))
 return result

@@ -89,14 +89,14 @@ class copy_vm_xcp(backup_generic):
 #get vm to copy opaqueRef
 try:
 vm = session.xenapi.VM.get_by_name_label(vm_name)[0]
-except IndexError,error:
+except IndexError as error:
 result = (1,"error get VM opaqueref %s"%(error))
 return result

 # get vm backup network opaqueRef
 try:
 networkRef = session.xenapi.network.get_by_name_label(self.network_name)[0]
-except IndexError, error:
+except IndexError as error:
 result = (1, "error get VM network opaqueref %s" % (error))
 return result
@@ -104,9 +104,9 @@ class copy_vm_xcp(backup_generic):
 status_vm = session.xenapi.VM.get_power_state(vm)
 self.logger.debug("[%s] Status of VM: %s",self.backup_name,status_vm)
 if status_vm == "Running":
-self.logger.debug("[%s] Shudown in progress",self.backup_name)
+self.logger.debug("[%s] Shutdown in progress",self.backup_name)
 if dry_run:
-print "session.xenapi.VM.clean_shutdown(vm)"
+print("session.xenapi.VM.clean_shutdown(vm)")
 else:
 session.xenapi.VM.clean_shutdown(vm)
 snapshot = vm

@@ -115,7 +115,7 @@ class copy_vm_xcp(backup_generic):
 self.logger.debug("[%s] Snapshot in progress",self.backup_name)
 try:
 snapshot = session.xenapi.VM.snapshot(vm,"tisbackup-%s"%(vm_name))
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 result = (1,"error when snapshot %s"%(error))
 return result

@@ -165,7 +165,7 @@ class copy_vm_xcp(backup_generic):
 session.xenapi.VDI.destroy(vdi)

 session.xenapi.VM.destroy(oldest_backup_vm)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 result = (1,"error when destroy old backup vm %s"%(error))
 return result

@@ -173,7 +173,7 @@ class copy_vm_xcp(backup_generic):
 self.logger.debug("[%s] Copy %s in progress on %s",self.backup_name,vm_name,storage_name)
 try:
 backup_vm = session.xenapi.VM.copy(snapshot,vm_backup_name+now.strftime("%Y-%m-%d %H:%M"),storage)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 result = (1,"error when copy %s"%(error))
 return result

@@ -184,7 +184,7 @@ class copy_vm_xcp(backup_generic):
 #change the network of the new VM
 try:
 vifDestroy = session.xenapi.VM.get_VIFs(backup_vm)
-except IndexError,error:
+except IndexError as error:
 result = (1,"error get VIF opaqueref %s"%(error))
 return result

@@ -213,7 +213,7 @@ class copy_vm_xcp(backup_generic):
 }
 try:
 session.xenapi.VIF.create(data)
-except Exception, error:
+except Exception as error:
 result = (1,error)
 return result

@@ -237,7 +237,7 @@ class copy_vm_xcp(backup_generic):
 return result

 #Disable automatic boot
-if session.xenapi.VM.get_other_config(backup_vm).has_key('auto_poweron'):
+if 'auto_poweron' in session.xenapi.VM.get_other_config(backup_vm):
 session.xenapi.VM.remove_from_other_config(backup_vm, "auto_poweron")

 if not str2bool(self.halt_vm):
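`dict.has_key` was removed in Python 3; the membership test is written with `in`, as the hunk above does for the `auto_poweron` flag. Illustrative only (`other_config` stands in for the mapping XenAPI returns):

    other_config = {"auto_poweron": "true"}
    if "auto_poweron" in other_config:    # Python 2: other_config.has_key("auto_poweron")
        del other_config["auto_poweron"]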
@@ -251,14 +251,14 @@ class copy_vm_xcp(backup_generic):
 if not 'NULL' in vdi:
 session.xenapi.VDI.destroy(vdi)
 session.xenapi.VM.destroy(snapshot)
-except XenAPI.Failure, error:
+except XenAPI.Failure as error:
 result = (1,"error when destroy snapshot %s"%(error))
 return result
 else:
 if status_vm == "Running":
 self.logger.debug("[%s] Starting in progress",self.backup_name)
 if dry_run:
-print "session.xenapi.VM.start(vm,False,True)"
+print("session.xenapi.VM.start(vm,False,True)")
 else:
 session.xenapi.VM.start(vm,False,True)

@@ -282,9 +282,14 @@ class copy_vm_xcp(backup_generic):
 stats['status']='ERROR'
 stats['log']=cmd[1]

-except BaseException,e:
+except BaseException as e:
 stats['status']='ERROR'
 stats['log']=str(e)
 raise

+def register_existingbackups(self):
+"""scan backup dir and insert stats in database"""
+#This backup is on target server, no data available on this server
+pass

 register_driver(copy_vm_xcp)
20 changed lines: iniparse/__init__.py → libtisbackup/iniparse/__init__.py (Executable file → Normal file)
@@ -3,17 +3,17 @@
 # Copyright (c) 2007 Tim Lauridsen <tla@rasmil.dk>
 # All Rights Reserved. See LICENSE-PSF & LICENSE for details.

-from ini import INIConfig, change_comment_syntax
+from .ini import INIConfig, change_comment_syntax
-from config import BasicConfig, ConfigNamespace
+from .config import BasicConfig, ConfigNamespace
-from compat import RawConfigParser, ConfigParser, SafeConfigParser
+from .compat import RawConfigParser, ConfigParser, SafeConfigParser
-from utils import tidy
+from .utils import tidy

-from ConfigParser import DuplicateSectionError, \
+from .configparser import DuplicateSectionError, \
 NoSectionError, NoOptionError, \
 InterpolationMissingOptionError, \
 InterpolationDepthError, \
 InterpolationSyntaxError, \
 DEFAULTSECT, MAX_INTERPOLATION_DEPTH

 __all__ = [
 'BasicConfig', 'ConfigNamespace',
38 changed lines: iniparse/compat.py → libtisbackup/iniparse/compat.py (Executable file → Normal file)
@@ -12,19 +12,22 @@ The underlying INIConfig object can be accessed as cfg.data
 """

 import re
-from ConfigParser import DuplicateSectionError, \
+from .configparser import DuplicateSectionError, \
 NoSectionError, NoOptionError, \
 InterpolationMissingOptionError, \
 InterpolationDepthError, \
 InterpolationSyntaxError, \
 DEFAULTSECT, MAX_INTERPOLATION_DEPTH

 # These are imported only for compatiability.
 # The code below does not reference them directly.
-from ConfigParser import Error, InterpolationError, \
+from .configparser import Error, InterpolationError, \
 MissingSectionHeaderError, ParsingError

+import six
+
+from . import ini

-import ini

 class RawConfigParser(object):
 def __init__(self, defaults=None, dict_type=dict):

@@ -56,7 +59,7 @@ class RawConfigParser(object):
 # The default section is the only one that gets the case-insensitive
 # treatment - so it is special-cased here.
 if section.lower() == "default":
-raise ValueError, 'Invalid section name: %s' % section
+raise ValueError('Invalid section name: %s' % section)

 if self.has_section(section):
 raise DuplicateSectionError(section)

@@ -68,7 +71,7 @@ class RawConfigParser(object):

 The DEFAULT section is not acknowledged.
 """
-return (section in self.data)
+return section in self.data

 def options(self, section):
 """Return a list of option names for the given section name."""

@@ -88,7 +91,7 @@ class RawConfigParser(object):
 filename may also be given.
 """
 files_read = []
-if isinstance(filenames, basestring):
+if isinstance(filenames, six.string_types):
 filenames = [filenames]
 for filename in filenames:
 try:
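`basestring` does not exist on Python 3, so the vendored iniparse now tests against `six.string_types`, which is `(str,)` on Python 3 and `(basestring,)` on Python 2. A sketch of the accept-one-or-many-paths idiom this guards (the path is a placeholder):

    import six

    def as_list(filenames):
        # mirror RawConfigParser.read: accept a single path or an iterable of paths
        if isinstance(filenames, six.string_types):
            filenames = [filenames]
        return list(filenames)

    print(as_list("/etc/tis/example.ini"))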
@@ -113,8 +116,6 @@ class RawConfigParser(object):
 def get(self, section, option, vars=None):
 if not self.has_section(section):
 raise NoSectionError(section)
-if vars is not None and option in vars:
-value = vars[option]

 sec = self.data[section]
 if option in sec:

@@ -143,7 +144,7 @@ class RawConfigParser(object):
 def getboolean(self, section, option):
 v = self.get(section, option)
 if v.lower() not in self._boolean_states:
-raise ValueError, 'Not a boolean: %s' % v
+raise ValueError('Not a boolean: %s' % v)
 return self._boolean_states[v.lower()]

 def has_option(self, section, option):

@@ -234,7 +235,7 @@ class ConfigParser(RawConfigParser):
 if "%(" in value:
 try:
 value = value % vars
-except KeyError, e:
+except KeyError as e:
 raise InterpolationMissingOptionError(
 option, section, rawval, e.args[0])
 else:

@@ -283,7 +284,7 @@ class SafeConfigParser(ConfigParser):
 _badpercent_re = re.compile(r"%[^%]|%$")

 def set(self, section, option, value):
-if not isinstance(value, basestring):
+if not isinstance(value, six.string_types):
 raise TypeError("option values must be strings")
 # check for bad percent signs:
 # first, replace all "good" interpolations

@@ -323,8 +324,7 @@ class SafeConfigParser(ConfigParser):
 elif c == "(":
 m = self._interpvar_match(rest)
 if m is None:
-raise InterpolationSyntaxError(option, section,
-"bad interpolation variable reference %r" % rest)
+raise InterpolationSyntaxError(option, section, "bad interpolation variable reference %r" % rest)
 var = m.group(1)
 rest = rest[m.end():]
 try:
22 changed lines: iniparse/config.py → libtisbackup/iniparse/config.py (Executable file → Normal file)
@@ -86,6 +86,7 @@ class ConfigNamespace(object):
 def __setstate__(self, state):
 self.__dict__.update(state)

+
 class Undefined(object):
 """Helper class used to hold undefined names until assignment.

@@ -143,16 +144,16 @@ class BasicConfig(ConfigNamespace):

 >>> n.aaa = 42
 >>> del n.x
->>> print n
+>>> print(n)
 aaa = 42
 name.first = paramjit
 name.last = oberoi

-Nested namepsaces are also namespaces:
+Nested namespaces are also namespaces:

 >>> isinstance(n.name, ConfigNamespace)
 True
->>> print n.name
+>>> print(n.name)
 first = paramjit
 last = oberoi
 >>> sorted(list(n.name))

@@ -160,7 +161,7 @@ class BasicConfig(ConfigNamespace):

 Finally, values can be read from a file as follows:

->>> from StringIO import StringIO
+>>> from six import StringIO
 >>> sio = StringIO('''
 ... # comment
 ... ui.height = 100

@@ -171,7 +172,7 @@ class BasicConfig(ConfigNamespace):
 ... ''')
 >>> n = BasicConfig()
 >>> n._readfp(sio)
->>> print n
+>>> print(n)
 complexity = medium
 data.secret.password = goodness=gracious me
 have_python

@@ -199,7 +200,7 @@ class BasicConfig(ConfigNamespace):

 def __str__(self, prefix=''):
 lines = []
-keys = self._data.keys()
+keys = list(self._data.keys())
 keys.sort()
 for name in keys:
 value = self._data[name]

@@ -258,7 +259,7 @@ def update_config(target, source):
 >>> n.ui.display_clock = True
 >>> n.ui.display_qlength = True
 >>> n.ui.width = 150
->>> print n
+>>> print(n)
 playlist.expand_playlist = True
 ui.display_clock = True
 ui.display_qlength = True

@@ -267,7 +268,7 @@ def update_config(target, source):
 >>> from iniparse import ini
 >>> i = ini.INIConfig()
 >>> update_config(i, n)
->>> print i
+>>> print(i)
 [playlist]
 expand_playlist = True
 <BLANKLINE>

@@ -277,7 +278,7 @@ def update_config(target, source):
 width = 150

 """
-for name in source:
+for name in sorted(source):
 value = source[name]
 if isinstance(value, ConfigNamespace):
 if name in target:

@@ -289,6 +290,3 @@ def update_config(target, source):
 update_config(myns, value)
 else:
 target[name] = value
-
-
-
7 added lines: libtisbackup/iniparse/configparser.py (new file)
@@ -0,0 +1,7 @@
+try:
+    from ConfigParser import *
+    # not all objects get imported with __all__
+    from ConfigParser import Error, InterpolationMissingOptionError
+except ImportError:
+    from configparser import *
+    from configparser import Error, InterpolationMissingOptionError
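This new shim gives the vendored iniparse a single import point for the stdlib INI parser under either runtime: Python 2's `ConfigParser` is tried first, then Python 3's `configparser`. The rest of the package switches to `from .configparser import ...` and stays version-agnostic. Assuming the package layout from this commit is importable, usage looks like:

    from libtisbackup.iniparse.configparser import Error, DEFAULTSECT

    # On Python 3 these names come from the stdlib configparser module.
    print(issubclass(Error, Exception), DEFAULTSECT)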
95 changed lines: iniparse/ini.py → libtisbackup/iniparse/ini.py (Executable file → Normal file)
@@ -7,7 +7,7 @@

 Example:

->>> from StringIO import StringIO
+>>> from six import StringIO
 >>> sio = StringIO('''# configure foo-application
 ... [foo]
 ... bar1 = qualia

@@ -16,14 +16,14 @@ Example:
 ... special = 1''')

 >>> cfg = INIConfig(sio)
->>> print cfg.foo.bar1
+>>> print(cfg.foo.bar1)
 qualia
->>> print cfg['foo-ext'].special
+>>> print(cfg['foo-ext'].special)
 1
 >>> cfg.foo.newopt = 'hi!'
 >>> cfg.baz.enabled = 0

->>> print cfg
+>>> print(cfg)
 # configure foo-application
 [foo]
 bar1 = qualia

@@ -42,9 +42,12 @@ Example:
 # Backward-compatiable with ConfigParser

 import re
-from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
+from .configparser import DEFAULTSECT, ParsingError, MissingSectionHeaderError

+import six
+
+from . import config

-import config

 class LineType(object):
 line = None
@@ -73,10 +76,10 @@ class LineType(object):


 class SectionLine(LineType):
 regex = re.compile(r'^\['
 r'(?P<name>[^]]+)'
 r'\]\s*'
 r'((?P<csep>;|#)(?P<comment>.*))?$')

 def __init__(self, name, comment=None, comment_separator=None,
 comment_offset=-1, line=None):

@@ -170,8 +173,9 @@ def change_comment_syntax(comment_chars='%;#', allow_rem=False):
 regex += r')(?P<comment>.*)$'
 CommentLine.regex = re.compile(regex)

+
 class CommentLine(LineType):
-regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM] +)'
+regex = re.compile(r'^(?P<csep>[;#])'
 r'(?P<comment>.*)$')

 def __init__(self, comment='', separator='#', line=None):

@@ -187,6 +191,7 @@ class CommentLine(LineType):
 if m is None:
 return None
 return cls(m.group('comment'), m.group('csep'), line)

 parse = classmethod(parse)
+

@@ -195,11 +200,13 @@ class EmptyLine(LineType):
 def to_string(self):
 return ''

-value = property(lambda _: '')
+value = property(lambda self: '')

 def parse(cls, line):
-if line.strip(): return None
+if line.strip():
+    return None
 return cls(line)

 parse = classmethod(parse)

@@ -221,6 +228,7 @@ class ContinuationLine(LineType):
 if m is None:
 return None
 return cls(m.group('value'), m.start('value'), line)

 parse = classmethod(parse)
+

@@ -275,6 +283,7 @@ class LineContainer(object):
 self.add(EmptyLine())

 name = property(get_name, set_name)
+
 value = property(get_value, set_value)

 def __str__(self):

@@ -322,8 +331,8 @@ class INISection(config.ConfigNamespace):
 _optionxformvalue = None
 _optionxformsource = None
 _compat_skip_empty_lines = set()
-def __init__(self, lineobj, defaults = None,
-optionxformvalue=None, optionxformsource=None):
+
+def __init__(self, lineobj, defaults=None, optionxformvalue=None, optionxformsource=None):
 self._lines = [lineobj]
 self._defaults = defaults
 self._optionxformvalue = optionxformvalue

@@ -453,6 +462,7 @@ class INIConfig(config.ConfigNamespace):
 _sectionxformsource = None
 _parse_exc = None
 _bom = False
+
 def __init__(self, fp=None, defaults=None, parse_exc=True,
 optionxformvalue=lower, optionxformsource=None,
 sectionxformvalue=None, sectionxformsource=None):

@@ -465,7 +475,7 @@ class INIConfig(config.ConfigNamespace):
 self._sections = {}
 if defaults is None: defaults = {}
 self._defaults = INISection(LineContainer(), optionxformsource=self)
-for name, value in defaults.iteritems():
+for name, value in defaults.items():
 self._defaults[name] = value
 if fp is not None:
 self._readfp(fp)
@ -545,34 +555,34 @@ class INIConfig(config.ConfigNamespace):
|
|||||||
fname = fp.name
|
fname = fp.name
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
fname = '<???>'
|
fname = '<???>'
|
||||||
linecount = 0
|
line_count = 0
|
||||||
exc = None
|
exc = None
|
||||||
line = None
|
line = None
|
||||||
|
|
||||||
for line in readline_iterator(fp):
|
for line in readline_iterator(fp):
|
||||||
# Check for BOM on first line
|
# Check for BOM on first line
|
||||||
if linecount == 0 and isinstance(line, unicode):
|
if line_count == 0 and isinstance(line, six.text_type):
|
||||||
if line[0] == u'\ufeff':
|
if line[0] == u'\ufeff':
|
||||||
line = line[1:]
|
line = line[1:]
|
||||||
self._bom = True
|
self._bom = True
|
||||||
|
|
||||||
lineobj = self._parse(line)
|
line_obj = self._parse(line)
|
||||||
linecount += 1
|
line_count += 1
|
||||||
|
|
||||||
if not cur_section and not isinstance(lineobj,
|
if not cur_section and not isinstance(line_obj, (CommentLine, EmptyLine, SectionLine)):
|
||||||
(CommentLine, EmptyLine, SectionLine)):
|
|
||||||
if self._parse_exc:
|
if self._parse_exc:
|
||||||
raise MissingSectionHeaderError(fname, linecount, line)
|
raise MissingSectionHeaderError(fname, line_count, line)
|
||||||
else:
|
else:
|
||||||
lineobj = make_comment(line)
|
line_obj = make_comment(line)
|
||||||
|
|
||||||
if lineobj is None:
|
if line_obj is None:
|
||||||
if self._parse_exc:
|
if self._parse_exc:
|
||||||
if exc is None: exc = ParsingError(fname)
|
if exc is None:
|
||||||
exc.append(linecount, line)
|
exc = ParsingError(fname)
|
||||||
lineobj = make_comment(line)
|
exc.append(line_count, line)
|
||||||
|
line_obj = make_comment(line)
|
||||||
|
|
||||||
if isinstance(lineobj, ContinuationLine):
|
if isinstance(line_obj, ContinuationLine):
|
||||||
if cur_option:
|
if cur_option:
|
||||||
if pending_lines:
|
if pending_lines:
|
||||||
cur_option.extend(pending_lines)
|
cur_option.extend(pending_lines)
|
||||||
@ -580,20 +590,21 @@ class INIConfig(config.ConfigNamespace):
|
|||||||
if pending_empty_lines:
|
if pending_empty_lines:
|
||||||
optobj._compat_skip_empty_lines.add(cur_option_name)
|
optobj._compat_skip_empty_lines.add(cur_option_name)
|
||||||
pending_empty_lines = False
|
pending_empty_lines = False
|
||||||
cur_option.add(lineobj)
|
cur_option.add(line_obj)
|
||||||
else:
|
else:
|
||||||
# illegal continuation line - convert to comment
|
# illegal continuation line - convert to comment
|
||||||
if self._parse_exc:
|
if self._parse_exc:
|
||||||
if exc is None: exc = ParsingError(fname)
|
if exc is None:
|
||||||
exc.append(linecount, line)
|
exc = ParsingError(fname)
|
||||||
lineobj = make_comment(line)
|
exc.append(line_count, line)
|
||||||
|
line_obj = make_comment(line)
|
||||||
|
|
||||||
if isinstance(lineobj, OptionLine):
|
if isinstance(line_obj, OptionLine):
|
||||||
if pending_lines:
|
if pending_lines:
|
||||||
cur_section.extend(pending_lines)
|
cur_section.extend(pending_lines)
|
||||||
pending_lines = []
|
pending_lines = []
|
||||||
pending_empty_lines = False
|
pending_empty_lines = False
|
||||||
cur_option = LineContainer(lineobj)
|
cur_option = LineContainer(line_obj)
|
||||||
cur_section.add(cur_option)
|
cur_section.add(cur_option)
|
||||||
if self._optionxform:
|
if self._optionxform:
|
||||||
cur_option_name = self._optionxform(cur_option.name)
|
cur_option_name = self._optionxform(cur_option.name)
|
||||||
@ -605,11 +616,11 @@ class INIConfig(config.ConfigNamespace):
|
|||||||
optobj = self._sections[cur_section_name]
|
optobj = self._sections[cur_section_name]
|
||||||
optobj._options[cur_option_name] = cur_option
|
optobj._options[cur_option_name] = cur_option
|
||||||
|
|
||||||
if isinstance(lineobj, SectionLine):
|
if isinstance(line_obj, SectionLine):
|
||||||
self._data.extend(pending_lines)
|
self._data.extend(pending_lines)
|
||||||
pending_lines = []
|
pending_lines = []
|
||||||
pending_empty_lines = False
|
pending_empty_lines = False
|
||||||
cur_section = LineContainer(lineobj)
|
cur_section = LineContainer(line_obj)
|
||||||
self._data.add(cur_section)
|
self._data.add(cur_section)
|
||||||
cur_option = None
|
cur_option = None
|
||||||
cur_option_name = None
|
cur_option_name = None
|
||||||
@ -628,16 +639,14 @@ class INIConfig(config.ConfigNamespace):
|
|||||||
else:
|
else:
|
||||||
self._sections[cur_section_name]._lines.append(cur_section)
|
self._sections[cur_section_name]._lines.append(cur_section)
|
||||||
|
|
||||||
if isinstance(lineobj, (CommentLine, EmptyLine)):
|
if isinstance(line_obj, (CommentLine, EmptyLine)):
|
||||||
pending_lines.append(lineobj)
|
pending_lines.append(line_obj)
|
||||||
if isinstance(lineobj, EmptyLine):
|
if isinstance(line_obj, EmptyLine):
|
||||||
pending_empty_lines = True
|
pending_empty_lines = True
|
||||||
|
|
||||||
self._data.extend(pending_lines)
|
self._data.extend(pending_lines)
|
||||||
if line and line[-1]=='\n':
|
if line and line[-1] == '\n':
|
||||||
self._data.add(EmptyLine())
|
self._data.add(EmptyLine())
|
||||||
|
|
||||||
if exc:
|
if exc:
|
||||||
raise exc
|
raise exc
|
||||||
|
|
||||||
|
|
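The built-in `unicode` type is gone in Python 3; the hunk above swaps it for `six.text_type`, which aliases `unicode` on Python 2 and `str` on Python 3, so the BOM check in `_readfp` behaves identically on both interpreters. A minimal sketch of the pattern (the `strip_bom` helper is hypothetical, for illustration only):

import six

def strip_bom(line, is_first_line):
    # Hypothetical helper mirroring the check in _readfp above:
    # six.text_type is `unicode` on Py2 and `str` on Py3.
    if is_first_line and isinstance(line, six.text_type) and line[:1] == u'\ufeff':
        return line[1:], True   # drop the BOM, remember we saw one
    return line, False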
9
iniparse/utils.py → libtisbackup/iniparse/utils.py
Executable file → Normal file
@@ -1,5 +1,6 @@
-import compat
-from ini import LineContainer, EmptyLine
+from . import compat
+from .ini import LineContainer, EmptyLine


 def tidy(cfg):
     """Clean up blank lines.
@@ -32,12 +33,12 @@ def tidy(cfg):
         if cont and not isinstance(cont[-1], EmptyLine):
             cont.append(EmptyLine())


 def tidy_section(lc):
     cont = lc.contents
     i = 1
     while i < len(cont):
-        if (isinstance(cont[i-1], EmptyLine) and
-                isinstance(cont[i], EmptyLine)):
+        if isinstance(cont[i-1], EmptyLine) and isinstance(cont[i], EmptyLine):
             del cont[i]
         else:
             i += 1
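Python 3 dropped implicit relative imports (PEP 328), so inside the vendored libtisbackup.iniparse package a bare `import compat` would now look for a top-level `compat` module and fail. The explicit dotted form works on both interpreters; a sketch:

# Inside a package module, e.g. libtisbackup/iniparse/utils.py:
from . import compat                       # sibling module, explicit on Py3
from .ini import LineContainer, EmptyLine  # was: from ini import ...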
2
requirements.txt
Normal file
@@ -0,0 +1,2 @@
+huey==2.4.3
+iniparse
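This new requirements.txt is consumed by the deb and rpm build scripts, which vendor the dependencies into a private lib directory via `pip3 install -r requirements.txt -t .../lib` instead of relying on distribution packages. At runtime the entry points make that directory importable by prepending it to sys.path, the same pattern the tisbackup.py hunk below adds; a minimal sketch, assuming that layout:

import os
import sys

# Assumed layout: <root>/lib was populated by
# `pip3 install -r requirements.txt -t <root>/lib`.
root = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(root, 'lib'))

import huey  # resolved from <root>/lib when no system-wide copy exists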
@@ -4,6 +4,9 @@ set -ex
 rm -rf ./builddir/ ./BUILD *.rpm ./RPMS
 mkdir -p BUILD RPMS
+
+
+
 VERSION=`git rev-list HEAD --count`
 echo $VERSION > __VERSION__
@@ -14,28 +14,32 @@ URL: http://dev.tranquil.it
 Source0: ../
 Prefix: /

-Requires: unzip rsync python-paramiko python-pyvmomi nfs-utils python-flask python-simplejson autofs pexpect
-
-# Turn off the brp-python-bytecompile script
-#%global __os_install_post %(echo '%{__os_install_post}' | sed -e 's!/usr/lib[^[:space:]]*/brp-python-bytecompile[[:space:]].*$!!g')
+%if "%{rhel}" == "8"
+Requires: unzip rsync python3-paramiko python3-pyvmomi nfs-utils python3-flask python3-simplejson autofs python3-pexpect
+%endif
+%if "%{rhel}" == "7"
+Requires: unzip rsync python36-paramiko python3-pyvmomi nfs-utils python3-flask python3-simplejson autofs pexpect
+%endif

 %description

 %install
 set -ex

-mkdir -p %{buildroot}/opt/tisbackup/
+mkdir -p %{buildroot}/opt/tisbackup/lib
 mkdir -p %{buildroot}/usr/lib/systemd/system/
 mkdir -p %{buildroot}/etc/cron.d/
 mkdir -p %{buildroot}/etc/tis
 mkdir -p %{buildroot}/usr/bin/

-rsync --exclude "deb/" --exclude "doc/" --exclude "rpm/" --exclude ".git" -aP ../../../tisbackup/ %{buildroot}/opt/tisbackup/
-rsync -aP ../../../tisbackup/scripts/tisbackup_gui.service %{buildroot}/usr/lib/systemd/system/
-rsync -aP ../../../tisbackup/scripts/tisbackup_huey.service %{buildroot}/usr/lib/systemd/system/
-rsync -aP ../../../tisbackup/samples/tisbackup.cron %{buildroot}/etc/cron.d/tisbackup
-rsync -aP ../../../tisbackup/samples/tisbackup_gui.ini %{buildroot}/etc/tis
-rsync -aP ../../../tisbackup/samples/tisbackup-config.ini.sample %{buildroot}/etc/tis/tisbackup-config.ini.sample
+pip3 install -r ../../requirements.txt -t %{buildroot}/opt/tisbackup/lib
+
+rsync --exclude "deb/" --exclude "doc/" --exclude "rpm/" --exclude ".git" -aP ../../ %{buildroot}/opt/tisbackup/
+rsync -aP ../../scripts/tisbackup_gui.service %{buildroot}/usr/lib/systemd/system/
+rsync -aP ../../scripts/tisbackup_huey.service %{buildroot}/usr/lib/systemd/system/
+rsync -aP ../../samples/tisbackup.cron %{buildroot}/etc/cron.d/tisbackup
+rsync -aP ../../samples/tisbackup_gui.ini %{buildroot}/etc/tis
+rsync -aP ../../samples/tisbackup-config.ini.sample %{buildroot}/etc/tis/tisbackup-config.ini.sample
 ln -s /opt/tisbackup/tisbackup.py %{buildroot}/usr/bin/tisbackup

 %files
@@ -51,4 +55,8 @@ ln -s /opt/tisbackup/tisbackup.py %{buildroot}/usr/bin/tisbackup


 %post
+python3 -m compileall /opt/tisbackup/
+find /opt/tisbackup -name "*.pyc" -exec rm -rf {} \;

+%postun
+rm -rf /opt/tisbackup
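The new %post scriptlet byte-compiles the installed tree with Python 3 instead of relying on the rpm bytecompile brp script that the old spec disabled. A rough Python equivalent of the compile step, assuming the package's install prefix:

import compileall

# Byte-compile everything under the install prefix, as the %post scriptlet
# does with `python3 -m compileall /opt/tisbackup/`.
compileall.compile_dir('/opt/tisbackup', quiet=1)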
@@ -1,39 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-import subprocess
-import os
-
-backups = [ "ns3-test-etc-bind"]
-backup_base_dir = "/backup/data/"
-backup_retention_time=60
-
-
-if not os.path.isdir("/backup/data/empty/"):
-    os.mkdir("/backup/data/empty/")
-for backup in backups:
-    base_dir = os.path.join(backup_base_dir,backup)
-    dest_dir = os.path.join(base_dir, 'last_backup')
-    if not os.path.isdir(dest_dir):
-        cmd = "/bin/btrfs subvolume create %s"%dest_dir
-        print 'btrfs subvolume create "%s"' %dest_dir
-        print subprocess.check_output(cmd, shell=True)
-
-    if len(os.listdir(dest_dir)) == 0:
-        list_backups = sorted([os.path.join(base_dir, f) for f in os.listdir(base_dir)], key=os.path.getctime)
-        recent_backup = list_backups[-2]
-        print "The most recent backup : " + recent_backup
-        print "Initial copy"
-        #cmd = 'rsync -rt --stats --delete-excluded --numeric-ids -P -lpgoD --protect-args "%s"/ "%s"' % ( recent_backup, dest_dir)
-        cmd = 'cp -v -a --reflink=always "%s"/* "%s"' % ( recent_backup, dest_dir)
-        print "Runinig %s " % cmd
-        print subprocess.check_output(cmd, shell=True)
-    if len(os.listdir(base_dir)) > backup_retention_time:
-        for folder in sorted([os.path.join(base_dir, f) for f in os.listdir(base_dir)], key=os.path.getctime)[0:len(os.listdir(base_dir)) - (backup_retention_time )]:
-            #cmd = 'rsync --dry-run -av --del /backup/data/empty/ "%s/"' % folder
-            cmd = 'rsync -av --del /backup/data/empty/ "%s/"' % folder
-            print "Runinig %s " % cmd
-            print subprocess.check_output(cmd, shell=True)
-            os.rmdir(folder)
-
-
4
tasks.py
@@ -1,9 +1,9 @@
-from config import huey
+from huey import RedisHuey
 import os
 import logging
 from tisbackup import tis_backup

+huey = RedisHuey('tisbackup', host='localhost')
 @huey.task()
 def run_export_backup(base, config_file, mount_point, backup_sections):
     try:
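tasks.py now builds its own queue object instead of importing it from a config module: RedisHuey names the queue and points it at a local Redis instance (huey 2.x, as pinned in requirements.txt above). A minimal sketch of the wiring, with a hypothetical task for illustration:

from huey import RedisHuey

huey = RedisHuey('tisbackup', host='localhost')

@huey.task()
def ping():
    # Enqueued by calling ping(); executed by a huey_consumer process
    # connected to the same Redis instance.
    return 'pong'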
47
tisbackup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@@ -17,14 +17,17 @@
 # along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
 #
 # -----------------------------------------------------------------------
-import os
 import datetime
 import subprocess
-from iniparse import ConfigParser
-from optparse import OptionParser
+import os,sys
+from os.path import isfile, join
+
+tisbackup_root_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0,os.path.join(tisbackup_root_dir,'lib'))
+
+from iniparse import ini,ConfigParser
+from optparse import OptionParser
 import re
-import sys
 import getopt
 import os.path
 import logging
@@ -33,20 +36,20 @@ from libtisbackup.common import *
 from libtisbackup.backup_mysql import backup_mysql
 from libtisbackup.backup_rsync import backup_rsync
 from libtisbackup.backup_rsync import backup_rsync_ssh
-from libtisbackup.backup_oracle import backup_oracle
+#from libtisbackup.backup_oracle import backup_oracle
 from libtisbackup.backup_rsync_btrfs import backup_rsync_btrfs
 from libtisbackup.backup_rsync_btrfs import backup_rsync__btrfs_ssh
 from libtisbackup.backup_pgsql import backup_pgsql
 from libtisbackup.backup_xva import backup_xva
-from libtisbackup.backup_vmdk import backup_vmdk
-from libtisbackup.backup_switch import backup_switch
+#from libtisbackup.backup_vmdk import backup_vmdk
+#from libtisbackup.backup_switch import backup_switch
 from libtisbackup.backup_null import backup_null
 from libtisbackup.backup_xcp_metadata import backup_xcp_metadata
 from libtisbackup.copy_vm_xcp import copy_vm_xcp
-from libtisbackup.backup_sqlserver import backup_sqlserver
+#from libtisbackup.backup_sqlserver import backup_sqlserver
 from libtisbackup.backup_samba4 import backup_samba4

-__version__="1.1"
+__version__="2.0"

 usage="""\
 %prog -c configfile action
@@ -88,6 +91,7 @@ class tis_backup:
         self.verbose=False

     def read_ini_file(self,filename):
+        ini.change_comment_syntax()
         cp = ConfigParser()
         cp.read(filename)

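read_ini_file() now calls ini.change_comment_syntax() before parsing. The vendored CommentLine regex only accepts `;`/`#` comments, while change_comment_syntax() (whose default signature, per the ini.py hunk above, is comment_chars='%;#', allow_rem=False) rebuilds CommentLine.regex so `%`-style comment lines in existing config files keep parsing instead of raising ParsingError. A sketch of the call order, assuming a config path from the samples:

from iniparse import ini, ConfigParser

ini.change_comment_syntax()               # widen accepted comment chars to '%;#'
cp = ConfigParser()
cp.read('/etc/tis/tisbackup-config.ini')  # assumed path, for illustration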
@@ -179,15 +183,15 @@ class tis_backup:
                 nagiosoutput = 'ALL backups OK %s' % (','.join(sections))


-        except BaseException,e:
+        except BaseException as e:
             worst_nagiosstatus = nagiosStateCritical
             nagiosoutput = 'EXCEPTION',"Critical : %s" % str(e)
             raise

         finally:
             self.logger.debug('worst nagios status :"%i"',worst_nagiosstatus)
-            print '%s (tisbackup V%s)' %(nagiosoutput,version)
-            print '\n'.join(["[%s]:%s" % (l[0],l[1]) for l in globallog])
+            print('%s (tisbackup V%s)' %(nagiosoutput,version))
+            print('\n'.join(["[%s]:%s" % (l[0],l[1]) for l in globallog]))
             sys.exit(worst_nagiosstatus)

     def process_backup(self,sections=[]):
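The two recurring Python 3 fixes in this file are both visible in this hunk: the comma form of exception binding (`except BaseException,e:`) is a syntax error in Python 3, and print is a function rather than a statement. A self-contained before/after sketch (risky() is a hypothetical stand-in for a backup step):

def risky():
    # Hypothetical stand-in for a backup step that may fail.
    raise RuntimeError('disk full')

try:
    risky()
except BaseException as e:            # Py2 also allowed: except BaseException,e:
    print('Critical : %s' % str(e))   # Py2 also allowed: print 'Critical : %s' % e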
@@ -204,7 +208,7 @@ class tis_backup:
                 self.logger.info('Processing [%s]',(backup_item.backup_name))
                 stats = backup_item.process_backup()
                 processed.append((backup_item.backup_name,stats))
-            except BaseException,e:
+            except BaseException as e:
                 self.logger.critical('Backup [%s] processed with error : %s',backup_item.backup_name,e)
                 errors.append((backup_item.backup_name,str(e)))
         if not processed and not errors:
@@ -230,7 +234,7 @@ class tis_backup:
                 self.logger.info('Processing [%s]',(backup_item.backup_name))
                 stats = backup_item.export_latestbackup(destdir=exportdir)
                 processed.append((backup_item.backup_name,stats))
-            except BaseException,e:
+            except BaseException as e:
                 self.logger.critical('Export Backup [%s] processed with error : %s',backup_item.backup_name,e)
                 errors.append((backup_item.backup_name,str(e)))
         if not processed and not errors:
@@ -252,7 +256,8 @@ class tis_backup:
                         from stats
                         where status="OK" and backup_start>=?""",(mindate,))

-        defined_backups = map(lambda f:f.backup_name, [ x for x in self.backup_list if not isinstance(x, backup_null) ])
+        defined_backups = list(map(lambda f:f.backup_name, [ x for x in self.backup_list if not isinstance(x, backup_null) ]))
         failed_backups_names = set(defined_backups) - set([b['bname'] for b in failed_backups if b['bname'] in defined_backups])

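In Python 3, map() returns a lazy one-shot iterator with no len(), so the set arithmetic on the next line would silently see an exhausted sequence if defined_backups were iterated more than once; wrapping it in list() restores the Python 2 list semantics. A small sketch with hypothetical backup names:

names = ['srv1-etc', 'srv2-home']                # hypothetical backup names
defined = list(map(lambda n: n.upper(), names))  # list() makes it reusable
print(set(defined) - {'SRV1-ETC'})               # {'SRV2-HOME'}
print(len(defined))                              # 2; a bare map() has no len()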
@@ -265,7 +270,7 @@ class tis_backup:
                 self.logger.info('Processing [%s]',(backup_item.backup_name))
                 stats = backup_item.process_backup()
                 processed.append((backup_item.backup_name,stats))
-            except BaseException,e:
+            except BaseException as e:
                 self.logger.critical('Backup [%s] not processed, error : %s',backup_item.backup_name,e)
                 errors.append((backup_item.backup_name,str(e)))
         if not processed and not errors:
@@ -293,7 +298,7 @@ class tis_backup:
                 self.logger.info('Processing cleanup of [%s]',(backup_item.backup_name))
                 backup_item.cleanup_backup()
                 processed = True
-            except BaseException,e:
+            except BaseException as e:
                 self.logger.critical('Cleanup of [%s] not processed, error : %s',backup_item.backup_name,e)
         if not processed:
             self.logger.critical('No cleanup properly finished or processed')
@@ -325,7 +330,7 @@ def main():
     (options,args)=parser.parse_args()

     if len(args) != 1:
-        print "ERROR : You must provide one action to perform"
+        print("ERROR : You must provide one action to perform")
         parser.print_usage()
         sys.exit(2)

@@ -335,7 +340,7 @@ def main():
     action = args[0]
     if action == "listdrivers":
         for t in backup_drivers:
-            print backup_drivers[t].get_help()
+            print(backup_drivers[t].get_help())
         sys.exit(0)

     config_file =options.config
@@ -376,7 +381,7 @@ def main():
         hdlr = logging.FileHandler(os.path.join(log_dir,'tisbackup_%s.log' % (backup_start_date)))
         hdlr.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
         logger.addHandler(hdlr)
-    except IOError, e:
+    except IOError as e:
         if action == 'cleanup' and e.errno == errno.ENOSPC:
             logger.warning("No space left on device, disabling file logging.")
         else:
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- coding: utf-8 -*-
 # -----------------------------------------------------------------------
 # This file is part of TISBackup
@@ -28,12 +28,12 @@ from iniparse import ConfigParser,RawConfigParser
 from libtisbackup.common import *
 import time
 from flask import request, Flask, session, g, appcontext_pushed, redirect, url_for, abort, render_template, flash, jsonify, Response
-from urlparse import urlparse
+from urllib.parse import urlparse
 import json
 import glob
 import time

-from config import huey
+from huey import *
 from tasks import run_export_backup, get_task, set_task

 from tisbackup import tis_backup
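urlparse is one of the modules relocated by Python 3's stdlib reorganization; only the import path changes, not the function's API. A quick sketch:

from urllib.parse import urlparse  # Py2 spelling: from urlparse import urlparse

print(urlparse('http://server:8080/json').netloc)  # 'server:8080'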
@@ -87,11 +87,11 @@ def read_all_configs(base_dir):
     backup_dict['null_list'] = []
     backup_dict['pgsql_list'] = []
     backup_dict['mysql_list'] = []
-    backup_dict['sqlserver_list'] = []
+    #backup_dict['sqlserver_list'] = []
     backup_dict['xva_list'] = []
     backup_dict['metadata_list'] = []
-    backup_dict['switch_list'] = []
-    backup_dict['oracle_list'] = []
+    #backup_dict['switch_list'] = []
+    #backup_dict['oracle_list'] = []

     result = []
     cp = ConfigParser()
@@ -153,20 +153,20 @@ def read_all_configs(base_dir):
                     db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
                     backup_dict['mysql_list'].append(
                         [server_name, backup_name, backup_type, db_name])
-                if backup_type == "sqlserver+ssh":
-                    db_name = row['db_name']
-                    backup_dict['sqlserver_list'].append(
-                        [server_name, backup_name, backup_type, db_name])
-                if backup_type == "oracle+ssh":
-                    db_name = row['db_name']
-                    backup_dict['oracle_list'].append(
-                        [server_name, backup_name, backup_type, db_name])
+                # if backup_type == "sqlserver+ssh":
+                #     db_name = row['db_name']
+                #     backup_dict['sqlserver_list'].append(
+                #         [server_name, backup_name, backup_type, db_name])
+                # if backup_type == "oracle+ssh":
+                #     db_name = row['db_name']
+                #     backup_dict['oracle_list'].append(
+                #         [server_name, backup_name, backup_type, db_name])
                 if backup_type == "xen-xva":
                     backup_dict['xva_list'].append(
                         [server_name, backup_name, backup_type, ""])
-                if backup_type == "switch":
-                    backup_dict['switch_list'].append(
-                        [server_name, backup_name, backup_type, ""])
+                # if backup_type == "switch":
+                #     backup_dict['switch_list'].append(
+                #         [server_name, backup_name, backup_type, ""])

     return backup_dict

@@ -209,11 +209,11 @@ def read_config():
     backup_dict['null_list'] = []
     backup_dict['pgsql_list'] = []
     backup_dict['mysql_list'] = []
-    backup_dict['sqlserver_list'] = []
+    #backup_dict['sqlserver_list'] = []
     backup_dict['xva_list'] = []
     backup_dict['metadata_list'] = []
-    backup_dict['switch_list'] = []
-    backup_dict['oracle_list'] = []
+    #backup_dict['switch_list'] = []
+    #backup_dict['oracle_list'] = []
     for row in result:
         backup_name = row['backup_name']
         server_name = row['server_name']
@@ -237,16 +237,16 @@ def read_config():
         if backup_type == "mysql+ssh":
             db_name = row['db_name'] if len(row['db_name']) > 0 else '*'
             backup_dict['mysql_list'].append([server_name, backup_name, backup_type, db_name])
-        if backup_type == "sqlserver+ssh":
-            db_name = row['db_name']
-            backup_dict['sqlserver_list'].append([server_name, backup_name, backup_type, db_name])
-        if backup_type == "oracle+ssh":
-            db_name = row['db_name']
-            backup_dict['oracle_list'].append([server_name, backup_name, backup_type, db_name])
+        # if backup_type == "sqlserver+ssh":
+        #     db_name = row['db_name']
+        #     backup_dict['sqlserver_list'].append([server_name, backup_name, backup_type, db_name])
+        # if backup_type == "oracle+ssh":
+        #     db_name = row['db_name']
+        #     backup_dict['oracle_list'].append([server_name, backup_name, backup_type, db_name])
         if backup_type == "xen-xva":
             backup_dict['xva_list'].append([server_name, backup_name, backup_type, ""])
-        if backup_type == "switch":
-            backup_dict['switch_list'].append([server_name, backup_name, backup_type, ""])
+        # if backup_type == "switch":
+        #     backup_dict['switch_list'].append([server_name, backup_name, backup_type, ""])
     return backup_dict

 @app.route('/')
@@ -262,19 +262,21 @@ def set_config_number(id=None):
     global config_number
     config_number=id
     read_config()
     return jsonify(configs=CONFIG,config_number=config_number)


 @app.route('/all_json')
 def backup_all_json():
     backup_dict = read_all_configs(BASE_DIR)
-    return json.dumps(backup_dict['rsync_list']+backup_dict['sqlserver_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']+ backup_dict['switch_list'])
+    return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list'])
+    #+ backup_dict['switch_list'])+backup_dict['sqlserver_list']


 @app.route('/json')
 def backup_json():
     backup_dict = read_config()
-    return json.dumps(backup_dict['rsync_list']+backup_dict['sqlserver_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']+ backup_dict['switch_list'])
+    return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list'])
+    #+ backup_dict['switch_list'])+backup_dict['sqlserver_list']


 def check_usb_disk():
@@ -289,23 +291,23 @@ def check_usb_disk():
     if len(usb_disk_list) == 0:
         raise_error("Cannot find any external usb disk", "You should plug the usb hard drive into the server")
         return ""
-    print usb_disk_list
+    print(usb_disk_list)

     usb_partition_list = []
     for usb_disk in usb_disk_list:
         cmd = "udevadm info -q path -n %s" % usb_disk + '1'
         output = os.popen(cmd).read()
-        print "cmd : " + cmd
-        print "output : " + output
+        print("cmd : " + cmd)
+        print("output : " + output)

         if '/devices/pci' in output:
             #flash("partition found: %s1" % usb_disk)
             usb_partition_list.append(usb_disk + "1")

-    print usb_partition_list
+    print(usb_partition_list)

     if len(usb_partition_list) ==0:
-        raise_error("The dribe %s has no partition" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label")
+        raise_error("The drive %s has no partition" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label")
         return ""

     tisbackup_partition_list = []
@@ -314,7 +316,7 @@ def check_usb_disk():
             flash("tisbackup backup partition found: %s" % usb_partition)
             tisbackup_partition_list.append(usb_partition)

-    print tisbackup_partition_list
+    print(tisbackup_partition_list)

     if len(tisbackup_partition_list) ==0:
         raise_error("No tisbackup partition exist on disk %s" % (usb_disk_list[0] ), "You should initialize the usb drive and format an ext4 partition with TISBACKUP label")
@@ -334,9 +336,9 @@ def check_already_mount(partition_name,refresh):
         for line in f.readlines():
             if line.startswith(partition_name):
                 mount_point = line.split(' ')[1]
                 if not refresh:
                     run_command("/bin/umount %s" % mount_point)
                     os.rmdir(mount_point)
     return mount_point

 def run_command(cmd, info=""):
@@ -411,13 +413,14 @@ def export_backup():
         if backup_types == "null_list":
             continue
         for section in backup_dict[backup_types]:
-            if section.count > 0:
+            #if section.count > 0:
+            if len(section) > 0:
                 sections.append(section[1])

     noJobs = (not runnings_backups())
-    if "start" in request.args.keys() or not noJobs:
+    if "start" in list(request.args.keys()) or not noJobs:
         start=True
-        if "sections" in request.args.keys():
+        if "sections" in list(request.args.keys()):
             backup_sections = request.args.getlist('sections')


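The `section.count > 0` test only ever worked by accident: on Python 2 it compared the bound list method `count` to an int, which was legal and effectively always true, while Python 3 raises TypeError for such mixed comparisons, hence the switch to `len(section) > 0`. The list() wrappers around request.args.keys() are in the same spirit: dict-style views are no longer plain lists. A sketch with a hypothetical section row:

section = ['srv1', 'nightly-etc', 'rsync+ssh', '']  # hypothetical config row
# section.count > 0   # Py3: TypeError ('>' between method and int)
print(len(section) > 0)                             # True

args = {'start': '1', 'sections': 'srv1'}           # stand-in for request.args
keys = list(args.keys())                            # snapshot, indexable/reusable
print('start' in keys)                              # True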
@@ -435,9 +438,9 @@ def export_backup():
     global mindate
     mindate = datetime2isodate(datetime.datetime.now())
     if not error and start:
-        print tisbackup_config_file
+        print(tisbackup_config_file)
         task = run_export_backup(base=backup_base_dir, config_file=CONFIG[config_number], mount_point=mount_point, backup_sections=",".join([str(x) for x in backup_sections]))
         set_task(task)


     return render_template("export_backup.html", error=error, start=start, info=info, email=ADMIN_EMAIL, sections=sections)