diff --git a/libtisbackup/common.py b/libtisbackup/common.py
index 7d545f9..7e07cff 100644
--- a/libtisbackup/common.py
+++ b/libtisbackup/common.py
@@ -325,7 +325,9 @@ create table stats (
create index idx_stats_backup_name on stats(backup_name);""")
self.db.execute("""
create index idx_stats_backup_location on stats(backup_location);""")
- self.db.commit()
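+        # composite index to speed up lookups that filter on backup_name and sort by backup_start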
+        self.db.execute("""
+create index idx_stats_backup_name_start on stats(backup_name,backup_start);""")
+        self.db.commit()
def start(self,backup_name,server_name,TYPE,description='',backup_location=None):
""" Add in stat DB a record for the newly running backup"""
@@ -743,6 +745,8 @@ class backup_generic:
return (nagiosStateCritical,"CRITICAL Backup %s (%s), %s seems older than start of backup" % (self.backup_name,isodate2datetime(b['backup_end']),b['log']))
elif os.path.isdir(b['backup_location']):
return (nagiosStateOk,"OK Backup %s (%s), %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log']))
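+            # 'copy-vm-xcp' backups are stored on the target XCP host, not on the local
+            # filesystem, so there is no backup_location directory to check here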
+            elif self.type == 'copy-vm-xcp':
+                return (nagiosStateOk,"OK Backup %s (%s), %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log']))
else:
return (nagiosStateCritical,"CRITICAL Backup %s (%s), %s has disapeared from backup location %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'],b['backup_location']))
@@ -768,7 +772,18 @@ class backup_generic:
if os.path.isdir(oldbackup_location) and self.backup_dir in oldbackup_location :
self.logger.info('[%s] removing directory "%s"',self.backup_name,oldbackup_location)
if not self.dry_run:
- shutil.rmtree(oldbackup_location.encode('ascii'))
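+                    # a btrfs snapshot is a subvolume and cannot be removed with shutil.rmtree();
+                    # it has to be deleted with "btrfs subvolume delete"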
+                    if self.type == "rsync+btrfs+ssh" or self.type == "rsync+btrfs":
+                        cmd = "/sbin/btrfs subvolume delete %s" % oldbackup_location.encode('ascii')
+                        process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
+                        log = monitor_stdout(process,'',self)
+                        returncode = process.returncode
+                        if (returncode != 0):
+                            self.logger.error("[" + self.backup_name + "] shell program exited with error code %s: %s" % (returncode, log))
+                            raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd)
+                        else:
+                            self.logger.info("[" + self.backup_name + "] deleted snapshot subvolume: %s" % oldbackup_location.encode('ascii'))
+                    else:
+                        shutil.rmtree(oldbackup_location.encode('ascii'))
if os.path.isfile(oldbackup_location) and self.backup_dir in oldbackup_location :
self.logger.debug('[%s] removing file "%s"',self.backup_name,oldbackup_location)
if not self.dry_run:
@@ -860,13 +875,13 @@ class backup_generic:
            for l in log.splitlines():
                if l.startswith('Number of files:'):
-                    stats['total_files_count'] += int(l.split(':')[1])
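+                    # rsync >= 3.1 prints thousands separators and per-type counts,
+                    # e.g. "Number of files: 1,234 (reg: 1,000, dir: 234)"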
+                    stats['total_files_count'] += int(l.split(':')[1].split()[0].replace(',',''))
                if l.startswith('Number of files transferred:'):
                    stats['written_files_count'] += int(l.split(':')[1])
                if l.startswith('Total file size:'):
-                    stats['total_bytes'] += int(l.split(':')[1].split()[0])
+                    stats['total_bytes'] += float(l.replace(',','').split(':')[1].split()[0])
                if l.startswith('Total transferred file size:'):
-                    stats['written_bytes'] += int(l.split(':')[1].split()[0])
+                    stats['written_bytes'] += float(l.replace(',','').split(':')[1].split()[0])
returncode = process.returncode
## deal with exit code 24 (file vanished)
if (returncode == 24):
@@ -884,7 +899,6 @@ class backup_generic:
endtime = time.time()
duration = (endtime-starttime)/3600.0
-
if not self.dry_run and self.dbstat:
self.dbstat.finish(stat_rowid,
backup_end=datetime2isodate(datetime.datetime.now()),
@@ -895,7 +909,7 @@ class backup_generic:
written_bytes=stats['written_bytes'],
status=stats['status'],
log=stats['log'],
- backup_location=backup_dest)
+ backup_location=backup_dest)
return stats
diff --git a/templates/backups.html b/templates/backups.html
index 443a575..cbf4fec 100755
--- a/templates/backups.html
+++ b/templates/backups.html
@@ -22,6 +22,25 @@
{% endif %}
+{% if backup_list['rsync_btrfs_list']|count != 0 %}
+<h2>rsync+btrfs+ssh</h2>
+<table>
+  <thead>
+    <tr>
+      <th>Server</th>
+      <th>Backup</th>
+      <th>Directory</th>
+    </tr>
+  </thead>
+  <tbody>
+  {% for entry in backup_list['rsync_btrfs_list'] %}
+    <tr>
+      <td>{{ entry[0] }}</td>
+      <td>{{ entry[1] }}</td>
+      <td>{{ entry[3] }}</td>
+    </tr>
+  {% endfor %}
+  </tbody>
+</table>
+{% endif %}
{% if backup_list['rsync_list']|count != 0 %}
 <h2>Rsync</h2>
@@ -66,7 +85,7 @@
{% if backup_list['mysql_list']|count != 0 %}
 <h2>MySQL</h2>
 <th>Server</th>
@@ -85,6 +104,25 @@
{% endif %}
+{% if backup_list['sqlserver_list']|count != 0 %}
+<h2>SQL Server</h2>
+<table>
+  <thead>
+    <tr>
+      <th>Server</th>
+      <th>Backup</th>
+      <th>Database</th>
+    </tr>
+  </thead>
+  <tbody>
+  {% for entry in backup_list['sqlserver_list'] %}
+    <tr>
+      <td>{{ entry[0] }}</td>
+      <td>{{ entry[1] }}</td>
+      <td>{{ entry[3] }}</td>
+    </tr>
+  {% endfor %}
+  </tbody>
+</table>
+{% endif %}
{% if backup_list['xva_list']|count != 0 %}
 <h2>XVA</h2>
diff --git a/templates/export_backup.html b/templates/export_backup.html
index 68d79b5..53f3728 100755
--- a/templates/export_backup.html
+++ b/templates/export_backup.html
@@ -28,9 +28,15 @@
});
+<!-- section picker: the control is named "sections" so tisbackup_gui.py can read it with request.args.getlist('sections') -->
+<select name="sections" multiple>
+{% for section in sections %}
+  <option value="{{ section }}">{{ section }}</option>
+{% endfor %}
+</select>
{% else %}
 Backups are running:
diff --git a/templates/last_backups.html b/templates/last_backups.html
index edd2bb0..bb89d9f 100755
--- a/templates/last_backups.html
+++ b/templates/last_backups.html
@@ -7,7 +7,7 @@
"sAjaxDataProp": "data",
"sAjaxSource": "/backups.json",
"iDisplayLength": 25,
- "aLengthMenu": [[25, 50, 100, 200, 500, -1], [25, 50, 100, 200, 500, "All"]],
+// "aLengthMenu": [[25, 50, 100, 200, 500, -1], [25, 50, 100, 200, 500, "All"]],
"aaSorting": [[ 0, "desc" ]],
"aoColumnDefs": [
{
@@ -32,6 +32,13 @@
var d = new Date(data);
return d.getFullYear()+"/"+(d.getMonth()+1)+"/"+d.getDate()+" "+d.toLocaleTimeString();
}
+ },
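+                // render written_bytes (column 6) as a human-readable size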
+                {
+                    "aTargets": [ 6 ],
+                    "mData": "written_bytes",
+                    "mRender": function ( data, type, full ) {
+                        return humanFileSize(data, false);
+                    }
}
],
"aoColumns": [
@@ -41,7 +48,7 @@
{ "mData":"backup_name" },
{ "mData":"backup_duration"},
{ "mData":"status" },
- { "mData":"written_bytes" , "bVisible": false},
+ { "mData":"written_bytes"},
{ "mData":"written_files_count" , "bVisible": false},
{ "mData":"total_files_count" , "bVisible": false},
{ "mData":"total_bytes" , "bVisible": false },
@@ -49,9 +56,40 @@
{ "mData":"description" , "bVisible": false },
{ "mData":"log" , "bVisible": false },
{ "mData":"TYPE" , "bVisible": false }
- ]
+ ],
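+            // footer totals over the rows currently displayed; backup_duration is stored
+            // in hours (see libtisbackup/common.py), hence the *3600 before secondsToHms()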
+ "fnFooterCallback": function ( nRow, aaData, iStart, iEnd, aiDisplay ) {
+//humanFileSize(aaData[1]['written_bytes'], true)
+ var total_bytes = 0;
+ var total_time = 0;
+ for (var i = iStart; i < iEnd; i++) {
+ total_bytes += aaData[i]['written_bytes'];
+ total_time += aaData[i]['backup_duration'];
+ }
+
+ var cells_data = nRow.getElementsByTagName('th');
+ cells_data[1].innerHTML = humanFileSize(total_bytes, true);
+
+
+ var cells_time = nRow.nextElementSibling.cells;
+ cells_time[1].innerHTML = secondsToHms(total_time * 3600);
+ }
} );
+        $('#inputDatabaseName').keyup(function () { delay(function(){ oTable.fnLengthChange( $('#inputDatabaseName').val() ); }, 300 ); });
+        $(".dataTables_length").remove();
+        var nb_row = GetURLParameter('row');
+        if (nb_row){
+            oTable.fnLengthChange( nb_row );
+            $('#inputDatabaseName').val(nb_row);
+        }
+
} );
+    var delay = (function(){
+        var timer = 0;
+        return function(callback, ms){
+            clearTimeout(timer);
+            timer = setTimeout(callback, ms);
+        };
+    })();
function fnShowHide( iCol )
{
/* Get the DataTables object again - this is not a recreation, just a get of the object */
@@ -67,6 +105,57 @@
var s = Math.floor(d % 3600 % 60);
return ((h > 0 ? h + ":" : "0:") + (m > 0 ? (m < 10 ? "0" : "") + m + ":" : "00:") + (s < 10 ? "0" : "") + s);
}
+    function GetURLParameter(sParam)
+    {
+        var sPageURL = window.location.search.substring(1);
+        var sURLVariables = sPageURL.split('&');
+        for (var i = 0; i < sURLVariables.length; i++)
+        {
+            var sParameterName = sURLVariables[i].split('=');
+            if (sParameterName[0] == sParam)
+            {
+                return sParameterName[1];
+            }
+        }
+    }
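+    // format a byte count; si=true gives 1000-based units (kB, MB, ...), si=false 1024-based (KiB, MiB, ...)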
+    function humanFileSize(bytes, si) {
+        var thresh = si ? 1000 : 1024;
+        if (bytes < thresh) return bytes + ' B';
+        var units = si ? ['kB','MB','GB','TB','PB','EB','ZB','YB'] : ['KiB','MiB','GiB','TiB','PiB','EiB','ZiB','YiB'];
+        var u = -1;
+        do {
+            bytes /= thresh;
+            ++u;
+        } while (bytes >= thresh);
+        return bytes.toFixed(1) + ' ' + units[u];
+    }
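+    // fnLengthChange API plug-in, adapted from the DataTables 1.x plug-ins collection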
+    $.fn.dataTableExt.oApi.fnLengthChange = function ( oSettings, iDisplay )
+    {
+        oSettings._iDisplayLength = iDisplay;
+        oSettings.oApi._fnCalculateEnd( oSettings );
+
+        /* If we have space to show extra rows (backing up from the end point), then do so */
+        if ( oSettings._iDisplayEnd == oSettings.aiDisplay.length )
+        {
+            oSettings._iDisplayStart = oSettings._iDisplayEnd - oSettings._iDisplayLength;
+            if ( oSettings._iDisplayStart < 0 )
+            {
+                oSettings._iDisplayStart = 0;
+            }
+        }
+
+        if ( oSettings._iDisplayLength == -1 )
+        {
+            oSettings._iDisplayStart = 0;
+        }
+
+        oSettings.oApi._fnDraw( oSettings );
+
+        if ( oSettings.aanFeatures.l )
+        {
+            $('select', oSettings.aanFeatures.l).val( iDisplay );
+        }
+    };
@@ -90,8 +179,19 @@
+<tfoot>
+  <tr>
+    <th>Total Written Bytes</th>
+    <th></th>
+  </tr>
+  <tr>
+    <th>Total Duration time</th>
+    <th></th>
+  </tr>
+</tfoot>
-
+Number of rows per page: <input type="text" id="inputDatabaseName" />
Backup start
Backup end
@@ -117,5 +217,6 @@ $('input:checkbox:eq(2)').attr('checked', true);
$('input:checkbox:eq(3)').attr('checked', true);
$('input:checkbox:eq(4)').attr('checked', true);
$('input:checkbox:eq(5)').attr('checked', true);
+$('input:checkbox:eq(6)').attr('checked', true);
{% endblock %}
diff --git a/tisbackup.py b/tisbackup.py
index 7d5d8cd..eeae082 100644
--- a/tisbackup.py
+++ b/tisbackup.py
@@ -32,12 +32,15 @@ from libtisbackup.common import *
from libtisbackup.backup_mysql import backup_mysql
from libtisbackup.backup_rsync import backup_rsync
from libtisbackup.backup_rsync import backup_rsync_ssh
+from libtisbackup.backup_rsync_btrfs import backup_rsync_btrfs
+from libtisbackup.backup_rsync_btrfs import backup_rsync__btrfs_ssh
from libtisbackup.backup_pgsql import backup_pgsql
from libtisbackup.backup_xva import backup_xva
#from libtisbackup.backup_switch import backup_switch
from libtisbackup.backup_null import backup_null
from libtisbackup.backup_xcp_metadata import backup_xcp_metadata
from libtisbackup.copy_vm_xcp import copy_vm_xcp
+from libtisbackup.backup_sqlserver import backup_sqlserver
usage="""\
%prog -c configfile action
diff --git a/tisbackup_gui.py b/tisbackup_gui.py
index 1da499e..d7c3a47 100755
--- a/tisbackup_gui.py
+++ b/tisbackup_gui.py
@@ -18,6 +18,10 @@
#
# -----------------------------------------------------------------------
import os,sys
+tisbackup_root_dir = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(tisbackup_root_dir,'lib'))
+
+
from shutil import *
from iniparse import ConfigParser
from libtisbackup.common import *
@@ -32,11 +36,11 @@ import logging
import re
-CONFIG = uwsgi.opt['config']
+CONFIG = uwsgi.opt['config_tisbackup']
SECTIONS = uwsgi.opt['sections']
ADMIN_EMAIL = uwsgi.opt.get('ADMIN_EMAIL',uwsgi.opt.get('admin_email'))
spooler = uwsgi.opt['spooler']
-tisbackup_config_file= uwsgi.opt['config']
+tisbackup_config_file= uwsgi.opt['config_tisbackup']
cp = ConfigParser()
cp.read(tisbackup_config_file)
@@ -82,10 +86,12 @@ def read_config():
backup_dict = {}
backup_dict['rsync_ssh_list'] = []
+ backup_dict['rsync_btrfs_list'] = []
backup_dict['rsync_list'] = []
backup_dict['null_list'] = []
backup_dict['pgsql_list'] = []
backup_dict['mysql_list'] = []
+ backup_dict['sqlserver_list'] = []
backup_dict['xva_list'] = []
backup_dict['metadata_list'] = []
backup_dict['switch_list'] = []
@@ -98,6 +104,9 @@ def read_config():
if backup_type == "rsync+ssh":
remote_dir = row['remote_dir']
backup_dict['rsync_ssh_list'].append([server_name, backup_name, backup_type,remote_dir])
+ if backup_type == "rsync+btrfs+ssh":
+ remote_dir = row['remote_dir']
+ backup_dict['rsync_btrfs_list'].append([server_name, backup_name, backup_type,remote_dir])
if backup_type == "rsync":
remote_dir = row['remote_dir']
backup_dict['rsync_list'].append([server_name, backup_name, backup_type,remote_dir])
@@ -109,6 +118,9 @@ def read_config():
if backup_type == "mysql+ssh":
db_name = row['db_name']
backup_dict['mysql_list'].append([server_name, backup_name, backup_type, db_name])
+ if backup_type == "sqlserver+ssh":
+ db_name = row['db_name']
+ backup_dict['sqlserver_list'].append([server_name, backup_name, backup_type, db_name])
if backup_type == "xen-xva":
backup_dict['xva_list'].append([server_name, backup_name, backup_type, ""])
if backup_type == "switch":
@@ -123,37 +135,7 @@ def backup_all():
@app.route('/json')
def backup_json():
backup_dict = read_config()
- return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']+ backup_dict['switch_list'])
-
-#def check_usb_disk():
-# """This method returns the mounts point of FIRST external disk"""
-# disk_name = []
-# for name in glob.glob('/dev/sd[a-z]'):
-# for line in os.popen("udevinfo --query=env --name %s" % name):
-# if "ID_BUS=usb" in line:
-# disk_name += [ name ]
-# if len(disk_name) == 0:
-# raise_error("cannot find external usb disk", "You should plug the usb hard drive into the server")
-# return ""
-# elif len(disk_name) > 1:
-# raise_error("There are many usb disk", "You should plug remove one of them")
-# return ""
-# else:
-# disk_name = disk_name[0]
-# flash("The first usb media is: %s" % disk_name)
-# if os.path.exists(disk_name+"1"):
-# flash("partition found: %s1" % disk_name)
-# partition_name = disk_name+"1"
-# else:
-# raise_error("No partition exist", "You should initialize the usb drive")
-# return ""
-# if not "tisbackup" in os.popen("/sbin/dumpe2fs -h %s 2>&1 |/bin/grep 'volume name'" % partition_name).read():
-# raise_error("the label is not vaid", "You should use 'TISBACKUP' label")
-# return ""
-# if not "ext4" in os.popen("/sbin/fsck -N %s 2>&1" % partition_name).read():
-# raise_error("bad file system", "You should format usb drive into ext4")
-# return ""
-# return partition_name
+    return json.dumps(backup_dict['rsync_list']+backup_dict['rsync_btrfs_list']+backup_dict['rsync_ssh_list']+backup_dict['pgsql_list']+backup_dict['mysql_list']+backup_dict['sqlserver_list']+backup_dict['xva_list']+backup_dict['null_list']+backup_dict['metadata_list']+backup_dict['switch_list'])
def check_usb_disk():
@@ -256,10 +238,19 @@ def last_backup():
@app.route('/export_backup')
def export_backup():
raise_error("", "")
-
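+    # read_config() entries are [server_name, backup_name, backup_type, target]; collect the backup names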
+    backup_dict = read_config()
+    sections = []
+    for backup_types in backup_dict:
+        for section in backup_dict[backup_types]:
+            if len(section) > 0:
+                sections.append(section[1])
+
noJobs = ( len(os.listdir(spooler)) == 0 )
if "start" in request.args.keys() or not noJobs:
start=True
+ if "sections" in request.args.keys():
+ backup_sections = request.args.getlist('sections')
+
else:
start=False
cp.read(tisbackup_config_file)
@@ -274,9 +265,9 @@ def export_backup():
global mindate
mindate = datetime2isodate(datetime.datetime.now())
if not error and start:
- run_export_backup.spool(base=backup_base_dir, config_file=tisbackup_config_file, mount_point=mount_point)
+ run_export_backup.spool(base=backup_base_dir, config_file=tisbackup_config_file, mount_point=mount_point, backup_sections=",".join([str(x) for x in backup_sections]))
- return render_template("export_backup.html", error=error, start=start, info=info, email=ADMIN_EMAIL)
+ return render_template("export_backup.html", error=error, start=start, info=info, email=ADMIN_EMAIL, sections=sections)
def raise_error(strError, strInfo):
@@ -303,8 +294,11 @@ def run_export_backup(args):
# Main
logger.info("Running export....")
-
- backup_sections = []
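+    # backup_sections arrives as a comma-joined string (uwsgi spooler arguments must be strings)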
+    if args.get('backup_sections'):
+        backup_sections = args['backup_sections'].split(",")
+    else:
+        backup_sections = []
+
backup = tis_backup(dry_run=False,verbose=True,backup_base_dir=args['base'])
backup.read_ini_file(args['config_file'])
mount_point = args['mount_point']