diff --git a/README.md b/README.md
index 5526c8b..29da480 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,29 @@
-tisbackup
-=========
+# -----------------------------------------------------------------------
+# This file is part of TISBackup
+#
+# TISBackup is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# TISBackup is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
+#
+# -----------------------------------------------------------------------
-backup server side executed python scripts for managing linux and windows system and application data backups, developed by adminsys for adminsys
\ No newline at end of file
+
+The tisbackup script is driven by a .ini configuration file; see the sample file for the expected format.
+
+To run a backup:
+./tisbackup.py -c config_file.ini
+
+To run a single section of the .ini file:
+./tisbackup.py -c config_file.ini -s chosen_section
+
+To enable debug output:
+./tisbackup.py -c config_file.ini -l debug
diff --git a/iniparse/__init__.py b/iniparse/__init__.py
new file mode 100755
index 0000000..8de756f
--- /dev/null
+++ b/iniparse/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2001, 2002, 2003 Python Software Foundation
+# Copyright (c) 2004-2008 Paramjit Oberoi
+# Copyright (c) 2007 Tim Lauridsen
+# All Rights Reserved. See LICENSE-PSF & LICENSE for details.
+
+from ini import INIConfig, change_comment_syntax
+from config import BasicConfig, ConfigNamespace
+from compat import RawConfigParser, ConfigParser, SafeConfigParser
+from utils import tidy
+
+from ConfigParser import DuplicateSectionError, \
+                         NoSectionError, NoOptionError, \
+                         InterpolationMissingOptionError, \
+                         InterpolationDepthError, \
+                         InterpolationSyntaxError, \
+                         DEFAULTSECT, MAX_INTERPOLATION_DEPTH
+
+__all__ = [
+    'BasicConfig', 'ConfigNamespace',
+    'INIConfig', 'tidy', 'change_comment_syntax',
+    'RawConfigParser', 'ConfigParser', 'SafeConfigParser',
+    'DuplicateSectionError', 'NoSectionError', 'NoOptionError',
+    'InterpolationMissingOptionError', 'InterpolationDepthError',
+    'InterpolationSyntaxError', 'DEFAULTSECT', 'MAX_INTERPOLATION_DEPTH',
+]
diff --git a/iniparse/compat.py b/iniparse/compat.py
new file mode 100755
index 0000000..db89ed8
--- /dev/null
+++ b/iniparse/compat.py
@@ -0,0 +1,343 @@
+# Copyright (c) 2001, 2002, 2003 Python Software Foundation
+# Copyright (c) 2004-2008 Paramjit Oberoi
+# All Rights Reserved. See LICENSE-PSF & LICENSE for details.
+
+"""Compatibility interfaces for ConfigParser
+
+Interfaces of ConfigParser, RawConfigParser and SafeConfigParser
+should be completely identical to the Python standard library
+versions. Tested with the unit tests included with Python-2.3.4
+
+The underlying INIConfig object can be accessed as cfg.data
+"""
+
+import re
+from ConfigParser import DuplicateSectionError, \
+                         NoSectionError, NoOptionError, \
+                         InterpolationMissingOptionError, \
+                         InterpolationDepthError, \
+                         InterpolationSyntaxError, \
+                         DEFAULTSECT, MAX_INTERPOLATION_DEPTH
+
+# These are imported only for compatibility.
+# The code below does not reference them directly. +from ConfigParser import Error, InterpolationError, \ + MissingSectionHeaderError, ParsingError + +import ini + +class RawConfigParser(object): + def __init__(self, defaults=None, dict_type=dict): + if dict_type != dict: + raise ValueError('Custom dict types not supported') + self.data = ini.INIConfig(defaults=defaults, optionxformsource=self) + + def optionxform(self, optionstr): + return optionstr.lower() + + def defaults(self): + d = {} + secobj = self.data._defaults + for name in secobj._options: + d[name] = secobj._compat_get(name) + return d + + def sections(self): + """Return a list of section names, excluding [DEFAULT]""" + return list(self.data) + + def add_section(self, section): + """Create a new section in the configuration. + + Raise DuplicateSectionError if a section by the specified name + already exists. Raise ValueError if name is DEFAULT or any of + its case-insensitive variants. + """ + # The default section is the only one that gets the case-insensitive + # treatment - so it is special-cased here. + if section.lower() == "default": + raise ValueError, 'Invalid section name: %s' % section + + if self.has_section(section): + raise DuplicateSectionError(section) + else: + self.data._new_namespace(section) + + def has_section(self, section): + """Indicate whether the named section is present in the configuration. + + The DEFAULT section is not acknowledged. + """ + return (section in self.data) + + def options(self, section): + """Return a list of option names for the given section name.""" + if section in self.data: + return list(self.data[section]) + else: + raise NoSectionError(section) + + def read(self, filenames): + """Read and parse a filename or a list of filenames. + + Files that cannot be opened are silently ignored; this is + designed so that you can specify a list of potential + configuration file locations (e.g. current directory, user's + home directory, systemwide directory), and all existing + configuration files in the list will be read. A single + filename may also be given. + """ + files_read = [] + if isinstance(filenames, basestring): + filenames = [filenames] + for filename in filenames: + try: + fp = open(filename) + except IOError: + continue + files_read.append(filename) + self.data._readfp(fp) + fp.close() + return files_read + + def readfp(self, fp, filename=None): + """Like read() but the argument must be a file-like object. + + The `fp' argument must have a `readline' method. Optional + second argument is the `filename', which if not given, is + taken from fp.name. If fp has no `name' attribute, `' is + used. 
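+
+        Note: in this implementation the `filename' argument is
+        accepted for compatibility but otherwise ignored.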
+ """ + self.data._readfp(fp) + + def get(self, section, option, vars=None): + if not self.has_section(section): + raise NoSectionError(section) + if vars is not None and option in vars: + value = vars[option] + + sec = self.data[section] + if option in sec: + return sec._compat_get(option) + else: + raise NoOptionError(option, section) + + def items(self, section): + if section in self.data: + ans = [] + for opt in self.data[section]: + ans.append((opt, self.get(section, opt))) + return ans + else: + raise NoSectionError(section) + + def getint(self, section, option): + return int(self.get(section, option)) + + def getfloat(self, section, option): + return float(self.get(section, option)) + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def getboolean(self, section, option): + v = self.get(section, option) + if v.lower() not in self._boolean_states: + raise ValueError, 'Not a boolean: %s' % v + return self._boolean_states[v.lower()] + + def has_option(self, section, option): + """Check for the existence of a given option in a given section.""" + if section in self.data: + sec = self.data[section] + else: + raise NoSectionError(section) + return (option in sec) + + def set(self, section, option, value): + """Set an option.""" + if section in self.data: + self.data[section][option] = value + else: + raise NoSectionError(section) + + def write(self, fp): + """Write an .ini-format representation of the configuration state.""" + fp.write(str(self.data)) + + def remove_option(self, section, option): + """Remove an option.""" + if section in self.data: + sec = self.data[section] + else: + raise NoSectionError(section) + if option in sec: + del sec[option] + return 1 + else: + return 0 + + def remove_section(self, section): + """Remove a file section.""" + if not self.has_section(section): + return False + del self.data[section] + return True + + +class ConfigDict(object): + """Present a dict interface to a ini section.""" + + def __init__(self, cfg, section, vars): + self.cfg = cfg + self.section = section + self.vars = vars + + def __getitem__(self, key): + try: + return RawConfigParser.get(self.cfg, self.section, key, self.vars) + except (NoOptionError, NoSectionError): + raise KeyError(key) + + +class ConfigParser(RawConfigParser): + + def get(self, section, option, raw=False, vars=None): + """Get an option value for a given section. + + All % interpolations are expanded in the return values, based on the + defaults passed into the constructor, unless the optional argument + `raw' is true. Additional substitutions may be provided using the + `vars' argument, which must be a dictionary whose contents overrides + any pre-existing defaults. + + The section DEFAULT is special. 
+ """ + if section != DEFAULTSECT and not self.has_section(section): + raise NoSectionError(section) + + option = self.optionxform(option) + value = RawConfigParser.get(self, section, option, vars) + + if raw: + return value + else: + d = ConfigDict(self, section, vars) + return self._interpolate(section, option, value, d) + + def _interpolate(self, section, option, rawval, vars): + # do the string interpolation + value = rawval + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if "%(" in value: + try: + value = value % vars + except KeyError, e: + raise InterpolationMissingOptionError( + option, section, rawval, e.args[0]) + else: + break + if value.find("%(") != -1: + raise InterpolationDepthError(option, section, rawval) + return value + + def items(self, section, raw=False, vars=None): + """Return a list of tuples with (name, value) for each option + in the section. + + All % interpolations are expanded in the return values, based on the + defaults passed into the constructor, unless the optional argument + `raw' is true. Additional substitutions may be provided using the + `vars' argument, which must be a dictionary whose contents overrides + any pre-existing defaults. + + The section DEFAULT is special. + """ + if section != DEFAULTSECT and not self.has_section(section): + raise NoSectionError(section) + if vars is None: + options = list(self.data[section]) + else: + options = [] + for x in self.data[section]: + if x not in vars: + options.append(x) + options.extend(vars.keys()) + + if "__name__" in options: + options.remove("__name__") + + d = ConfigDict(self, section, vars) + if raw: + return [(option, d[option]) + for option in options] + else: + return [(option, self._interpolate(section, option, d[option], d)) + for option in options] + + +class SafeConfigParser(ConfigParser): + _interpvar_re = re.compile(r"%\(([^)]+)\)s") + _badpercent_re = re.compile(r"%[^%]|%$") + + def set(self, section, option, value): + if not isinstance(value, basestring): + raise TypeError("option values must be strings") + # check for bad percent signs: + # first, replace all "good" interpolations + tmp_value = self._interpvar_re.sub('', value) + # then, check if there's a lone percent sign left + m = self._badpercent_re.search(tmp_value) + if m: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, m.start())) + + ConfigParser.set(self, section, option, value) + + def _interpolate(self, section, option, rawval, vars): + # do the string interpolation + L = [] + self._interpolate_some(option, L, rawval, section, vars, 1) + return ''.join(L) + + _interpvar_match = re.compile(r"%\(([^)]+)\)s").match + + def _interpolate_some(self, option, accum, rest, section, map, depth): + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rest) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._interpvar_match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = m.group(1) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise InterpolationMissingOptionError( + option, section, rest, var) + if "%" in v: + self._interpolate_some(option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + 
raise InterpolationSyntaxError(
+                    option, section,
+                    "'%' must be followed by '%' or '(', found: " + repr(rest))
diff --git a/iniparse/config.py b/iniparse/config.py
new file mode 100755
index 0000000..5cfa2ea
--- /dev/null
+++ b/iniparse/config.py
@@ -0,0 +1,294 @@
+class ConfigNamespace(object):
+    """Abstract class representing the interface of Config objects.
+
+    A ConfigNamespace is a collection of names mapped to values, where
+    the values may be nested namespaces. Values can be accessed via
+    container notation - obj[key] - or via dotted notation - obj.key.
+    Both these access methods are equivalent.
+
+    To minimize name conflicts between namespace keys and class members,
+    the number of class members should be minimized, and the names of
+    all class members should start with an underscore.
+
+    Subclasses must implement the methods for container-like access,
+    and this class will automatically provide dotted access.
+
+    """
+
+    # Methods that must be implemented by subclasses
+
+    def _getitem(self, key):
+        raise NotImplementedError(key)
+
+    def __setitem__(self, key, value):
+        raise NotImplementedError(key, value)
+
+    def __delitem__(self, key):
+        raise NotImplementedError(key)
+
+    def __iter__(self):
+        raise NotImplementedError()
+
+    def _new_namespace(self, name):
+        raise NotImplementedError(name)
+
+    def __contains__(self, key):
+        try:
+            self._getitem(key)
+        except KeyError:
+            return False
+        return True
+
+    # Machinery for converting dotted access into container access,
+    # and automatically creating new sections/namespaces.
+    #
+    # To distinguish between accesses of class members and namespace
+    # keys, we first call object.__getattribute__(). If that succeeds,
+    # the name is assumed to be a class member. Otherwise it is
+    # treated as a namespace key.
+    #
+    # Therefore, member variables should be defined in the class,
+    # not just in the __init__() function. See BasicNamespace for
+    # an example.
+
+    def __getitem__(self, key):
+        try:
+            return self._getitem(key)
+        except KeyError:
+            return Undefined(key, self)
+
+    def __getattr__(self, name):
+        try:
+            return self._getitem(name)
+        except KeyError:
+            if name.startswith('__') and name.endswith('__'):
+                raise AttributeError
+            return Undefined(name, self)
+
+    def __setattr__(self, name, value):
+        try:
+            object.__getattribute__(self, name)
+            object.__setattr__(self, name, value)
+        except AttributeError:
+            self.__setitem__(name, value)
+
+    def __delattr__(self, name):
+        try:
+            object.__getattribute__(self, name)
+            object.__delattr__(self, name)
+        except AttributeError:
+            self.__delitem__(name)
+
+    # During unpickling, Python checks if the class has a __setstate__
+    # method. But, the data dicts have not been initialised yet, which
+    # leads to _getitem and hence __getattr__ raising an exception. So
+    # we explicitly implement default __setstate__ behavior.
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+
+class Undefined(object):
+    """Helper class used to hold undefined names until assignment.
+
+    This class helps create any undefined subsections when an
+    assignment is made to a nested value. For example, if the
+    statement is "cfg.a.b.c = 42", but "cfg.a.b" does not exist yet.
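+
+    A small doctest-style illustration (one level of nesting):
+
+    >>> n = BasicConfig()
+    >>> n.a.x = 42
+    >>> n.a.x
+    42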
+ """ + + def __init__(self, name, namespace): + object.__setattr__(self, 'name', name) + object.__setattr__(self, 'namespace', namespace) + + def __setattr__(self, name, value): + obj = self.namespace._new_namespace(self.name) + obj[name] = value + + def __setitem__(self, name, value): + obj = self.namespace._new_namespace(self.name) + obj[name] = value + + +# ---- Basic implementation of a ConfigNamespace + +class BasicConfig(ConfigNamespace): + """Represents a hierarchical collection of named values. + + Values are added using dotted notation: + + >>> n = BasicConfig() + >>> n.x = 7 + >>> n.name.first = 'paramjit' + >>> n.name.last = 'oberoi' + + ...and accessed the same way, or with [...]: + + >>> n.x + 7 + >>> n.name.first + 'paramjit' + >>> n.name.last + 'oberoi' + >>> n['x'] + 7 + >>> n['name']['first'] + 'paramjit' + + Iterating over the namespace object returns the keys: + + >>> l = list(n) + >>> l.sort() + >>> l + ['name', 'x'] + + Values can be deleted using 'del' and printed using 'print'. + + >>> n.aaa = 42 + >>> del n.x + >>> print n + aaa = 42 + name.first = paramjit + name.last = oberoi + + Nested namepsaces are also namespaces: + + >>> isinstance(n.name, ConfigNamespace) + True + >>> print n.name + first = paramjit + last = oberoi + >>> sorted(list(n.name)) + ['first', 'last'] + + Finally, values can be read from a file as follows: + + >>> from StringIO import StringIO + >>> sio = StringIO(''' + ... # comment + ... ui.height = 100 + ... ui.width = 150 + ... complexity = medium + ... have_python + ... data.secret.password = goodness=gracious me + ... ''') + >>> n = BasicConfig() + >>> n._readfp(sio) + >>> print n + complexity = medium + data.secret.password = goodness=gracious me + have_python + ui.height = 100 + ui.width = 150 + """ + + # this makes sure that __setattr__ knows this is not a namespace key + _data = None + + def __init__(self): + self._data = {} + + def _getitem(self, key): + return self._data[key] + + def __setitem__(self, key, value): + self._data[key] = value + + def __delitem__(self, key): + del self._data[key] + + def __iter__(self): + return iter(self._data) + + def __str__(self, prefix=''): + lines = [] + keys = self._data.keys() + keys.sort() + for name in keys: + value = self._data[name] + if isinstance(value, ConfigNamespace): + lines.append(value.__str__(prefix='%s%s.' % (prefix,name))) + else: + if value is None: + lines.append('%s%s' % (prefix, name)) + else: + lines.append('%s%s = %s' % (prefix, name, value)) + return '\n'.join(lines) + + def _new_namespace(self, name): + obj = BasicConfig() + self._data[name] = obj + return obj + + def _readfp(self, fp): + while True: + line = fp.readline() + if not line: + break + + line = line.strip() + if not line: continue + if line[0] == '#': continue + data = line.split('=', 1) + if len(data) == 1: + name = line + value = None + else: + name = data[0].strip() + value = data[1].strip() + name_components = name.split('.') + ns = self + for n in name_components[:-1]: + if n in ns: + ns = ns[n] + if not isinstance(ns, ConfigNamespace): + raise TypeError('value-namespace conflict', n) + else: + ns = ns._new_namespace(n) + ns[name_components[-1]] = value + + +# ---- Utility functions + +def update_config(target, source): + """Imports values from source into target. + + Recursively walks the ConfigNamespace and inserts values + into the ConfigNamespace. 
For example:
+
+    >>> n = BasicConfig()
+    >>> n.playlist.expand_playlist = True
+    >>> n.ui.display_clock = True
+    >>> n.ui.display_qlength = True
+    >>> n.ui.width = 150
+    >>> print n
+    playlist.expand_playlist = True
+    ui.display_clock = True
+    ui.display_qlength = True
+    ui.width = 150
+
+    >>> from iniparse import ini
+    >>> i = ini.INIConfig()
+    >>> update_config(i, n)
+    >>> print i
+    [playlist]
+    expand_playlist = True
+
+    [ui]
+    display_clock = True
+    display_qlength = True
+    width = 150
+
+    """
+    for name in source:
+        value = source[name]
+        if isinstance(value, ConfigNamespace):
+            if name in target:
+                myns = target[name]
+                if not isinstance(myns, ConfigNamespace):
+                    raise TypeError('value-namespace conflict')
+            else:
+                myns = target._new_namespace(name)
+            update_config(myns, value)
+        else:
+            target[name] = value
+
+
diff --git a/iniparse/ini.py b/iniparse/ini.py
new file mode 100755
index 0000000..68dd65c
--- /dev/null
+++ b/iniparse/ini.py
@@ -0,0 +1,643 @@
+"""Access and/or modify INI files
+
+* Compatible with ConfigParser
+* Preserves order of sections & options
+* Preserves comments/blank lines/etc
+* More convenient access to data
+
+Example:
+
+    >>> from StringIO import StringIO
+    >>> sio = StringIO('''# configure foo-application
+    ... [foo]
+    ... bar1 = qualia
+    ... bar2 = 1977
+    ... [foo-ext]
+    ... special = 1''')
+
+    >>> cfg = INIConfig(sio)
+    >>> print cfg.foo.bar1
+    qualia
+    >>> print cfg['foo-ext'].special
+    1
+    >>> cfg.foo.newopt = 'hi!'
+    >>> cfg.baz.enabled = 0
+
+    >>> print cfg
+    # configure foo-application
+    [foo]
+    bar1 = qualia
+    bar2 = 1977
+    newopt = hi!
+    [foo-ext]
+    special = 1
+
+    [baz]
+    enabled = 0
+
+"""
+
+# An ini parser that supports ordered sections/options
+# Also supports updates, while preserving structure
+# Backward-compatible with ConfigParser
+
+import re
+from ConfigParser import DEFAULTSECT, ParsingError, MissingSectionHeaderError
+
+import config
+
+class LineType(object):
+    line = None
+
+    def __init__(self, line=None):
+        if line is not None:
+            self.line = line.strip('\n')
+
+    # Return the original line for unmodified objects
+    # Otherwise construct using the current attribute values
+    def __str__(self):
+        if self.line is not None:
+            return self.line
+        else:
+            return self.to_string()
+
+    # If an attribute is modified after initialization
+    # set line to None since it is no longer accurate.
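+    # (__str__ then falls back to to_string(), which rebuilds the line
+    # from the current attribute values)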
+    def __setattr__(self, name, value):
+        if hasattr(self,name):
+            self.__dict__['line'] = None
+        self.__dict__[name] = value
+
+    def to_string(self):
+        raise Exception('This method must be overridden in derived classes')
+
+
+class SectionLine(LineType):
+    regex = re.compile(r'^\['
+                       r'(?P<name>[^]]+)'
+                       r'\]\s*'
+                       r'((?P<csep>;|#)(?P<comment>.*))?$')
+
+    def __init__(self, name, comment=None, comment_separator=None,
+                 comment_offset=-1, line=None):
+        super(SectionLine, self).__init__(line)
+        self.name = name
+        self.comment = comment
+        self.comment_separator = comment_separator
+        self.comment_offset = comment_offset
+
+    def to_string(self):
+        out = '[' + self.name + ']'
+        if self.comment is not None:
+            # try to preserve indentation of comments
+            out = (out+' ').ljust(self.comment_offset)
+            out = out + self.comment_separator + self.comment
+        return out
+
+    def parse(cls, line):
+        m = cls.regex.match(line.rstrip())
+        if m is None:
+            return None
+        return cls(m.group('name'), m.group('comment'),
+                   m.group('csep'), m.start('csep'),
+                   line)
+    parse = classmethod(parse)
+
+
+class OptionLine(LineType):
+    def __init__(self, name, value, separator=' = ', comment=None,
+                 comment_separator=None, comment_offset=-1, line=None):
+        super(OptionLine, self).__init__(line)
+        self.name = name
+        self.value = value
+        self.separator = separator
+        self.comment = comment
+        self.comment_separator = comment_separator
+        self.comment_offset = comment_offset
+
+    def to_string(self):
+        out = '%s%s%s' % (self.name, self.separator, self.value)
+        if self.comment is not None:
+            # try to preserve indentation of comments
+            out = (out+' ').ljust(self.comment_offset)
+            out = out + self.comment_separator + self.comment
+        return out
+
+    regex = re.compile(r'^(?P<name>[^:=\s[][^:=]*)'
+                       r'(?P<sep>[:=]\s*)'
+                       r'(?P<value>.*)$')
+
+    def parse(cls, line):
+        m = cls.regex.match(line.rstrip())
+        if m is None:
+            return None
+
+        name = m.group('name').rstrip()
+        value = m.group('value')
+        sep = m.group('name')[len(name):] + m.group('sep')
+
+        # comments are not detected in the regex because
+        # ensuring total compatibility with ConfigParser
+        # requires that:
+        #     option = value    ;comment   // value=='value'
+        #     option = value;1  ;comment   // value=='value;1  ;comment'
+        #
+        # Doing this in a regex would be complicated. I
+        # think this is a bug. The whole issue of how to
+        # include ';' in the value needs to be addressed.
+        # Also, '#' doesn't mark comments in options...
+
+        coff = value.find(';')
+        if coff != -1 and value[coff-1].isspace():
+            comment = value[coff+1:]
+            csep = value[coff]
+            value = value[:coff].rstrip()
+            coff = m.start('value') + coff
+        else:
+            comment = None
+            csep = None
+            coff = -1
+
+        return cls(name, value, sep, comment, csep, coff, line)
+    parse = classmethod(parse)
+
+
+def change_comment_syntax(comment_chars='%;#', allow_rem=False):
+    comment_chars = re.sub(r'([\]\-\^])', r'\\\1', comment_chars)
+    regex = r'^(?P<csep>[%s]' % comment_chars
+    if allow_rem:
+        regex += '|[rR][eE][mM]'
+    regex += r')(?P<comment>.*)$'
+    CommentLine.regex = re.compile(regex)
+
+class CommentLine(LineType):
+    regex = re.compile(r'^(?P<csep>[;#]|[rR][eE][mM] +)'
+                       r'(?P<comment>.*)$')
+
+    def __init__(self, comment='', separator='#', line=None):
+        super(CommentLine, self).__init__(line)
+        self.comment = comment
+        self.separator = separator
+
+    def to_string(self):
+        return self.separator + self.comment
+
+    def parse(cls, line):
+        m = cls.regex.match(line.rstrip())
+        if m is None:
+            return None
+        return cls(m.group('comment'), m.group('csep'), line)
+    parse = classmethod(parse)
+
+
+class EmptyLine(LineType):
+    # could make this a singleton
+    def to_string(self):
+        return ''
+
+    value = property(lambda _: '')
+
+    def parse(cls, line):
+        if line.strip(): return None
+        return cls(line)
+    parse = classmethod(parse)
+
+
+class ContinuationLine(LineType):
+    regex = re.compile(r'^\s+(?P<value>.*)$')
+
+    def __init__(self, value, value_offset=None, line=None):
+        super(ContinuationLine, self).__init__(line)
+        self.value = value
+        if value_offset is None:
+            value_offset = 8
+        self.value_offset = value_offset
+
+    def to_string(self):
+        return ' '*self.value_offset + self.value
+
+    def parse(cls, line):
+        m = cls.regex.match(line.rstrip())
+        if m is None:
+            return None
+        return cls(m.group('value'), m.start('value'), line)
+    parse = classmethod(parse)
+
+
+class LineContainer(object):
+    def __init__(self, d=None):
+        self.contents = []
+        self.orgvalue = None
+        if d:
+            if isinstance(d, list): self.extend(d)
+            else: self.add(d)
+
+    def add(self, x):
+        self.contents.append(x)
+
+    def extend(self, x):
+        for i in x: self.add(i)
+
+    def get_name(self):
+        return self.contents[0].name
+
+    def set_name(self, data):
+        self.contents[0].name = data
+
+    def get_value(self):
+        if self.orgvalue is not None:
+            return self.orgvalue
+        elif len(self.contents) == 1:
+            return self.contents[0].value
+        else:
+            return '\n'.join([('%s' % x.value) for x in self.contents
+                              if not isinstance(x, CommentLine)])
+
+    def set_value(self, data):
+        self.orgvalue = data
+        lines = ('%s' % data).split('\n')
+
+        # If there is an existing ContinuationLine, use its offset
+        value_offset = None
+        for v in self.contents:
+            if isinstance(v, ContinuationLine):
+                value_offset = v.value_offset
+                break
+
+        # Rebuild contents list, preserving initial OptionLine
+        self.contents = self.contents[0:1]
+        self.contents[0].value = lines[0]
+        del lines[0]
+        for line in lines:
+            if line.strip():
+                self.add(ContinuationLine(line, value_offset))
+            else:
+                self.add(EmptyLine())
+
+    name = property(get_name, set_name)
+    value = property(get_value, set_value)
+
+    def __str__(self):
+        s = [x.__str__() for x in self.contents]
+        return '\n'.join(s)
+
+    def finditer(self, key):
+        for x in self.contents[::-1]:
+            if hasattr(x, 'name') and x.name==key:
+                yield x
+
+    def find(self, key):
+        for x in self.finditer(key):
+            return x
+        raise KeyError(key)
+
+
+def _make_xform_property(myattrname, srcattrname=None):
+    private_attrname = myattrname + 'value'
+    private_srcname = 
myattrname + 'source' + if srcattrname is None: + srcattrname = myattrname + + def getfn(self): + srcobj = getattr(self, private_srcname) + if srcobj is not None: + return getattr(srcobj, srcattrname) + else: + return getattr(self, private_attrname) + + def setfn(self, value): + srcobj = getattr(self, private_srcname) + if srcobj is not None: + setattr(srcobj, srcattrname, value) + else: + setattr(self, private_attrname, value) + + return property(getfn, setfn) + + +class INISection(config.ConfigNamespace): + _lines = None + _options = None + _defaults = None + _optionxformvalue = None + _optionxformsource = None + _compat_skip_empty_lines = set() + def __init__(self, lineobj, defaults = None, + optionxformvalue=None, optionxformsource=None): + self._lines = [lineobj] + self._defaults = defaults + self._optionxformvalue = optionxformvalue + self._optionxformsource = optionxformsource + self._options = {} + + _optionxform = _make_xform_property('_optionxform') + + def _compat_get(self, key): + # identical to __getitem__ except that _compat_XXX + # is checked for backward-compatible handling + if key == '__name__': + return self._lines[-1].name + if self._optionxform: key = self._optionxform(key) + try: + value = self._options[key].value + del_empty = key in self._compat_skip_empty_lines + except KeyError: + if self._defaults and key in self._defaults._options: + value = self._defaults._options[key].value + del_empty = key in self._defaults._compat_skip_empty_lines + else: + raise + if del_empty: + value = re.sub('\n+', '\n', value) + return value + + def _getitem(self, key): + if key == '__name__': + return self._lines[-1].name + if self._optionxform: key = self._optionxform(key) + try: + return self._options[key].value + except KeyError: + if self._defaults and key in self._defaults._options: + return self._defaults._options[key].value + else: + raise + + def __setitem__(self, key, value): + if self._optionxform: xkey = self._optionxform(key) + else: xkey = key + if xkey in self._compat_skip_empty_lines: + self._compat_skip_empty_lines.remove(xkey) + if xkey not in self._options: + # create a dummy object - value may have multiple lines + obj = LineContainer(OptionLine(key, '')) + self._lines[-1].add(obj) + self._options[xkey] = obj + # the set_value() function in LineContainer + # automatically handles multi-line values + self._options[xkey].value = value + + def __delitem__(self, key): + if self._optionxform: key = self._optionxform(key) + if key in self._compat_skip_empty_lines: + self._compat_skip_empty_lines.remove(key) + for l in self._lines: + remaining = [] + for o in l.contents: + if isinstance(o, LineContainer): + n = o.name + if self._optionxform: n = self._optionxform(n) + if key != n: remaining.append(o) + else: + remaining.append(o) + l.contents = remaining + del self._options[key] + + def __iter__(self): + d = set() + for l in self._lines: + for x in l.contents: + if isinstance(x, LineContainer): + if self._optionxform: + ans = self._optionxform(x.name) + else: + ans = x.name + if ans not in d: + yield ans + d.add(ans) + if self._defaults: + for x in self._defaults: + if x not in d: + yield x + d.add(x) + + def _new_namespace(self, name): + raise Exception('No sub-sections allowed', name) + + +def make_comment(line): + return CommentLine(line.rstrip('\n')) + + +def readline_iterator(f): + """iterate over a file by only using the file object's readline method""" + + have_newline = False + while True: + line = f.readline() + + if not line: + if have_newline: + yield "" + 
return + + if line.endswith('\n'): + have_newline = True + else: + have_newline = False + + yield line + + +def lower(x): + return x.lower() + + +class INIConfig(config.ConfigNamespace): + _data = None + _sections = None + _defaults = None + _optionxformvalue = None + _optionxformsource = None + _sectionxformvalue = None + _sectionxformsource = None + _parse_exc = None + _bom = False + def __init__(self, fp=None, defaults=None, parse_exc=True, + optionxformvalue=lower, optionxformsource=None, + sectionxformvalue=None, sectionxformsource=None): + self._data = LineContainer() + self._parse_exc = parse_exc + self._optionxformvalue = optionxformvalue + self._optionxformsource = optionxformsource + self._sectionxformvalue = sectionxformvalue + self._sectionxformsource = sectionxformsource + self._sections = {} + if defaults is None: defaults = {} + self._defaults = INISection(LineContainer(), optionxformsource=self) + for name, value in defaults.iteritems(): + self._defaults[name] = value + if fp is not None: + self._readfp(fp) + + _optionxform = _make_xform_property('_optionxform', 'optionxform') + _sectionxform = _make_xform_property('_sectionxform', 'optionxform') + + def _getitem(self, key): + if key == DEFAULTSECT: + return self._defaults + if self._sectionxform: key = self._sectionxform(key) + return self._sections[key] + + def __setitem__(self, key, value): + raise Exception('Values must be inside sections', key, value) + + def __delitem__(self, key): + if self._sectionxform: key = self._sectionxform(key) + for line in self._sections[key]._lines: + self._data.contents.remove(line) + del self._sections[key] + + def __iter__(self): + d = set() + d.add(DEFAULTSECT) + for x in self._data.contents: + if isinstance(x, LineContainer): + if x.name not in d: + yield x.name + d.add(x.name) + + def _new_namespace(self, name): + if self._data.contents: + self._data.add(EmptyLine()) + obj = LineContainer(SectionLine(name)) + self._data.add(obj) + if self._sectionxform: name = self._sectionxform(name) + if name in self._sections: + ns = self._sections[name] + ns._lines.append(obj) + else: + ns = INISection(obj, defaults=self._defaults, + optionxformsource=self) + self._sections[name] = ns + return ns + + def __str__(self): + if self._bom: + fmt = u'\ufeff%s' + else: + fmt = '%s' + return fmt % self._data.__str__() + + __unicode__ = __str__ + + _line_types = [EmptyLine, CommentLine, + SectionLine, OptionLine, + ContinuationLine] + + def _parse(self, line): + for linetype in self._line_types: + lineobj = linetype.parse(line) + if lineobj: + return lineobj + else: + # can't parse line + return None + + def _readfp(self, fp): + cur_section = None + cur_option = None + cur_section_name = None + cur_option_name = None + pending_lines = [] + pending_empty_lines = False + try: + fname = fp.name + except AttributeError: + fname = '' + linecount = 0 + exc = None + line = None + + for line in readline_iterator(fp): + # Check for BOM on first line + if linecount == 0 and isinstance(line, unicode): + if line[0] == u'\ufeff': + line = line[1:] + self._bom = True + + lineobj = self._parse(line) + linecount += 1 + + if not cur_section and not isinstance(lineobj, + (CommentLine, EmptyLine, SectionLine)): + if self._parse_exc: + raise MissingSectionHeaderError(fname, linecount, line) + else: + lineobj = make_comment(line) + + if lineobj is None: + if self._parse_exc: + if exc is None: exc = ParsingError(fname) + exc.append(linecount, line) + lineobj = make_comment(line) + + if isinstance(lineobj, ContinuationLine): + 
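+                # a continuation belongs to the option currently being
+                # built; outside of an option it is treated as an error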
if cur_option:
+                    if pending_lines:
+                        cur_option.extend(pending_lines)
+                        pending_lines = []
+                        if pending_empty_lines:
+                            optobj._compat_skip_empty_lines.add(cur_option_name)
+                            pending_empty_lines = False
+                    cur_option.add(lineobj)
+                else:
+                    # illegal continuation line - convert to comment
+                    if self._parse_exc:
+                        if exc is None: exc = ParsingError(fname)
+                        exc.append(linecount, line)
+                    lineobj = make_comment(line)
+
+            if isinstance(lineobj, OptionLine):
+                if pending_lines:
+                    cur_section.extend(pending_lines)
+                    pending_lines = []
+                    pending_empty_lines = False
+                cur_option = LineContainer(lineobj)
+                cur_section.add(cur_option)
+                if self._optionxform:
+                    cur_option_name = self._optionxform(cur_option.name)
+                else:
+                    cur_option_name = cur_option.name
+                if cur_section_name == DEFAULTSECT:
+                    optobj = self._defaults
+                else:
+                    optobj = self._sections[cur_section_name]
+                optobj._options[cur_option_name] = cur_option
+
+            if isinstance(lineobj, SectionLine):
+                self._data.extend(pending_lines)
+                pending_lines = []
+                pending_empty_lines = False
+                cur_section = LineContainer(lineobj)
+                self._data.add(cur_section)
+                cur_option = None
+                cur_option_name = None
+                if cur_section.name == DEFAULTSECT:
+                    self._defaults._lines.append(cur_section)
+                    cur_section_name = DEFAULTSECT
+                else:
+                    if self._sectionxform:
+                        cur_section_name = self._sectionxform(cur_section.name)
+                    else:
+                        cur_section_name = cur_section.name
+                    if cur_section_name not in self._sections:
+                        self._sections[cur_section_name] = \
+                            INISection(cur_section, defaults=self._defaults,
+                                       optionxformsource=self)
+                    else:
+                        self._sections[cur_section_name]._lines.append(cur_section)
+
+            if isinstance(lineobj, (CommentLine, EmptyLine)):
+                pending_lines.append(lineobj)
+                if isinstance(lineobj, EmptyLine):
+                    pending_empty_lines = True
+
+        self._data.extend(pending_lines)
+        if line and line[-1]=='\n':
+            self._data.add(EmptyLine())
+
+        if exc:
+            raise exc
+
+
diff --git a/iniparse/utils.py b/iniparse/utils.py
new file mode 100755
index 0000000..829fc28
--- /dev/null
+++ b/iniparse/utils.py
@@ -0,0 +1,47 @@
+import compat
+from ini import LineContainer, EmptyLine
+
+def tidy(cfg):
+    """Clean up blank lines.
+
+    This function makes the configuration look clean and
+    handwritten - consecutive empty lines and empty lines at
+    the start of the file are removed, and one is guaranteed
+    to be at the end of the file.
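+
+    Typical use, as a rough sketch (the file name is hypothetical):
+
+        from iniparse import ConfigParser, tidy
+        cfg = ConfigParser()
+        cfg.read('backup.ini')
+        tidy(cfg)    # accepts compat parsers as well as INIConfig
+        cfg.write(open('backup.ini', 'w'))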
+ """ + + if isinstance(cfg, compat.RawConfigParser): + cfg = cfg.data + cont = cfg._data.contents + i = 1 + while i < len(cont): + if isinstance(cont[i], LineContainer): + tidy_section(cont[i]) + i += 1 + elif (isinstance(cont[i-1], EmptyLine) and + isinstance(cont[i], EmptyLine)): + del cont[i] + else: + i += 1 + + # Remove empty first line + if cont and isinstance(cont[0], EmptyLine): + del cont[0] + + # Ensure a last line + if cont and not isinstance(cont[-1], EmptyLine): + cont.append(EmptyLine()) + +def tidy_section(lc): + cont = lc.contents + i = 1 + while i < len(cont): + if (isinstance(cont[i-1], EmptyLine) and + isinstance(cont[i], EmptyLine)): + del cont[i] + else: + i += 1 + + # Remove empty first line + if len(cont) > 1 and isinstance(cont[1], EmptyLine): + del cont[1] diff --git a/libtisbackup/XenAPI.py b/libtisbackup/XenAPI.py new file mode 100644 index 0000000..092b4fe --- /dev/null +++ b/libtisbackup/XenAPI.py @@ -0,0 +1,242 @@ +#============================================================================ +# This library is free software; you can redistribute it and/or +# modify it under the terms of version 2.1 of the GNU Lesser General Public +# License as published by the Free Software Foundation. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +#============================================================================ +# Copyright (C) 2006-2007 XenSource Inc. +#============================================================================ +# +# Parts of this file are based upon xmlrpclib.py, the XML-RPC client +# interface included in the Python distribution. +# +# Copyright (c) 1999-2002 by Secret Labs AB +# Copyright (c) 1999-2002 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. 
+# -------------------------------------------------------------------- + +import gettext +import xmlrpclib +import httplib +import socket + +translation = gettext.translation('xen-xm', fallback = True) + +API_VERSION_1_1 = '1.1' +API_VERSION_1_2 = '1.2' + +# +# Methods that have different parameters between API versions 1.1 and 1.2, and +# the number of parameters in 1.1. +# +COMPATIBILITY_METHODS_1_1 = [ + ('SR.create' , 8), + ('SR.introduce' , 6), + ('SR.make' , 7), + ('VDI.snapshot' , 1), + ('VDI.clone' , 1), + ] + +class Failure(Exception): + def __init__(self, details): + self.details = details + + def __str__(self): + try: + return str(self.details) + except Exception, exn: + import sys + print >>sys.stderr, exn + return "Xen-API failure: %s" % str(self.details) + + def _details_map(self): + return dict([(str(i), self.details[i]) + for i in range(len(self.details))]) + + +_RECONNECT_AND_RETRY = (lambda _ : ()) + +class UDSHTTPConnection(httplib.HTTPConnection): + """HTTPConnection subclass to allow HTTP over Unix domain sockets. """ + def connect(self): + path = self.host.replace("_", "/") + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.connect(path) + +class UDSHTTP(httplib.HTTP): + _connection_class = UDSHTTPConnection + +class UDSTransport(xmlrpclib.Transport): + def make_connection(self, host): + return UDSHTTP(host) + +class Session(xmlrpclib.ServerProxy): + """A server proxy and session manager for communicating with xapi using + the Xen-API. + + Example: + + session = Session('http://localhost/') + session.login_with_password('me', 'mypassword') + session.xenapi.VM.start(vm_uuid) + session.xenapi.session.logout() + """ + + def __init__(self, uri, transport=None, encoding=None, verbose=0, + allow_none=1): + xmlrpclib.ServerProxy.__init__(self, uri, transport, encoding, + verbose, allow_none) + self._session = None + self.last_login_method = None + self.last_login_params = None + self.API_version = API_VERSION_1_1 + + + def xenapi_request(self, methodname, params): + if methodname.startswith('login'): + self._login(methodname, params) + return None + elif methodname == 'logout': + self._logout() + return None + else: + retry_count = 0 + while retry_count < 3: + full_params = (self._session,) + params + result = _parse_result(getattr(self, methodname)(*full_params)) + if result == _RECONNECT_AND_RETRY: + retry_count += 1 + if self.last_login_method: + self._login(self.last_login_method, + self.last_login_params) + else: + raise xmlrpclib.Fault(401, 'You must log in') + else: + return result + raise xmlrpclib.Fault( + 500, 'Tried 3 times to get a valid session, but failed') + + + def _login(self, method, params): + result = _parse_result(getattr(self, 'session.%s' % method)(*params)) + if result == _RECONNECT_AND_RETRY: + raise xmlrpclib.Fault( + 500, 'Received SESSION_INVALID when logging in') + self._session = result + self.last_login_method = method + self.last_login_params = params + if method.startswith("slave_local"): + self.API_version = API_VERSION_1_2 + else: + self.API_version = self._get_api_version() + + def logout(self): + try: + if self.last_login_method.startswith("slave_local"): + return _parse_result(self.session.local_logout(self._session)) + else: + return _parse_result(self.session.logout(self._session)) + finally: + self._session = None + self.last_login_method = None + self.last_login_params = None + self.API_version = API_VERSION_1_1 + + def _get_api_version(self): + pool = self.xenapi.pool.get_all()[0] + host = 
self.xenapi.pool.get_master(pool) + if (self.xenapi.host.get_API_version_major(host) == "1" and + self.xenapi.host.get_API_version_minor(host) == "2"): + return API_VERSION_1_2 + else: + return API_VERSION_1_1 + + def __getattr__(self, name): + if name == 'handle': + return self._session + elif name == 'xenapi': + return _Dispatcher(self.API_version, self.xenapi_request, None) + elif name.startswith('login') or name.startswith('slave_local'): + return lambda *params: self._login(name, params) + else: + return xmlrpclib.ServerProxy.__getattr__(self, name) + +def xapi_local(): + return Session("http://_var_xapi_xapi/", transport=UDSTransport()) + +def _parse_result(result): + if type(result) != dict or 'Status' not in result: + raise xmlrpclib.Fault(500, 'Missing Status in response from server' + result) + if result['Status'] == 'Success': + if 'Value' in result: + return result['Value'] + else: + raise xmlrpclib.Fault(500, + 'Missing Value in response from server') + else: + if 'ErrorDescription' in result: + if result['ErrorDescription'][0] == 'SESSION_INVALID': + return _RECONNECT_AND_RETRY + else: + raise Failure(result['ErrorDescription']) + else: + raise xmlrpclib.Fault( + 500, 'Missing ErrorDescription in response from server') + + +# Based upon _Method from xmlrpclib. +class _Dispatcher: + def __init__(self, API_version, send, name): + self.__API_version = API_version + self.__send = send + self.__name = name + + def __repr__(self): + if self.__name: + return '' % self.__name + else: + return '' + + def __getattr__(self, name): + if self.__name is None: + return _Dispatcher(self.__API_version, self.__send, name) + else: + return _Dispatcher(self.__API_version, self.__send, "%s.%s" % (self.__name, name)) + + def __call__(self, *args): + if self.__API_version == API_VERSION_1_1: + for m in COMPATIBILITY_METHODS_1_1: + if self.__name == m[0]: + return self.__send(self.__name, args[0:m[1]]) + + return self.__send(self.__name, args) + diff --git a/libtisbackup/__init__.py b/libtisbackup/__init__.py new file mode 100644 index 0000000..b72c7a3 --- /dev/null +++ b/libtisbackup/__init__.py @@ -0,0 +1,18 @@ +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . +# +# ----------------------------------------------------------------------- + diff --git a/libtisbackup/backup_mysql.py b/libtisbackup/backup_mysql.py new file mode 100644 index 0000000..050f988 --- /dev/null +++ b/libtisbackup/backup_mysql.py @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . +# +# ----------------------------------------------------------------------- + + + +import sys +try: + sys.stderr = open('/dev/null') # Silence silly warnings from paramiko + import paramiko +except ImportError,e: + print "Error : can not load paramiko library %s" % e + raise + +sys.stderr = sys.__stderr__ + +import datetime +import base64 +import os +from common import * + +class backup_mysql(backup_generic): + """Backup a mysql database as gzipped sql file through ssh""" + type = 'mysql+ssh' + required_params = backup_generic.required_params + ['db_name','db_user','db_passwd','private_key'] + db_name='' + db_user='' + db_passwd='' + + def do_backup(self,stats): + + self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key) + try: + mykey = paramiko.RSAKey.from_private_key_file(self.private_key) + except paramiko.SSHException: + mykey = paramiko.DSSKey.from_private_key_file(self.private_key) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(self.server_name,username='root',pkey = mykey, port=self.ssh_port) + + t = datetime.datetime.now() + backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S') + + # dump db + stats['status']='Dumping' + cmd = 'mysqldump -u' + self.db_user +' -p' + self.db_passwd + ' ' + self.db_name + ' > /tmp/' + self.db_name + '-' + backup_start_date + '.sql' + self.logger.debug('[%s] Dump DB : %s',self.backup_name,cmd) + if not self.dry_run: + (error_code,output) = ssh_exec(cmd,ssh=ssh) + self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output) + if error_code: + raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd)) + + # zip the file + stats['status']='Zipping' + cmd = 'gzip /tmp/' + self.db_name + '-' + backup_start_date + '.sql' + self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd) + if not self.dry_run: + (error_code,output) = ssh_exec(cmd,ssh=ssh) + self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output) + if error_code: + raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd)) + + # get the file + stats['status']='SFTP' + filepath = '/tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz' + localpath = os.path.join(self.backup_dir , self.db_name + '-' + backup_start_date + '.sql.gz') + self.logger.debug('[%s] Get gz backup with sftp on %s from %s to %s',self.backup_name,self.server_name,filepath,localpath) + if not self.dry_run: + transport = ssh.get_transport() + sftp = paramiko.SFTPClient.from_transport(transport) + sftp.get(filepath, localpath) + sftp.close() + + if not self.dry_run: + stats['total_files_count']=1 + stats['written_files_count']=1 + stats['total_bytes']=os.stat(localpath).st_size + stats['written_bytes']=os.stat(localpath).st_size + stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,self.db_name, stats['written_bytes'], localpath) + stats['backup_location'] = localpath + + stats['status']='RMTemp' + cmd = 'rm -f /tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz' + self.logger.debug('[%s] Remove temp gzip : %s',self.backup_name,cmd) + if not 
self.dry_run:
+            (error_code,output) = ssh_exec(cmd,ssh=ssh)
+            self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output)
+            if error_code:
+                raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd))
+        stats['status']='OK'
+
+    def register_existingbackups(self):
+        """scan backup dir and insert stats in database"""
+
+        registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
+
+        filelist = os.listdir(self.backup_dir)
+        filelist.sort()
+        p = re.compile('^%s-(?P<date>\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).sql.gz$' % self.db_name)
+        for item in filelist:
+            sr = p.match(item)
+            if sr:
+                file_name = os.path.join(self.backup_dir,item)
+                start = datetime.datetime.strptime(sr.groups()[0],'%Y%m%d-%Hh%Mm%S').isoformat()
+                if not file_name in registered:
+                    self.logger.info('Registering %s from %s',file_name,fileisodate(file_name))
+                    size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split('\t')[0])
+                    self.logger.debug('  Size in bytes : %i',size_bytes)
+                    if not self.dry_run:
+                        self.dbstat.add(self.backup_name,self.server_name,'',\
+                                        backup_start=start,backup_end=fileisodate(file_name),status='OK',total_bytes=size_bytes,backup_location=file_name)
+                else:
+                    self.logger.info('Skipping %s from %s, already registered',file_name,fileisodate(file_name))
+
+register_driver(backup_mysql)
diff --git a/libtisbackup/backup_null.py b/libtisbackup/backup_null.py
new file mode 100755
index 0000000..c5c58d3
--- /dev/null
+++ b/libtisbackup/backup_null.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------
+# This file is part of TISBackup
+#
+# TISBackup is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# TISBackup is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
+# +# ----------------------------------------------------------------------- + +import os +import datetime +from common import * + + +class backup_null(backup_generic): + """Null backup to register servers which don't need any backups + but we still want to know they are taken in account""" + type = 'null' + + required_params = ['type','server_name','backup_name'] + optional_params = [] + + def do_backup(self,stats): + pass + def process_backup(self): + pass + def cleanup_backup(self): + pass + def export_latestbackup(self,destdir): + return {} + def checknagios(self,maxage_hours=30): + return (nagiosStateOk,"No backups needs to be performed") + +register_driver(backup_null) + +if __name__=='__main__': + pass + diff --git a/libtisbackup/backup_pgsql.py b/libtisbackup/backup_pgsql.py new file mode 100644 index 0000000..5ec5077 --- /dev/null +++ b/libtisbackup/backup_pgsql.py @@ -0,0 +1,127 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . +# +# ----------------------------------------------------------------------- +import sys +try: + sys.stderr = open('/dev/null') # Silence silly warnings from paramiko + import paramiko +except ImportError,e: + print "Error : can not load paramiko library %s" % e + raise + +sys.stderr = sys.__stderr__ + +import datetime +import base64 +import os +import logging +import re +from common import * + +class backup_pgsql(backup_generic): + """Backup a postgresql database as gzipped sql file through ssh""" + type = 'pgsql+ssh' + required_params = backup_generic.required_params + ['db_name','private_key'] + db_name='' + + def do_backup(self,stats): + try: + mykey = paramiko.RSAKey.from_private_key_file(self.private_key) + except paramiko.SSHException: + mykey = paramiko.DSSKey.from_private_key_file(self.private_key) + + self.logger.debug('[%s] Trying to connect to "%s" with username root and key "%s"',self.backup_name,self.server_name,self.private_key) + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(self.server_name,username='root',pkey = mykey,port=self.ssh_port) + + t = datetime.datetime.now() + backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S') + + # dump db + cmd = 'sudo -u postgres pg_dump ' + self.db_name + ' > /tmp/' + self.db_name + '-' + backup_start_date + '.sql' + self.logger.debug('[%s] %s ',self.backup_name,cmd) + if not self.dry_run: + (error_code,output) = ssh_exec(cmd,ssh=ssh) + self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output) + if error_code: + raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd)) + + # zip the file + cmd = 'gzip /tmp/' + self.db_name + '-' + backup_start_date + '.sql' + self.logger.debug('[%s] %s ',self.backup_name,cmd) + if not self.dry_run: + (error_code,output) = ssh_exec(cmd,ssh=ssh) + self.logger.debug("[%s] Output of %s 
:\n%s",self.backup_name,cmd,output) + if error_code: + raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd)) + + # get the file + filepath = '/tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz' + localpath = self.backup_dir + '/' + self.db_name + '-' + backup_start_date + '.sql.gz' + self.logger.debug('[%s] get the file using sftp from "%s" to "%s" ',self.backup_name,filepath,localpath) + if not self.dry_run: + transport = ssh.get_transport() + sftp = paramiko.SFTPClient.from_transport(transport) + sftp.get(filepath, localpath) + sftp.close() + + if not self.dry_run: + stats['total_files_count']=1 + stats['written_files_count']=1 + stats['total_bytes']=os.stat(localpath).st_size + stats['written_bytes']=os.stat(localpath).st_size + stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,self.db_name, stats['written_bytes'], localpath) + + stats['backup_location'] = localpath + + cmd = 'rm -f /tmp/' + self.db_name + '-' + backup_start_date + '.sql.gz' + self.logger.debug('[%s] %s ',self.backup_name,cmd) + if not self.dry_run: + (error_code,output) = ssh_exec(cmd,ssh=ssh) + self.logger.debug("[%s] Output of %s :\n%s",self.backup_name,cmd,output) + if error_code: + raise Exception('Aborting, Not null exit code (%i) for "%s"' % (error_code,cmd)) + + stats['status']='OK' + + def register_existingbackups(self): + """scan backup dir and insert stats in database""" + + registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))] + + filelist = os.listdir(self.backup_dir) + filelist.sort() + p = re.compile('^%s-(?P\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).sql.gz$' % self.db_name) + for item in filelist: + sr = p.match(item) + if sr: + file_name = os.path.join(self.backup_dir,item) + start = datetime.datetime.strptime(sr.groups()[0],'%Y%m%d-%Hh%Mm%S').isoformat() + if not file_name in registered: + self.logger.info('Registering %s from %s',file_name,fileisodate(file_name)) + size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split('\t')[0]) + self.logger.debug(' Size in bytes : %i',size_bytes) + if not self.dry_run: + self.dbstat.add(self.backup_name,self.server_name,'',\ + backup_start=start,backup_end=fileisodate(file_name),status='OK',total_bytes=size_bytes,backup_location=file_name) + else: + self.logger.info('Skipping %s from %s, already registered',file_name,fileisodate(file_name)) + +register_driver(backup_pgsql) diff --git a/libtisbackup/backup_rdiff.py b/libtisbackup/backup_rdiff.py new file mode 100644 index 0000000..746332e --- /dev/null +++ b/libtisbackup/backup_rdiff.py @@ -0,0 +1,127 @@ +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . 
+# +# ----------------------------------------------------------------------- + + +import os +import datetime +from common import * +import time + +class backup_rdiff: + backup_dir='' + backup_start_date=None + backup_name='' + server_name='' + exclude_list='' + ssh_port='22' + remote_user='root' + remote_dir='' + dest_dir='' + verbose = False + dry_run=False + + + + def __init__(self, backup_name, backup_base_dir): + self.backup_dir = backup_base_dir + '/' + backup_name + + if os.path.isdir(self.backup_dir )==False: + os.makedirs(self.backup_dir) + + self.backup_name = backup_name + t = datetime.datetime.now() + self.backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S') + + def get_latest_backup(self): + filelist = os.listdir(self.backup_dir) + if len(filelist) == 0: + return '' + + filelist.sort() + + return filelist[-1] + + def cleanup_backup(self): + filelist = os.listdir(self.backup_dir) + if len(filelist) == 0: + return '' + + filelist.sort() + for backup_date in filelist: + today = time.time() + print backup_date + datestring = backup_date[0:8] + c = time.strptime(datestring,"%Y%m%d") + # TODO: improve + if today - c < 60 * 60 * 24* 30: + print time.strftime("%Y%m%d",c) + " is to be deleted" + + + def copy_latest_to_new(self): + # TODO check that latest exist + # TODO check that new does not exist + + + last_backup = self.get_latest_backup() + if last_backup=='': + print "*********************************" + print "*first backup for " + self.backup_name + else: + latest_backup_path = self.backup_dir + '/' + last_backup + new_backup_path = self.backup_dir + '/' + self.backup_start_date + print "#cp -al starting" + cmd = 'cp -al ' + latest_backup_path + ' ' + new_backup_path + print cmd + if self.dry_run==False: + call_external_process(cmd) + print "#cp -al finished" + + + def rsync_to_new(self): + + self.dest_dir = self.backup_dir + '/' + self.backup_start_date + '/' + src_server = self.remote_user + '@' + self.server_name + ':"' + self.remote_dir.strip() + '/"' + + print "#starting rsync" + verbose_arg="" + if self.verbose==True: + verbose_arg = "-P " + + cmd = "rdiff-backup " + verbose_arg + ' --compress-level=9 --numeric-ids -az --partial -e "ssh -o StrictHostKeyChecking=no -c Blowfish -p ' + self.ssh_port + ' -i ' + self.private_key + '" --stats --delete-after ' + self.exclude_list + ' ' + src_server + ' ' + self.dest_dir + print cmd + + ## deal with exit code 24 (file vanished) + if self.dry_run==False: + p = subprocess.call(cmd, shell=True) + if (p ==24): + print "Note: some files vanished before transfer" + if (p != 0 and p != 24 ): + raise Exception('shell program exited with error code ' + str(p), cmd) + + + print "#finished rsync" + + def process_backup(self): + print "" + print "#========Starting backup item =========" + self.copy_latest_to_new() + + self.rsync_to_new() + print "#========Backup item finished==========" + + diff --git a/libtisbackup/backup_rsync.py b/libtisbackup/backup_rsync.py new file mode 100644 index 0000000..3a6df87 --- /dev/null +++ b/libtisbackup/backup_rsync.py @@ -0,0 +1,334 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . +# +# ----------------------------------------------------------------------- + +import os +import datetime +from common import * +import time +import logging +import re +import os.path +import datetime + + +class backup_rsync(backup_generic): + """Backup a directory on remote server with rsync and rsync protocol (requires running remote rsync daemon)""" + type = 'rsync' + required_params = backup_generic.required_params + ['remote_user','remote_dir','rsync_module','password_file'] + optional_params = backup_generic.optional_params + ['compressionlevel','compression','bwlimit','exclude_list','protect_args','overload_args'] + + remote_user='root' + remote_dir='' + + exclude_list='' + rsync_module='' + password_file = '' + compression = '' + bwlimit = 0 + protect_args = '1' + overload_args = None + compressionlevel = 0 + + + + def read_config(self,iniconf): + assert(isinstance(iniconf,ConfigParser)) + backup_generic.read_config(self,iniconf) + if not self.bwlimit and iniconf.has_option('global','bw_limit'): + self.bwlimit = iniconf.getint('global','bw_limit') + if not self.compressionlevel and iniconf.has_option('global','compression_level'): + self.compressionlevel = iniconf.getint('global','compression_level') + + def do_backup(self,stats): + if not self.set_lock(): + self.logger.error("[%s] a lock file is set, a backup maybe already running!!",self.backup_name) + return False + + try: + try: + backup_source = 'undefined' + dest_dir = os.path.join(self.backup_dir,self.backup_start_date+'.rsync/') + if not os.path.isdir(dest_dir): + if not self.dry_run: + os.makedirs(dest_dir) + else: + print 'mkdir "%s"' % dest_dir + else: + raise Exception('backup destination directory already exists : %s' % dest_dir) + + options = ['-rt','--stats','--delete-excluded','--numeric-ids','--delete-after'] + if self.logger.level: + options.append('-P') + + if self.dry_run: + options.append('-d') + + if self.overload_args <> None: + options.append(self.overload_args) + elif not "cygdrive" in self.remote_dir: + # we don't preserve owner, group, links, hardlinks, perms for windows/cygwin as it is not reliable nor useful + options.append('-lpgoD') + + # the protect-args option is not available in all rsync version + if not self.protect_args.lower() in ('false','no','0'): + options.append('--protect-args') + + if self.compression.lower() in ('true','yes','1'): + options.append('-z') + + if self.compressionlevel: + options.append('--compress-level=%s' % self.compressionlevel) + + if self.bwlimit: + options.append('--bwlimit %s' % self.bwlimit) + + latest = self.get_latest_backup(self.backup_start_date) + if latest: + options.extend(['--link-dest="%s"' % os.path.join('..',b,'') for b in latest]) + + def strip_quotes(s): + if s[0] == '"': + s = s[1:] + if s[-1] == '"': + s = s[:-1] + return s + + # Add excludes + if "--exclude" in self.exclude_list: + # old settings with exclude_list=--exclude toto --exclude=titi + excludes = [strip_quotes(s).strip() for s in self.exclude_list.replace('--exclude=','').replace('--exclude ','').split()] + else: + try: + # newsettings with exclude_list='too','titi', parsed as a str python list content + excludes = eval('[%s]' % 
self.exclude_list) + except Exception,e: + raise Exception('Error reading exclude list : value %s, eval error %s (don\'t forget quotes and comma...)' % (self.exclude_list,e)) + options.extend(['--exclude="%s"' % x for x in excludes]) + + if (self.rsync_module and not self.password_file): + raise Exception('You must specify a password file if you specify a rsync module') + + if (not self.rsync_module and not self.private_key): + raise Exception('If you don''t use SSH, you must specify a rsync module') + + #rsync_re = re.compile('(?P[^:]*)::(?P[^/]*)/(?P.*)') + #ssh_re = re.compile('((?P.*)@)?(?P[^:]*):(?P/.*)') + + # Add ssh connection params + if self.rsync_module: + # Case of rsync exports + if self.password_file: + options.append('--password-file="%s"' % self.password_file) + backup_source = '%s@%s::%s%s' % (self.remote_user, self.server_name, self.rsync_module, self.remote_dir) + else: + # case of rsync + ssh + ssh_params = ['-o StrictHostKeyChecking=no','-c blowfish'] + if self.private_key: + ssh_params.append('-i %s' % self.private_key) + if self.ssh_port <> 22: + ssh_params.append('-p %i' % self.ssh_port) + options.append('-e "/usr/bin/ssh %s"' % (" ".join(ssh_params))) + backup_source = '%s@%s:%s' % (self.remote_user,self.server_name,self.remote_dir) + + # ensure there is a slash at end + if backup_source[-1] <> '/': + backup_source += '/' + + options_params = " ".join(options) + + cmd = '/usr/bin/rsync %s %s %s 2>&1' % (options_params,backup_source,dest_dir) + self.logger.debug("[%s] rsync : %s",self.backup_name,cmd) + + if not self.dry_run: + self.line = '' + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) + def ondata(data,context): + if context.verbose: + print data + context.logger.debug(data) + + log = monitor_stdout(process,ondata,self) + + for l in log.splitlines(): + if l.startswith('Number of files:'): + stats['total_files_count'] += int(l.split(':')[1]) + if l.startswith('Number of files transferred:'): + stats['written_files_count'] += int(l.split(':')[1]) + if l.startswith('Total file size:'): + stats['total_bytes'] += int(l.split(':')[1].split()[0]) + if l.startswith('Total transferred file size:'): + stats['written_bytes'] += int(l.split(':')[1].split()[0]) + + returncode = process.returncode + ## deal with exit code 24 (file vanished) + if (returncode == 24): + self.logger.warning("[" + self.backup_name + "] Note: some files vanished before transfer") + elif (returncode == 23): + self.logger.warning("[" + self.backup_name + "] unable so set uid on some files") + elif (returncode != 0): + self.logger.error("[" + self.backup_name + "] shell program exited with error code ") + raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd) + else: + print cmd + + #we suppress the .rsync suffix if everything went well + finaldest = os.path.join(self.backup_dir,self.backup_start_date) + self.logger.debug("[%s] renaming target directory from %s to %s" ,self.backup_name,dest_dir,finaldest) + if not self.dry_run: + os.rename(dest_dir, finaldest) + self.logger.debug("[%s] touching datetime of target directory %s" ,self.backup_name,finaldest) + print os.popen('touch "%s"' % finaldest).read() + else: + print "mv" ,dest_dir,finaldest + stats['backup_location'] = finaldest + stats['status']='OK' + stats['log']='ssh+rsync backup from %s OK, %d bytes written for %d changed files' % (backup_source,stats['written_bytes'],stats['written_files_count']) + + except BaseException , e: 
+            stats['status']='ERROR'
+            stats['log']=str(e)
+            raise
+
+        finally:
+            self.remove_lock()
+
+    def get_latest_backup(self,current):
+        result = []
+        filelist = os.listdir(self.backup_dir)
+        filelist.sort()
+        filelist.reverse()
+        full = ''
+        r_full = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
+        r_partial = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}.rsync$')
+        # we take all latest partials younger than the latest full and the latest full
+        for item in filelist:
+            if r_partial.match(item) and item<current:
+                result.append(item)
+            elif r_full.match(item):
+                result.append(item)
+                break
+        return result
+
+    def register_existingbackups(self):
+        """scan backup dir and insert stats in database"""
+        registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))]
+        filelist = os.listdir(self.backup_dir)
+        filelist.sort()
+        p = re.compile('^\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}$')
+        for item in filelist:
+            if p.match(item):
+                dir_name = os.path.join(self.backup_dir,item)
+                if not dir_name in registered:
+                    start = datetime.datetime.strptime(item,'%Y%m%d-%Hh%Mm%S').isoformat()
+                    if fileisodate(dir_name)>start:
+                        stop = fileisodate(dir_name)
+                    else:
+                        stop = start
+                    self.logger.info('Registering %s started on %s',dir_name,start)
+                    self.logger.debug('  Disk usage %s','du -sb "%s"' % dir_name)
+                    if not self.dry_run:
+                        size_bytes = int(os.popen('du -sb "%s"' % dir_name).read().split('\t')[0])
+                    else:
+                        size_bytes = 0
+                    self.logger.debug('  Size in bytes : %i',size_bytes)
+                    if not self.dry_run:
+                        self.dbstat.add(self.backup_name,self.server_name,'',\
+                            backup_start=start,backup_end = stop,status='OK',total_bytes=size_bytes,backup_location=dir_name)
+                else:
+                    self.logger.info('Skipping %s, already registered',dir_name)
+
+
+    def is_pid_still_running(self,lockfile):
+        f = open(lockfile)
+        lines = f.readlines()
+        f.close()
+        if len(lines)==0:
+            self.logger.info("[" + self.backup_name + "] empty lock file, removing...")
+            return False
+
+        for line in lines:
+            if line.startswith('pid='):
+                pid = line.split('=')[1].strip()
+                if os.path.exists("/proc/" + pid):
+                    self.logger.info("[" + self.backup_name + "] process still there")
+                    return True
+                else:
+                    self.logger.info("[" + self.backup_name + "] process not there anymore, removing lock")
+                    return False
+            else:
+                self.logger.info("[" + self.backup_name + "] incorrect lock file : no pid line")
+                return False
+
+
+    def set_lock(self):
+        self.logger.debug("[" + self.backup_name + "] setting lock")
+
+        #TODO: improve for race condition
+        #TODO: also check if process is really there
+        if os.path.isfile(self.backup_dir + '/lock'):
+            self.logger.debug("[" + self.backup_name + "] File " + self.backup_dir + '/lock already exists')
+            if self.is_pid_still_running(self.backup_dir + '/lock')==False:
+                self.logger.info("[" + self.backup_name + "] removing lock file " + self.backup_dir + '/lock')
+                os.unlink(self.backup_dir + '/lock')
+            else:
+                return False
+
+        lockfile = open(self.backup_dir + '/lock',"w")
+        # Write all the lines at once:
+        lockfile.write('pid='+str(os.getpid()))
+        lockfile.write('\nbackup_time=' + self.backup_start_date)
+        lockfile.close()
+        return True
+
+    def remove_lock(self):
+        self.logger.debug("[%s] removing lock",self.backup_name )
+        os.unlink(self.backup_dir + '/lock')
+
+class backup_rsync_ssh(backup_rsync):
+    """Backup a directory on remote server with rsync and ssh protocol (requires rsync software on remote host)"""
+    type = 'rsync+ssh'
+    required_params = backup_generic.required_params + ['remote_user','remote_dir','private_key']
+    optional_params = backup_generic.optional_params + ['compression','bwlimit','ssh_port','exclude_list','protect_args','overload_args']
+
+
+register_driver(backup_rsync)
+register_driver(backup_rsync_ssh)
+
+if __name__=='__main__':
+    logger = logging.getLogger('tisbackup')
+    logger.setLevel(logging.DEBUG)
+    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+    handler = logging.StreamHandler()
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+    cp = ConfigParser()
+    cp.read('/opt/tisbackup/configtest.ini')
+    dbstat = BackupStat('/backup/data/log/tisbackup.sqlite')
+    b = 
backup_rsync('htouvet','/backup/data/htouvet',dbstat) + b.read_config(cp) + b.process_backup() + print b.checknagios() + diff --git a/libtisbackup/backup_switch.py b/libtisbackup/backup_switch.py new file mode 100644 index 0000000..0309651 --- /dev/null +++ b/libtisbackup/backup_switch.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . +# +# ----------------------------------------------------------------------- + +import os +import datetime +from common import * +import XenAPI +import time +import logging +import re +import os.path +import datetime +import select +import urllib2, urllib +import base64 +import socket +import pexpect +from stat import * + + +class backup_switch(backup_generic): + """Backup a startup-config on a switch""" + type = 'switch' + + required_params = backup_generic.required_params + ['switch_ip','switch_user' , 'switch_type'] + optional_params = backup_generic.optional_params + ['switch_password'] + + def switch_hp(self, filename): + + s = socket.socket() + try: + s.connect((self.switch_ip, 23)) + s.close() + except: + raise + + child=pexpect.spawn('telnet '+self.switch_ip) + time.sleep(1) + if self.switch_user != "": + child.sendline(self.switch_user) + child.sendline(self.switch_password+'\r') + else: + child.sendline(self.switch_password+'\r') + try: + child.expect("#") + except: + raise Exception("Bad Credentials") + child.sendline( "terminal length 1000\r") + child.expect("#") + child.sendline( "show config\r") + child.maxread = 100000000 + child.expect("Startup.+$") + lines = child.after + if "-- MORE --" in lines: + raise Exception("Terminal lenght is not sufficient") + child.expect("#") + lines += child.before + child.sendline("logout\r") + child.send('y\r') + for line in lines.split("\n")[1:-1]: + open(filename,"a").write(line.strip()+"\n") + + def switch_linksys_SRW2024(self, filename): + s = socket.socket() + try: + s.connect((self.switch_ip, 23)) + s.close() + except: + raise + + child=pexpect.spawn('telnet '+self.switch_ip) + time.sleep(1) + if hasattr(self,'switch_password'): + child.sendline(self.switch_user+'\t') + child.sendline(self.switch_password+'\r') + else: + child.sendline(self.switch_user+'\r') + try: + child.expect('Menu') + except: + raise Exception("Bad Credentials") + child.sendline('\032') + child.expect('>') + child.sendline('lcli') + child.expect("Name:") + if hasattr(self,'switch_password'): + child.send(self.switch_user+'\r'+self.switch_password+'\r') + else: + child.sendline(self.switch_user) + child.expect(".*#") + child.sendline( "terminal datadump") + child.expect("#") + child.sendline( "show startup-config") + child.expect("#") + lines = child.before + if "Unrecognized command" in lines: + raise Exception("Bad Credentials") + child.sendline("exit") + child.expect( ">") + child.sendline("logout") + for line in 
lines.split("\n")[1:-1]: + open(filename,"a").write(line.strip()+"\n") + + + def switch_dlink_DGS1210(self, filename): + login_data = urllib.urlencode({'Login' : self.switch_user, 'Password' : self.switch_password, 'currlang' : 0, 'BrowsingPage' : 'index_dlink.htm', 'changlang' : 0}) + req = urllib2.Request('http://%s/' % self.switch_ip, login_data) + resp = urllib2.urlopen(req) + if "Wrong password" in resp.read(): + raise Exception("Wrong password") + resp = urllib2.urlopen("http://%s/config.bin?Gambit=gdkdcdgdidbdkdadkdbgegngjgogkdbgegngjgog&dumy=1348649950256" % self.switch_ip) + f = open(filename, 'w') + f.write(resp.read()) + + + def do_backup(self,stats): + try: + dest_filename = os.path.join(self.backup_dir,"%s-%s" % (self.backup_name,self.backup_start_date)) + + options = [] + options_params = " ".join(options) + if "LINKSYS-SRW2024" == self.switch_type: + dest_filename += '.txt' + self.switch_linksys_SRW2024(dest_filename) + elif self.switch_type in [ "HP-PROCURVE-4104GL", "HP-PROCURVE-2524" ]: + dest_filename += '.txt' + self.switch_hp(dest_filename) + elif "DLINK-DGS1210" == self.switch_type: + dest_filename += '.bin' + self.switch_dlink_DGS1210(dest_filename) + else: + raise Exception("Unknown Switch type") + + stats['total_files_count']=1 + stats['written_files_count']=1 + stats['total_bytes']= os.stat(dest_filename).st_size + stats['written_bytes'] = stats['total_bytes'] + stats['backup_location'] = dest_filename + stats['status']='OK' + stats['log']='Switch backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes']) + + + except BaseException , e: + stats['status']='ERROR' + stats['log']=str(e) + raise + + + +register_driver(backup_switch) + +if __name__=='__main__': + logger = logging.getLogger('tisbackup') + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + cp = ConfigParser() + cp.read('/opt/tisbackup/configtest.ini') + b = backup_xva() + b.read_config(cp) + diff --git a/libtisbackup/backup_xcp_metadata.py b/libtisbackup/backup_xcp_metadata.py new file mode 100644 index 0000000..86f3acd --- /dev/null +++ b/libtisbackup/backup_xcp_metadata.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . 
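+#
+# -----------------------------------------------------------------------
+# A minimal standalone sketch of the two steps do_backup chains below,
+# dump then gzip (the host name and credentials here are placeholders):
+#
+#   import subprocess
+#   subprocess.check_call('/opt/xensource/bin/xe -s xcp1 -u root -pw secret '
+#                         'pool-dump-database file-name=/tmp/pool-metadata.dump',
+#                         shell=True)
+#   subprocess.check_call('gzip /tmp/pool-metadata.dump', shell=True)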
+# +# ----------------------------------------------------------------------- + + + +import sys +import shutil + +import datetime +import base64 +import os +from common import * + +class backup_xcp_metadata(backup_generic): + """Backup metatdata of a xcp pool using xe pool-dump-database""" + type = 'xcp-dump-metadata' + required_params = ['type','server_name','xcp_user','xcp_passwd','backup_name'] + xcp_user='' + xcp_passwd='' + + def do_backup(self,stats): + + self.logger.debug('[%s] Connecting to %s with user root and key %s',self.backup_name,self.server_name,self.private_key) + + if os.path.isfile('/opt/xensource/bin/xe') == False: + raise Exception('Aborting, /opt/xensource/bin/xe binary not present"') + + + t = datetime.datetime.now() + backup_start_date = t.strftime('%Y%m%d-%Hh%Mm%S') + + # dump pool medatadata + localpath = os.path.join(self.backup_dir , 'xcp_metadata-' + backup_start_date + '.dump.gz') + temppath = '/tmp/xcp_metadata-' + backup_start_date + '.dump' + + stats['status']='Dumping' + + if not self.dry_run: + cmd = "/opt/xensource/bin/xe -s %s -u %s -pw %s pool-dump-database file-name=%s" %(self.server_name,self.xcp_user,self.xcp_passwd,temppath) + self.logger.debug('[%s] Dump XCP Metadata : %s',self.backup_name,cmd) + call_external_process(cmd) + + + # zip the file + stats['status']='Zipping' + cmd = 'gzip %s ' %temppath + self.logger.debug('[%s] Compress backup : %s',self.backup_name,cmd) + if not self.dry_run: + call_external_process(cmd) + + # get the file + stats['status']='move to backup directory' + self.logger.debug('[%s] Moving temp backup file %s to backup new path %s',self.backup_name,self.server_name,localpath) + if not self.dry_run: + shutil.move (temppath + '.gz' ,localpath) + + if not self.dry_run: + stats['total_files_count']=1 + stats['written_files_count']=1 + stats['total_bytes']=os.stat(localpath).st_size + stats['written_bytes']=os.stat(localpath).st_size + stats['log']='gzip dump of DB %s:%s (%d bytes) to %s' % (self.server_name,'xcp metadata dump', stats['written_bytes'], localpath) + stats['backup_location'] = localpath + stats['status']='OK' + + + + def register_existingbackups(self): + """scan metatdata backup files and insert stats in database""" + + registered = [b['backup_location'] for b in self.dbstat.query('select distinct backup_location from stats where backup_name=?',(self.backup_name,))] + + filelist = os.listdir(self.backup_dir) + filelist.sort() + p = re.compile('^%s-(?P\d{8,8}-\d{2,2}h\d{2,2}m\d{2,2}).dump.gz$' % self.server_name) + for item in filelist: + sr = p.match(item) + if sr: + file_name = os.path.join(self.backup_dir,item) + start = datetime.datetime.strptime(sr.groups()[0],'%Y%m%d-%Hh%Mm%S').isoformat() + if not file_name in registered: + self.logger.info('Registering %s from %s',file_name,fileisodate(file_name)) + size_bytes = int(os.popen('du -sb "%s"' % file_name).read().split('\t')[0]) + self.logger.debug(' Size in bytes : %i',size_bytes) + if not self.dry_run: + self.dbstat.add(self.backup_name,self.server_name,'',\ + backup_start=start,backup_end=fileisodate(file_name),status='OK',total_bytes=size_bytes,backup_location=file_name) + else: + self.logger.info('Skipping %s from %s, already registered',file_name,fileisodate(file_name)) + +register_driver(backup_xcp_metadata) diff --git a/libtisbackup/backup_xva.py b/libtisbackup/backup_xva.py new file mode 100755 index 0000000..2b3758f --- /dev/null +++ b/libtisbackup/backup_xva.py @@ -0,0 +1,165 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# 
-----------------------------------------------------------------------
+# This file is part of TISBackup
+#
+# TISBackup is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# TISBackup is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TISBackup.  If not, see <http://www.gnu.org/licenses/>.
+#
+# -----------------------------------------------------------------------
+
+import os
+import os.path
+import datetime
+import time
+import logging
+import re
+import select
+import urllib2
+import base64
+import socket
+from stat import *
+from common import *
+import XenAPI
+
+
+class backup_xva(backup_generic):
+    """Backup a VM running on a XCP server as a XVA file (requires xe tools and XenAPI)"""
+    type = 'xen-xva'
+
+    required_params = backup_generic.required_params + ['xcphost','password_file','server_name']
+    optional_params = backup_generic.optional_params + ['excluded_vbds','remote_user','private_key']
+
+    def export_xva(self, vdi_name, filename, dry_run):
+
+        user_xen, password_xen, null = open(self.password_file).read().split('\n')
+        session = XenAPI.Session('https://'+self.xcphost)
+        try:
+            session.login_with_password(user_xen,password_xen)
+        except XenAPI.Failure, error:
+            msg,ip = error.details
+
+            if msg == 'HOST_IS_SLAVE':
+                xcphost = ip
+                session = XenAPI.Session('https://'+xcphost)
+                session.login_with_password(user_xen,password_xen)
+
+        vm = session.xenapi.VM.get_by_name_label(vdi_name)[0]
+        status_vm = session.xenapi.VM.get_power_state(vm)
+
+        self.logger.debug("[%s] Status of VM: %s",self.backup_name,status_vm)
+        if status_vm == "Running":
+            self.logger.debug("[%s] Shutdown in progress",self.backup_name)
+            if dry_run:
+                print "session.xenapi.VM.clean_shutdown(vm)"
+            else:
+                session.xenapi.VM.clean_shutdown(vm)
+
+        try:
+            try:
+                self.logger.debug("[%s] Copy in progress",self.backup_name)
+
+                socket.setdefaulttimeout(120)
+                auth = base64.encodestring("%s:%s" % (user_xen, password_xen)).strip()
+                url = "https://"+self.xcphost+"/export?uuid="+session.xenapi.VM.get_uuid(vm)
+                request = urllib2.Request(url)
+                request.add_header("Authorization", "Basic %s" % auth)
+                result = urllib2.urlopen(request)
+
+                if dry_run:
+                    print "request = urllib2.Request(%s)" % url
+                    print 'outputfile = open(%s, "wb")' % filename
+                else:
+                    outputfile = open(filename, "wb")
+                    for line in result:
+                        outputfile.write(line)
+                    outputfile.close()
+
+            except:
+                if os.path.exists(filename):
+                    os.unlink(filename)
+                raise
+
+        finally:
+            if status_vm == "Running":
+                self.logger.debug("[%s] Starting in progress",self.backup_name)
+                if dry_run:
+                    print "session.xenapi.Async.VM.start(vm,False,True)"
+                else:
+                    session.xenapi.Async.VM.start(vm,False,True)
+
+        session.logout()
+
+        if os.path.exists(filename):
+            import tarfile
+            tar = tarfile.open(filename)
+            if not tar.getnames():
+                tar.close()
+                os.unlink(filename)
+                return("Tar error")
+            tar.close()
+
+        return(0)
+
+
+    def do_backup(self,stats):
+        try:
+            dest_filename = os.path.join(self.backup_dir,"%s-%s.%s" % (self.backup_name,self.backup_start_date,'xva'))
+
+            options = []
+            options_params = " ".join(options)
+            cmd = self.export_xva( 
self.server_name, dest_filename, self.dry_run) + if os.path.exists(dest_filename): + stats['written_bytes'] = os.stat(dest_filename)[ST_SIZE] + stats['total_files_count'] = 1 + stats['written_files_count'] = 1 + stats['total_bytes'] = stats['written_bytes'] + else: + stats['written_bytes'] = 0 + + stats['backup_location'] = dest_filename + if cmd == 0: + stats['log']='XVA backup from %s OK, %d bytes written' % (self.server_name,stats['written_bytes']) + stats['status']='OK' + else: + stats['status']='ERROR' + stats['log']=cmd + + + + except BaseException , e: + stats['status']='ERROR' + stats['log']=str(e) + raise + + + +register_driver(backup_xva) + +if __name__=='__main__': + logger = logging.getLogger('tisbackup') + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + cp = ConfigParser() + cp.read('/opt/tisbackup/configtest.ini') + b = backup_xva() + b.read_config(cp) + diff --git a/libtisbackup/common.py b/libtisbackup/common.py new file mode 100644 index 0000000..7d545f9 --- /dev/null +++ b/libtisbackup/common.py @@ -0,0 +1,909 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# TISBackup is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with TISBackup. If not, see . 
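+#
+# -----------------------------------------------------------------------
+# This module defines the backup_generic base class and the driver
+# registry every backup_* module plugs into.  A minimal sketch of the
+# contract (backup_demo is a made-up example; register_driver,
+# backup_drivers and do_backup are the real names defined below):
+#
+#   class backup_demo(backup_generic):
+#       type = 'demo'
+#       def do_backup(self, stats):
+#           stats['status'] = 'OK'   # drivers fill the stats dict in place
+#   register_driver(backup_demo)
+#   # tisbackup.py can then resolve an ini section's 'type' to a class:
+#   driverclass = backup_drivers['demo']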
+# +# ----------------------------------------------------------------------- + +import os +import subprocess +import re +import logging +import datetime +import time +from iniparse import ConfigParser +import sqlite3 +import shutil +import select + +import sys + +try: + sys.stderr = open('/dev/null') # Silence silly warnings from paramiko + import paramiko +except ImportError,e: + print "Error : can not load paramiko library %s" % e + raise + +sys.stderr = sys.__stderr__ + +nagiosStateOk = 0 +nagiosStateWarning = 1 +nagiosStateCritical = 2 +nagiosStateUnknown = 3 + +backup_drivers = {} +def register_driver(driverclass): + backup_drivers[driverclass.type] = driverclass + +def datetime2isodate(adatetime=None): + if not adatetime: + adatetime = datetime.datetime.now() + assert(isinstance(adatetime,datetime.datetime)) + return adatetime.isoformat() + +def isodate2datetime(isodatestr): + # we remove the microseconds part as it is not working for python2.5 strptime + return datetime.datetime.strptime(isodatestr.split('.')[0] , "%Y-%m-%dT%H:%M:%S") + +def time2display(adatetime): + return adatetime.strftime("%Y-%m-%d %H:%M") + +def hours_minutes(hours): + if hours is None: + return None + else: + return "%02i:%02i" % ( int(hours) , int((hours - int(hours)) * 60.0)) + +def fileisodate(filename): + return datetime.datetime.fromtimestamp(os.stat(filename).st_mtime).isoformat() + +def dateof(adatetime): + return adatetime.replace(hour=0,minute=0,second=0,microsecond=0) + +##################################### +# http://code.activestate.com/recipes/498181-add-thousands-separator-commas-to-formatted-number/ +# Code from Michael Robellard's comment made 28 Feb 2010 +# Modified for leading +, -, space on 1 Mar 2010 by Glenn Linderman +# +# Tail recursion removed and leading garbage handled on March 12 2010, Alessandro Forghieri +def splitThousands( s, tSep=',', dSep='.'): + '''Splits a general float on thousands. GIGO on general input''' + if s == None: + return 0 + if not isinstance( s, str ): + s = str( s ) + + cnt=0 + numChars=dSep+'0123456789' + ls=len(s) + while cnt < ls and s[cnt] not in numChars: cnt += 1 + + lhs = s[ 0:cnt ] + s = s[ cnt: ] + if dSep == '': + cnt = -1 + else: + cnt = s.rfind( dSep ) + if cnt > 0: + rhs = dSep + s[ cnt+1: ] + s = s[ :cnt ] + else: + rhs = '' + + splt='' + while s != '': + splt= s[ -3: ] + tSep + splt + s = s[ :-3 ] + + return lhs + splt[ :-1 ] + rhs + + +def call_external_process(shell_string): + p = subprocess.call(shell_string, shell=True) + if (p != 0 ): + raise Exception('shell program exited with error code ' + str(p), shell_string) + +def check_string(test_string): + pattern = r'[^\.A-Za-z0-9\-_]' + if re.search(pattern, test_string): + #Character other then . 
a-z 0-9 was found
+        print 'Invalid : %r' % (test_string,)
+
+def convert_bytes(bytes):
+    if bytes is None:
+        return None
+    else:
+        bytes = float(bytes)
+        if bytes >= 1099511627776:
+            terabytes = bytes / 1099511627776
+            size = '%.2fT' % terabytes
+        elif bytes >= 1073741824:
+            gigabytes = bytes / 1073741824
+            size = '%.2fG' % gigabytes
+        elif bytes >= 1048576:
+            megabytes = bytes / 1048576
+            size = '%.2fM' % megabytes
+        elif bytes >= 1024:
+            kilobytes = bytes / 1024
+            size = '%.2fK' % kilobytes
+        else:
+            size = '%.2fb' % bytes
+        return size
+
+## {{{ http://code.activestate.com/recipes/81189/ (r2)
+def pp(cursor, data=None, rowlens=0, callback=None):
+    """
+    pretty print a query result as a table
+    callback is a function called for each field (fieldname,value) to format the output
+    """
+    def defaultcb(fieldname,value):
+        return value
+
+    if not callback:
+        callback = defaultcb
+
+    d = cursor.description
+    if not d:
+        return "#### NO RESULTS ###"
+    names = []
+    lengths = []
+    rules = []
+    if not data:
+        data = cursor.fetchall()
+    for dd in d:    # iterate over description
+        l = dd[1]
+        if not l:
+            l = 12             # or default arg ...
+        l = max(l, len(dd[0])) # handle long names
+        names.append(dd[0])
+        lengths.append(l)
+    for col in range(len(lengths)):
+        if rowlens:
+            rls = [len(str(callback(d[col][0],row[col]))) for row in data if row[col]]
+            lengths[col] = max([lengths[col]]+rls)
+        rules.append("-"*lengths[col])
+    format = " ".join(["%%-%ss" % l for l in lengths])
+    result = [format % tuple(names)]
+    result.append(format % tuple(rules))
+    for row in data:
+        row_cb=[]
+        for col in range(len(d)):
+            row_cb.append(callback(d[col][0],row[col]))
+        result.append(format % tuple(row_cb))
+    return "\n".join(result)
+## end of http://code.activestate.com/recipes/81189/ }}}
+
+
+def html_table(cur,callback=None):
+    """
+    cur is a cursor from an executed query
+    callback is a function taking (rowmap,fieldname,value)
+    and returning a text representation of the value
+    """
+    def safe_unicode(iso):
+        if iso is None:
+            return None
+        elif isinstance(iso, str):
+            return iso.decode('iso8859')
+        else:
+            return iso
+
+    def itermap(cur):
+        for row in cur:
+            yield dict((cur.description[idx][0], value)
+                       for idx, value in enumerate(row))
+
+    head=u"<tr>"+"".join(["<th>"+c[0]+"</th>" for c in cur.description])+"</tr>"
+    lines=""
+    if callback:
+        for r in itermap(cur):
+            lines=lines+"<tr>"+"".join(["<td>"+str(callback(r,c[0],safe_unicode(r[c[0]])))+"</td>" for c in cur.description])+"</tr>"
+    else:
+        for r in cur:
+            lines=lines+"<tr>"+"".join(["<td>"+safe_unicode(c)+"</td>" for c in r])+"</tr>"
+
+    return "<table border=1 cellpadding=2 cellspacing=0>%s%s</table>
" % (head,lines) + + + +def monitor_stdout(aprocess, onoutputdata,context): + """Reads data from stdout and stderr from aprocess and return as a string + on each chunk, call a call back onoutputdata(dataread) + """ + assert(isinstance(aprocess,subprocess.Popen)) + read_set = [] + stdout = [] + line = '' + + if aprocess.stdout: + read_set.append(aprocess.stdout) + if aprocess.stderr: + read_set.append(aprocess.stderr) + + while read_set: + try: + rlist, wlist, xlist = select.select(read_set, [], []) + except select.error, e: + if e.args[0] == errno.EINTR: + continue + raise + + # Reads one line from stdout + if aprocess.stdout in rlist: + data = os.read(aprocess.stdout.fileno(), 1) + if data == "": + aprocess.stdout.close() + read_set.remove(aprocess.stdout) + while data and not data in ('\n','\r'): + line += data + data = os.read(aprocess.stdout.fileno(), 1) + if line or data in ('\n','\r'): + stdout.append(line) + if onoutputdata: + onoutputdata(line,context) + line='' + + # Reads one line from stderr + if aprocess.stderr in rlist: + data = os.read(aprocess.stderr.fileno(), 1) + if data == "": + aprocess.stderr.close() + read_set.remove(aprocess.stderr) + while data and not data in ('\n','\r'): + line += data + data = os.read(aprocess.stderr.fileno(), 1) + if line or data in ('\n','\r'): + stdout.append(line) + if onoutputdata: + onoutputdata(line,context) + line='' + + aprocess.wait() + if line: + stdout.append(line) + if onoutputdata: + onoutputdata(line,context) + return "\n".join(stdout) + + +class BackupStat: + dbpath = '' + db = None + logger = logging.getLogger('tisbackup') + + def __init__(self,dbpath): + self.dbpath = dbpath + if not os.path.isfile(self.dbpath): + self.db=sqlite3.connect(self.dbpath) + self.initdb() + else: + self.db=sqlite3.connect(self.dbpath) + if not "'TYPE'" in str(self.db.execute("select * from stats").description): + self.updatedb() + + + def updatedb(self): + self.logger.debug('Update stat database') + self.db.execute("alter table stats add column TYPE TEXT;") + self.db.execute("update stats set TYPE='BACKUP';") + self.db.commit() + + def initdb(self): + assert(isinstance(self.db,sqlite3.Connection)) + self.logger.debug('Initialize stat database') + self.db.execute(""" +create table stats ( + backup_name TEXT, + server_name TEXT, + description TEXT, + backup_start TEXT, + backup_end TEXT, + backup_duration NUMERIC, + total_files_count INT, + written_files_count INT, + total_bytes INT, + written_bytes INT, + status TEXT, + log TEXT, + backup_location TEXT, + TYPE TEXT)""") + self.db.execute(""" +create index idx_stats_backup_name on stats(backup_name);""") + self.db.execute(""" +create index idx_stats_backup_location on stats(backup_location);""") + self.db.commit() + + def start(self,backup_name,server_name,TYPE,description='',backup_location=None): + """ Add in stat DB a record for the newly running backup""" + return self.add(backup_name=backup_name,server_name=server_name,description=description,backup_start=datetime2isodate(),status='Running',TYPE=TYPE) + + def finish(self,rowid,total_files_count=None,written_files_count=None,total_bytes=None,written_bytes=None,log=None,status='OK',backup_end=None,backup_duration=None,backup_location=None): + """ Update record in stat DB for the finished backup""" + if not backup_end: + backup_end = datetime2isodate() + if backup_duration == None: + try: + # get duration using start of backup datetime + backup_duration = (isodate2datetime(backup_end) - isodate2datetime(self.query('select backup_start from stats where 
rowid=?',(rowid,))[0]['backup_start'])).seconds / 3600.0 + except: + backup_duration = None + + # update stat record + self.db.execute("""\ + update stats set + total_files_count=?,written_files_count=?,total_bytes=?,written_bytes=?,log=?,status=?,backup_end=?,backup_duration=?,backup_location=? + where + rowid = ? + """,(total_files_count,written_files_count,total_bytes,written_bytes,log,status,backup_end,backup_duration,backup_location,rowid)) + self.db.commit() + + def add(self, + backup_name='', + server_name='', + description='', + backup_start=None, + backup_end=None, + backup_duration=None, + total_files_count=None, + written_files_count=None, + total_bytes=None, + written_bytes=None, + status='draft', + log='', + TYPE='', + backup_location=None): + if not backup_start: + backup_start=datetime2isodate() + if not backup_end: + backup_end=datetime2isodate() + + cur = self.db.execute("""\ + insert into stats ( + backup_name, + server_name, + description, + backup_start, + backup_end, + backup_duration, + total_files_count, + written_files_count, + total_bytes, + written_bytes, + status, + log, + backup_location, + TYPE) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?) + """,( + backup_name, + server_name, + description, + backup_start, + backup_end, + backup_duration, + total_files_count, + written_files_count, + total_bytes, + written_bytes, + status, + log, + backup_location, + TYPE) + ) + + self.db.commit() + return cur.lastrowid + + def query(self,query, args=(), one=False): + """ + execute la requete query sur la db et renvoie un tableau de dictionnaires + """ + cur = self.db.execute(query, args) + rv = [dict((cur.description[idx][0], value) + for idx, value in enumerate(row)) for row in cur.fetchall()] + return (rv[0] if rv else None) if one else rv + + def last_backups(self,backup_name,count=30): + if backup_name: + cur = self.db.execute('select * from stats where backup_name=? order by backup_end desc limit ?',(backup_name,count)) + else: + cur = self.db.execute('select * from stats order by backup_end desc limit ?',(count,)) + + def fcb(fieldname,value): + if fieldname in ('backup_start','backup_end'): + return time2display(isodate2datetime(value)) + elif 'bytes' in fieldname: + return convert_bytes(value) + elif 'count' in fieldname: + return splitThousands(value,' ','.') + elif 'backup_duration' in fieldname: + return hours_minutes(value) + else: + return value + + #for r in self.query('select * from stats where backup_name=? order by backup_end desc limit ?',(backup_name,count)): + print pp(cur,None,1,fcb) + + + def fcb(self,fields,fieldname,value): + if fieldname in ('backup_start','backup_end'): + return time2display(isodate2datetime(value)) + elif 'bytes' in fieldname: + return convert_bytes(value) + elif 'count' in fieldname: + return splitThousands(value,' ','.') + elif 'backup_duration' in fieldname: + return hours_minutes(value) + else: + return value + + def as_html(self,cur): + if cur: + return html_table(cur,self.fcb) + else: + return html_table(self.db.execute('select * from stats order by backup_start asc'),self.fcb) + + +def ssh_exec(command,ssh=None,server_name='',remote_user='',private_key='',ssh_port=22): + """execute command on server_name using the provided ssh connection + or creates a new connection if ssh is not provided. 
+ returns (exit_code,output) + + output is the concatenation of stdout and stderr + """ + if not ssh: + assert(server_name and remote_user and private_key) + try: + mykey = paramiko.RSAKey.from_private_key_file(private_key) + except paramiko.SSHException: + mykey = paramiko.DSSKey.from_private_key_file(private_key) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(server_name,username=remote_user,pkey = private_key,port=ssh_port) + + tran = ssh.get_transport() + chan = tran.open_session() + + # chan.set_combine_stderr(True) + chan.get_pty() + stdout = chan.makefile() + + chan.exec_command(command) + stdout.flush() + output = stdout.read() + exit_code = chan.recv_exit_status() + return (exit_code,output) + + +class backup_generic: + """Generic ancestor class for backups, not registered""" + type = 'generic' + required_params = ['type','backup_name','backup_dir','server_name','backup_retention_time','maximum_backup_age'] + optional_params = ['preexec','postexec','description','private_key','remote_user','ssh_port'] + + logger = logging.getLogger('tisbackup') + backup_name = '' + backup_dir = '' + server_name = '' + remote_user = 'root' + description = '' + dbstat = None + dry_run = False + preexec = '' + postexec = '' + maximum_backup_age = None + backup_retention_time = None + verbose = False + private_key='' + ssh_port=22 + + def __init__(self,backup_name, backup_dir,dbstat=None,dry_run=False): + if not re.match('^[A-Za-z0-9_\-\.]*$',backup_name): + raise Exception('The backup name %s should contain only alphanumerical characters' % backup_name) + self.backup_name = backup_name + self.backup_dir = backup_dir + + self.dbstat = dbstat + assert(isinstance(self.dbstat,BackupStat) or self.dbstat==None) + + if not os.path.isdir(self.backup_dir): + os.makedirs(self.backup_dir) + + self.dry_run = dry_run + + @classmethod + def get_help(cls): + return """\ +%(type)s : %(desc)s + Required params : %(required)s + Optional params : %(optional)s +""" % {'type':cls.type, + 'desc':cls.__doc__, + 'required':",".join(cls.required_params), + 'optional':",".join(cls.optional_params)} + + def check_required_params(self): + for name in self.required_params: + if not hasattr(self,name) or not getattr(self,name): + raise Exception('[%s] Config Attribute %s is required' % (self.backup_name,name)) + if (self.preexec or self.postexec) and (not self.private_key or not self.remote_user): + raise Exception('[%s] remote_user and private_key file required if preexec or postexec is used' % self.backup_name) + + + def read_config(self,iniconf): + assert(isinstance(iniconf,ConfigParser)) + allowed_params = self.required_params+self.optional_params + for (name,value) in iniconf.items(self.backup_name): + if not name in allowed_params: + self.logger.critical('[%s] Invalid param name "%s"', self.backup_name,name); + raise Exception('[%s] Invalid param name "%s"', self.backup_name,name) + self.logger.debug('[%s] reading param %s = %s ', self.backup_name,name,value) + setattr(self,name,value) + + # if retention (in days) is not defined at section level, get default global one. + if not self.backup_retention_time: + self.backup_retention_time = iniconf.getint('global','backup_retention_time') + + # for nagios, if maximum last backup age (in hours) is not defined at section level, get default global one. 
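+        # For example (made-up values), with an ini file such as:
+        #
+        #   [global]
+        #   backup_retention_time = 30
+        #   maximum_backup_age = 30
+        #
+        #   [backup_home]
+        #   type = rsync+ssh
+        #   ; no retention/maxage here
+        #
+        # the [backup_home] section inherits both values from [global]
+        # through the two fallbacks here.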
+ if not self.maximum_backup_age: + self.maximum_backup_age = iniconf.getint('global','maximum_backup_age') + + self.ssh_port = int(self.ssh_port) + self.backup_retention_time = int(self.backup_retention_time) + self.maximum_backup_age = int(self.maximum_backup_age) + + self.check_required_params() + + + def do_preexec(self,stats): + self.logger.info("[%s] executing preexec %s ",self.backup_name,self.preexec) + try: + mykey = paramiko.RSAKey.from_private_key_file(self.private_key) + except paramiko.SSHException: + mykey = paramiko.DSSKey.from_private_key_file(self.private_key) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(self.server_name,username=self.remote_user,pkey = mykey) + tran = ssh.get_transport() + chan = tran.open_session() + + # chan.set_combine_stderr(True) + chan.get_pty() + stdout = chan.makefile() + + if not self.dry_run: + chan.exec_command(self.preexec) + output = stdout.read() + exit_code = chan.recv_exit_status() + self.logger.info('[%s] preexec exit code : "%i", output : %s',self.backup_name , exit_code, output ) + return exit_code + else: + return 0 + + def do_postexec(self,stats): + self.logger.info("[%s] executing postexec %s ",self.backup_name,self.postexec) + try: + mykey = paramiko.RSAKey.from_private_key_file(self.private_key) + except paramiko.SSHException: + mykey = paramiko.DSSKey.from_private_key_file(self.private_key) + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(self.server_name,username=self.remote_user,pkey = mykey) + tran = ssh.get_transport() + chan = tran.open_session() + + # chan.set_combine_stderr(True) + chan.get_pty() + stdout = chan.makefile() + + if not self.dry_run: + chan.exec_command(self.postexec) + output = stdout.read() + exit_code = chan.recv_exit_status() + self.logger.info('[%s] postexec exit code : "%i", output : %s',self.backup_name , exit_code, output ) + return exit_code + else: + return 0 + + + def do_backup(self,stats): + """stats dict with keys : total_files_count,written_files_count,total_bytes,written_bytes""" + pass + + def check_params_connections(self): + """Perform a dry run trying to connect without actually doing backup""" + self.check_required_params() + + def process_backup(self): + """Process the backup. 
+ launch + - do_preexec + - do_backup + - do_postexec + + returns a dict for stats + """ + self.logger.info('[%s] ######### Starting backup',self.backup_name) + + starttime = time.time() + self.backup_start_date = datetime.datetime.now().strftime('%Y%m%d-%Hh%Mm%S') + + if not self.dry_run and self.dbstat: + stat_rowid = self.dbstat.start(backup_name=self.backup_name,server_name=self.server_name,TYPE="BACKUP") + else: + stat_rowid = None + + try: + stats = {} + stats['total_files_count']=0 + stats['written_files_count']=0 + stats['total_bytes']=0 + stats['written_bytes']=0 + stats['log']='' + stats['status']='Running' + stats['backup_location']=None + + if self.preexec.strip(): + exit_code = self.do_preexec(stats) + if exit_code != 0 : + raise Exception('Preexec "%s" failed with exit code "%i"' % (self.preexec,exit_code)) + + self.do_backup(stats) + + if self.postexec.strip(): + exit_code = self.do_postexec(stats) + if exit_code != 0 : + raise Exception('Postexec "%s" failed with exit code "%i"' % (self.postexec,exit_code)) + + endtime = time.time() + duration = (endtime-starttime)/3600.0 + if not self.dry_run and self.dbstat: + self.dbstat.finish(stat_rowid, + backup_end=datetime2isodate(datetime.datetime.now()), + backup_duration = duration, + total_files_count=stats['total_files_count'], + written_files_count=stats['written_files_count'], + total_bytes=stats['total_bytes'], + written_bytes=stats['written_bytes'], + status=stats['status'], + log=stats['log'], + backup_location=stats['backup_location']) + + self.logger.info('[%s] ######### Backup finished : %s',self.backup_name,stats['log']) + return stats + + except BaseException, e: + stats['status']='ERROR' + stats['log']=str(e) + endtime = time.time() + duration = (endtime-starttime)/3600.0 + if not self.dry_run and self.dbstat: + self.dbstat.finish(stat_rowid, + backup_end=datetime2isodate(datetime.datetime.now()), + backup_duration = duration, + total_files_count=stats['total_files_count'], + written_files_count=stats['written_files_count'], + total_bytes=stats['total_bytes'], + written_bytes=stats['written_bytes'], + status=stats['status'], + log=stats['log'], + backup_location=stats['backup_location']) + + self.logger.error('[%s] ######### Backup finished with ERROR: %s',self.backup_name,stats['log']) + raise + + + def checknagios(self,maxage_hours=30): + """ + Returns a tuple (nagiosstatus,message) for the current backup_name + Read status from dbstat database + """ + if not self.dbstat: + self.logger.warn('[%s] checknagios : no database provided',self.backup_name) + return ('No database provided',nagiosStateUnknown) + else: + self.logger.debug('[%s] checknagios : sql query "%s" %s',self.backup_name,'select status, backup_end, log from stats where TYPE=\'BACKUP\' AND backup_name=? order by backup_end desc limit 30',self.backup_name) + q = self.dbstat.query('select status, backup_start, backup_end, log, backup_location, total_bytes from stats where TYPE=\'BACKUP\' AND backup_name=? 
order by backup_start desc limit 30',(self.backup_name,)) + if not q: + self.logger.debug('[%s] checknagios : no result from query',self.backup_name) + return (nagiosStateCritical,'CRITICAL : No backup found for %s in database' % self.backup_name) + else: + mindate = datetime2isodate((datetime.datetime.now() - datetime.timedelta(hours=maxage_hours))) + self.logger.debug('[%s] checknagios : looking for most recent OK not older than %s',self.backup_name,mindate) + for b in q: + if b['backup_end'] >= mindate and b['status'] == 'OK': + # check if backup actually exists on registered backup location and is newer than backup start date + if b['total_bytes'] == 0: + return (nagiosStateWarning,"WARNING : No data to backup was found for %s" % (self.backup_name,)) + + if not b['backup_location']: + return (nagiosStateWarning,"WARNING : No Backup location found for %s finished on (%s) %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'])) + + if os.path.isfile(b['backup_location']): + backup_actual_date = datetime.datetime.fromtimestamp(os.stat(b['backup_location']).st_ctime) + if backup_actual_date + datetime.timedelta(hours = 1) > isodate2datetime(b['backup_start']): + return (nagiosStateOk,"OK Backup %s (%s), %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'])) + else: + return (nagiosStateCritical,"CRITICAL Backup %s (%s), %s seems older than start of backup" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'])) + elif os.path.isdir(b['backup_location']): + return (nagiosStateOk,"OK Backup %s (%s), %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'])) + else: + return (nagiosStateCritical,"CRITICAL Backup %s (%s), %s has disapeared from backup location %s" % (self.backup_name,isodate2datetime(b['backup_end']),b['log'],b['backup_location'])) + + self.logger.debug('[%s] checknagios : looking for most recent Warning or Running not older than %s',self.backup_name,mindate) + for b in q: + if b['backup_end'] >= mindate and b['status'] in ('Warning','Running'): + return (nagiosStateWarning,'WARNING : Backup %s still running or warning. %s' % (self.backup_name,b['log'])) + + self.logger.debug('[%s] checknagios : No Ok or warning recent backup found',self.backup_name) + return (nagiosStateCritical,'CRITICAL : No recent backup for %s' % self.backup_name ) + + def cleanup_backup(self): + """Removes obsolete backups (older than backup_retention_time)""" + mindate = datetime2isodate((dateof(datetime.datetime.now()) - datetime.timedelta(days=self.backup_retention_time))) + # check if there is at least 1 "OK" backup left after cleanup : + ok_backups = self.dbstat.query('select backup_location from stats where TYPE="BACKUP" and backup_name=? and backup_start>=? and status="OK" order by backup_start desc',(self.backup_name,mindate)) + removed = [] + if ok_backups and os.path.exists(ok_backups[0]['backup_location']): + records = self.dbstat.query('select status, backup_start, backup_end, log, backup_location from stats where backup_name=? 
and backup_start '/': + backup_source += '/' + if backup_dest[-1] <> '/': + backup_dest += '/' + + if not os.path.isdir(backup_dest): + os.makedirs(backup_dest) + + options = ['-aP','--stats','--delete-excluded','--numeric-ids','--delete-after'] + if self.logger.level: + options.append('-P') + + if self.dry_run: + options.append('-d') + + options_params = " ".join(options) + + cmd = '/usr/bin/rsync %s %s %s 2>&1' % (options_params,backup_source,backup_dest) + self.logger.debug("[%s] rsync : %s",self.backup_name,cmd) + + if not self.dry_run: + self.line = '' + starttime = time.time() + stat_rowid = self.dbstat.start(backup_name=self.backup_name,server_name=self.server_name, TYPE="EXPORT") + + process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) + def ondata(data,context): + if context.verbose: + print data + context.logger.debug(data) + + log = monitor_stdout(process,ondata,self) + + for l in log.splitlines(): + if l.startswith('Number of files:'): + stats['total_files_count'] += int(l.split(':')[1]) + if l.startswith('Number of files transferred:'): + stats['written_files_count'] += int(l.split(':')[1]) + if l.startswith('Total file size:'): + stats['total_bytes'] += int(l.split(':')[1].split()[0]) + if l.startswith('Total transferred file size:'): + stats['written_bytes'] += int(l.split(':')[1].split()[0]) + returncode = process.returncode + ## deal with exit code 24 (file vanished) + if (returncode == 24): + self.logger.warning("[" + self.backup_name + "] Note: some files vanished before transfer") + elif (returncode == 23): + self.logger.warning("[" + self.backup_name + "] unable so set uid on some files") + elif (returncode != 0): + self.logger.error("[" + self.backup_name + "] shell program exited with error code ") + raise Exception("[" + self.backup_name + "] shell program exited with error code " + str(returncode), cmd) + else: + print cmd + + stats['status']='OK' + self.logger.info('export backup from %s to %s OK, %d bytes written for %d changed files' % (backup_source,backup_dest,stats['written_bytes'],stats['written_files_count'])) + + endtime = time.time() + duration = (endtime-starttime)/3600.0 + + if not self.dry_run and self.dbstat: + self.dbstat.finish(stat_rowid, + backup_end=datetime2isodate(datetime.datetime.now()), + backup_duration = duration, + total_files_count=stats['total_files_count'], + written_files_count=stats['written_files_count'], + total_bytes=stats['total_bytes'], + written_bytes=stats['written_bytes'], + status=stats['status'], + log=stats['log'], + backup_location=backup_dest) + return stats + + +if __name__ == '__main__': + logger = logging.getLogger('tisbackup') + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + dbstat = BackupStat('/backup/data/log/tisbackup.sqlite') diff --git a/libtisbackup/copy_vm_xcp.py b/libtisbackup/copy_vm_xcp.py new file mode 100755 index 0000000..ad2ea6e --- /dev/null +++ b/libtisbackup/copy_vm_xcp.py @@ -0,0 +1,224 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------- +# This file is part of TISBackup +# +# TISBackup is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+#
+# TISBackup is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with TISBackup. If not, see <http://www.gnu.org/licenses/>.
+#
+# -----------------------------------------------------------------------
+
+import os
+import os.path
+import datetime
+import time
+import logging
+import re
+import select
+import urllib2
+import base64
+import socket
+from stat import *
+from common import *
+import XenAPI
+
+
+class copy_vm_xcp(backup_generic):
+    """Backup a VM running on a XCP server on a second SR (requires xe tools and XenAPI)"""
+    type = 'copy-vm-xcp'
+
+    required_params = backup_generic.required_params + ['server_name','storage_name','password_file','vm_name','network_name']
+    optional_params = backup_generic.optional_params + ['start_vm','max_copies']
+
+    start_vm = "no"
+    max_copies = 1
+
+    def read_config(self,iniconf):
+        assert(isinstance(iniconf,ConfigParser))
+        backup_generic.read_config(self,iniconf)
+        if self.start_vm == 'no' and iniconf.has_option('global','start_vm'):
+            self.start_vm = iniconf.get('global','start_vm')
+        if self.max_copies == 1 and iniconf.has_option('global','max_copies'):
+            self.max_copies = iniconf.getint('global','max_copies')
+
+    def copy_vm_to_sr(self, vm_name, storage_name, dry_run):
+        # the password file holds two lines: user and password
+        user_xen, password_xen, null = open(self.password_file).read().split('\n')
+        session = XenAPI.Session('https://'+self.server_name)
+        try:
+            session.login_with_password(user_xen,password_xen)
+        except XenAPI.Failure, error:
+            msg,ip = error.details
+            # when pointed at a pool slave, retry against the master reported in the error
+            if msg == 'HOST_IS_SLAVE':
+                server_name = ip
+                session = XenAPI.Session('https://'+server_name)
+                session.login_with_password(user_xen,password_xen)
+
+        self.logger.debug("[%s] VM (%s) to backup in storage: %s",self.backup_name,vm_name,storage_name)
+        now = datetime.datetime.now()
+
+        # get storage opaqueRef
+        try:
+            storage = session.xenapi.SR.get_by_name_label(storage_name)[0]
+        except IndexError,error:
+            return("error get storage opaqueref %s"%(error))
+
+        # get vm to copy opaqueRef
+        try:
+            vm = session.xenapi.VM.get_by_name_label(vm_name)[0]
+        except IndexError,error:
+            return("error get VM opaqueref %s"%(error))
+
+        # do the snapshot
+        self.logger.debug("[%s] Snapshot in progress",self.backup_name)
+        try:
+            snapshot = session.xenapi.VM.snapshot(vm,"tisbackup-%s"%(vm_name))
+        except XenAPI.Failure, error:
+            return("error when snapshot %s"%(error))
+
+        # get snapshot opaqueRef
+        snapshot = session.xenapi.VM.get_by_name_label("tisbackup-%s"%(vm_name))[0]
+        session.xenapi.VM.set_name_description(snapshot,"snapshot created by tisbackup on : %s"%(now.strftime("%Y-%m-%d %H:%M")))
+
+        vm_backup_name = "zzz-%s-"%(vm_name)
+
+        # check whether old backup copies already exist
+        list_backups = []
+        for vm_ref in session.xenapi.VM.get_all():
+            name_label = session.xenapi.VM.get_name_label(vm_ref)
+            if vm_backup_name in name_label:
+                list_backups.append(name_label)
+
+        list_backups.sort()
+
+        if len(list_backups) >= 1:
+            # shut down the most recent backup copy if it is running
+            last_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[-1])[0]
+            if not "Halted" in session.xenapi.VM.get_power_state(last_backup_vm):
+                self.logger.debug("[%s] Shutting down last backup vm : %s", self.backup_name, list_backups[-1] )
+                session.xenapi.VM.hard_shutdown(last_backup_vm)
+
+            # delete the oldest backup copies beyond max_copies
+            if len(list_backups) >= int(self.max_copies):
+                for i in range(len(list_backups)-int(self.max_copies)+1):
+                    oldest_backup_vm = session.xenapi.VM.get_by_name_label(list_backups[i])[0]
+                    if not "Halted" in session.xenapi.VM.get_power_state(oldest_backup_vm):
+                        self.logger.debug("[%s] Shutting down old vm : %s", self.backup_name, list_backups[i] )
+                        session.xenapi.VM.hard_shutdown(oldest_backup_vm)
+
+                    try:
+                        self.logger.debug("[%s] Deleting old vm : %s", self.backup_name, list_backups[i])
+                        for vbd in session.xenapi.VM.get_VBDs(oldest_backup_vm):
+                            vdi = session.xenapi.VBD.get_VDI(vbd)
+                            if not 'NULL' in vdi:
+                                session.xenapi.VDI.destroy(vdi)
+
+                        session.xenapi.VM.destroy(oldest_backup_vm)
+                    except XenAPI.Failure, error:
+                        return("error when destroying old backup vm %s"%(error))
+
+        self.logger.debug("[%s] Copy %s in progress on %s",self.backup_name,vm_name,storage_name)
+        try:
+            backup_vm = session.xenapi.VM.copy(snapshot,vm_backup_name+now.strftime("%Y-%m-%d %H:%M"),storage)
+        except XenAPI.Failure, error:
+            return("error when copy %s"%(error))
+
+        # the copy is created as a template; turn it back into a plain VM
+        session.xenapi.VM.set_is_a_template(backup_vm,False)
+
+        # change the network of the new VM
+        try:
+            vifDestroy = session.xenapi.VM.get_VIFs(backup_vm)
+        except IndexError,error:
+            return("error get VIF opaqueref %s"%(error))
+
+        for i in vifDestroy:
+            vifRecord = session.xenapi.VIF.get_record(i)
+            session.xenapi.VIF.destroy(i)
+            networkRef = session.xenapi.network.get_by_name_label(self.network_name)[0]
+            data = {'MAC': vifRecord['MAC'],
+                    'MAC_autogenerated': False,
+                    'MTU': vifRecord['MTU'],
+                    'VM': backup_vm,
+                    'current_operations': vifRecord['current_operations'],
+                    'currently_attached': vifRecord['currently_attached'],
+                    'device': vifRecord['device'],
+                    'ipv4_allowed': vifRecord['ipv4_allowed'],
+                    'ipv6_allowed': vifRecord['ipv6_allowed'],
+                    'locking_mode': vifRecord['locking_mode'],
+                    'network': networkRef,
+                    'other_config': vifRecord['other_config'],
+                    'qos_algorithm_params': vifRecord['qos_algorithm_params'],
+                    'qos_algorithm_type': vifRecord['qos_algorithm_type'],
+                    'qos_supported_algorithms': vifRecord['qos_supported_algorithms'],
+                    'runtime_properties': vifRecord['runtime_properties'],
+                    'status_code': vifRecord['status_code'],
+                    'status_detail': vifRecord['status_detail']
+                    }
+            try:
+                session.xenapi.VIF.create(data)
+            except Exception, error:
+                return(error)
+
+        if self.start_vm in ['true', '1', 't', 'y', 'yes', 'oui']:
+            session.xenapi.VM.start(backup_vm,False,True)
+
+        session.xenapi.VM.set_name_description(backup_vm,"snapshot created by tisbackup on : %s"%(now.strftime("%Y-%m-%d %H:%M")))
+
+        # delete the snapshot
+        try:
+            session.xenapi.VM.destroy(snapshot)
+        except XenAPI.Failure, error:
+            return("error when destroy snapshot %s"%(error))
+
+        return(0)
+
+    def do_backup(self,stats):
+        try:
+            timestamp = int(time.time())
+            cmd = self.copy_vm_to_sr(self.vm_name, self.storage_name, self.dry_run)
+            if cmd == 0:
+                timeExec = int(time.time()) - timestamp
+                stats['log']='copy of %s to another storage OK' % (self.backup_name)
+                stats['status']='OK'
+                stats['total_files_count'] = 1
+                stats['backup_location'] = self.storage_name
+            else:
+                stats['status']='ERROR'
+                stats['log']=cmd
+
+        except BaseException,e:
+            stats['status']='ERROR'
+            stats['log']=str(e)
+            raise
+
+
+register_driver(copy_vm_xcp)
diff --git a/samples/backup_button_jobs b/samples/backup_button_jobs
new file mode 100644
index 0000000..25f8e60
--- /dev/null
+++ b/samples/backup_button_jobs
@@ -0,0 +1,18 @@
+#!/bin/sh
+. /frontview/bin/functions
+
+target=$(/frontview/bin/get_front_panel_usb_hdd)
+
+echo $(date +%Y-%m-%d\ %H:%M:%S) : TISBackup export to USB disk : $target >> /var/log/tisbackup.log
+if [ -n "$target" ]; then
+    hotplug_lcd "Start TISBackup export"
+    /usr/local/bin/tisbackup -x /$target/export exportbackup >> /var/log/tisbackup.log 2>&1
+    hotplug_lcd "Finish TISBackup export"
+    sleep 3
+else
+    hotplug_lcd "Error, no USB disk"
+    sleep 3
+fi
+echo $(date +%Y-%m-%d\ %H:%M:%S) : End of TISBackup export to USB disk : $target >> /var/log/tisbackup.log
+
diff --git a/samples/config.ini.sample b/samples/config.ini.sample
new file mode 100644
index 0000000..a620b07
--- /dev/null
+++ b/samples/config.ini.sample
@@ -0,0 +1,55 @@
+[global]
+backup_base_dir = /root/tisbackup/backup_dir
+
+# backup retention in days
+backup_retention_time=90
+
+# for nagios check in hours
+maximum_backup_age=30
+
+;[srvopenerp-slash]
+;type=rsync+ssh
+;server_name=srvopenerp
+;remote_dir=/
+;compression=True
+;exclude_list="/proc/**","/sys/**","/dev/**"
+;private_key=/root/.ssh/id_dsa
+;ssh_port = 22
+
+;[srvzimbra-slash]
+;type=rsync+ssh
+;server_name=srvzimbra
+;remote_dir=/
+;exclude_list="/proc/**","/sys/**","/dev/**"
+;private_key=/root/.ssh/id_dsa
+;ssh_port = 22
+
+;[backup_mysql_srvintranet]
+;type=mysql+ssh
+;server_name=srvintranet
+;private_key=/root/.ssh/id_dsa
+;db_name=*
+;db_user=root
+;db_passwd=mypassword
+
+;[srvopenerp-pgsql]
+;type=pgsql+ssh
+;server_name=srvopenerp
+;db_name=tranquil-production
+;private_key=/root/.ssh/id_dsa
+;ssh_port = 22
+
+;[test-backup-xva2]
+;type=xen-xva
+;xcphost=srvxen1-test
+;server_name=test-backup-xva2
+;password_file=/root/xen_passwd
+
+;[sw-serveur]
+;type=switch
+;server_name=sw-serveur
+;switch_ip=192.168.149.253
+;switch_user=admin
+;switch_password=toto
+;switch_type=LINKSYS-SRW2024
+
diff --git a/samples/tisbackup-config.ini b/samples/tisbackup-config.ini
new file mode 100644
index 0000000..f92cd11
--- /dev/null
+++ b/samples/tisbackup-config.ini
@@ -0,0 +1,84 @@
+[global]
+backup_base_dir = /backup/data/
+
+# backup retention in days
+backup_retention_time=15
+
+# for nagios check in hours
+maximum_backup_age=30
+
+# bandwidth limit for rsync
+#bw_limit = 300
+
+# compression level for rsync (0 to 9)
+#compression_level=7
+
+[srvfichiers-partages]
+type=rsync+ssh
+server_name=srvfichiers
+remote_dir=/home/partages
+exclude_list=
+private_key=/root/.ssh/id_dsa
+ssh_port = 22
+
+[srvintranet-slash]
+type=rsync+ssh
+server_name=srvintranet
+remote_dir=/
+exclude_list="/proc/**","/sys/**","/dev/**"
+private_key=/root/.ssh/id_dsa
+ssh_port = 22
+
+[srvads-slash]
+type=rsync+ssh
+server_name=srvads
+remote_dir=/
+exclude_list="/proc/**","/sys/**","/dev/**"
+private_key=/root/.ssh/id_dsa
+
+[srvzimbra-slash]
+type=rsync+ssh
+server_name=srvzimbra
+remote_dir=/
+exclude_list="/proc/**","/sys/**","/dev/**","/opt/**"
+private_key=/root/.ssh/id_dsa
+ssh_port = 22
+
+[srvzimbra-opt]
+type=rsync+ssh
+server_name=srvzimbra
+remote_dir=/opt
+exclude_list=
+private_key=/root/.ssh/id_dsa
+ssh_port = 22
+
+[gateway]
+type=null
+server_name=fwall
+
+[srvopenerp6-prod-pgsql]
+type=pgsql+ssh
+server_name=srvopenerp6-prod
+db_name=tranquil_production
+private_key=/root/.ssh/id_dsa
+ssh_port = 22
+
+[srvopenerp6-form-script]
+type=rsync+ssh
+server_name=srvopenerp6-form
+remote_dir=/home/openerp/instances/form/openobject-library/
+exclude_list=
+private_key=/root/.ssh/id_rsa
+ssh_port = 22
+
+;preexec=/etc/init.d/zimbra stop
+;postexec=/etc/init.d/zimbra start
+
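Each [section] in these samples defines one backup job: the type key selects the registered driver (rsync+ssh, pgsql+ssh, copy-vm-xcp, null, ...) and the remaining keys become that driver's parameters, while [global] carries shared settings such as backup_retention_time. As a minimal sketch only (the section name and config path are taken from the sample above; TISBackup itself reads the file through its bundled iniparse wrappers, which expose the same ConfigParser interface):

    from ConfigParser import ConfigParser

    cfg = ConfigParser()
    cfg.read('/etc/tis/tisbackup-config.ini')

    retention = cfg.getint('global', 'backup_retention_time')  # days to keep old backups
    job = dict(cfg.items('srvfichiers-partages'))              # one backup job definition
    print "driver=%s server=%s (keep %d days)" % (job['type'], job['server_name'], retention)
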
+;[backup_mysql_srvintranet]
+;type=mysql+ssh
+;server_name=srvintranet
+;private_key=/root/.ssh/id_dsa
+;db_name=
+;db_user=root
+;db_passwd=
+
diff --git a/samples/tisbackup-pra.ini b/samples/tisbackup-pra.ini
new file mode 100755
index 0000000..f179f03
--- /dev/null
+++ b/samples/tisbackup-pra.ini
@@ -0,0 +1,21 @@
+[global]
+backup_base_dir = /home/homes/ssamson/
+
+# backup retention in days
+backup_retention_time=30
+
+# for nagios check in hours
+maximum_backup_age=30
+compression_level=7
+#max_copies=2
+
+[test-copysr]
+type=copy-vm-xcp
+server_name=srvxen1-test
+vm_name=test-pra
+storage_name=FAST_SR2
+password_file=/home/homes/ssamson/tisbackup-pra/xen_passwd
+network_name=net-test
+#start_vm=no
+#max_copies=3
+
diff --git a/samples/tisbackup.cron b/samples/tisbackup.cron
new file mode 100644
index 0000000..0fb80c1
--- /dev/null
+++ b/samples/tisbackup.cron
@@ -0,0 +1,7 @@
+#SHELL=/bin/sh
+#PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
+# m h dom mon dow user command
+30 22 * * * root /opt/tisbackup/tisbackup.py -c /etc/tis/tisbackup-config.ini backup >> /var/log/tisbackup.log 2>&1
+30 12 * * * root /opt/tisbackup/tisbackup.py -c /etc/tis/tisbackup-config.ini cleanup >> /var/log/tisbackup.log 2>&1
+
diff --git a/samples/tisbackup_gui.ini b/samples/tisbackup_gui.ini
new file mode 100644
index 0000000..78b1e30
--- /dev/null
+++ b/samples/tisbackup_gui.ini
@@ -0,0 +1,10 @@
+[uwsgi]
+http = 0.0.0.0:8080
+master = true
+processes = 1
+wsgi=tisbackup_gui:app
+chdir=/opt/tisbackup
+config= /etc/tis/tisbackup-config.ini
+sections=
+spooler=/opt/tisbackup/myspool
+ADMIN_EMAIL=technique@tranquil-it-systems.fr
diff --git a/scripts/tisbackup_gui b/scripts/tisbackup_gui
new file mode 100755
index 0000000..e155b5f
--- /dev/null
+++ b/scripts/tisbackup_gui
@@ -0,0 +1,133 @@
+#!/usr/bin/env bash
+
+### BEGIN INIT INFO
+# Provides:          tisbackup_gui-uwsgi
+# Required-Start:    $all
+# Required-Stop:     $all
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: starts the uwsgi app server for tisbackup_gui
+# Description:       starts uwsgi app server for tisbackup_gui using start-stop-daemon
+### END INIT INFO
+set -e
+
+VERSION=$(basename $0)
+PATH=/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+DAEMON=/usr/local/bin/$VERSION
+RUN=/var/run
+NAME=$VERSION
+CONFIG_FILE=/etc/tis/tisbackup_gui.ini
+LOGFILE=/var/log/$NAME.log
+OWNER=root
+DESC=$VERSION
+OP=$1
+
+DAEMON_OPTS=""
+
+# Include uwsgi defaults if available
+if [[ -f /etc/default/$VERSION ]]; then
+    . /etc/default/$VERSION
+fi
+
+do_pid_check()
+{
+    local PIDFILE=$1
+    [[ -f $PIDFILE ]] || return 0
+    local PID=$(cat $PIDFILE)
+    for p in $(pgrep $VERSION); do
+        [[ $p == $PID ]] && return 1
+    done
+    return 0
+}
+
+do_start()
+{
+#    for config in $ENABLED_CONFIGS; do
+    local PIDFILE=$RUN/$NAME.pid
+    if do_pid_check $PIDFILE; then
+        uwsgi -d $LOGFILE --pidfile $PIDFILE --ini $CONFIG_FILE
+#        sudo -u $OWNER -i $VERSION $config $DAEMON_OPTS --pidfile $PIDFILE
+    else
+        echo "Already running!"
+    fi
+#    done
+}
+
+send_sig()
+{
+    local PIDFILE=$RUN/$NAME.pid
+    set +e
+    [[ -f $PIDFILE ]] && kill $1 $(cat $PIDFILE) > /dev/null 2>&1
+    set -e
+}
+
+wait_and_clean_pidfiles()
+{
+    local PIDFILE=$RUN/$NAME.pid
+    until do_pid_check $PIDFILE; do
+        echo -n "";
+    done
+    rm -f $PIDFILE
+}
+
+do_stop()
+{
+    send_sig -3
+    wait_and_clean_pidfiles
+}
+
+do_reload()
+{
+    send_sig -1
+}
+
+do_force_reload()
+{
+    send_sig -15
+}
+
+get_status()
+{
+    send_sig -10
+}
+
+case "$OP" in
+    start)
+        echo "Starting $DESC: "
+        do_start
+        echo "$NAME."
+        ;;
+    stop)
+        echo -n "Stopping $DESC: "
+        do_stop
+        echo "$NAME."
+        ;;
+    reload)
+        echo -n "Reloading $DESC: "
+        do_reload
+        echo "$NAME."
+        ;;
+    force-reload)
+        echo -n "Force-reloading $DESC: "
+        do_force_reload
+        echo "$NAME."
+        ;;
+    restart)
+        echo "Restarting $DESC: "
+        do_stop
+        sleep 3
+        do_start
+        echo "$NAME."
+        ;;
+    status)
+        get_status
+        ;;
+    *)
+        N=/etc/init.d/$NAME
+        echo "Usage: $N {start|stop|restart|reload|force-reload|status}" >&2
+        exit 1
+        ;;
+esac
+exit 0
diff --git a/static/images/back_disabled.png b/static/images/back_disabled.png
new file mode 100644
index 0000000..881de79
Binary files /dev/null and b/static/images/back_disabled.png differ
diff --git a/static/images/back_enabled.png b/static/images/back_enabled.png
new file mode 100644
index 0000000..c608682
Binary files /dev/null and b/static/images/back_enabled.png differ
diff --git a/static/images/back_enabled_hover.png b/static/images/back_enabled_hover.png
new file mode 100644
index 0000000..d300f10
Binary files /dev/null and b/static/images/back_enabled_hover.png differ
diff --git a/static/images/bg_body.gif b/static/images/bg_body.gif
new file mode 100644
index 0000000..1cdfcba
Binary files /dev/null and b/static/images/bg_body.gif differ
diff --git a/static/images/check.png b/static/images/check.png
new file mode 100644
index 0000000..5d6e7f6
Binary files /dev/null and b/static/images/check.png differ
diff --git a/static/images/forward_disabled.png b/static/images/forward_disabled.png
new file mode 100644
index 0000000..6a6ded7
Binary files /dev/null and b/static/images/forward_disabled.png differ
diff --git a/static/images/forward_enabled.png b/static/images/forward_enabled.png
new file mode 100644
index 0000000..a4e6b53
Binary files /dev/null and b/static/images/forward_enabled.png differ
diff --git a/static/images/forward_enabled_hover.png b/static/images/forward_enabled_hover.png
new file mode 100644
index 0000000..fc46c5e
Binary files /dev/null and b/static/images/forward_enabled_hover.png differ
diff --git a/static/images/img01.jpg b/static/images/img01.jpg
new file mode 100644
index 0000000..0f44794
Binary files /dev/null and b/static/images/img01.jpg differ
diff --git a/static/images/img02.jpg b/static/images/img02.jpg
new file mode 100644
index 0000000..9d12f46
Binary files /dev/null and b/static/images/img02.jpg differ
diff --git a/static/images/img03.jpg b/static/images/img03.jpg
new file mode 100644
index 0000000..1567b24
Binary files /dev/null and b/static/images/img03.jpg differ
diff --git a/static/images/img04.jpg b/static/images/img04.jpg
new file mode 100644
index 0000000..3453cad
Binary files /dev/null and b/static/images/img04.jpg differ
diff --git a/static/images/important.gif b/static/images/important.gif
new file mode 100644
index 0000000..41d4943
Binary files /dev/null and b/static/images/important.gif differ
diff --git a/static/images/info.gif b/static/images/info.gif
new file mode 100644
index 0000000..c81828d
Binary files /dev/null and b/static/images/info.gif differ
diff --git a/static/images/loader.gif b/static/images/loader.gif
new file mode 100644
index 0000000..e482b69
Binary files /dev/null and b/static/images/loader.gif differ
diff --git a/static/images/logo-tis.png b/static/images/logo-tis.png
new file mode 100644
index 0000000..59b96ff
Binary files /dev/null and b/static/images/logo-tis.png differ
diff --git a/static/images/sort_asc.png b/static/images/sort_asc.png
new file mode 100644
index 0000000..a88d797
Binary files /dev/null and b/static/images/sort_asc.png differ
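A note on scripts/tisbackup_gui above: once uwsgi is started, the script never talks to it directly, it only sends signals through the pidfile, and uwsgi's documented signal handling gives them their meaning: SIGQUIT (-3) stops the master, SIGHUP (-1) gracefully reloads the workers, SIGTERM (-15) forces a reload and SIGUSR1 (-10) dumps statistics to the log. A rough Python 2 equivalent of the do_pid_check/send_sig pair, for illustration only (the pidfile path mirrors $RUN/$NAME.pid from the script, it is not part of TISBackup):

    import os
    import signal

    PIDFILE = '/var/run/tisbackup_gui.pid'

    def pid_running(pidfile):
        # mirrors do_pid_check: is the PID recorded in the pidfile still alive?
        try:
            pid = int(open(pidfile).read().strip())
        except (IOError, ValueError):
            return False
        try:
            os.kill(pid, 0)  # signal 0 tests for existence without touching the process
            return True
        except OSError:
            return False

    def send_sig(pidfile, sig):
        # mirrors send_sig: forward a signal via the pidfile, ignoring stale files
        if pid_running(pidfile):
            os.kill(int(open(pidfile).read().strip()), sig)

    # send_sig(PIDFILE, signal.SIGQUIT)  # same effect as "/etc/init.d/tisbackup_gui stop"
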
diff --git a/static/images/sort_asc_disabled.png b/static/images/sort_asc_disabled.png
new file mode 100644
index 0000000..4e144cf
Binary files /dev/null and b/static/images/sort_asc_disabled.png differ
diff --git a/static/images/sort_both.png b/static/images/sort_both.png
new file mode 100644
index 0000000..1867040
Binary files /dev/null and b/static/images/sort_both.png differ
diff --git a/static/images/sort_desc.png b/static/images/sort_desc.png
new file mode 100644
index 0000000..def071e
Binary files /dev/null and b/static/images/sort_desc.png differ
diff --git a/static/images/sort_desc_disabled.png b/static/images/sort_desc_disabled.png
new file mode 100644
index 0000000..7824973
Binary files /dev/null and b/static/images/sort_desc_disabled.png differ
diff --git a/static/images/title.gif b/static/images/title.gif
new file mode 100644
index 0000000..f92b596
Binary files /dev/null and b/static/images/title.gif differ
diff --git a/static/js/jquery.alerts.js b/static/js/jquery.alerts.js
new file mode 100644
index 0000000..0b32996
--- /dev/null
+++ b/static/js/jquery.alerts.js
@@ -0,0 +1,235 @@
+// jQuery Alert Dialogs Plugin
+//
+// Version 1.1
+//
+// Cory S.N. LaViska
+// A Beautiful Site (http://abeautifulsite.net/)
+// 14 May 2009
+//
+// Website: http://abeautifulsite.net/blog/2008/12/jquery-alert-dialogs/
+//
+// Usage:
+//    jAlert( message, [title, callback] )
+//    jConfirm( message, [title, callback] )
+//    jPrompt( message, [value, title, callback] )
+//
+// History:
+//
+//    1.00 - Released (29 December 2008)
+//
+//    1.01 - Fixed bug where unbinding would destroy all resize events
+//
+// License:
+//
+// This plugin is dual-licensed under the GNU General Public License and the MIT License and
+// is copyright 2008 A Beautiful Site, LLC.
+//
+(function($) {
+
+    $.alerts = {
+
+        // These properties can be read/written by accessing $.alerts.propertyName from your scripts at any time
+
+        verticalOffset: -75,                // vertical offset of the dialog from center screen, in pixels
+        horizontalOffset: 0,                // horizontal offset of the dialog from center screen, in pixels
+        repositionOnResize: true,           // re-centers the dialog on window resize
+        overlayOpacity: .01,                // transparency level of overlay
+        overlayColor: '#FFF',               // base color of overlay
+        draggable: true,                    // make the dialogs draggable (requires UI Draggables plugin)
+        okButton: ' OK ',                   // text for the OK button
+        cancelButton: ' Cancel ',           // text for the Cancel button
+        dialogClass: null,                  // if specified, this class will be applied to all dialogs
+
+        // Public methods
+
+        alert: function(message, title, callback) {
+            if( title == null ) title = 'Alert';
+            $.alerts._show(title, message, null, 'alert', function(result) {
+                if( callback ) callback(result);
+            });
+        },
+
+        confirm: function(message, title, callback) {
+            if( title == null ) title = 'Confirm';
+            $.alerts._show(title, message, null, 'confirm', function(result) {
+                if( callback ) callback(result);
+            });
+        },
+
+        prompt: function(message, value, title, callback) {
+            if( title == null ) title = 'Prompt';
+            $.alerts._show(title, message, value, 'prompt', function(result) {
+                if( callback ) callback(result);
+            });
+        },
+
+        // Private methods
+
+        _show: function(title, msg, value, type, callback) {
+
+            $.alerts._hide();
+            $.alerts._overlay('show');
+
+            $("BODY").append(
+              '<div id="popup_container">' +
+                '<h1 id="popup_title"></h1>' +
+                '<div id="popup_content">' +
+                  '<div id="popup_message"></div>' +
+                '</div>' +
+              '</div>');
+
+            if( $.alerts.dialogClass ) $("#popup_container").addClass($.alerts.dialogClass);
+
+            // IE6 Fix
+            var pos = ($.browser.msie && parseInt($.browser.version) <= 6 ) ? 'absolute' : 'fixed';
+
+            $("#popup_container").css({
+                position: pos,
+                zIndex: 99999,
+                padding: 0,
+                margin: 0
+            });
+
+            $("#popup_title").text(title);
+            $("#popup_content").addClass(type);
+            $("#popup_message").text(msg);
+            $("#popup_message").html( $("#popup_message").text().replace(/\n/g, '<br />') );
+
+            $("#popup_container").css({
+                minWidth: $("#popup_container").outerWidth(),
+                maxWidth: $("#popup_container").outerWidth()
+            });
+
+            $.alerts._reposition();
+            $.alerts._maintainPosition(true);
+
+            switch( type ) {
+                case 'alert':
+                    $("#popup_message").after('<div id="popup_panel"><input type="button" value="' + $.alerts.okButton + '" id="popup_ok" /></div>');
+                    $("#popup_ok").click( function() {
+                        $.alerts._hide();
+                        callback(true);
+                    });
+                    $("#popup_ok").focus().keypress( function(e) {
+                        if( e.keyCode == 13 || e.keyCode == 27 ) $("#popup_ok").trigger('click');
+                    });
+                    break;
+                case 'confirm':
+                    $("#popup_message").after('<div id="popup_panel"><input type="button" value="' + $.alerts.okButton + '" id="popup_ok" /> <input type="button" value="' + $.alerts.cancelButton + '" id="popup_cancel" /></div>');
+                    $("#popup_ok").click( function() {
+                        $.alerts._hide();
+                        if( callback ) callback(true);
+                    });
+                    $("#popup_cancel").click( function() {
+                        $.alerts._hide();
+                        if( callback ) callback(false);
+                    });
+                    $("#popup_ok").focus();
+                    $("#popup_ok, #popup_cancel").keypress( function(e) {
+                        if( e.keyCode == 13 ) $("#popup_ok").trigger('click');
+                        if( e.keyCode == 27 ) $("#popup_cancel").trigger('click');
+                    });
+                    break;
+                case 'prompt':
+                    $("#popup_message").append('<br /><input type="text" size="30" id="popup_prompt" />').after('<div id="popup_panel"><input type="button" value="' + $.alerts.okButton + '" id="popup_ok" /> <input type="button" value="' + $.alerts.cancelButton + '" id="popup_cancel" /></div>');
+                    $("#popup_prompt").width( $("#popup_message").width() );
+                    $("#popup_ok").click( function() {
+                        var val = $("#popup_prompt").val();
+                        $.alerts._hide();
+                        if( callback ) callback( val );
+                    });
+                    $("#popup_cancel").click( function() {
+                        $.alerts._hide();
+                        if( callback ) callback( null );
+                    });
+                    $("#popup_prompt, #popup_ok, #popup_cancel").keypress( function(e) {
+                        if( e.keyCode == 13 ) $("#popup_ok").trigger('click');
+                        if( e.keyCode == 27 ) $("#popup_cancel").trigger('click');
+                    });
+                    if( value ) $("#popup_prompt").val(value);
+                    $("#popup_prompt").focus().select();
+                    break;
+            }
+
+            // Make draggable
+            if( $.alerts.draggable ) {
+                try {
+                    $("#popup_container").draggable({ handle: $("#popup_title") });
+                    $("#popup_title").css({ cursor: 'move' });
+                } catch(e) { /* requires jQuery UI draggables */ }
+            }
+        },
+
+        _hide: function() {
+            $("#popup_container").remove();
+            $.alerts._overlay('hide');
+            $.alerts._maintainPosition(false);
+        },
+
+        _overlay: function(status) {
+            switch( status ) {
+                case 'show':
+                    $.alerts._overlay('hide');
+                    $("BODY").append('<div id="popup_overlay"></div>');
+                    $("#popup_overlay").css({
+                        position: 'absolute',
+                        zIndex: 99998,
+                        top: '0px',
+                        left: '0px',
+                        width: '100%',
+                        height: $(document).height(),
+                        background: $.alerts.overlayColor,
+                        opacity: $.alerts.overlayOpacity
+                    });
+                    break;
+                case 'hide':
+                    $("#popup_overlay").remove();
+                    break;
+            }
+        },
+
+        _reposition: function() {
+            var top = (($(window).height() / 2) - ($("#popup_container").outerHeight() / 2)) + $.alerts.verticalOffset;
+            var left = (($(window).width() / 2) - ($("#popup_container").outerWidth() / 2)) + $.alerts.horizontalOffset;
+            if( top < 0 ) top = 0;
+            if( left < 0 ) left = 0;
+
+            // IE6 fix
+            if( $.browser.msie && parseInt($.browser.version) <= 6 ) top = top + $(window).scrollTop();
+
+            $("#popup_container").css({
+                top: top + 'px',
+                left: left + 'px'
+            });
+            $("#popup_overlay").height( $(document).height() );
+        },
+
+        _maintainPosition: function(status) {
+            if( $.alerts.repositionOnResize ) {
+                switch(status) {
+                    case true:
+                        $(window).bind('resize', $.alerts._reposition);
+                        break;
+                    case false:
+                        $(window).unbind('resize', $.alerts._reposition);
+                        break;
+                }
+            }
+        }
+
+    }
+
+    // Shortcut functions
+    jAlert = function(message, title, callback) {
+        $.alerts.alert(message, title, callback);
+    };
+
+    jConfirm = function(message, title, callback) {
+        $.alerts.confirm(message, title, callback);
+    };
+
+    jPrompt = function(message, value, title, callback) {
+        $.alerts.prompt(message, value, title, callback);
+    };
+
+})(jQuery);
\ No newline at end of file
diff --git a/static/js/jquery.dataTables.js b/static/js/jquery.dataTables.js
new file mode 100644
index 0000000..6b4d452
--- /dev/null
+++ b/static/js/jquery.dataTables.js
@@ -0,0 +1,12098 @@
+/**
+ * @summary     DataTables
+ * @description Paginate, search and sort HTML tables
+ * @version     1.9.4
+ * @file        jquery.dataTables.js
+ * @author      Allan Jardine (www.sprymedia.co.uk)
+ * @contact     www.sprymedia.co.uk/contact
+ *
+ * @copyright Copyright 2008-2012 Allan Jardine, all rights reserved.
+ *
+ * This source file is free software, under either the GPL v2 license or a
+ * BSD style license, available at:
+ *   http://datatables.net/license_gpl2
+ *   http://datatables.net/license_bsd
+ *
+ * This source file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
+ *
+ * For details please refer to: http://www.datatables.net
+ */
+
+/*jslint evil: true, undef: true, browser: true */
+/*globals $, jQuery,define,_fnExternApiFunc,_fnInitialise,_fnInitComplete,_fnLanguageCompat,_fnAddColumn,_fnColumnOptions,_fnAddData,_fnCreateTr,_fnGatherData,_fnBuildHead,_fnDrawHead,_fnDraw,_fnReDraw,_fnAjaxUpdate,_fnAjaxParameters,_fnAjaxUpdateDraw,_fnServerParams,_fnAddOptionsHtml,_fnFeatureHtmlTable,_fnScrollDraw,_fnAdjustColumnSizing,_fnFeatureHtmlFilter,_fnFilterComplete,_fnFilterCustom,_fnFilterColumn,_fnFilter,_fnBuildSearchArray,_fnBuildSearchRow,_fnFilterCreateSearch,_fnDataToSearch,_fnSort,_fnSortAttachListener,_fnSortingClasses,_fnFeatureHtmlPaginate,_fnPageChange,_fnFeatureHtmlInfo,_fnUpdateInfo,_fnFeatureHtmlLength,_fnFeatureHtmlProcessing,_fnProcessingDisplay,_fnVisibleToColumnIndex,_fnColumnIndexToVisible,_fnNodeToDataIndex,_fnVisbleColumns,_fnCalculateEnd,_fnConvertToWidth,_fnCalculateColumnWidths,_fnScrollingWidthAdjust,_fnGetWidestNode,_fnGetMaxLenString,_fnStringToCss,_fnDetectType,_fnSettingsFromNode,_fnGetDataMaster,_fnGetTrNodes,_fnGetTdNodes,_fnEscapeRegex,_fnDeleteIndex,_fnReOrderIndex,_fnColumnOrdering,_fnLog,_fnClearTable,_fnSaveState,_fnLoadState,_fnCreateCookie,_fnReadCookie,_fnDetectHeader,_fnGetUniqueThs,_fnScrollBarWidth,_fnApplyToChildren,_fnMap,_fnGetRowData,_fnGetCellData,_fnSetCellData,_fnGetObjectDataFn,_fnSetObjectDataFn,_fnApplyColumnDefs,_fnBindAction,_fnCallbackReg,_fnCallbackFire,_fnJsonString,_fnRender,_fnNodeToColumnIndex,_fnInfoMacros,_fnBrowserDetect,_fnGetColumns*/
+
+(/** @lends */function( window, document, undefined ) {
+
+(function( factory ) {
+    "use strict";
+
+    // Define as an AMD module if possible
+    if ( typeof define === 'function' && define.amd )
+    {
+        define( ['jquery'], factory );
+    }
+    /* Define using browser globals otherwise
+     * Prevent multiple instantiations if the script is loaded twice
+     */
+    else if ( jQuery && !jQuery.fn.dataTable )
+    {
+        factory( jQuery );
+    }
+}
+(/** @lends */function( $ ) {
+    "use strict";
+    /**
+     * DataTables is a plug-in for the jQuery Javascript library. It is a
+     * highly flexible tool, based upon the foundations of progressive
+     * enhancement, which will add advanced interaction controls to any
+     * HTML table. For a full list of features please refer to
+     * DataTables.net.
+     *
+     * Note that the DataTable object is not a global variable but is
+     * aliased to jQuery.fn.DataTable and jQuery.fn.dataTable through which
+     * it may be accessed.
+     *
+     * @class
+     * @param {object} [oInit={}] Configuration object for DataTables. Options
+     *   are defined by {@link DataTable.defaults}
+     * @requires jQuery 1.3+
+     *
+     * @example
+     *   // Basic initialisation
+     *   $(document).ready( function() {
+     *     $('#example').dataTable();
+     *   } );
+     *
+     * @example
+     *   // Initialisation with configuration options - in this case, disable
+     *   // pagination and sorting.
+     *   $(document).ready( function() {
+     *     $('#example').dataTable( {
+     *       "bPaginate": false,
+     *       "bSort": false
+     *     } );
+     *   } );
+     */
+    var DataTable = function( oInit )
+    {
+
+
+        /**
+         * Add a column to the list used for the table with default values
+         *  @param {object} oSettings dataTables settings object
+         *  @param {node} nTh The th element for this column
+         *  @memberof DataTable#oApi
+         */
+        function _fnAddColumn( oSettings, nTh )
+        {
+            var oDefaults = DataTable.defaults.columns;
+            var iCol = oSettings.aoColumns.length;
+            var oCol = $.extend( {}, DataTable.models.oColumn, oDefaults, {
+                "sSortingClass": oSettings.oClasses.sSortable,
+                "sSortingClassJUI": oSettings.oClasses.sSortJUI,
+                "nTh": nTh ? nTh : document.createElement('th'),
+                "sTitle": oDefaults.sTitle ? oDefaults.sTitle : nTh ? nTh.innerHTML : '',
+                "aDataSort": oDefaults.aDataSort ? oDefaults.aDataSort : [iCol],
+                "mData": oDefaults.mData ? oDefaults.mData : iCol
+            } );
+            oSettings.aoColumns.push( oCol );
+
+            /* Add a column specific filter */
+            if ( oSettings.aoPreSearchCols[ iCol ] === undefined || oSettings.aoPreSearchCols[ iCol ] === null )
+            {
+                oSettings.aoPreSearchCols[ iCol ] = $.extend( {}, DataTable.models.oSearch );
+            }
+            else
+            {
+                var oPre = oSettings.aoPreSearchCols[ iCol ];
+
+                /* Don't require that the user must specify bRegex, bSmart or bCaseInsensitive */
+                if ( oPre.bRegex === undefined )
+                {
+                    oPre.bRegex = true;
+                }
+
+                if ( oPre.bSmart === undefined )
+                {
+                    oPre.bSmart = true;
+                }
+
+                if ( oPre.bCaseInsensitive === undefined )
+                {
+                    oPre.bCaseInsensitive = true;
+                }
+            }
+
+            /* Use the column options function to initialise classes etc */
+            _fnColumnOptions( oSettings, iCol, null );
+        }
+
+
+        /**
+         * Apply options for a column
+         *  @param {object} oSettings dataTables settings object
+         *  @param {int} iCol column index to consider
+         *  @param {object} oOptions object with sType, bVisible and bSearchable etc
+         *  @memberof DataTable#oApi
+         */
+        function _fnColumnOptions( oSettings, iCol, oOptions )
+        {
+            var oCol = oSettings.aoColumns[ iCol ];
+
+            /* User specified column options */
+            if ( oOptions !== undefined && oOptions !== null )
+            {
+                /* Backwards compatibility for mDataProp */
+                if ( oOptions.mDataProp && !oOptions.mData )
+                {
+                    oOptions.mData = oOptions.mDataProp;
+                }
+
+                if ( oOptions.sType !== undefined )
+                {
+                    oCol.sType = oOptions.sType;
+                    oCol._bAutoType = false;
+                }
+
+                $.extend( oCol, oOptions );
+                _fnMap( oCol, oOptions, "sWidth", "sWidthOrig" );
+
+                /* iDataSort to be applied (backwards compatibility), but aDataSort will take
+                 * priority if defined
+                 */
+                if ( oOptions.iDataSort !== undefined )
+                {
+                    oCol.aDataSort = [ oOptions.iDataSort ];
+                }
+                _fnMap( oCol, oOptions, "aDataSort" );
+            }
+
+            /* Cache the data get and set functions for speed */
+            var mRender = oCol.mRender ? _fnGetObjectDataFn( oCol.mRender ) : null;
+            var mData = _fnGetObjectDataFn( oCol.mData );
+
+            oCol.fnGetData = function (oData, sSpecific) {
+                var innerData = mData( oData, sSpecific );
+
+                if ( oCol.mRender && (sSpecific && sSpecific !== '') )
+                {
+                    return mRender( innerData, sSpecific, oData );
+                }
+                return innerData;
+            };
+            oCol.fnSetData = _fnSetObjectDataFn( oCol.mData );
+
+            /* Feature sorting overrides column specific when off */
+            if ( !oSettings.oFeatures.bSort )
+            {
+                oCol.bSortable = false;
+            }
+
+            /* Check that the class assignment is correct for sorting */
+            if ( !oCol.bSortable ||
+                 ($.inArray('asc', oCol.asSorting) == -1 && $.inArray('desc', oCol.asSorting) == -1) )
+            {
+                oCol.sSortingClass = oSettings.oClasses.sSortableNone;
+                oCol.sSortingClassJUI = "";
+            }
+            else if ( $.inArray('asc', oCol.asSorting) == -1 && $.inArray('desc', oCol.asSorting) == -1 )
+            {
+                oCol.sSortingClass = oSettings.oClasses.sSortable;
+                oCol.sSortingClassJUI = oSettings.oClasses.sSortJUI;
+            }
+            else if ( $.inArray('asc', oCol.asSorting) != -1 && $.inArray('desc', oCol.asSorting) == -1 )
+            {
+                oCol.sSortingClass = oSettings.oClasses.sSortableAsc;
+                oCol.sSortingClassJUI = oSettings.oClasses.sSortJUIAscAllowed;
+            }
+            else if ( $.inArray('asc', oCol.asSorting) == -1 && $.inArray('desc', oCol.asSorting) != -1 )
+            {
+                oCol.sSortingClass = oSettings.oClasses.sSortableDesc;
+                oCol.sSortingClassJUI = oSettings.oClasses.sSortJUIDescAllowed;
+            }
+        }
+
+
+        /**
+         * Adjust the table column widths for new data. Note: you would probably want to
+         * do a redraw after calling this function!
+         *  @param {object} oSettings dataTables settings object
+         *  @memberof DataTable#oApi
+         */
+        function _fnAdjustColumnSizing ( oSettings )
+        {
+            /* Not interested in doing column width calculation if auto-width is disabled */
+            if ( oSettings.oFeatures.bAutoWidth === false )
+            {
+                return false;
+            }
+
+            _fnCalculateColumnWidths( oSettings );
+            for ( var i=0 , iLen=oSettings.aoColumns.length ; i