diff --git a/snf-deploy/conf/nodes.conf b/snf-deploy/conf/nodes.conf
index 4a82ba010..d930f1412 100644
--- a/snf-deploy/conf/nodes.conf
+++ b/snf-deploy/conf/nodes.conf
@@ -6,6 +6,9 @@
 # Instances will reside in the .vm. subdomain
 domain = synnefo.live
 
+# This is the default forwarder to be used by bind
+nameserver = 8.8.8.8
+
 # Each node should define:
 
 # The node's desired hostname. It will be set
diff --git a/snf-deploy/files/etc/bind/named.conf.options b/snf-deploy/files/etc/bind/named.conf.options
index afc640e12..c74699bdf 100644
--- a/snf-deploy/files/etc/bind/named.conf.options
+++ b/snf-deploy/files/etc/bind/named.conf.options
@@ -10,9 +10,9 @@ options {
 	// Uncomment the following block, and insert the addresses replacing
 	// the all-0's placeholder.
 
-	// forwarders {
-	// 	0.0.0.0;
-	// };
+	forwarders {
+		%NAMESERVERS%;
+	};
 
 	auth-nxdomain no;    # conform to RFC1035
 	allow-recursion { %NODE_IPS%; };
diff --git a/snf-deploy/snfdeploy/components.py b/snf-deploy/snfdeploy/components.py
index e531a20e1..392b39e21 100644
--- a/snf-deploy/snfdeploy/components.py
+++ b/snf-deploy/snfdeploy/components.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010-2015 GRNET S.A. and individual contributors
+# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -192,6 +192,9 @@ def test(self):
 class SSH(base.Component):
     @base.run_cmds
     def prepare(self):
+        if not config.key_inject:
+            return []
+
         return [
             "mkdir -p /root/.ssh",
             "for f in $(ls /root/.ssh/*); do cp $f $f.bak ; done",
         ]
@@ -199,6 +202,9 @@ def _configure(self):
+        if not config.key_inject:
+            return []
+
         files = [
             "authorized_keys", "id_dsa", "id_dsa.pub", "id_rsa", "id_rsa.pub"
         ]
@@ -207,6 +213,9 @@ def initialize(self):
+        if not config.key_inject:
+            return []
+
         f = "/root/.ssh/authorized_keys"
         return [
             "test -e {0}.bak && cat {0}.bak >> {0} || true".format(f)
         ]
@@ -324,6 +333,10 @@ def prepare(self):
     def _configure(self):
         d = self.node.domain
         ip = self.node.ip
+        r1 = {
+            "node_ips": ";".join(self.ctx.all_ips),
+            "nameservers": ";".join(self.ctx.all_nameservers),
+        }
         return [
             ("/etc/bind/named.conf.local", {"domain": d}, {}),
             ("/etc/bind/zones/example.com",
@@ -334,8 +347,7 @@
              {"remote": "/etc/bind/zones/vm.%s" % d}),
             ("/etc/bind/rev/synnefo.in-addr.arpa.zone", {"domain": d}, {}),
             ("/etc/bind/rev/synnefo.ip6.arpa.zone", {"domain": d}, {}),
-            ("/etc/bind/named.conf.options",
-             {"node_ips": ";".join(self.ctx.all_ips)}, {}),
+            ("/etc/bind/named.conf.options", r1, {}),
             ("/root/ddns/ddns.key", {}, {"remote": "/etc/bind/ddns.key"}),
         ]
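For illustration, a minimal sketch of how the %NODE_IPS% and %NAMESERVERS% placeholders in named.conf.options could be filled from the values the Bind component collects above. This is not the snf-deploy template engine itself; the helper name and the sorting of the nameserver set are assumptions:

    def render_named_conf_options(template_text, node_ips, nameservers):
        # node_ips and nameservers are iterables of address strings,
        # e.g. ctx.all_ips and ctx.all_nameservers from the deploy context.
        text = template_text.replace("%NODE_IPS%", ";".join(node_ips))
        text = text.replace("%NAMESERVERS%", ";".join(sorted(nameservers)))
        return text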
+elif [ -b "{0}" ]; then pvcreate {0} && vgcreate {2} {0} else truncate -s {3} {1} @@ -762,9 +776,12 @@ def initialize(self): --ipolicy-std-specs {2} \ --ipolicy-bounds-specs min:{3}/max:{4} \ --enabled-disk-templates file,ext \ - {5} + --file-storage-dir {5}/ganeti/file-storage \ + --shared-file-storage-dir {5}/ganeti/shared-file-storage \ + {6} """.format(config.common_bridge, self.cluster.netdev, - std, bound_min, bound_max, self.cluster.fqdn) + std, bound_min, bound_max, config.shared_dir, + self.cluster.fqdn) modify = "gnt-node modify --vm-capable=no %s" % self.node.fqdn diff --git a/snf-deploy/snfdeploy/config.py b/snf-deploy/snfdeploy/config.py index 5d2ee78c2..4532db77b 100644 --- a/snf-deploy/snfdeploy/config.py +++ b/snf-deploy/snfdeploy/config.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -136,6 +136,7 @@ def init(args): config.dry_run = args.dry_run config.force = args.force config.ssh_key = args.ssh_key + config.key_inject = args.key_inject config.mem = args.mem config.vnc = args.vnc config.smp = args.smp diff --git a/snf-deploy/snfdeploy/context.py b/snf-deploy/snfdeploy/context.py index 98645e493..d81c81be2 100644 --- a/snf-deploy/snfdeploy/context.py +++ b/snf-deploy/snfdeploy/context.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -119,6 +119,11 @@ def all_ips(self): l = lambda x: config.get_node_info(x).ip return [l(n) for n in self.all_nodes] + @property + def all_nameservers(self): + l = lambda x: config.get_node_info(x).nameserver + return set([l(n) for n in self.all_nodes]) + def get(self, role): try: return config.get(self.setup, role) diff --git a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf index 617b0f43a..1f428ce80 100644 --- a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf +++ b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf @@ -69,6 +69,13 @@ # # The maximum interval (in seconds) for consequent backend object map checks #PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1 +# +# Enable deletion of mapfiles after deleting a version of some object. +# This is option is *unsafe* for installatioins prior to Synnefo version +# 0.16rc1 (commit 13d49ad) which may still include Markle-hashes and not +# Archipelago mapfiles in Pithos database. 
diff --git a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf
index 617b0f43a..1f428ce80 100644
--- a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf
+++ b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf
@@ -69,6 +69,13 @@
 #
 # The maximum interval (in seconds) for consequent backend object map checks
 #PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1
+#
+# Enable deletion of mapfiles after deleting a version of some object.
+# This option is *unsafe* for installations prior to Synnefo version
+# 0.16rc1 (commit 13d49ad), which may still include Merkle hashes and not
+# Archipelago mapfiles in the Pithos database.
+#PITHOS_BACKEND_PURGE_MAPFILES = False
+#
 # The archipelago mapfile prefix (it should not exceed 15 characters)
 # WARNING: Once set it should not be changed
 #PITHOS_BACKEND_MAPFILE_PREFIX='snf_file_'
diff --git a/snf-pithos-app/pithos/api/settings.py b/snf-pithos-app/pithos/api/settings.py
index ac2639a12..7e76b2bf4 100644
--- a/snf-pithos-app/pithos/api/settings.py
+++ b/snf-pithos-app/pithos/api/settings.py
@@ -185,6 +185,10 @@ def __str__(self):
 BACKEND_MAP_CHECK_INTERVAL = getattr(settings,
                                      'PITHOS_BACKEND_MAP_CHECK_INTERVAL', 5)
 
+# Whether to delete mapfiles or not
+BACKEND_PURGE_MAPFILES = getattr(settings, 'PITHOS_BACKEND_PURGE_MAPFILES',
+                                 False)
+
 # The archipelago mapfile prefix (it should not exceed 15 characters)
 # WARNING: Once set it should not be changed
 BACKEND_MAPFILE_PREFIX = getattr(settings,
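Since the Pithos settings files are plain Django/Python settings, enabling the new behaviour on an installation that is known to contain only Archipelago mapfiles would look roughly like this (illustrative snippet, not part of the patch):

    # in the pithos-app settings file; only safe on installations created
    # after Synnefo 0.16rc1 -- see the warning above.
    PITHOS_BACKEND_PURGE_MAPFILES = True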
+ """ #update statistics c1 = select([self.nodes.c.node], @@ -389,21 +391,24 @@ def node_purge_children(self, parent, before=inf, cluster=0, row = r.fetchone() r.close() if not row: - return (), 0, () + return (), 0, (), () nr, size = row[0], row[1] if row[1] else 0 mtime = time() self.statistics_update(parent, -nr, -size, mtime, cluster) self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - s = select([self.versions.c.hash, self.versions.c.serial]) + s = select([self.versions.c.hash, self.versions.c.serial, + self.versions.c.mapfile]) s = s.where(where_clause) r = self.conn.execute(s) hashes = [] serials = [] + mapfiles = [] for row in r.fetchall(): - hashes += [row[0]] - serials += [row[1]] + hashes.append(row[0]) + serials.append(row[1]) + mapfiles.append(row[2]) r.close() #delete versions @@ -424,14 +429,16 @@ def node_purge_children(self, parent, before=inf, cluster=0, s = self.nodes.delete().where(self.nodes.c.node.in_(nodes)) self.conn.execute(s).close() - return hashes, size, serials + return hashes, size, serials, mapfiles def node_purge(self, node, before=inf, cluster=0, update_statistics_ancestors_depth=None): - """Delete all versions with the specified - node and cluster, and return - the hashes and size of versions deleted. - Clears out the node if it has no remaining versions. + """Delete all versions with the specified node and cluster. + + Returns the hashes, size and the name of the mapfiles of the versions + that have been deleted. Clears out the node if it has no remaining + versions. + """ #update statistics @@ -448,19 +455,22 @@ def node_purge(self, node, before=inf, cluster=0, nr, size = row[0], row[1] r.close() if not nr: - return (), 0, () + return (), 0, (), () mtime = time() self.statistics_update_ancestors(node, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - s = select([self.versions.c.hash, self.versions.c.serial]) + s = select([self.versions.c.hash, self.versions.c.serial, + self.versions.c.mapfile]) s = s.where(where_clause) r = self.conn.execute(s) hashes = [] serials = [] + mapfiles = [] for row in r.fetchall(): - hashes += [row[0]] - serials += [row[1]] + hashes.append(row[0]) + serials.append(row[1]) + mapfiles.append(row[2]) r.close() #delete versions @@ -481,7 +491,7 @@ def node_purge(self, node, before=inf, cluster=0, s = self.nodes.delete().where(self.nodes.c.node.in_(nodes)) self.conn.execute(s).close() - return hashes, size, serials + return hashes, size, serials, mapfiles def node_remove(self, node, update_statistics_ancestors_depth=None): """Remove the node specified. @@ -1028,6 +1038,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): hash = props.hash size = props.size cluster = props.cluster + mapfile = props.mapfile mtime = time() self.statistics_update_ancestors(node, -1, -size, mtime, cluster, @@ -1040,7 +1051,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): if props: self.nodes_set_latest_version(node, serial) - return hash, size + return hash, size, mapfile def attribute_get_domains(self, serial, node=None): node = node or select([self.versions.c.node], diff --git a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py index a66a076ca..0f690e3b9 100644 --- a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py +++ b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. 
diff --git a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py
index a66a076ca..0f690e3b9 100644
--- a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py
+++ b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010-2014 GRNET S.A.
+# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -269,10 +269,12 @@ def node_count_children(self, node):
     def node_purge_children(self, parent, before=inf, cluster=0,
                             update_statistics_ancestors_depth=None):
-        """Delete all versions with the specified
-        parent and cluster, and return
-        the hashes, the size and the serials of versions deleted.
-        Clears out nodes with no remaining versions.
+        """Delete all versions with the specified parent and cluster.
+
+        Returns the hashes, size, serials and names of the mapfiles of the
+        versions that have been deleted. Clears out nodes with no remaining
+        versions.
+
+        """
 
         execute = self.execute
@@ -286,13 +288,13 @@ def node_purge_children(self, parent, before=inf, cluster=0,
         execute(q, args)
         nr, size = self.fetchone()
         if not nr:
-            return (), 0, ()
+            return (), 0, (), ()
         mtime = time()
         self.statistics_update(parent, -nr, -size, mtime, cluster)
         self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster,
                                          update_statistics_ancestors_depth)
 
-        q = ("select hash, serial from versions "
+        q = ("select hash, serial, mapfile from versions "
             "where node in (select node "
             "from nodes "
             "where parent = ?) "
@@ -301,9 +303,11 @@ def node_purge_children(self, parent, before=inf, cluster=0,
         execute(q, args)
         hashes = []
         serials = []
+        mapfiles = []
         for r in self.fetchall():
-            hashes += [r[0]]
-            serials += [r[1]]
+            hashes.append(r[0])
+            serials.append(r[1])
+            mapfiles.append(r[2])
 
         q = ("delete from versions "
             "where node in (select node "
@@ -319,14 +323,16 @@ def node_purge_children(self, parent, before=inf, cluster=0,
             "where node = n.node) = 0 "
             "and parent = ?)")
         execute(q, (parent,))
-        return hashes, size, serials
+        return hashes, size, serials, mapfiles
 
     def node_purge(self, node, before=inf, cluster=0,
                    update_statistics_ancestors_depth=None):
-        """Delete all versions with the specified
-        node and cluster, and return
-        the hashes, the size and the serials of versions deleted.
-        Clears out the node if it has no remaining versions.
+        """Delete all versions with the specified node and cluster.
+
+        Returns the hashes, size, serials and the names of the mapfiles of the
+        versions that have been deleted. Clears out the node if it has no
+        remaining versions.
+
+        """
 
         execute = self.execute
@@ -338,21 +344,23 @@ def node_purge(self, node, before=inf, cluster=0,
         execute(q, args)
         nr, size = self.fetchone()
         if not nr:
-            return (), 0, ()
+            return (), 0, (), ()
         mtime = time()
         self.statistics_update_ancestors(node, -nr, -size, mtime, cluster,
                                          update_statistics_ancestors_depth)
 
-        q = ("select hash, serial from versions "
+        q = ("select hash, serial, mapfile from versions "
             "where node = ? "
             "and cluster = ? "
             "and mtime <= ?")
         execute(q, args)
         hashes = []
         serials = []
+        mapfiles = []
         for r in self.fetchall():
-            hashes += [r[0]]
-            serials += [r[1]]
+            hashes.append(r[0])
+            serials.append(r[1])
+            mapfiles.append(r[2])
 
         q = ("delete from versions "
             "where node = ? "
@@ -366,7 +374,7 @@ def node_purge(self, node, before=inf, cluster=0,
             "where node = n.node) = 0 "
             "and node = ?)")
         execute(q, (node,))
-        return hashes, size, serials
+        return hashes, size, serials, mapfiles
 
     def node_remove(self, node, update_statistics_ancestors_depth=None):
         """Remove the node specified.
@@ -744,6 +752,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None):
         hash = props[self.HASH]
         size = props[self.SIZE]
         cluster = props[self.CLUSTER]
+        mapfile = props[self.MAPFILE]
 
         mtime = time()
         self.statistics_update_ancestors(node, -1, -size, mtime, cluster,
@@ -755,7 +764,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None):
         props = self.version_lookup(node, cluster=cluster, all_props=False)
         if props:
             self.nodes_set_latest_version(node, props[0])
-        return hash, size
+        return hash, size, mapfile
 
     def attribute_get_domains(self, serial, node=None):
         q = ("select distinct domain from attributes "
diff --git a/snf-pithos-backend/pithos/backends/modular.py b/snf-pithos-backend/pithos/backends/modular.py
index c9fe546a5..733d9533e 100644
--- a/snf-pithos-backend/pithos/backends/modular.py
+++ b/snf-pithos-backend/pithos/backends/modular.py
@@ -235,6 +235,7 @@ def __init__(self,
                  service_token=None,
                  astakosclient_poolsize=None,
                  free_versioning=True,
+                 purge_mapfiles=False,
                  block_params=None,
                  public_url_security=DEFAULT_PUBLIC_URL_SECURITY,
                  public_url_alphabet=DEFAULT_PUBLIC_URL_ALPHABET,
@@ -275,6 +276,7 @@ def __init__(self,
         self.hash_algorithm = hash_algorithm
         self.block_size = block_size
         self.free_versioning = free_versioning
+        self.purge_mapfiles = purge_mapfiles
         self.map_check_interval = map_check_interval
         self.mapfile_prefix = mapfile_prefix
         self.resource_max_metadata = resource_max_metadata
@@ -767,8 +769,10 @@ def update_container_meta(self, user, account, container, domain, meta,
             versioning = self._get_policy(
                 node, is_account_policy=False)[VERSIONING_POLICY]
             if versioning != 'auto':
-                self.node.version_remove(src_version_id,
-                                         update_statistics_ancestors_depth=0)
+                _, _, mapfile = self.node.version_remove(
+                    src_version_id, update_statistics_ancestors_depth=0)
+                if self.purge_mapfiles:
+                    self.store.map_delete(mapfile)
 
     @debug_method
     @backend_method
@@ -872,11 +876,12 @@ def delete_container(self, user, account, container, until=None, prefix='',
         project = self._get_project(node)
 
         if until is not None:
-            hashes, size, _ = self.node.node_purge_children(
+            _, size, _, mapfiles = self.node.node_purge_children(
                 node, until, CLUSTER_HISTORY,
                 update_statistics_ancestors_depth=0)
-            for h in hashes:
-                self.store.map_delete(h)
+            if self.purge_mapfiles:
+                for m in mapfiles:
+                    self.store.map_delete(m)
             self.node.node_purge_children(node, until, CLUSTER_DELETED,
                                           update_statistics_ancestors_depth=0)
             if not self.free_versioning:
@@ -887,11 +892,12 @@ def delete_container(self, user, account, container, until=None, prefix='',
         if not delimiter:
             if self._get_statistics(node)[0] > 0:
                 raise ContainerNotEmpty("Container is not empty")
-            hashes, size, _ = self.node.node_purge_children(
+            _, size, _, mapfiles = self.node.node_purge_children(
                 node, inf, CLUSTER_HISTORY,
                 update_statistics_ancestors_depth=0)
-            for h in hashes:
-                self.store.map_delete(h)
+            if self.purge_mapfiles:
+                for m in mapfiles:
+                    self.store.map_delete(m)
             self.node.node_purge_children(node, inf, CLUSTER_DELETED,
                                           update_statistics_ancestors_depth=0)
             self.node.node_remove(node, update_statistics_ancestors_depth=0)
@@ -1863,19 +1869,23 @@ def _delete_object(self, user, account, container, name, until=None,
         if until is not None:
             if node is None:
                 return
-            hashes = []
             size = 0
-            h, s, _ = self.node.node_purge(node, until, CLUSTER_NORMAL,
-                                           update_statistics_ancestors_depth=1)
-            hashes += h
+            mapfiles = []
+            _, s, _, m = self.node.node_purge(
+                node, until, CLUSTER_NORMAL,
+                update_statistics_ancestors_depth=1)
             size += s
-            h, s, _ = self.node.node_purge(node, until, CLUSTER_HISTORY,
-                                           update_statistics_ancestors_depth=1)
-            hashes += h
+            mapfiles.extend(m)
+            _, s, _, m = self.node.node_purge(
+                node, until, CLUSTER_HISTORY,
+                update_statistics_ancestors_depth=1)
+            mapfiles.extend(m)
             if not self.free_versioning:
                 size += s
-            for h in hashes:
-                self.store.map_delete(h)
+            if self.purge_mapfiles:
+                for m in mapfiles:
+                    self.store.map_delete(m)
+
             self.node.node_purge(node, until, CLUSTER_DELETED,
                                  update_statistics_ancestors_depth=1)
             try:
@@ -1891,7 +1901,7 @@ def _delete_object(self, user, account, container, name, until=None,
 
         # keep reference to the mapfile
         # in case we will want to delete them in the future
-        src_version_id, dest_version_id, _ = self._put_version_duplicate(
+        src_version_id, dest_version_id, mapfile = self._put_version_duplicate(
             user, node, size=0, type='', hash=None, checksum='',
             cluster=CLUSTER_DELETED, update_statistics_ancestors_depth=1,
             keep_src_mapfile=True)
@@ -2475,9 +2485,10 @@ def _apply_versioning(self, account, container, version_id,
         versioning = self._get_policy(
             node, is_account_policy=False)[VERSIONING_POLICY]
         if versioning != 'auto':
-            hash, size = self.node.version_remove(
+            _, size, mapfile = self.node.version_remove(
                 version_id, update_statistics_ancestors_depth)
-            self.store.map_delete(hash)
+            if self.purge_mapfiles:
+                self.store.map_delete(mapfile)
             return size
         elif self.free_versioning:
             return self.node.version_get_properties(
diff --git a/snf-pithos-backend/pithos/backends/util.py b/snf-pithos-backend/pithos/backends/util.py
index bbbc4e66b..4d997716c 100644
--- a/snf-pithos-backend/pithos/backends/util.py
+++ b/snf-pithos-backend/pithos/backends/util.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2010-2014 GRNET S.A.
+# Copyright (C) 2010-2016 GRNET S.A. and individual contributors
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -13,21 +13,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import logging
+
 from objpool import ObjectPool
 from new import instancemethod
 from select import select
 from traceback import print_exc
 
 from pithos.backends import connect_backend
 
+log = logging.getLogger(__name__)
+
 USAGE_LIMIT = 500
 
 
 class PithosBackendPool(ObjectPool):
     def __init__(self, size=None, **kwargs):
         super(PithosBackendPool, self).__init__(size=size)
+        log.debug("Initializing PithosBackendPool")
         self.backend_kwargs = kwargs
 
     def _pool_create(self):
+        log.debug("Creating pool")
         backend = connect_backend(**self.backend_kwargs)
         backend._real_close = backend.close
         backend.close = instancemethod(_pooled_backend_close, backend,
@@ -38,6 +44,7 @@ def _pool_create(self):
         return backend
 
     def _pool_verify(self, backend):
+        log.debug("Verifying pool %s", backend)
         wrapper = backend.wrapper
         conn = wrapper.conn
         if conn.closed:
@@ -62,9 +69,11 @@ def _pool_verify(self, backend):
                 print_exc()
                 return False
 
+        log.debug("Pool %s ok", backend)
         return True
 
     def _pool_cleanup(self, backend):
+        log.debug("Cleaning up pool %s", backend)
         c = backend._use_count - 1
         if c < 0:
             backend._real_close()
@@ -84,6 +93,7 @@ def _pool_cleanup(self, backend):
     def shutdown(self):
         while True:
             backend = self.pool_get(create=False)
+            log.debug("Shutting down pool %s", backend)
             if backend is None:
                 break
             self.pool_put(None)
@@ -91,4 +101,5 @@ def shutdown(self):
 
 
 def _pooled_backend_close(backend):
+    log.debug("Closing pool %s", backend)
     backend._pool.pool_put(backend)
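The new messages use the module logger ("pithos.backends.util"), so they only appear once that logger is switched to DEBUG. A minimal, illustrative way to do that from any bootstrap or Django logging configuration code:

    import logging
    # raise only the pool logger to DEBUG, leaving the rest of pithos quiet
    logging.getLogger("pithos.backends.util").setLevel(logging.DEBUG)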
diff --git a/snf-tools/synnefo_tools/burnin/__init__.py b/snf-tools/synnefo_tools/burnin/__init__.py
index 0be4eaeac..0a45de223 100644
--- a/snf-tools/synnefo_tools/burnin/__init__.py
+++ b/snf-tools/synnefo_tools/burnin/__init__.py
@@ -21,6 +21,8 @@
 import sys
 import optparse
 
+from importlib import import_module
+
 from synnefo_tools import version
 from synnefo_tools.burnin import common
 from synnefo_tools.burnin.astakos_tests import AstakosTestSuite
@@ -58,9 +60,24 @@
 STALE_TSUITES_NAMES = [tsuite.__name__ for tsuite in STALE_TESTSUITES]
 
 
-def string_to_class(names):
-    """Convert class namesto class objects"""
-    return [eval(name) for name in names]
+def string_to_class(name):
+    """Convert a class name to a class object"""
+    try:
+        # The class is already known and imported
+        return eval(name)
+    except NameError:
+        pass
+
+    try:
+        # Try to find the given class, assuming it is in the form
+        # module[.submodule...].class
+        module_name, class_name = name.rsplit(".", 1)
+        mod = import_module(module_name)
+        return getattr(mod, class_name)
+    except (ValueError, ImportError, AttributeError):
+        pass
+
+    raise RuntimeError("Test Suite `%s' does not exist" % name)
 
 
 # --------------------------------------------------------------------
@@ -148,6 +165,10 @@ def parse_arguments(args):
         "--log-folder", action="store",
         type="string", default="/var/log/burnin/", dest="log_folder",
         help="Define the absolute path where the output log is stored")
+    parser.add_option(
+        "--state-folder", action="store",
+        type="string", default="/var/lib/burnin/", dest="state_folder",
+        help="Define the absolute path where various test data is stored")
     parser.add_option(
         "--verbose", "-v", action="store",
         type="int", default=1, dest="verbose",
@@ -222,14 +243,6 @@ def parse_arguments(args):
     if opts.quiet:
         opts.log_level = 2
 
-    # Check `--set-tests' and `--exclude-tests' options
-    if opts.tests != "all" and \
-            not (set(opts.tests)).issubset(set(TSUITES_NAMES)):
-        raise optparse.OptionValueError("The selected set of tests is invalid")
-    if opts.exclude_tests is not None and \
-            not (set(opts.exclude_tests)).issubset(set(TSUITES_NAMES)):
-        raise optparse.OptionValueError("The selected set of tests is invalid")
-
     # `token' is mandatory
     mandatory_argument(opts.token, "--token")
     # `auth_url' is mandatory
@@ -250,6 +263,26 @@ def mandatory_argument(value, arg_name):
         sys.exit("Invalid input")
 
 
+def find_final_test_suites(opts):
+    """Parse opts and return the final test suite classes."""
+
+    if opts.show_stale:
+        # We will run the stale_testsuites
+        return STALE_TESTSUITES
+
+    # By default run all test suites
+    names = TSUITES_NAMES
+    # If --set-tests was given, take it into account
+    if opts.tests != "all":
+        names = opts.tests
+    # Remove any excluded test
+    if opts.exclude_tests is not None:
+        names = [tsuite for tsuite in names
+                 if tsuite not in opts.exclude_tests]
+
+    return [string_to_class(name) for name in names]
+
+
 # --------------------------------------------------------------------
 # Burnin main function
 def main():
@@ -266,10 +299,13 @@ def main():
     # Parse arguments using `optparse'
     (opts, _) = parse_arguments(sys.argv[1:])
 
+    testsuites = find_final_test_suites(opts)
+
     # Initialize burnin
-    (testsuites, failfast) = \
-        common.initialize(opts, TSUITES_NAMES, STALE_TSUITES_NAMES)
-    testsuites = string_to_class(testsuites)
+    common.initialize(opts)
+
+    # In case we clean up we have to fail fast
+    failfast = True if opts.show_stale else opts.failfast
 
     # Run burnin
     # The return value denotes the success status
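A short, illustrative example of what the reworked string_to_class() now accepts: either a bare name that is already imported in this module, or a fully dotted path that is imported on demand (the dotted example reuses the AstakosTestSuite import shown above):

    string_to_class("AstakosTestSuite")
    string_to_class("synnefo_tools.burnin.astakos_tests.AstakosTestSuite")
    # Both return the AstakosTestSuite class; unknown names raise RuntimeError.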
diff --git a/snf-tools/synnefo_tools/burnin/common.py b/snf-tools/synnefo_tools/burnin/common.py
index 116dae61a..7de4878a2 100644
--- a/snf-tools/synnefo_tools/burnin/common.py
+++ b/snf-tools/synnefo_tools/burnin/common.py
@@ -18,6 +18,7 @@
 """
 
+import os
 import hashlib
 import re
 import shutil
@@ -763,7 +764,7 @@ def _get_merkle_hash(self, data):
 
 # --------------------------------------------------------------------
 # Initialize Burnin
-def initialize(opts, testsuites, stale_testsuites):
+def initialize(opts):
     """Initalize burnin
 
     Initialize our logger and burnin state
@@ -792,24 +793,20 @@ def initialize(opts, testsuites, stale_testsuites):
     BurninTests.delete_stale = opts.delete_stale
     BurninTests.temp_directory = opts.temp_directory
     BurninTests.failfast = opts.failfast
-    BurninTests.run_id = SNF_TEST_PREFIX + \
-        datetime.datetime.strftime(curr_time, "%Y%m%d%H%M%S")
     BurninTests.obj_upload_num = opts.obj_upload_num
     BurninTests.obj_upload_min_size = opts.obj_upload_min_size
     BurninTests.obj_upload_max_size = opts.obj_upload_max_size
 
-    # Choose tests to run
-    if opts.show_stale:
-        # We will run the stale_testsuites
-        return (stale_testsuites, True)
+    run_id = SNF_TEST_PREFIX + \
+        datetime.datetime.strftime(curr_time, "%Y%m%d%H%M%S")
 
-    if opts.tests != "all":
-        testsuites = opts.tests
-    if opts.exclude_tests is not None:
-        testsuites = [tsuite for tsuite in testsuites
-                      if tsuite not in opts.exclude_tests]
+    # Do not create the state dir yet.
+    # Let the test suites do so if they have something to store there.
+    # Otherwise we would end up with a lot of empty folders.
+    state_dir = os.path.join(opts.state_folder, run_id)
 
-    return (testsuites, opts.failfast)
+    BurninTests.state_dir = state_dir
+    BurninTests.run_id = run_id
 
 
 # --------------------------------------------------------------------
diff --git a/snf-tools/synnefo_tools/burnin/logger.py b/snf-tools/synnefo_tools/burnin/logger.py
index 66fcf9374..850d19048 100644
--- a/snf-tools/synnefo_tools/burnin/logger.py
+++ b/snf-tools/synnefo_tools/burnin/logger.py
@@ -33,7 +33,6 @@
 import os
 import sys
-import os.path
 import logging
 import datetime
diff --git a/snf-tools/synnefo_tools/burnin/snapshots.py b/snf-tools/synnefo_tools/burnin/snapshots.py
index 5941dab13..20e4e4022 100644
--- a/snf-tools/synnefo_tools/burnin/snapshots.py
+++ b/snf-tools/synnefo_tools/burnin/snapshots.py
@@ -42,12 +42,7 @@ def test_001_submit_create_snapshot(self):
         """Create a server and take a snapshot"""
         self.account = self._get_uuid()
         use_image = random.choice(self._parse_images())
-        archipelago_flavors = \
-            [f for f in self._parse_flavors() if
-             f['SNF:disk_template'].startswith('ext_archipelago')]
-        self.assertGreater(len(archipelago_flavors), 0,
-                           "No 'archipelago' disk template found")
-        self.use_flavor = random.choice(archipelago_flavors)
+        self.use_flavor = random.choice(self._parse_flavors())
         if self._image_is(use_image, "linux"):
             # Enforce personality test
             self.info("Creating personality content to be used")
diff --git a/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py b/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py
new file mode 100644
index 000000000..a2f4a915b
--- /dev/null
+++ b/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -
+#
+# Copyright (C) 2016 GRNET S.A. and individual contributors
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import logging
+
+
+def when_ready(server):
+    """Hook function to redirect stderr/stdout to logfile.
+
+    Hook function that runs when the gunicorn server is ready and is
+    responsible for redirecting stdout and stderr to the file descriptor of
+    the first logging FileHandler.
+
+    FIXME: Handle logfile rotation.
+
+    """
+
+    server.log.info("Server ready, redirecting stdout/stderr to logfile")
+
+    # Use the already opened file of the first registered FileHandler
+    for h in server.log.error_log.handlers:
+        if isinstance(h, logging.FileHandler):
+            name = h.stream.name
+            fd = h.stream.fileno()
+
+            server.log.info("Redirecting stdout/stderr to %s (%s)", name, fd)
+
+            os.dup2(fd, 1)
+            os.dup2(fd, 2)
+
+            break
+    else:
+        server.log.warn("Could not find file handler!")
+
+    return
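Gunicorn evaluates its configuration file as plain Python and calls a module-level when_ready(server) once the master is up, so activating this hook amounts to making the function above visible in that config file. A minimal illustrative sketch for a Python 2 deployment (the path is a placeholder, not the installed location, which the patch does not show):

    # inside the gunicorn config file (itself Python):
    execfile("/path/to/gunicorn-stderr-logging.py")  # defines when_ready(server)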