From 982d5cda741447219732d18418e3ad936b4cc104 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Wed, 13 Jan 2016 16:57:56 +0200 Subject: [PATCH 01/11] backend: Add some logging regarding pool handling Signed-off-by: Dimitris Aragiorgis --- snf-pithos-backend/pithos/backends/util.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/snf-pithos-backend/pithos/backends/util.py b/snf-pithos-backend/pithos/backends/util.py index bbbc4e66b..4d997716c 100644 --- a/snf-pithos-backend/pithos/backends/util.py +++ b/snf-pithos-backend/pithos/backends/util.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -13,21 +13,27 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import logging + from objpool import ObjectPool from new import instancemethod from select import select from traceback import print_exc from pithos.backends import connect_backend +log = logging.getLogger(__name__) + USAGE_LIMIT = 500 class PithosBackendPool(ObjectPool): def __init__(self, size=None, **kwargs): super(PithosBackendPool, self).__init__(size=size) + log.debug("Initializing PithosBackendPool") self.backend_kwargs = kwargs def _pool_create(self): + log.debug("Creating pool") backend = connect_backend(**self.backend_kwargs) backend._real_close = backend.close backend.close = instancemethod(_pooled_backend_close, backend, @@ -38,6 +44,7 @@ def _pool_create(self): return backend def _pool_verify(self, backend): + log.debug("Verifying pool %s", backend) wrapper = backend.wrapper conn = wrapper.conn if conn.closed: @@ -62,9 +69,11 @@ def _pool_verify(self, backend): print_exc() return False + log.debug("Pool %s ok", backend) return True def _pool_cleanup(self, backend): + log.debug("Cleaning up pool %s", backend) c = backend._use_count - 1 if c < 0: backend._real_close() @@ -84,6 +93,7 @@ def _pool_cleanup(self, backend): def shutdown(self): while True: backend = self.pool_get(create=False) + log.debug("Shutting down pool %s", backend) if backend is None: break self.pool_put(None) @@ -91,4 +101,5 @@ def shutdown(self): def _pooled_backend_close(backend): + log.debug("Closing pool %s", backend) backend._pool.pool_put(backend) From 9288e13189eba6670035f7abd72285655a0a6a5f Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Wed, 13 Jan 2016 16:58:52 +0200 Subject: [PATCH 02/11] pithos: Delete mapfiles when purging nodes Versions in database have a Merkle hash of a given content address-able object and a backend-specific mapfile that indexes the corresponding block hashes that the object consists of. When purging a node, deleting a container, or explicitly removing a version we call the corresponding store method (i.e. map_delete()) for deleting the version's mapfile. 
Signed-off-by: Dimitris Aragiorgis --- .../pithos/backends/lib/sqlalchemy/node.py | 51 +++++++++++-------- .../pithos/backends/lib/sqlite/node.py | 49 ++++++++++-------- snf-pithos-backend/pithos/backends/modular.py | 44 ++++++++-------- 3 files changed, 84 insertions(+), 60 deletions(-) diff --git a/snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py index 6cea7f36f..31e46ee3a 100644 --- a/snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py +++ b/snf-pithos-backend/pithos/backends/lib/sqlalchemy/node.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -369,10 +369,12 @@ def node_count_children(self, node): def node_purge_children(self, parent, before=inf, cluster=0, update_statistics_ancestors_depth=None): - """Delete all versions with the specified - parent and cluster, and return - the hashes, the total size and the serials of versions deleted. - Clears out nodes with no remaining versions. + """Delete all versions with the specified parent and cluster. + + Returns the hashes, the total size, serials and the names of the + mapfiles of the versions that have been deleted. Clears out nodes with + no remaining versions. + """ #update statistics c1 = select([self.nodes.c.node], @@ -389,21 +391,24 @@ def node_purge_children(self, parent, before=inf, cluster=0, row = r.fetchone() r.close() if not row: - return (), 0, () + return (), 0, (), () nr, size = row[0], row[1] if row[1] else 0 mtime = time() self.statistics_update(parent, -nr, -size, mtime, cluster) self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - s = select([self.versions.c.hash, self.versions.c.serial]) + s = select([self.versions.c.hash, self.versions.c.serial, + self.versions.c.mapfile]) s = s.where(where_clause) r = self.conn.execute(s) hashes = [] serials = [] + mapfiles = [] for row in r.fetchall(): - hashes += [row[0]] - serials += [row[1]] + hashes.append(row[0]) + serials.append(row[1]) + mapfiles.append(row[2]) r.close() #delete versions @@ -424,14 +429,16 @@ def node_purge_children(self, parent, before=inf, cluster=0, s = self.nodes.delete().where(self.nodes.c.node.in_(nodes)) self.conn.execute(s).close() - return hashes, size, serials + return hashes, size, serials, mapfiles def node_purge(self, node, before=inf, cluster=0, update_statistics_ancestors_depth=None): - """Delete all versions with the specified - node and cluster, and return - the hashes and size of versions deleted. - Clears out the node if it has no remaining versions. + """Delete all versions with the specified node and cluster. + + Returns the hashes, size and the name of the mapfiles of the versions + that have been deleted. Clears out the node if it has no remaining + versions. 
+ """ #update statistics @@ -448,19 +455,22 @@ def node_purge(self, node, before=inf, cluster=0, nr, size = row[0], row[1] r.close() if not nr: - return (), 0, () + return (), 0, (), () mtime = time() self.statistics_update_ancestors(node, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - s = select([self.versions.c.hash, self.versions.c.serial]) + s = select([self.versions.c.hash, self.versions.c.serial, + self.versions.c.mapfile]) s = s.where(where_clause) r = self.conn.execute(s) hashes = [] serials = [] + mapfiles = [] for row in r.fetchall(): - hashes += [row[0]] - serials += [row[1]] + hashes.append(row[0]) + serials.append(row[1]) + mapfiles.append(row[2]) r.close() #delete versions @@ -481,7 +491,7 @@ def node_purge(self, node, before=inf, cluster=0, s = self.nodes.delete().where(self.nodes.c.node.in_(nodes)) self.conn.execute(s).close() - return hashes, size, serials + return hashes, size, serials, mapfiles def node_remove(self, node, update_statistics_ancestors_depth=None): """Remove the node specified. @@ -1028,6 +1038,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): hash = props.hash size = props.size cluster = props.cluster + mapfile = props.mapfile mtime = time() self.statistics_update_ancestors(node, -1, -size, mtime, cluster, @@ -1040,7 +1051,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): if props: self.nodes_set_latest_version(node, serial) - return hash, size + return hash, size, mapfile def attribute_get_domains(self, serial, node=None): node = node or select([self.versions.c.node], diff --git a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py index a66a076ca..0f690e3b9 100644 --- a/snf-pithos-backend/pithos/backends/lib/sqlite/node.py +++ b/snf-pithos-backend/pithos/backends/lib/sqlite/node.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -269,10 +269,12 @@ def node_count_children(self, node): def node_purge_children(self, parent, before=inf, cluster=0, update_statistics_ancestors_depth=None): - """Delete all versions with the specified - parent and cluster, and return - the hashes, the size and the serials of versions deleted. - Clears out nodes with no remaining versions. + """Delete all versions with the specified parent and cluster. + + Returns the hashes, size, serials and names of the mapfiles of the + versions that have been deleted. Clears out nodes with no remaining + versions. + """ execute = self.execute @@ -286,13 +288,13 @@ def node_purge_children(self, parent, before=inf, cluster=0, execute(q, args) nr, size = self.fetchone() if not nr: - return (), 0, () + return (), 0, (), () mtime = time() self.statistics_update(parent, -nr, -size, mtime, cluster) self.statistics_update_ancestors(parent, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - q = ("select hash, serial from versions " + q = ("select hash, serial, mapfile from versions " "where node in (select node " "from nodes " "where parent = ?) 
" @@ -301,9 +303,11 @@ def node_purge_children(self, parent, before=inf, cluster=0, execute(q, args) hashes = [] serials = [] + mapfiles = [] for r in self.fetchall(): - hashes += [r[0]] - serials += [r[1]] + hashes.append(r[0]) + serials.append(r[1]) + mapfiles.append(r[2]) q = ("delete from versions " "where node in (select node " @@ -319,14 +323,16 @@ def node_purge_children(self, parent, before=inf, cluster=0, "where node = n.node) = 0 " "and parent = ?)") execute(q, (parent,)) - return hashes, size, serials + return hashes, size, serials, mapfiles def node_purge(self, node, before=inf, cluster=0, update_statistics_ancestors_depth=None): - """Delete all versions with the specified - node and cluster, and return - the hashes, the size and the serials of versions deleted. - Clears out the node if it has no remaining versions. + """Delete all versions with the specified node and cluster. + + Return the hashes, size, serials and the names o the mapfiles of the + versions that have been deleted. Clears out the node if it has no + remaining versions. + """ execute = self.execute @@ -338,21 +344,23 @@ def node_purge(self, node, before=inf, cluster=0, execute(q, args) nr, size = self.fetchone() if not nr: - return (), 0, () + return (), 0, (), () mtime = time() self.statistics_update_ancestors(node, -nr, -size, mtime, cluster, update_statistics_ancestors_depth) - q = ("select hash, serial from versions " + q = ("select hash, serial, mapfile from versions " "where node = ? " "and cluster = ? " "and mtime <= ?") execute(q, args) hashes = [] serials = [] + mapfiles = [] for r in self.fetchall(): - hashes += [r[0]] - serials += [r[1]] + hashes.append(r[0]) + serials.append(r[1]) + mapfiles.append(r[2]) q = ("delete from versions " "where node = ? " @@ -366,7 +374,7 @@ def node_purge(self, node, before=inf, cluster=0, "where node = n.node) = 0 " "and node = ?)") execute(q, (node,)) - return hashes, size, serials + return hashes, size, serials, mapfiles def node_remove(self, node, update_statistics_ancestors_depth=None): """Remove the node specified. 
@@ -744,6 +752,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): hash = props[self.HASH] size = props[self.SIZE] cluster = props[self.CLUSTER] + mapfile = props[self.MAPFILE] mtime = time() self.statistics_update_ancestors(node, -1, -size, mtime, cluster, @@ -755,7 +764,7 @@ def version_remove(self, serial, update_statistics_ancestors_depth=None): props = self.version_lookup(node, cluster=cluster, all_props=False) if props: self.nodes_set_latest_version(node, props[0]) - return hash, size + return hash, size, mapfile def attribute_get_domains(self, serial, node=None): q = ("select distinct domain from attributes " diff --git a/snf-pithos-backend/pithos/backends/modular.py b/snf-pithos-backend/pithos/backends/modular.py index c9fe546a5..47fa2c96e 100644 --- a/snf-pithos-backend/pithos/backends/modular.py +++ b/snf-pithos-backend/pithos/backends/modular.py @@ -767,8 +767,9 @@ def update_container_meta(self, user, account, container, domain, meta, versioning = self._get_policy( node, is_account_policy=False)[VERSIONING_POLICY] if versioning != 'auto': - self.node.version_remove(src_version_id, - update_statistics_ancestors_depth=0) + _ , _, mapfile = self.node.version_remove( + src_version_id, update_statistics_ancestors_depth=0) + self.store.map_delete(mapfile) @debug_method @backend_method @@ -872,11 +873,11 @@ def delete_container(self, user, account, container, until=None, prefix='', project = self._get_project(node) if until is not None: - hashes, size, _ = self.node.node_purge_children( + _, size, _, mapfiles = self.node.node_purge_children( node, until, CLUSTER_HISTORY, update_statistics_ancestors_depth=0) - for h in hashes: - self.store.map_delete(h) + for m in mapfiles: + self.store.map_delete(m) self.node.node_purge_children(node, until, CLUSTER_DELETED, update_statistics_ancestors_depth=0) if not self.free_versioning: @@ -887,11 +888,11 @@ def delete_container(self, user, account, container, until=None, prefix='', if not delimiter: if self._get_statistics(node)[0] > 0: raise ContainerNotEmpty("Container is not empty") - hashes, size, _ = self.node.node_purge_children( + _, size, _, mapfiles = self.node.node_purge_children( node, inf, CLUSTER_HISTORY, update_statistics_ancestors_depth=0) - for h in hashes: - self.store.map_delete(h) + for m in mapfiles: + self.store.map_delete(m) self.node.node_purge_children(node, inf, CLUSTER_DELETED, update_statistics_ancestors_depth=0) self.node.node_remove(node, update_statistics_ancestors_depth=0) @@ -1863,19 +1864,22 @@ def _delete_object(self, user, account, container, name, until=None, if until is not None: if node is None: return - hashes = [] size = 0 - h, s, _ = self.node.node_purge(node, until, CLUSTER_NORMAL, - update_statistics_ancestors_depth=1) - hashes += h + mapfiles = [] + _, s, _, m = self.node.node_purge( + node, until, CLUSTER_NORMAL, + update_statistics_ancestors_depth=1) size += s - h, s, _ = self.node.node_purge(node, until, CLUSTER_HISTORY, - update_statistics_ancestors_depth=1) - hashes += h + mapfiles.extend(m) + _, s, _, m = self.node.node_purge( + node, until, CLUSTER_HISTORY, + update_statistics_ancestors_depth=1) + mapfiles.extend(m) if not self.free_versioning: size += s - for h in hashes: - self.store.map_delete(h) + for m in mapfiles: + self.store.map_delete(m) + self.node.node_purge(node, until, CLUSTER_DELETED, update_statistics_ancestors_depth=1) try: @@ -1891,7 +1895,7 @@ def _delete_object(self, user, account, container, name, until=None, # keep reference to the mapfile # in case 
we will want to delete it in the future - src_version_id, dest_version_id, _ = self._put_version_duplicate( + src_version_id, dest_version_id, mapfile = self._put_version_duplicate( user, node, size=0, type='', hash=None, checksum='', cluster=CLUSTER_DELETED, update_statistics_ancestors_depth=1, keep_src_mapfile=True) @@ -2475,9 +2479,9 @@ def _apply_versioning(self, account, container, version_id, versioning = self._get_policy( node, is_account_policy=False)[VERSIONING_POLICY] if versioning != 'auto': - hash, size = self.node.version_remove( + _, size, mapfile = self.node.version_remove( version_id, update_statistics_ancestors_depth) - self.store.map_delete(hash) + self.store.map_delete(mapfile) return size elif self.free_versioning: return self.node.version_get_properties( From 0c82b9e11f09aa930cef96229eaef5f5bd4a0764 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Fri, 15 Jan 2016 14:17:27 +0200 Subject: [PATCH 03/11] pithos: Add PITHOS_BACKEND_PURGE_MAPFILES setting If PITHOS_BACKEND_PURGE_MAPFILES is true, delete the associated mapfile when deleting a version. This is unsafe for old setups (prior to Synnefo version 0.16rc1/commit 13d49ad), which may still include Merkle hashes instead of Archipelago mapfile names in the Pithos DB. Signed-off-by: Dimitris Aragiorgis --- .../conf/20-snf-pithos-app-settings.conf | 7 ++++++ snf-pithos-app/pithos/api/settings.py | 4 ++++ snf-pithos-app/pithos/api/util.py | 2 ++ snf-pithos-backend/pithos/backends/modular.py | 23 ++++++++++++------- 4 files changed, 28 insertions(+), 8 deletions(-) diff --git a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf index 617b0f43a..1f428ce80 100644 --- a/snf-pithos-app/conf/20-snf-pithos-app-settings.conf +++ b/snf-pithos-app/conf/20-snf-pithos-app-settings.conf @@ -69,6 +69,13 @@ # # The maximum interval (in seconds) for consequent backend object map checks #PITHOS_BACKEND_MAP_CHECK_INTERVAL = 1 +# +# Enable deletion of mapfiles after deleting a version of some object. +# This option is *unsafe* for installations prior to Synnefo version +# 0.16rc1 (commit 13d49ad), which may still include Merkle hashes instead +# of Archipelago mapfiles in the Pithos database. 
+#PITHOS_BACKEND_PURGE_MAPFILES = False +# # The archipelago mapfile prefix (it should not exceed 15 characters) # WARNING: Once set it should not be changed #PITHOS_BACKEND_MAPFILE_PREFIX='snf_file_' diff --git a/snf-pithos-app/pithos/api/settings.py b/snf-pithos-app/pithos/api/settings.py index ac2639a12..7e76b2bf4 100644 --- a/snf-pithos-app/pithos/api/settings.py +++ b/snf-pithos-app/pithos/api/settings.py @@ -185,6 +185,10 @@ def __str__(self): BACKEND_MAP_CHECK_INTERVAL = getattr(settings, 'PITHOS_BACKEND_MAP_CHECK_INTERVAL', 5) +# Whether to delete mapfiles or not +BACKEND_PURGE_MAPFILES = getattr(settings, 'PITHOS_BACKEND_PURGE_MAPFILES', + False) + # The archipelago mapfile prefix (it should not exceed 15 characters) # WARNING: Once set it should not be changed BACKEND_MAPFILE_PREFIX = getattr(settings, diff --git a/snf-pithos-app/pithos/api/util.py b/snf-pithos-app/pithos/api/util.py index 689a18fb1..a1696f498 100644 --- a/snf-pithos-app/pithos/api/util.py +++ b/snf-pithos-app/pithos/api/util.py @@ -38,6 +38,7 @@ from pithos.api.settings import (BACKEND_DB_MODULE, BACKEND_DB_CONNECTION, BACKEND_BLOCK_MODULE, BACKEND_BLOCK_KWARGS, + BACKEND_PURGE_MAPFILES, ASTAKOSCLIENT_POOLSIZE, SERVICE_TOKEN, ASTAKOS_AUTH_URL, @@ -1008,6 +1009,7 @@ def simple_list_response(request, l): service_token=SERVICE_TOKEN, astakosclient_poolsize=ASTAKOSCLIENT_POOLSIZE, free_versioning=BACKEND_FREE_VERSIONING, + purge_mapfiles=BACKEND_PURGE_MAPFILES, block_params=BACKEND_BLOCK_KWARGS, public_url_security=PUBLIC_URL_SECURITY, public_url_alphabet=PUBLIC_URL_ALPHABET, diff --git a/snf-pithos-backend/pithos/backends/modular.py b/snf-pithos-backend/pithos/backends/modular.py index 47fa2c96e..733d9533e 100644 --- a/snf-pithos-backend/pithos/backends/modular.py +++ b/snf-pithos-backend/pithos/backends/modular.py @@ -235,6 +235,7 @@ def __init__(self, service_token=None, astakosclient_poolsize=None, free_versioning=True, + purge_mapfiles=False, block_params=None, public_url_security=DEFAULT_PUBLIC_URL_SECURITY, public_url_alphabet=DEFAULT_PUBLIC_URL_ALPHABET, @@ -275,6 +276,7 @@ def __init__(self, self.hash_algorithm = hash_algorithm self.block_size = block_size self.free_versioning = free_versioning + self.purge_mapfiles = purge_mapfiles self.map_check_interval = map_check_interval self.mapfile_prefix = mapfile_prefix self.resource_max_metadata = resource_max_metadata @@ -769,7 +771,8 @@ def update_container_meta(self, user, account, container, domain, meta, if versioning != 'auto': _ , _, mapfile = self.node.version_remove( src_version_id, update_statistics_ancestors_depth=0) - self.store.map_delete(mapfile) + if self.purge_mapfiles: + self.store.map_delete(mapfile) @debug_method @backend_method @@ -876,8 +879,9 @@ def delete_container(self, user, account, container, until=None, prefix='', _, size, _, mapfiles = self.node.node_purge_children( node, until, CLUSTER_HISTORY, update_statistics_ancestors_depth=0) - for m in mapfiles: - self.store.map_delete(m) + if self.purge_mapfiles: + for m in mapfiles: + self.store.map_delete(m) self.node.node_purge_children(node, until, CLUSTER_DELETED, update_statistics_ancestors_depth=0) if not self.free_versioning: @@ -891,8 +895,9 @@ def delete_container(self, user, account, container, until=None, prefix='', _, size, _, mapfiles = self.node.node_purge_children( node, inf, CLUSTER_HISTORY, update_statistics_ancestors_depth=0) - for m in mapfiles: - self.store.map_delete(m) + if self.purge_mapfiles: + for m in mapfiles: + self.store.map_delete(m) 
self.node.node_purge_children(node, inf, CLUSTER_DELETED, update_statistics_ancestors_depth=0) self.node.node_remove(node, update_statistics_ancestors_depth=0) @@ -1877,8 +1882,9 @@ def _delete_object(self, user, account, container, name, until=None, mapfiles.extend(m) if not self.free_versioning: size += s - for m in mapfiles: - self.store.map_delete(m) + if self.purge_mapfiles: + for m in mapfiles: + self.store.map_delete(m) self.node.node_purge(node, until, CLUSTER_DELETED, update_statistics_ancestors_depth=1) @@ -2481,7 +2487,8 @@ def _apply_versioning(self, account, container, version_id, if versioning != 'auto': _, size, mapfile = self.node.version_remove( version_id, update_statistics_ancestors_depth) - self.store.map_delete(mapfile) + if self.purge_mapfiles: + self.store.map_delete(mapfile) return size elif self.free_versioning: return self.node.version_get_properties( From e38e282ed2cafbffa719d90aa1d515b87e4ac6b2 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Tue, 12 Jan 2016 15:44:12 +0200 Subject: [PATCH 04/11] webproject: Add gunicorn hook for stderr logging By default when gunicorn is invoked with --daemon, it redirects 0, 1, 2 file descriptors to /dev/null and as a result the logs printed on stdout/stderr are lost. This commit adds a gunicorn hook that uses the already opened file of the first registered logging FileHandler, i.e. the file from the '--log-file' Gunicorn configuration option. To enable this hook use --config=/etc/synnefo/gunicorn-hooks/gunicorn-stderr-logging.py option. Signed-off-by: Dimitris Aragiorgis --- .../gunicorn-hooks/gunicorn-stderr-logging.py | 50 +++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py diff --git a/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py b/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py new file mode 100644 index 000000000..a2f4a915b --- /dev/null +++ b/snf-webproject/conf/gunicorn-hooks/gunicorn-stderr-logging.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 - +# +# Copyright (C) 2016 GRNET S.A. and individual contributors +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import logging + + +def when_ready(server): + """Hook function to redirect stderr/stdout to logfile. + + Hook function that is running when gunicorn server is ready and that is + responsible to set stdout and stderr to the file descriptor of the first + logging file handler. + + FIXME: Handle logfile rotation. 
+ + """ + + server.log.info("Server ready, redirecting stdout/stderr to logfile") + + # Use the already opened file of the first registered FileHandler + for h in server.log.error_log.handlers: + if isinstance(h, logging.FileHandler): + name = h.stream.name + fd = h.stream.fileno() + + server.log.info("Redirecting stdout/stderr to %s (%s)", name, fd) + + os.dup2(fd, 1) + os.dup2(fd, 2) + + break + else: + server.log.warn("Could not find file handler!") + + return From 3210aa91ac6bd800df868f151f31b3d292700802 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Thu, 15 Oct 2015 17:52:12 +0300 Subject: [PATCH 05/11] deploy: Fix --no-key-inject handling Signed-off-by: Dimitris Aragiorgis --- snf-deploy/snfdeploy/components.py | 11 ++++++++++- snf-deploy/snfdeploy/config.py | 3 ++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/snf-deploy/snfdeploy/components.py b/snf-deploy/snfdeploy/components.py index e531a20e1..795fe2f35 100644 --- a/snf-deploy/snfdeploy/components.py +++ b/snf-deploy/snfdeploy/components.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2015 GRNET S.A. and individual contributors +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -192,6 +192,9 @@ def test(self): class SSH(base.Component): @base.run_cmds def prepare(self): + if not config.key_inject: + return [] + return [ "mkdir -p /root/.ssh", "for f in $(ls /root/.ssh/*); do cp $f $f.bak ; done", @@ -199,6 +202,9 @@ def prepare(self): ] def _configure(self): + if not config.key_inject: + return [] + files = [ "authorized_keys", "id_dsa", "id_dsa.pub", "id_rsa", "id_rsa.pub" ] @@ -207,6 +213,9 @@ def _configure(self): @base.run_cmds def initialize(self): + if not config.key_inject: + return [] + f = "/root/.ssh/authorized_keys" return [ "test -e {0}.bak && cat {0}.bak >> {0} || true".format(f) diff --git a/snf-deploy/snfdeploy/config.py b/snf-deploy/snfdeploy/config.py index 5d2ee78c2..4532db77b 100644 --- a/snf-deploy/snfdeploy/config.py +++ b/snf-deploy/snfdeploy/config.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -136,6 +136,7 @@ def init(args): config.dry_run = args.dry_run config.force = args.force config.ssh_key = args.ssh_key + config.key_inject = args.key_inject config.mem = args.mem config.vnc = args.vnc config.smp = args.smp From 74c6a2a3881ffc07219e61ecebb3b5df5a4d3881 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Thu, 15 Oct 2015 17:52:53 +0300 Subject: [PATCH 06/11] deploy: Do not create a VG if already exists Signed-off-by: Dimitris Aragiorgis --- snf-deploy/snfdeploy/components.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snf-deploy/snfdeploy/components.py b/snf-deploy/snfdeploy/components.py index 795fe2f35..1fe02bd53 100644 --- a/snf-deploy/snfdeploy/components.py +++ b/snf-deploy/snfdeploy/components.py @@ -527,7 +527,9 @@ def initialize(self): # If extra disk found use it # else create a raw file and losetup it cmd = """ -if [ -b "{0}" ]; then +if vgs {2}; then + echo "VG ${2} found!" 
+elif [ -b "{0}" ]; then pvcreate {0} && vgcreate {2} {0} else truncate -s {3} {1} From 9f9cc8e3c62c8615fc3a7c6791debd015b7b949c Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Fri, 16 Oct 2015 01:29:02 +0300 Subject: [PATCH 07/11] deploy: Use shared_dir during gnt-cluster init Use the extra --file-storage-dir and --shared-file-storage-dir options in gnt-cluster init since the default ones (/srv/ganeti/..) might not exist if one changes the shared_dir configuration option. Signed-off-by: Dimitris Aragiorgis --- snf-deploy/snfdeploy/components.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/snf-deploy/snfdeploy/components.py b/snf-deploy/snfdeploy/components.py index 1fe02bd53..f78d31842 100644 --- a/snf-deploy/snfdeploy/components.py +++ b/snf-deploy/snfdeploy/components.py @@ -773,9 +773,12 @@ def initialize(self): --ipolicy-std-specs {2} \ --ipolicy-bounds-specs min:{3}/max:{4} \ --enabled-disk-templates file,ext \ - {5} + --file-storage-dir {5}/ganeti/file-storage \ + --shared-file-storage-dir {5}/ganeti/shared-file-storage \ + {6} """.format(config.common_bridge, self.cluster.netdev, - std, bound_min, bound_max, self.cluster.fqdn) + std, bound_min, bound_max, config.shared_dir, + self.cluster.fqdn) modify = "gnt-node modify --vm-capable=no %s" % self.node.fqdn From fc2a88d5a126d469c3a933a4985d3f3f9ea13678 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Fri, 16 Oct 2015 01:46:10 +0300 Subject: [PATCH 08/11] deploy: Use DNS forwarders if any We might have an internal DNS server that was probably listed in the original /etc/resolv.conf, which gets overwritten. Here we add another option in nodes.conf that each node can define: nameserver. These nameservers will end up in named.conf.options. Signed-off-by: Dimitris Aragiorgis --- snf-deploy/conf/nodes.conf | 3 +++ snf-deploy/files/etc/bind/named.conf.options | 6 +++--- snf-deploy/snfdeploy/components.py | 7 +++++-- snf-deploy/snfdeploy/context.py | 7 ++++++- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/snf-deploy/conf/nodes.conf b/snf-deploy/conf/nodes.conf index 4a82ba010..d930f1412 100644 --- a/snf-deploy/conf/nodes.conf +++ b/snf-deploy/conf/nodes.conf @@ -6,6 +6,9 @@ # Instances will reside in the .vm. subdomain domain = synnefo.live +# This is the default forwarder to be used by bind +nameserver = 8.8.8.8 + # Each node should define: # The node's desired hostname. It will be set diff --git a/snf-deploy/files/etc/bind/named.conf.options b/snf-deploy/files/etc/bind/named.conf.options index afc640e12..c74699bdf 100644 --- a/snf-deploy/files/etc/bind/named.conf.options +++ b/snf-deploy/files/etc/bind/named.conf.options @@ -10,9 +10,9 @@ options { // Uncomment the following block, and insert the addresses replacing // the all-0's placeholder. 
- // forwarders { - // 0.0.0.0; - // }; + forwarders { + %NAMESERVERS%; + }; auth-nxdomain no; # conform to RFC1035 allow-recursion { %NODE_IPS%; }; diff --git a/snf-deploy/snfdeploy/components.py b/snf-deploy/snfdeploy/components.py index f78d31842..392b39e21 100644 --- a/snf-deploy/snfdeploy/components.py +++ b/snf-deploy/snfdeploy/components.py @@ -333,6 +333,10 @@ def prepare(self): def _configure(self): d = self.node.domain ip = self.node.ip + r1 = { + "node_ips": ";".join(self.ctx.all_ips), + "nameservers": ";".join(self.ctx.all_nameservers), + } return [ ("/etc/bind/named.conf.local", {"domain": d}, {}), ("/etc/bind/zones/example.com", @@ -343,8 +347,7 @@ def _configure(self): {"remote": "/etc/bind/zones/vm.%s" % d}), ("/etc/bind/rev/synnefo.in-addr.arpa.zone", {"domain": d}, {}), ("/etc/bind/rev/synnefo.ip6.arpa.zone", {"domain": d}, {}), - ("/etc/bind/named.conf.options", - {"node_ips": ";".join(self.ctx.all_ips)}, {}), + ("/etc/bind/named.conf.options", r1, {}), ("/root/ddns/ddns.key", {}, {"remote": "/etc/bind/ddns.key"}), ] diff --git a/snf-deploy/snfdeploy/context.py b/snf-deploy/snfdeploy/context.py index 98645e493..d81c81be2 100644 --- a/snf-deploy/snfdeploy/context.py +++ b/snf-deploy/snfdeploy/context.py @@ -1,4 +1,4 @@ -# Copyright (C) 2010-2014 GRNET S.A. +# Copyright (C) 2010-2016 GRNET S.A. and individual contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -119,6 +119,11 @@ def all_ips(self): l = lambda x: config.get_node_info(x).ip return [l(n) for n in self.all_nodes] + @property + def all_nameservers(self): + l = lambda x: config.get_node_info(x).nameserver + return set([l(n) for n in self.all_nodes]) + def get(self, role): try: return config.get(self.setup, role) From d2c950d86553d4c31bb874cd11d8a21ce89edeef Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Sun, 13 Dec 2015 14:41:22 +0200 Subject: [PATCH 09/11] burnin: Do not depend on archipelago flavors Until now the snapshot test was looking for a snapshot-able flavor. Synnefo assumes that snapshot-able flavors are the ones with the disk_template property ext_archipelago. This commit lifts this constraint and uses one of the flavors passed on the command line, hoping that the admin knows what they are doing. 
Signed-off-by: Dimitris Aragiorgis --- snf-tools/synnefo_tools/burnin/snapshots.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/snf-tools/synnefo_tools/burnin/snapshots.py b/snf-tools/synnefo_tools/burnin/snapshots.py index 5941dab13..20e4e4022 100644 --- a/snf-tools/synnefo_tools/burnin/snapshots.py +++ b/snf-tools/synnefo_tools/burnin/snapshots.py @@ -42,12 +42,7 @@ def test_001_submit_create_snapshot(self): """Create a server and take a snapshot""" self.account = self._get_uuid() use_image = random.choice(self._parse_images()) - archipelago_flavors = \ - [f for f in self._parse_flavors() if - f['SNF:disk_template'].startswith('ext_archipelago')] - self.assertGreater(len(archipelago_flavors), 0, - "No 'archipelago' disk template found") - self.use_flavor = random.choice(archipelago_flavors) + self.use_flavor = random.choice(self._parse_flavors()) if self._image_is(use_image, "linux"): # Enforce personality test self.info("Creating personality content to be used") From 2d86c05019c7ba1c812041ce9400a8383d297f9b Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Mon, 14 Dec 2015 14:44:10 +0200 Subject: [PATCH 10/11] burnin: Add state dir for each test Signed-off-by: Dimitris Aragiorgis --- snf-tools/synnefo_tools/burnin/__init__.py | 4 ++++ snf-tools/synnefo_tools/burnin/common.py | 14 ++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/snf-tools/synnefo_tools/burnin/__init__.py b/snf-tools/synnefo_tools/burnin/__init__.py index 0be4eaeac..f877024b5 100644 --- a/snf-tools/synnefo_tools/burnin/__init__.py +++ b/snf-tools/synnefo_tools/burnin/__init__.py @@ -148,6 +148,10 @@ def parse_arguments(args): "--log-folder", action="store", type="string", default="/var/log/burnin/", dest="log_folder", help="Define the absolute path where the output log is stored") + parser.add_option( + "--state-folder", action="store", + type="string", default="/var/lib/burnin/", dest="state_folder", + help="Define the absolute path where various test data is stored") parser.add_option( "--verbose", "-v", action="store", type="int", default=1, dest="verbose", diff --git a/snf-tools/synnefo_tools/burnin/common.py b/snf-tools/synnefo_tools/burnin/common.py index 116dae61a..f2b3440e0 100644 --- a/snf-tools/synnefo_tools/burnin/common.py +++ b/snf-tools/synnefo_tools/burnin/common.py @@ -18,6 +18,7 @@ """ +import os import hashlib import re import shutil @@ -792,12 +793,21 @@ def initialize(opts, testsuites, stale_testsuites): BurninTests.delete_stale = opts.delete_stale BurninTests.temp_directory = opts.temp_directory BurninTests.failfast = opts.failfast - BurninTests.run_id = SNF_TEST_PREFIX + \ - datetime.datetime.strftime(curr_time, "%Y%m%d%H%M%S") BurninTests.obj_upload_num = opts.obj_upload_num BurninTests.obj_upload_min_size = opts.obj_upload_min_size BurninTests.obj_upload_max_size = opts.obj_upload_max_size + run_id = SNF_TEST_PREFIX + \ + datetime.datetime.strftime(curr_time, "%Y%m%d%H%M%S") + + # Do not create the state dir yet. + # Let the test suites do so if they have something to store there. + # Otherwise we would end up with a lot of empty folders. 
+ state_dir = os.path.join(opts.state_folder, run_id) + + BurninTests.state_dir = state_dir + BurninTests.run_id = run_id + # Choose tests to run if opts.show_stale: # We will run the stale_testsuites From 12f71d1c53bc06fd4a5d682cc3c6560baedd36e3 Mon Sep 17 00:00:00 2001 From: Dimitris Aragiorgis Date: Wed, 16 Dec 2015 14:42:51 +0200 Subject: [PATCH 11/11] burnin: Support out of tree test suites Refactor the initialization phase regarding test suites and let the user pass any custom tests via --set-tests. Signed-off-by: Dimitris Aragiorgis --- snf-tools/synnefo_tools/burnin/__init__.py | 60 +++++++++++++++++----- snf-tools/synnefo_tools/burnin/common.py | 15 +----- snf-tools/synnefo_tools/burnin/logger.py | 1 - 3 files changed, 47 insertions(+), 29 deletions(-) diff --git a/snf-tools/synnefo_tools/burnin/__init__.py b/snf-tools/synnefo_tools/burnin/__init__.py index f877024b5..0a45de223 100644 --- a/snf-tools/synnefo_tools/burnin/__init__.py +++ b/snf-tools/synnefo_tools/burnin/__init__.py @@ -21,6 +21,8 @@ import sys import optparse +from importlib import import_module + from synnefo_tools import version from synnefo_tools.burnin import common from synnefo_tools.burnin.astakos_tests import AstakosTestSuite @@ -58,9 +60,24 @@ STALE_TSUITES_NAMES = [tsuite.__name__ for tsuite in STALE_TESTSUITES] -def string_to_class(names): - """Convert class namesto class objects""" - return [eval(name) for name in names] +def string_to_class(name): + """Convert a class name to a class object""" + try: + # The class is already known and imported + return eval(name) + except NameError: + pass + + try: + # Try find the given class assuming it is in the form + # module[.submodule...].class + module_name, class_name = name.rsplit(".", 1) + mod = import_module(module_name) + return getattr(mod, class_name) + except (ValueError, ImportError, AttributeError): + pass + + raise RuntimeError("Test Suite `%s' does not exist" % name) # -------------------------------------------------------------------- @@ -226,14 +243,6 @@ def parse_arguments(args): if opts.quiet: opts.log_level = 2 - # Check `--set-tests' and `--exclude-tests' options - if opts.tests != "all" and \ - not (set(opts.tests)).issubset(set(TSUITES_NAMES)): - raise optparse.OptionValueError("The selected set of tests is invalid") - if opts.exclude_tests is not None and \ - not (set(opts.exclude_tests)).issubset(set(TSUITES_NAMES)): - raise optparse.OptionValueError("The selected set of tests is invalid") - # `token' is mandatory mandatory_argument(opts.token, "--token") # `auth_url' is mandatory @@ -254,6 +263,26 @@ def mandatory_argument(value, arg_name): sys.exit("Invalid input") +def find_final_test_suites(opts): + """Parse opts and return the final test suites classes.""" + + if opts.show_stale: + # We will run the stale_testsuites + return STALE_TESTSUITES + + # By default run all test suites + names = TSUITES_NAMES + # If --set-tests given then take this into account + if opts.tests != "all": + names = opts.tests + # Remove any excluded test + if opts.exclude_tests is not None: + names = [tsuite for tsuite in names + if tsuite not in opts.exclude_tests] + + return [string_to_class(name) for name in names] + + # -------------------------------------------------------------------- # Burnin main function def main(): @@ -270,10 +299,13 @@ def main(): # Parse arguments using `optparse' (opts, _) = parse_arguments(sys.argv[1:]) + testsuites = find_final_test_suites(opts) + # Initialize burnin - (testsuites, failfast) = \ - common.initialize(opts, 
TSUITES_NAMES, STALE_TSUITES_NAMES) - testsuites = string_to_class(testsuites) + common.initialize(opts) + + # In case we clean up we have to fail fast + failfast = True if opts.show_stale else opts.failfast # Run burnin # The return value denotes the success status diff --git a/snf-tools/synnefo_tools/burnin/common.py b/snf-tools/synnefo_tools/burnin/common.py index f2b3440e0..7de4878a2 100644 --- a/snf-tools/synnefo_tools/burnin/common.py +++ b/snf-tools/synnefo_tools/burnin/common.py @@ -764,7 +764,7 @@ def _get_merkle_hash(self, data): # -------------------------------------------------------------------- # Initialize Burnin -def initialize(opts, testsuites, stale_testsuites): +def initialize(opts): """Initalize burnin Initialize our logger and burnin state @@ -808,19 +808,6 @@ def initialize(opts, testsuites, stale_testsuites): BurninTests.state_dir = state_dir BurninTests.run_id = run_id - # Choose tests to run - if opts.show_stale: - # We will run the stale_testsuites - return (stale_testsuites, True) - - if opts.tests != "all": - testsuites = opts.tests - if opts.exclude_tests is not None: - testsuites = [tsuite for tsuite in testsuites - if tsuite not in opts.exclude_tests] - - return (testsuites, opts.failfast) - # -------------------------------------------------------------------- # Run Burnin diff --git a/snf-tools/synnefo_tools/burnin/logger.py b/snf-tools/synnefo_tools/burnin/logger.py index 66fcf9374..850d19048 100644 --- a/snf-tools/synnefo_tools/burnin/logger.py +++ b/snf-tools/synnefo_tools/burnin/logger.py @@ -33,7 +33,6 @@ import os import sys -import os.path import logging import datetime