Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions config.js
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,6 @@ config.EXTERNAL_DB_SERVICE_CERT_PATH = '/etc/external-db-secret';
// TODO take nodes min and free space reserve from system/pool config
config.NODES_MIN_COUNT = 3;
config.NODES_PER_CLOUD_POOL = 1;
config.NODES_PER_MONGO_POOL = 1;
// in kubernetes use reserve of 100MB instead of 10GB
config.NODES_FREE_SPACE_RESERVE = 100 * (1024 ** 2);

Expand Down Expand Up @@ -243,7 +242,7 @@ config.POSTGRES_MD_MAX_CLIENTS = (process.env.LOCAL_MD_SERVER === 'true') ? 70 :
// SYSTEM CONFIG //
///////////////////

config.DEFAULT_POOL_TYPE = 'INTERNAL'; // use 'HOSTS' for setting up a pool of FS backingstores instead
config.DEFAULT_POOL_TYPE = 'HOSTS'; // use 'HOSTS' for setting up a pool of FS backingstores instead
config.DEFAULT_POOL_NAME = 'backingstores'; // only used when config.DEFAULT_POOL_TYPE = 'HOSTS'
config.DEFAULT_BUCKET_NAME = 'first.bucket';
config.INTERNAL_STORAGE_POOL_NAME = 'system-internal-storage-pool';
Expand Down
4 changes: 2 additions & 2 deletions src/agent/agent.js
Original file line number Diff line number Diff line change
Expand Up @@ -480,8 +480,8 @@ class Agent {
dbg.error('This agent appears to be using an old token.',
'cleaning this agent noobaa_storage directory', this.storage_path);
if (this.cloud_info || this.mongo_info) {
dbg.error(`shouldn't be here. node not found for cloud pool or mongo pool!!`);
throw new Error('node not found cloud or mongo node');
dbg.error(`shouldn't be here. node not found for cloud pool!!`);
throw new Error('node not found cloud node');
} else {
// We don't exit the process in order to keep the underlying pod alive until
// the pool statefulset will scale this pod out of existence.
Expand Down
2 changes: 1 addition & 1 deletion src/api/hosted_agents_api.js
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ module.exports = {
},
},

mongo_info: {
mongo_info: {
type: 'object',
additionalProperties: true,
properties: {}
Expand Down
1 change: 0 additions & 1 deletion src/api/object_api.js
Original file line number Diff line number Diff line change
Expand Up @@ -1665,7 +1665,6 @@ module.exports = {
mount: { type: 'string' },
online: { type: 'boolean' },
in_cloud_pool: { type: 'boolean' },
in_mongo_pool: { type: 'boolean' },
}
}
}
Expand Down
17 changes: 0 additions & 17 deletions src/api/pool_api.js
Original file line number Diff line number Diff line change
Expand Up @@ -128,23 +128,6 @@ module.exports = {
}
},

create_mongo_pool: {
doc: 'Create Mongo Pool',
method: 'POST',
params: {
type: 'object',
required: ['name'],
properties: {
name: {
type: 'string',
}
}
},
auth: {
system: 'admin'
}
},

read_pool: {
doc: 'Read Pool Information',
method: 'GET',
Expand Down
14 changes: 5 additions & 9 deletions src/hosted_agents/hosted_agents.js
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ class HostedAgents {
reload() {
// start agents for all existing cloud pools
const agents_to_start = system_store.data.pools.filter(pool =>
(!_.isUndefined(pool.cloud_pool_info) || !_.isUndefined(pool.mongo_pool_info))
(!_.isUndefined(pool.cloud_pool_info))
);
dbg.log0(`will start agents for these pools: ${util.inspect(agents_to_start)}`);
return P.map(agents_to_start, pool => this._start_pool_agent(pool));
Expand Down Expand Up @@ -114,8 +114,7 @@ class HostedAgents {

const host_id = config.HOSTED_AGENTS_HOST_ID + pool_id;
const storage_path = path.join(process.cwd(), 'noobaa_storage', node_name);
const pool_property_path = pool.resource_type === 'INTERNAL' ?
'mongo_pool_info.agent_info.mongo_path' : 'cloud_pool_info.agent_info.cloud_path';
const pool_property_path = 'cloud_pool_info.agent_info.cloud_path';
const pool_path = _.get(pool, pool_property_path, `noobaa_blocks/${pool_id}`);
const pool_path_property = pool.resource_type === 'INTERNAL' ? 'mongo_path' : 'cloud_path';
const pool_info_property = pool.resource_type === 'INTERNAL' ? 'mongo_info' : 'cloud_info';
Expand All @@ -131,12 +130,10 @@ class HostedAgents {
role: 'create_node'
});
const { token_wrapper, create_node_token_wrapper } = _get_pool_token_wrapper(pool);
const info = pool.resource_type === 'INTERNAL' ?
pool.mongo_pool_info : pool.cloud_pool_info;
const info = pool.cloud_pool_info;
if (!info.agent_info || !info.agent_info.create_node_token) {
const existing_token = info.agent_info ? info.agent_info.node_token : null;
const pool_agent_path = pool.resource_type === 'INTERNAL' ?
'mongo_pool_info' : 'cloud_pool_info';
const pool_agent_path = 'cloud_pool_info';
const update = {
pools: [{
_id: pool._id,
Expand Down Expand Up @@ -377,8 +374,7 @@ function _get_pool_and_path_for_token(token_pool) {
const sys = system_store.data.systems[0];
const pool = sys.pools_by_name[token_pool.name];
if (!pool) throw new Error(`Pool ${token_pool.name}, ${token_pool._id} does not exist`);
const pool_property_path = pool.resource_type === 'INTERNAL' ?
'mongo_pool_info.agent_info' : 'cloud_pool_info.agent_info';
const pool_property_path = 'cloud_pool_info.agent_info';
return {
pool_property_path,
pool
Expand Down
1 change: 0 additions & 1 deletion src/sdk/map_api_types.js
Original file line number Diff line number Diff line change
Expand Up @@ -357,7 +357,6 @@ class BlockAPI {
adminfo.mount = node.drive.mount;
adminfo.online = Boolean(node.online);
adminfo.in_cloud_pool = Boolean(node.is_cloud_node);
adminfo.in_mongo_pool = Boolean(node.is_mongo_node);
}
}

Expand Down
4 changes: 0 additions & 4 deletions src/server/node_services/node_allocator.js
Original file line number Diff line number Diff line change
Expand Up @@ -303,10 +303,6 @@ function _get_tier_pools_status(pools, required_valid_nodes) {
if (num_nodes !== config.NODES_PER_CLOUD_POOL) {
valid_for_allocation = false;
}
} else if (pool.mongo_pool_info) {
if (num_nodes !== config.NODES_PER_MONGO_POOL) {
valid_for_allocation = false;
}
} else if (num_nodes < required_valid_nodes) {
valid_for_allocation = false;
}
Expand Down
7 changes: 2 additions & 5 deletions src/server/node_services/nodes_monitor.js
Original file line number Diff line number Diff line change
Expand Up @@ -612,7 +612,7 @@ class NodesMonitor extends EventEmitter {
const pool =
agent_config.pool ||
system.pools_by_name[pool_name] ||
_.filter(system.pools_by_name, p => (!_.get(p, 'mongo_pool_info') && (!_.get(p, 'cloud_pool_info'))))[0]; // default - the 1st host pool in the system
_.filter(system.pools_by_name, p => (!_.get(p, 'cloud_pool_info')))[0]; // default - the 1st host pool in the system
// system_store.get_account_by_email(system.owner.email).default_resource; //This should not happen, but if it does, use owner's default

if (!pool) {
Expand Down Expand Up @@ -642,9 +642,6 @@ class NodesMonitor extends EventEmitter {
if (pool.cloud_pool_info) {
item.node.is_cloud_node = true;
}
if (pool.mongo_pool_info) {
item.node.is_mongo_node = true;
}

dbg.log0('_add_new_node', item.node);
this._set_need_update.add(item);
Expand Down Expand Up @@ -1725,7 +1722,7 @@ class NodesMonitor extends EventEmitter {

} catch (err) {
// We will just wait another cycle and attempt to delete it fully again
dbg.warn('delete_cloud_or_mongo_pool_node ERROR node', item.node, err);
dbg.warn('delete_cloud_node ERROR node', item.node, err);
}
});

Expand Down
1 change: 0 additions & 1 deletion src/server/object_services/map_db_types.js
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,6 @@ class BlockDB {
mount: this.node.drive.mount,
online: Boolean(this.node.online),
in_cloud_pool: Boolean(this.node.is_cloud_node),
in_mongo_pool: Boolean(this.node.is_mongo_node),
};
}
return {
Expand Down
13 changes: 5 additions & 8 deletions src/server/object_services/mapper.js
Original file line number Diff line number Diff line change
Expand Up @@ -54,20 +54,17 @@ function select_mirror_for_write(tier, tiering, tiering_status, location_info) {
for (const mirror of tier.mirrors) {
const mirror_status = tier_status.mirrors_storage[mirror_index];
const local_pool = find_local_pool(mirror.spread_pools, location_info);
const is_mongo_included = mirror.spread_pools.some(pool => Boolean(pool.mongo_pool_info));
const is_local_pool_valid = local_pool && tier_status.pools[local_pool._id.toHexString()].valid_for_allocation;
const is_regular_pools_valid = size_utils.json_to_bigint(mirror_status.regular_free).greater(config.MIN_TIER_FREE_THRESHOLD);
const is_redundant_pools_valid = size_utils.json_to_bigint(mirror_status.redundant_free).greater(config.MIN_TIER_FREE_THRESHOLD);

let weight = 0;
if (is_mongo_included) {
weight = 1;
} else if (is_local_pool_valid) {
weight = 4;
} else if (is_regular_pools_valid) {
if (is_local_pool_valid) {
weight = 3;
} else if (is_redundant_pools_valid) {
} else if (is_regular_pools_valid) {
weight = 2;
} else if (is_redundant_pools_valid) {
weight = 1;
}

if (!selected || weight > selected_weight || (weight === selected_weight && Math.random() > 0.5)) {
Expand Down Expand Up @@ -371,7 +368,7 @@ function _block_sort_newer_first(block1, block2) {
* @returns {boolean}
*/
function _pool_has_redundancy(pool) {
return Boolean(pool.cloud_pool_info || pool.mongo_pool_info);
return Boolean(pool.cloud_pool_info);
}

// /**
Expand Down
14 changes: 7 additions & 7 deletions src/server/system_services/account_server.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ const SensitiveString = require('../../util/sensitive_string');
const cloud_utils = require('../../util/cloud_utils');
const auth_server = require('../common_services/auth_server');
const system_store = require('../system_services/system_store').get_instance();
const pool_server = require('../system_services/pool_server');
const azure_storage = require('../../util/azure_storage_wrap');
const NetStorage = require('../../util/NetStorageKit-Node-master/lib/netstorage');
const usage_aggregator = require('../bg_services/usage_aggregator');
Expand Down Expand Up @@ -81,16 +80,17 @@ async function create_account(req) {
const password_hash = await bcrypt_password(account.password.unwrap());
account.password = password_hash;
}

if (req.rpc_params.s3_access) {
if (req.rpc_params.new_system_parameters) {
account.default_resource = system_store.parse_system_store_id(req.rpc_params.new_system_parameters.default_resource);
account.allow_bucket_creation = true;
} else {
const resource = req.rpc_params.default_resource ? req.system.pools_by_name[req.rpc_params.default_resource] ||
// Default pool resource is backingstores
const resource = req.rpc_params.default_resource ?
req.system.pools_by_name[req.rpc_params.default_resource] ||
(req.system.namespace_resources_by_name && req.system.namespace_resources_by_name[req.rpc_params.default_resource]) :
pool_server.get_internal_mongo_pool(req.system); //Internal
if (!resource) throw new RpcError('BAD_REQUEST', 'default resource doesn\'t exist');
req.system.pools_by_name.backingstores;
if (!resource) throw new RpcError('BAD_REQUEST', 'default resource doesn\'t exist');
if (resource.nsfs_config && resource.nsfs_config.fs_root_path && !req.rpc_params.nsfs_account_config) {
throw new RpcError('Invalid account configuration - must specify nsfs_account_config when default resource is a namespace resource');
}
Expand Down Expand Up @@ -376,7 +376,7 @@ function update_account_s3_access(req) {
//If s3_access is on, update allowed buckets, default_resource and force_md5_etag
if (req.rpc_params.s3_access) {
if (!req.rpc_params.default_resource) {
const pools = _.filter(req.system.pools_by_name, p => (!_.get(p, 'mongo_pool_info'))); // find none-internal pools
const pools = _.filter(req.system.pools_by_name);
if (pools.length) { // has resources that are not internal - must supply a resource
throw new RpcError('BAD_REQUEST', 'Enabling S3 requires providing default_resource');
}
Expand Down Expand Up @@ -1384,7 +1384,7 @@ function validate_create_account_permissions(req) {
function validate_create_account_params(req) {
// find non-internal pools
const has_non_internal_resources = (req.system && req.system.pools_by_name) ?
Object.values(req.system.pools_by_name).some(p => !p.mongo_pool_info) :
Object.values(req.system.pools_by_name).some(p => p.name !== 'backingstores') :
false;

if (req.rpc_params.name.unwrap() !== req.rpc_params.name.unwrap().trim()) {
Expand Down
56 changes: 8 additions & 48 deletions src/server/system_services/bucket_server.js
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,6 @@ async function create_bucket(req) {
update: {}
};

const mongo_pool = pool_server.get_internal_mongo_pool(req.system);
if (req.rpc_params.tiering) {
tiering_policy = resolve_tiering_policy(req, req.rpc_params.tiering);
} else if (req.system.namespace_resources_by_name && req.system.namespace_resources_by_name[req.account.default_resource.name]) {
Expand All @@ -191,7 +190,7 @@ async function create_bucket(req) {
// that uses the default_resource of that account
const default_pool = req.account.default_resource;
// Do not allow to create S3 buckets that are attached to mongo resource (internal storage)
validate_pool_constraints({ mongo_pool, default_pool });
validate_pool_constraints({ default_pool });
const chunk_config = chunk_config_utils.resolve_chunk_config(
req.rpc_params.chunk_coder_config, req.account, req.system);
if (!chunk_config._id) {
Expand Down Expand Up @@ -328,10 +327,9 @@ async function create_bucket(req) {
});
}

function validate_pool_constraints({ mongo_pool, default_pool }) {
function validate_pool_constraints({ default_pool }) {
if (config.ALLOW_BUCKET_CREATE_ON_INTERNAL !== true) {
if (!(mongo_pool && mongo_pool._id) || !(default_pool && default_pool._id)) throw new RpcError('SERVICE_UNAVAILABLE', 'Non existing pool');
if (String(mongo_pool._id) === String(default_pool._id)) throw new RpcError('SERVICE_UNAVAILABLE', 'Not allowed to create new buckets on internal pool');
if (!(default_pool && default_pool._id)) throw new RpcError('SERVICE_UNAVAILABLE', 'Non existing pool');
}
}

Expand Down Expand Up @@ -1323,32 +1321,10 @@ async function get_cloud_buckets(req) {


async function update_all_buckets_default_pool(req) {
const pool_name = req.rpc_params.pool_name;
const pool = req.system.pools_by_name[pool_name];
if (!pool) throw new RpcError('INVALID_POOL_NAME');
const internal_pool = pool_server.get_internal_mongo_pool(pool.system);
if (!internal_pool || !internal_pool._id) return;
if (String(pool._id) === String(internal_pool._id)) return;
const buckets_with_internal_pool = _.filter(req.system.buckets_by_name, bucket =>
is_using_internal_storage(bucket, internal_pool));
if (!buckets_with_internal_pool.length) return;

const updates = [];
for (const bucket of buckets_with_internal_pool) {
updates.push({
_id: bucket.tiering.tiers[0].tier._id,
mirrors: [{
_id: system_store.new_system_store_id(),
spread_pools: [pool._id]
}]
});
}
dbg.log0(`Updating ${buckets_with_internal_pool.length} buckets to use ${pool_name} as default resource`);
await system_store.make_changes({
update: {
tiers: updates
}
});
// GAP - Internal mongo_pool is no longer supported. This method needs to be removed along with the NooBaa operator reference.
dbg.warn('update_all_buckets_default_pool is deprecated and will be removed in the next release');
// No-op: bucket default pools are no longer supported
return { success: true };
}

/**
Expand Down Expand Up @@ -1581,21 +1557,6 @@ function get_bucket_info({
return info;
}

function is_using_internal_storage(bucket, internal_pool) {
if (!internal_pool || !internal_pool._id) return false;

const tiers = bucket.tiering && bucket.tiering.tiers;
if (!tiers || tiers.length !== 1) return false;

const mirrors = tiers[0].tier.mirrors;
if (mirrors.length !== 1) return false;

const spread_pools = mirrors[0].spread_pools;
if (spread_pools.length !== 1) return false;

return String(spread_pools[0]._id) === String(internal_pool._id);
}

function _calc_metrics({
bucket,
nodes_aggregate_pool,
Expand All @@ -1608,7 +1569,6 @@ function _calc_metrics({
let has_enough_healthy_nodes_for_tiering = false;
let has_enough_total_nodes_for_tiering = false;
const any_rebuilds = false;
const internal_pool = pool_server.get_internal_mongo_pool(bucket.system);

const objects_aggregate = {
size: (bucket.storage_stats && bucket.storage_stats.objects_size) || 0,
Expand Down Expand Up @@ -1700,7 +1660,7 @@ function _calc_metrics({
});

return {
is_using_internal: is_using_internal_storage(bucket, internal_pool),
is_using_internal: false,
has_any_pool_configured,
has_enough_healthy_nodes_for_tiering,
has_enough_total_nodes_for_tiering,
Expand Down
Loading