diff --git a/src/server/dragonfly_test.cc b/src/server/dragonfly_test.cc
index 01b02414d4a5..9a6206b11802 100644
--- a/src/server/dragonfly_test.cc
+++ b/src/server/dragonfly_test.cc
@@ -26,6 +26,7 @@ ABSL_DECLARE_FLAG(float, mem_defrag_waste_threshold);
 ABSL_DECLARE_FLAG(uint32_t, mem_defrag_check_sec_interval);
 ABSL_DECLARE_FLAG(std::vector<std::string>, rename_command);
 ABSL_DECLARE_FLAG(bool, lua_resp2_legacy_float);
+ABSL_DECLARE_FLAG(bool, enable_heartbeat_eviction);
 ABSL_DECLARE_FLAG(double, eviction_memory_budget_threshold);
 ABSL_DECLARE_FLAG(std::vector<std::string>, command_alias);
 ABSL_DECLARE_FLAG(bool, latency_tracking);
@@ -753,6 +754,8 @@ TEST_F(DefragDflyEngineTest, TestDefragOption) {
   absl::SetFlag(&FLAGS_mem_defrag_threshold, 0.0);
   absl::SetFlag(&FLAGS_mem_defrag_check_sec_interval, 0);
   absl::SetFlag(&FLAGS_mem_defrag_waste_threshold, 0.1);
+  // We need to disable heartbeat eviction because it influences defragmentation.
+  absl::SetFlag(&FLAGS_enable_heartbeat_eviction, false);
 
   // Fill data into dragonfly and then check if we have
   // any location in memory to defrag. See issue #448 for details about this.
diff --git a/src/server/engine_shard.cc b/src/server/engine_shard.cc
index daa1eb82893b..64ded9eaf638 100644
--- a/src/server/engine_shard.cc
+++ b/src/server/engine_shard.cc
@@ -22,10 +22,12 @@ extern "C" {
 #include "server/search/doc_index.h"
 #include "server/server_state.h"
 #include "server/tiered_storage.h"
+#include "server/tiering/common.h"
 #include "server/transaction.h"
 #include "util/fibers/proactor_base.h"
 
 using namespace std;
+using namespace ::dfly::tiering::literals;
 
 ABSL_FLAG(float, mem_defrag_threshold, 0.7,
           "Minimum percentage of used memory relative to maxmemory cap before running "
@@ -132,46 +134,6 @@ size_t CalculateHowManyBytesToEvictOnShard(size_t global_memory_limit, size_t gl
   return shard_budget < shard_memory_threshold ? (shard_memory_threshold - shard_budget) : 0;
 }
 
-/* Calculates the number of bytes to evict based on memory and rss memory usage. */
-size_t CalculateEvictionBytes() {
-  const size_t shards_count = shard_set->size();
-  const double eviction_memory_budget_threshold = GetFlag(FLAGS_eviction_memory_budget_threshold);
-
-  size_t limit = max_memory_limit.load(memory_order_relaxed);
-  const size_t shard_memory_budget_threshold =
-      size_t(limit * eviction_memory_budget_threshold) / shards_count;
-
-  const size_t global_used_memory = used_mem_current.load(memory_order_relaxed);
-
-  // Calculate how many bytes we need to evict on this shard
-  size_t goal_bytes =
-      CalculateHowManyBytesToEvictOnShard(limit, global_used_memory, shard_memory_budget_threshold);
-
-  // TODO: Eviction due to rss usage is not working well as it causes eviction
-  // of to many keys untill we finally see decrease in rss. We need to improve
-  // this logic before we enable it.
-  /*
-  const double rss_oom_deny_ratio = ServerState::tlocal()->rss_oom_deny_ratio;
-  // If rss_oom_deny_ratio is set, we should evict depending on rss memory too
-  if (rss_oom_deny_ratio > 0.0) {
-    const size_t max_rss_memory = size_t(rss_oom_deny_ratio * max_memory_limit);
-    // We start eviction when we have less than eviction_memory_budget_threshold * 100% of free rss
-    memory const size_t shard_rss_memory_budget_threshold =
-        size_t(max_rss_memory * eviction_memory_budget_threshold) / shards_count;
-
-    // Calculate how much rss memory is used by all shards
-    const size_t global_used_rss_memory = rss_mem_current.load(memory_order_relaxed);
-
-    // Try to evict more bytes if we are close to the rss memory limit
-    goal_bytes = std::max(
-        goal_bytes, CalculateHowManyBytesToEvictOnShard(max_rss_memory, global_used_rss_memory,
-                                                        shard_rss_memory_budget_threshold));
-  }
-  */
-
-  return goal_bytes;
-}
-
 }  // namespace
 
 __thread EngineShard* EngineShard::shard_ = nullptr;
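For context on the budget math used by the deleted helper above (and by the member function it moves into, below): a minimal Python sketch, assuming that the elided body of CalculateHowManyBytesToEvictOnShard derives the shard budget as the global free memory split evenly across shards. The helper name and the worked numbers are illustrative, not part of the patch.

```python
# Sketch of the per-shard eviction budget math (assumption: shard budget =
# global free memory divided evenly across shards).
def bytes_to_evict_on_shard(limit: int, used: int, shard_threshold: int, shards: int) -> int:
    shard_budget = max(limit - used, 0) // shards  # free memory attributed to this shard
    return shard_threshold - shard_budget if shard_budget < shard_threshold else 0

# 6 GiB limit, 95% used, 10% eviction budget threshold, 4 shards: each shard
# is asked to evict ~0.075 GiB, i.e. its share of the ~0.3 GiB needed to
# restore the 10% free-memory budget.
limit = 6 * 1024**3
shard_threshold = int(limit * 0.1) // 4
print(bytes_to_evict_on_shard(limit, int(limit * 0.95), shard_threshold, 4))
```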
@@ -249,25 +211,55 @@ bool EngineShard::DefragTaskState::CheckRequired() {
     return false;
   }
 
-  const std::size_t global_threshold = limit * GetFlag(FLAGS_mem_defrag_threshold);
+  /*
+    If eviction is enabled, we want to run the defrag task more frequently and more aggressively.
+    For the global rss threshold we use the rss limit minus the eviction budget threshold, minus
+    another 3%:
+    - For example, if rss_oom_deny_ratio is 0.8 and eviction_memory_budget_threshold is 0.1,
+      we will start eviction when rss memory is above 0.8 - 0.1 = 0.7, and defragmentation
+      should still run while used rss memory is above 0.7 - 0.03 = 0.67.
+    For the defrag interval we use EvictionTaskState::kDefragCheckSecInterval.
+    For the waste threshold we use EvictionTaskState::kDefragWasteThreshold.
+  */
+  const bool is_eviction_enabled = GetFlag(FLAGS_enable_heartbeat_eviction);
+
+  const double mem_defrag_threshold_flag = GetFlag(FLAGS_mem_defrag_threshold);
+  const double defrag_threshold =
+      !is_eviction_enabled ? mem_defrag_threshold_flag
+                           : std::min(mem_defrag_threshold_flag,
+                                      ServerState::tlocal()->rss_oom_deny_ratio -
+                                          GetFlag(FLAGS_eviction_memory_budget_threshold) -
+                                          EvictionTaskState::kDefragRssMemoryDelta);
+
+  const std::size_t global_threshold = limit * defrag_threshold;
   if (global_threshold > rss_mem_current.load(memory_order_relaxed)) {
     return false;
   }
 
   const auto now = time(nullptr);
   const auto seconds_from_prev_check = now - last_check_time;
-  const auto mem_defrag_interval = GetFlag(FLAGS_mem_defrag_check_sec_interval);
+
+  const uint32_t check_sec_interval_flag = GetFlag(FLAGS_mem_defrag_check_sec_interval);
+  const auto mem_defrag_interval =
+      !is_eviction_enabled
+          ? check_sec_interval_flag
+          : std::min(check_sec_interval_flag, EvictionTaskState::kDefragCheckSecInterval);
+
   if (seconds_from_prev_check < mem_defrag_interval) {
     return false;
   }
+
   last_check_time = now;
 
-  ShardMemUsage usage = ReadShardMemUsage(GetFlag(FLAGS_mem_defrag_page_utilization_threshold));
+  ShardMemUsage shard_mem_usage =
+      ReadShardMemUsage(GetFlag(FLAGS_mem_defrag_page_utilization_threshold));
 
-  const double waste_threshold = GetFlag(FLAGS_mem_defrag_waste_threshold);
-  if (usage.wasted_mem > (uint64_t(usage.commited * waste_threshold))) {
-    VLOG(1) << "memory issue found for memory " << usage;
+  const float waste_threshold_flag = GetFlag(FLAGS_mem_defrag_waste_threshold);
+  const double waste_threshold =
+      !is_eviction_enabled
+          ? waste_threshold_flag
+          : std::min(waste_threshold_flag, EvictionTaskState::kDefragWasteThreshold);
+
+  if (shard_mem_usage.wasted_mem > (uint64_t(shard_mem_usage.commited * waste_threshold))) {
+    VLOG(1) << "memory issue found for memory " << shard_mem_usage;
     return true;
   }
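To make the comment's arithmetic concrete: with the example values above, the effective defrag trigger is min(mem_defrag_threshold, rss_oom_deny_ratio - eviction_memory_budget_threshold - kDefragRssMemoryDelta). A small Python sketch (the helper name is illustrative, not part of the patch):

```python
# Recomputes the effective defrag threshold used by CheckRequired when
# heartbeat eviction is enabled.
def effective_defrag_threshold(mem_defrag_threshold: float,
                               rss_oom_deny_ratio: float,
                               eviction_budget: float,
                               rss_delta: float = 0.03,
                               eviction_enabled: bool = True) -> float:
    if not eviction_enabled:
        return mem_defrag_threshold
    return min(mem_defrag_threshold, rss_oom_deny_ratio - eviction_budget - rss_delta)

# Example values: eviction starts at rss > 0.8 - 0.1 = 0.7 of maxmemory,
# and defrag kicks in slightly earlier, at rss > 0.67 of maxmemory.
assert abs(effective_defrag_threshold(0.7, 0.8, 0.1) - 0.67) < 1e-9
```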
@@ -696,6 +688,7 @@ void EngineShard::RetireExpiredAndEvict() {
   DbContext db_cntx;
   db_cntx.time_now_ms = GetCurrentTimeMs();
 
+  size_t deleted_bytes = 0;
   size_t eviction_goal = GetFlag(FLAGS_enable_heartbeat_eviction) ? CalculateEvictionBytes() : 0;
 
   for (unsigned i = 0; i < db_slice.db_array_size(); ++i) {
@@ -707,6 +700,7 @@
     if (!expt->Empty()) {
       DbSlice::DeleteExpiredStats stats = db_slice.DeleteExpiredStep(db_cntx, ttl_delete_target);
 
+      deleted_bytes += stats.deleted_bytes;
       eviction_goal -= std::min(eviction_goal, size_t(stats.deleted_bytes));
       counter_[TTL_TRAVERSE].IncBy(stats.traversed);
       counter_[TTL_DELETE].IncBy(stats.deleted);
@@ -728,9 +722,68 @@
               << " bytes. Max eviction per heartbeat: "
               << GetFlag(FLAGS_max_eviction_per_heartbeat);
 
+      deleted_bytes += evicted_bytes;
       eviction_goal -= std::min(eviction_goal, evicted_bytes);
     }
   }
+
+  eviction_state_.deleted_bytes_before_rss_update += deleted_bytes;
+}
+
+size_t EngineShard::CalculateEvictionBytes() {
+  const size_t shards_count = shard_set->size();
+  const double eviction_memory_budget_threshold = GetFlag(FLAGS_eviction_memory_budget_threshold);
+
+  size_t limit = max_memory_limit.load(memory_order_relaxed);
+  const size_t shard_memory_budget_threshold =
+      size_t(limit * eviction_memory_budget_threshold) / shards_count;
+
+  const size_t global_used_memory = used_mem_current.load(memory_order_relaxed);
+
+  // Calculate how many bytes we need to evict on this shard
+  size_t goal_bytes =
+      CalculateHowManyBytesToEvictOnShard(limit, global_used_memory, shard_memory_budget_threshold);
+
+  VLOG_IF_EVERY_N(1, goal_bytes > 0, 50)
+      << "Memory goal bytes: " << goal_bytes << ", used memory: " << global_used_memory
+      << ", memory limit: " << max_memory_limit;
+
+  // If rss_oom_deny_ratio is set, we should evict depending on rss memory too
+  const double rss_oom_deny_ratio = ServerState::tlocal()->rss_oom_deny_ratio;
+  if (rss_oom_deny_ratio > 0.0) {
+    const size_t max_rss_memory = size_t(rss_oom_deny_ratio * max_memory_limit);
+    /* We start eviction when we have less than eviction_memory_budget_threshold * 100% of free rss
+     * memory */
+    const size_t shard_rss_memory_budget_threshold =
+        size_t(max_rss_memory * eviction_memory_budget_threshold) / shards_count;
+
+    // Calculate how much rss memory is used by all shards
+    const size_t global_used_rss_memory = rss_mem_current.load(memory_order_relaxed);
+
+    auto& global_rss_memory_at_prev_eviction = eviction_state_.global_rss_memory_at_prev_eviction;
+    auto& deleted_bytes_before_rss_update = eviction_state_.deleted_bytes_before_rss_update;
+    if (global_used_rss_memory < eviction_state_.global_rss_memory_at_prev_eviction) {
+      deleted_bytes_before_rss_update -=
+          std::min(deleted_bytes_before_rss_update,
+                   (global_rss_memory_at_prev_eviction - global_used_rss_memory) / shards_count);
+    }
+
+    global_rss_memory_at_prev_eviction = global_used_rss_memory;
+
+    // Try to evict more bytes if we are close to the rss memory limit
+    const size_t rss_goal_bytes = CalculateHowManyBytesToEvictOnShard(
+        max_rss_memory, global_used_rss_memory - deleted_bytes_before_rss_update * shards_count,
+        shard_rss_memory_budget_threshold);
+
+    VLOG_IF_EVERY_N(1, rss_goal_bytes > 0, 50)
+        << "Rss memory goal bytes: " << rss_goal_bytes
+        << ", rss used memory: " << global_used_rss_memory
+        << ", rss memory limit: " << max_rss_memory
+        << ", deleted_bytes_before_rss_update: " << deleted_bytes_before_rss_update;
+
+    goal_bytes = std::max(goal_bytes, rss_goal_bytes);
+  }
+
+  return goal_bytes;
+}
 
 void EngineShard::CacheStats() {
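The key idea in the new RSS path is compensation for lag: the kernel's RSS counter updates lazily, so bytes already deleted since the last observed RSS drop are credited against the stale reading, preventing repeated over-eviction. A minimal Python model of that bookkeeping (names and the shard-budget formula are assumptions mirroring the sketch above, not the patch's code):

```python
# Model of the RSS compensation in CalculateEvictionBytes: deleted-but-not-
# yet-reflected bytes are subtracted from the observed RSS before computing
# the goal, and the credit is retired as RSS actually drops.
def rss_eviction_goal(state: dict, used_rss: int, max_rss: int,
                      shard_threshold: int, shards: int) -> int:
    # When RSS finally drops, retire the matching per-shard share of the credit.
    if used_rss < state["rss_at_prev_eviction"]:
        drop_per_shard = (state["rss_at_prev_eviction"] - used_rss) // shards
        state["deleted_before_rss_update"] -= min(state["deleted_before_rss_update"],
                                                  drop_per_shard)
    state["rss_at_prev_eviction"] = used_rss

    adjusted_rss = used_rss - state["deleted_before_rss_update"] * shards
    shard_budget = max(max_rss - adjusted_rss, 0) // shards
    return shard_threshold - shard_budget if shard_budget < shard_threshold else 0

state = {"rss_at_prev_eviction": 0, "deleted_before_rss_update": 0}
```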
diff --git a/src/server/engine_shard.h b/src/server/engine_shard.h
index d381fea3a9e0..4fec65b60ccd 100644
--- a/src/server/engine_shard.h
+++ b/src/server/engine_shard.h
@@ -224,6 +224,19 @@ class EngineShard {
     void ResetScanState();
   };
 
+  struct EvictionTaskState {
+    /* These constants are used to control the defragmentation task when eviction is enabled.
+       The task runs periodically to check whether we need to do memory defragmentation.
+       When eviction is enabled, we want the defrag task to run more frequently and with a
+       lower waste threshold, to allow more aggressive defragmentation. */
+    static constexpr uint32_t kDefragCheckSecInterval = 2;
+    static constexpr float kDefragWasteThreshold = 0.05;
+    static constexpr double kDefragRssMemoryDelta = 0.03;
+
+    size_t deleted_bytes_before_rss_update = 0;
+    size_t global_rss_memory_at_prev_eviction = 0;
+  };
+
   EngineShard(util::ProactorBase* pb, mi_heap_t* heap);
 
   // blocks the calling fiber.
@@ -235,6 +248,9 @@ class EngineShard {
   void Heartbeat();
   void RetireExpiredAndEvict();
 
+  /* Calculates the number of bytes to evict based on memory and rss memory usage. */
+  size_t CalculateEvictionBytes();
+
   void CacheStats();
 
   // We are running a task that checks whether we need to
@@ -274,6 +290,7 @@ class EngineShard {
   IntentLock shard_lock_;
 
   uint32_t defrag_task_ = 0;
+  EvictionTaskState eviction_state_;  // Used by the eviction fiber
   util::fb2::Fiber fiber_heartbeat_periodic_;
   util::fb2::Done fiber_heartbeat_periodic_done_;
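These constants only ever tighten the corresponding flags: each effective parameter in CheckRequired is the more aggressive of the flag value and the constant. A short sketch mirroring the std::min calls (the argument values are illustrative, not the flag defaults):

```python
K_DEFRAG_CHECK_SEC_INTERVAL = 2    # EvictionTaskState::kDefragCheckSecInterval
K_DEFRAG_WASTE_THRESHOLD = 0.05    # EvictionTaskState::kDefragWasteThreshold

def effective_defrag_params(interval_flag: int, waste_flag: float, eviction_enabled: bool):
    # With eviction on, constants cap the flags; with it off, flags win unchanged.
    if not eviction_enabled:
        return interval_flag, waste_flag
    return (min(interval_flag, K_DEFRAG_CHECK_SEC_INTERVAL),
            min(waste_flag, K_DEFRAG_WASTE_THRESHOLD))

# e.g. an operator-configured 60s interval and 0.2 waste threshold tighten to:
assert effective_defrag_params(60, 0.2, True) == (2, 0.05)
```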
+ """ + match = re.search(r"fragmentation waste:\s*([0-9.]+)%", memory_arena) + assert match.group(1) is not None + return float(match.group(1)) + + @pytest.mark.slow @pytest.mark.opt_only @pytest.mark.parametrize( @@ -186,53 +195,222 @@ async def test_eval_with_oom(df_factory: DflyInstanceFactory): assert rss_before_eval * 1.01 > info["used_memory_rss"] -@pytest.mark.skip("rss eviction disabled") @pytest.mark.asyncio -@dfly_args( - { - "proactor_threads": 1, - "cache_mode": "true", - "maxmemory": "5gb", - "rss_oom_deny_ratio": 0.8, - "max_eviction_per_heartbeat": 100, - } +@pytest.mark.parametrize( + "proactor_threads_param, maxmemory_param", + [(1, 6 * (1024**3)), (4, 6 * (1024**3))], ) -async def test_cache_eviction_with_rss_deny_oom( - async_client: aioredis.Redis, +async def test_cache_eviction_with_rss_deny_oom_simple_case( + df_factory: DflyInstanceFactory, + proactor_threads_param, + maxmemory_param, ): """ Test to verify that cache eviction is triggered even if used memory is small but rss memory is above limit """ + df_server = df_factory.create( + proactor_threads=proactor_threads_param, + cache_mode="true", + maxmemory=maxmemory_param, + rss_oom_deny_ratio=0.8, + ) + df_server.start() + + async_client = df_server.client() + + max_memory = maxmemory_param + rss_oom_deny_ratio = 0.8 + eviction_memory_budget_threshold = 0.1 # 10% of max_memory - max_memory = 5 * 1024 * 1024 * 1024 # 5G - rss_max_memory = int(max_memory * 0.8) + rss_eviction_threshold = max_memory * (rss_oom_deny_ratio - eviction_memory_budget_threshold) - data_fill_size = int(0.9 * rss_max_memory) # 95% of rss_max_memory + data_fill_size = int((rss_oom_deny_ratio + 0.05) * max_memory) # 85% of max_memory val_size = 1024 * 5 # 5 kb num_keys = data_fill_size // val_size await async_client.execute_command("DEBUG", "POPULATE", num_keys, "key", val_size) - # Test that used memory is less than 90% of max memory + + # Test that used memory is less than 90% of max memory to not to start eviction based on used_memory memory_info = await async_client.info("memory") assert ( memory_info["used_memory"] < max_memory * 0.9 - ), "Used memory should be less than 90% of max memory." + ), "Used memory should be less than 90% of max memory to not to start eviction based on used_memory." assert ( - memory_info["used_memory_rss"] > rss_max_memory * 0.9 - ), "RSS memory should be less than 90% of rss max memory (max_memory * rss_oom_deny_ratio)." + memory_info["used_memory_rss"] > max_memory * rss_oom_deny_ratio + ), "Used RSS memory should be more than 80% of rss max memory (max_memory * rss_oom_deny_ratio) to start eviction based on rss memory usage." - # Get RSS memory after creating new connections memory_info = await async_client.info("memory") - while memory_info["used_memory_rss"] > rss_max_memory * 0.9: + prev_evicted_keys = 0 + evicted_keys_repeat_count = 0 + while True: + # Wait for some time await asyncio.sleep(1) + memory_info = await async_client.info("memory") logging.info( - f'Current rss: {memory_info["used_memory_rss"]}. rss eviction threshold: {rss_max_memory * 0.9}.' + f'Current used memory: {memory_info["used_memory"]}, current used rss: {memory_info["used_memory_rss"]}, rss eviction threshold: {rss_eviction_threshold}.' ) + stats_info = await async_client.info("stats") logging.info(f'Current evicted: {stats_info["evicted_keys"]}. 
@@ -186,53 +195,222 @@ async def test_eval_with_oom(df_factory: DflyInstanceFactory):
     assert rss_before_eval * 1.01 > info["used_memory_rss"]
 
 
-@pytest.mark.skip("rss eviction disabled")
 @pytest.mark.asyncio
-@dfly_args(
-    {
-        "proactor_threads": 1,
-        "cache_mode": "true",
-        "maxmemory": "5gb",
-        "rss_oom_deny_ratio": 0.8,
-        "max_eviction_per_heartbeat": 100,
-    }
+@pytest.mark.parametrize(
+    "proactor_threads_param, maxmemory_param",
+    [(1, 6 * (1024**3)), (4, 6 * (1024**3))],
 )
-async def test_cache_eviction_with_rss_deny_oom(
-    async_client: aioredis.Redis,
+async def test_cache_eviction_with_rss_deny_oom_simple_case(
+    df_factory: DflyInstanceFactory,
+    proactor_threads_param,
+    maxmemory_param,
 ):
     """
     Test to verify that cache eviction is triggered even if used memory is small but rss memory is above limit
     """
+    df_server = df_factory.create(
+        proactor_threads=proactor_threads_param,
+        cache_mode="true",
+        maxmemory=maxmemory_param,
+        rss_oom_deny_ratio=0.8,
+    )
+    df_server.start()
+
+    async_client = df_server.client()
+
+    max_memory = maxmemory_param
+    rss_oom_deny_ratio = 0.8
+    eviction_memory_budget_threshold = 0.1  # 10% of max_memory
 
-    max_memory = 5 * 1024 * 1024 * 1024  # 5G
-    rss_max_memory = int(max_memory * 0.8)
+    rss_eviction_threshold = max_memory * (rss_oom_deny_ratio - eviction_memory_budget_threshold)
 
-    data_fill_size = int(0.9 * rss_max_memory)  # 95% of rss_max_memory
+    data_fill_size = int((rss_oom_deny_ratio + 0.05) * max_memory)  # 85% of max_memory
 
     val_size = 1024 * 5  # 5 kb
     num_keys = data_fill_size // val_size
 
     await async_client.execute_command("DEBUG", "POPULATE", num_keys, "key", val_size)
-    # Test that used memory is less than 90% of max memory
+
+    # Test that used memory is less than 90% of max memory so as not to start eviction
+    # based on used_memory
     memory_info = await async_client.info("memory")
     assert (
         memory_info["used_memory"] < max_memory * 0.9
-    ), "Used memory should be less than 90% of max memory."
+    ), "Used memory should be less than 90% of max memory so as not to start eviction based on used_memory."
     assert (
-        memory_info["used_memory_rss"] > rss_max_memory * 0.9
-    ), "RSS memory should be less than 90% of rss max memory (max_memory * rss_oom_deny_ratio)."
+        memory_info["used_memory_rss"] > max_memory * rss_oom_deny_ratio
+    ), "Used RSS memory should exceed max_memory * rss_oom_deny_ratio so that eviction is started based on rss memory usage."
 
-    # Get RSS memory after creating new connections
     memory_info = await async_client.info("memory")
-    while memory_info["used_memory_rss"] > rss_max_memory * 0.9:
+    prev_evicted_keys = 0
+    evicted_keys_repeat_count = 0
+    while True:
+        # Wait for some time
         await asyncio.sleep(1)
+
         memory_info = await async_client.info("memory")
         logging.info(
-            f'Current rss: {memory_info["used_memory_rss"]}. rss eviction threshold: {rss_max_memory * 0.9}.'
+            f'Current used memory: {memory_info["used_memory"]}, current used rss: {memory_info["used_memory_rss"]}, rss eviction threshold: {rss_eviction_threshold}.'
         )
+
         stats_info = await async_client.info("stats")
         logging.info(f'Current evicted: {stats_info["evicted_keys"]}. Total keys: {num_keys}.')
 
+        # Check whether evicted keys are no longer increasing
+        if prev_evicted_keys == stats_info["evicted_keys"]:
+            evicted_keys_repeat_count += 1
+        else:
+            prev_evicted_keys = stats_info["evicted_keys"]
+            evicted_keys_repeat_count = 1
+
+        if evicted_keys_repeat_count > 2:
+            break
+
+    # Wait for some time
+    await asyncio.sleep(3)
+
+    memory_arena = await async_client.execute_command("MEMORY", "ARENA")
+    fragmentation_waste = extract_fragmentation_waste(memory_arena)
+    logging.info(f"Memory fragmentation waste: {fragmentation_waste}")
+    assert fragmentation_waste < 12.0, "Memory fragmentation waste should be less than 12%."
+
+    # Assert that no more keys are evicted
+    memory_info = await async_client.info("memory")
+    stats_info = await async_client.info("stats")
+
+    assert memory_info["used_memory"] > max_memory * (
+        rss_oom_deny_ratio - eviction_memory_budget_threshold - 0.05
+    ), "We should not evict all items."
+    assert memory_info["used_memory"] < max_memory * (
+        rss_oom_deny_ratio - eviction_memory_budget_threshold
+    ), "Used memory should be smaller than the eviction threshold."
+    assert memory_info["used_memory_rss"] > max_memory * (
+        rss_oom_deny_ratio - eviction_memory_budget_threshold - 0.05
+    ), "We should not evict all items."
+
+    evicted_keys = stats_info["evicted_keys"]
+    # We may evict slightly more than prev_evicted_keys due to lag in RSS memory updates
+    assert (
+        evicted_keys > 0
+        and evicted_keys >= prev_evicted_keys
+        and evicted_keys <= prev_evicted_keys * 1.0015
+    ), "We should not evict more items."
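Both this test and the two-wave variant below poll INFO until the evicted_keys counter holds steady for three consecutive reads. A hypothetical helper (name and shape are suggestions, not part of the patch) that would capture the duplicated loop:

```python
import asyncio

# Wait until the evicted_keys counter is unchanged for `stable_reads`
# consecutive polls, then return its final value.
async def wait_for_eviction_to_settle(client, stable_reads: int = 3, delay_s: float = 1.0) -> int:
    prev, repeats = -1, 0
    while repeats < stable_reads:
        await asyncio.sleep(delay_s)
        evicted = (await client.info("stats"))["evicted_keys"]
        repeats = repeats + 1 if evicted == prev else 1
        prev = evicted
    return prev
```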
+ ) + await async_client.execute_command("DEBUG", "POPULATE", num_keys, f"key{i}", val_size) + + # Test that used memory is less than 90% of max memory to not to start eviction based on used_memory + memory_info = await async_client.info("memory") + assert ( + memory_info["used_memory"] < max_memory * 0.9 + ), "Used memory should be less than 90% of max memory to not to start eviction based on used_memory." + assert ( + memory_info["used_memory_rss"] > max_memory * rss_oom_deny_ratio + ), "Used RSS memory should be more than 80% of rss max memory (max_memory * rss_oom_deny_ratio) to start eviction based on rss memory usage." + + memory_info = await async_client.info("memory") + prev_evicted_keys = 0 + evicted_keys_repeat_count = 0 + while True: + # Wait for some time + await asyncio.sleep(1) + + memory_info = await async_client.info("memory") + logging.info( + f'Current used memory: {memory_info["used_memory"]}, current used rss: {memory_info["used_memory_rss"]}, rss eviction threshold: {rss_eviction_threshold}.' + ) + + stats_info = await async_client.info("stats") + logging.info(f'Current evicted: {stats_info["evicted_keys"]}. Total keys: {num_keys}.') + + # Check if evicted keys are not increasing + if prev_evicted_keys == stats_info["evicted_keys"]: + evicted_keys_repeat_count += 1 + else: + prev_evicted_keys = stats_info["evicted_keys"] + evicted_keys_repeat_count = 1 + + if evicted_keys_repeat_count > 2: + break + + # Wait for some time + await asyncio.sleep(3) + + memory_arena = await async_client.execute_command("MEMORY", "ARENA") + fragmentation_waste = extract_fragmentation_waste(memory_arena) + logging.info(f"Memory fragmentation waste: {fragmentation_waste}") + assert fragmentation_waste < 12.0, "Memory fragmentation waste should be less than 12%." + + # Assert that no more keys are evicted + memory_info = await async_client.info("memory") + stats_info = await async_client.info("stats") + + assert memory_info["used_memory"] > max_memory * ( + rss_oom_deny_ratio - eviction_memory_budget_threshold - 0.05 + ), "We should not evict all items." + assert memory_info["used_memory"] < max_memory * ( + rss_oom_deny_ratio - eviction_memory_budget_threshold + 0.08 + ), "Used memory should be smaller than threshold." + assert memory_info["used_memory_rss"] > max_memory * ( + rss_oom_deny_ratio - eviction_memory_budget_threshold - 0.05 + ), "We should not evict all items." + + evicted_keys = stats_info["evicted_keys"] + # We may evict slightly more than prev_evicted_keys due to gaps in RSS memory usage + assert ( + evicted_keys > 0 + and evicted_keys >= prev_evicted_keys + and evicted_keys <= prev_evicted_keys * 1.0015 + ), "We should not evict more items." + @pytest.mark.asyncio async def test_throttle_on_commands_squashing_replies_bytes(df_factory: DflyInstanceFactory):