diff --git a/src/sentry/uptime/autodetect/ranking.py b/src/sentry/uptime/autodetect/ranking.py
index 68e7ad7a12cf82..0ba7466439231c 100644
--- a/src/sentry/uptime/autodetect/ranking.py
+++ b/src/sentry/uptime/autodetect/ranking.py
@@ -4,10 +4,6 @@
 from datetime import datetime, timedelta
 from typing import TYPE_CHECKING
 
-from django.conf import settings
-from redis.client import StrictRedis
-from rediscluster import RedisCluster
-
 from sentry.constants import UPTIME_AUTODETECTION
 from sentry.uptime.models import get_active_auto_monitor_count_for_org
 from sentry.uptime.subscriptions.subscriptions import (
@@ -15,7 +11,8 @@
     MaxUrlsForDomainReachedException,
     check_url_limits,
 )
-from sentry.utils import metrics, redis
+from sentry.uptime.utils import get_cluster
+from sentry.utils import metrics
 
 if TYPE_CHECKING:
     from sentry.models.organization import Organization
@@ -37,10 +34,6 @@
 KEY_EXPIRY = ORGANIZATION_FLUSH_FREQUENCY * 2
 
 
-def _get_cluster() -> RedisCluster | StrictRedis:
-    return redis.redis_clusters.get(settings.SENTRY_UPTIME_DETECTOR_CLUSTER)
-
-
 def add_base_url_to_rank(project: Project, base_url: str):
     """
     Takes a project and valid base url and stores ranking information about it in Redis.
@@ -59,7 +52,7 @@ def add_base_url_to_rank(project: Project, base_url: str):
     larger than `RANKED_MAX_SIZE`. That shouldn't cause us problems, and is preferable to
     trimming it on every call.
     """
-    cluster = _get_cluster()
+    cluster = get_cluster()
     org_projects_key = build_org_projects_key(project.organization)
     pipeline = cluster.pipeline()
     pipeline.zincrby(org_projects_key, 1, str(project.id))
@@ -91,7 +84,7 @@ def get_candidate_projects_for_org(org: Organization) -> list[tuple[int, int]]:
     Project ids are sorted by `total_urls_seen` desc.
     """
     key = build_org_projects_key(org)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     return [
         (int(project_id), count)
         for project_id, count in cluster.zrange(
@@ -105,7 +98,7 @@ def delete_candidate_projects_for_org(org: Organization) -> None:
     Deletes candidate projects related to the organization that have seen urls.
     """
     key = build_org_projects_key(org)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     cluster.delete(key)
 
 
@@ -115,7 +108,7 @@ def get_candidate_urls_for_project(project: Project, limit=5) -> list[tuple[str,
     `times_url_seen` desc.
     """
     key = get_project_base_url_rank_key(project)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     candidate_urls = cluster.zrange(key, 0, -1, desc=True, withscores=True, score_cast_func=int)
     urls = []
     for candidate_url, url_count in candidate_urls:
@@ -134,7 +127,7 @@ def delete_candidate_urls_for_project(project: Project) -> None:
     Deletes all current candidate rules for a project.
    """
     key = get_project_base_url_rank_key(project)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     cluster.delete(key)
 
 
@@ -166,7 +159,7 @@ def get_organization_bucket(bucket: datetime) -> set[int]:
     that have projects that have seen urls.
     """
     key = get_organization_bucket_key_for_datetime(bucket)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     return {int(organization_id) for organization_id in cluster.smembers(key)}
 
 
@@ -175,7 +168,7 @@ def delete_organization_bucket(bucket: datetime) -> None:
     Delete all organizations from a specific datetime bucket.
     """
     key = get_organization_bucket_key_for_datetime(bucket)
-    cluster = _get_cluster()
+    cluster = get_cluster()
     cluster.delete(key)
 
 
diff --git a/src/sentry/uptime/autodetect/result_handler.py b/src/sentry/uptime/autodetect/result_handler.py
index f04d4253e9b6b1..736af143927beb 100644
--- a/src/sentry/uptime/autodetect/result_handler.py
+++ b/src/sentry/uptime/autodetect/result_handler.py
@@ -11,7 +11,6 @@
 
 from sentry import audit_log
 from sentry.uptime.autodetect.notifications import send_auto_detected_notifications
-from sentry.uptime.autodetect.ranking import _get_cluster
 from sentry.uptime.autodetect.tasks import set_failed_url
 from sentry.uptime.models import UptimeSubscription, get_audit_log_data
 from sentry.uptime.subscriptions.subscriptions import (
@@ -20,6 +19,7 @@
     update_uptime_detector,
 )
 from sentry.uptime.types import UptimeMonitorMode
+from sentry.uptime.utils import get_cluster
 from sentry.utils import metrics
 from sentry.utils.audit import create_system_audit_entry
 from sentry.workflow_engine.models.detector import Detector
@@ -52,7 +52,7 @@ def handle_onboarding_result(
     metric_tags: dict[str, str],
 ) -> None:
     if result["status"] == CHECKSTATUS_FAILURE:
-        redis = _get_cluster()
+        redis = get_cluster()
         key = build_onboarding_failure_key(detector)
         pipeline = redis.pipeline()
         pipeline.incr(key)
diff --git a/src/sentry/uptime/autodetect/tasks.py b/src/sentry/uptime/autodetect/tasks.py
index d843440d46b2f3..d2f4038d8d01ed 100644
--- a/src/sentry/uptime/autodetect/tasks.py
+++ b/src/sentry/uptime/autodetect/tasks.py
@@ -14,7 +14,6 @@
 from sentry.tasks.base import instrumented_task
 from sentry.taskworker.namespaces import uptime_tasks
 from sentry.uptime.autodetect.ranking import (
-    _get_cluster,
     delete_candidate_projects_for_org,
     delete_candidate_urls_for_project,
     delete_organization_bucket,
@@ -31,6 +30,7 @@
     is_url_auto_monitored_for_project,
 )
 from sentry.uptime.types import UptimeMonitorMode
+from sentry.uptime.utils import get_cluster
 from sentry.utils import metrics
 from sentry.utils.hashlib import md5_text
 from sentry.utils.locking import UnableToAcquireLock
@@ -67,7 +67,7 @@ def schedule_autodetections():
     )
     try:
         with lock.acquire():
-            cluster = _get_cluster()
+            cluster = get_cluster()
             last_processed = cluster.get(LAST_PROCESSED_KEY)
             if last_processed is None:
                 last_processed = timezone.now().replace(second=0, microsecond=0)
@@ -260,7 +260,7 @@ def monitor_url_for_project(project: Project, url: str) -> Detector:
 
 def is_failed_url(url: str) -> bool:
     key = get_failed_url_key(url)
-    return _get_cluster().exists(key) == 1
+    return get_cluster().exists(key) == 1
 
 
 def set_failed_url(url: str) -> None:
@@ -269,7 +269,7 @@ def set_failed_url(url: str) -> None:
     """
     key = get_failed_url_key(url)
     # TODO: Jitter the expiry here, so we don't retry all at the same time.
-    _get_cluster().set(key, 1, ex=FAILED_URL_RETRY_FREQ)
+    get_cluster().set(key, 1, ex=FAILED_URL_RETRY_FREQ)
 
 
 def get_failed_url_key(url: str) -> str:
diff --git a/src/sentry/uptime/consumers/results_consumer.py b/src/sentry/uptime/consumers/results_consumer.py
index 3e354f2555deab..74b31774bd696f 100644
--- a/src/sentry/uptime/consumers/results_consumer.py
+++ b/src/sentry/uptime/consumers/results_consumer.py
@@ -18,7 +18,6 @@
     ResultProcessor,
     ResultsStrategyFactory,
 )
-from sentry.uptime.autodetect.ranking import _get_cluster
 from sentry.uptime.autodetect.result_handler import handle_onboarding_result
 from sentry.uptime.consumers.eap_producer import produce_eap_uptime_result
 from sentry.uptime.grouptype import UptimePacketValue
@@ -30,8 +29,6 @@
     load_regions_for_uptime_subscription,
 )
 from sentry.uptime.subscriptions.subscriptions import (
-    build_last_seen_interval_key,
-    build_last_update_key,
     check_and_update_regions,
     disable_uptime_detector,
     remove_uptime_subscription_if_unused,
@@ -41,6 +38,7 @@
     update_remote_uptime_subscription,
 )
 from sentry.uptime.types import UptimeMonitorMode
+from sentry.uptime.utils import build_last_seen_interval_key, build_last_update_key, get_cluster
 from sentry.utils import metrics
 from sentry.workflow_engine.models.data_source import DataPacket
 from sentry.workflow_engine.models.detector import Detector
@@ -277,7 +275,7 @@ def handle_result(self, subscription: UptimeSubscription | None, result: CheckRe
                 sample_rate=1.0,
             )
 
-            cluster = _get_cluster()
+            cluster = get_cluster()
             last_update_key = build_last_update_key(detector)
             last_update_raw: str | None = cluster.get(last_update_key)
             last_update_ms = 0 if last_update_raw is None else int(last_update_raw)
diff --git a/src/sentry/uptime/grouptype.py b/src/sentry/uptime/grouptype.py
index a85ba23d5481d0..ec9528e957fba2 100644
--- a/src/sentry/uptime/grouptype.py
+++ b/src/sentry/uptime/grouptype.py
@@ -15,8 +15,8 @@
 from sentry.types.group import PriorityLevel
 from sentry.uptime.endpoints.validators import UptimeDomainCheckFailureValidator
 from sentry.uptime.models import UptimeSubscription
-from sentry.uptime.subscriptions.subscriptions import build_fingerprint
 from sentry.uptime.types import GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE, UptimeMonitorMode
+from sentry.uptime.utils import build_fingerprint
 from sentry.utils import metrics
 from sentry.workflow_engine.handlers.detector.base import DetectorOccurrence, EventData
 from sentry.workflow_engine.handlers.detector.stateful import (
diff --git a/src/sentry/uptime/subscriptions/subscriptions.py b/src/sentry/uptime/subscriptions/subscriptions.py
index 8324148d13d0cf..ccd6ed67ce0261 100644
--- a/src/sentry/uptime/subscriptions/subscriptions.py
+++ b/src/sentry/uptime/subscriptions/subscriptions.py
@@ -1,7 +1,6 @@
 import logging
 from collections.abc import Sequence
 
-from django.conf import settings
 from django.db import router, transaction
 from sentry_kafka_schemas.schema_types.uptime_results_v1 import (
     CHECKSTATUS_FAILURE,
@@ -40,7 +39,7 @@
     GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE,
     UptimeMonitorMode,
 )
-from sentry.utils import redis
+from sentry.uptime.utils import build_fingerprint, build_last_update_key, get_cluster
 from sentry.utils.db import atomic_transaction
 from sentry.utils.not_set import NOT_SET, NotSet, default_if_not_set
 from sentry.utils.outcomes import Outcome
@@ -58,22 +57,6 @@
 MAX_MONITORS_PER_DOMAIN = 100
 
 
-def build_last_update_key(detector: Detector) -> str:
-    return f"project-sub-last-update:detector:{detector.id}"
-
-
-def build_last_seen_interval_key(detector: Detector) -> str:
-    return f"project-sub-last-seen-interval:detector:{detector.id}"
-
-
-def build_detector_fingerprint_component(detector: Detector) -> str:
-    return f"uptime-detector:{detector.id}"
-
-
-def build_fingerprint(detector: Detector) -> list[str]:
-    return [build_detector_fingerprint_component(detector)]
-
-
 def resolve_uptime_issue(detector: Detector) -> None:
     """
     Sends an update to the issue platform to resolve the uptime issue for this
@@ -474,7 +457,7 @@ def disable_uptime_detector(detector: Detector, skip_quotas: bool = False):
         # start from a good state
         detector_state.update(state=DetectorPriorityLevel.OK, is_triggered=False)
 
-        cluster = redis.redis_clusters.get(settings.SENTRY_UPTIME_DETECTOR_CLUSTER)
+        cluster = get_cluster()
         last_update_key = build_last_update_key(detector)
         cluster.delete(last_update_key)
 
diff --git a/src/sentry/uptime/utils.py b/src/sentry/uptime/utils.py
new file mode 100644
index 00000000000000..1e06291c73ec98
--- /dev/null
+++ b/src/sentry/uptime/utils.py
@@ -0,0 +1,26 @@
+from django.conf import settings
+from redis.client import StrictRedis
+from rediscluster import RedisCluster
+
+from sentry.utils import redis
+from sentry.workflow_engine.models.detector import Detector
+
+
+def build_last_update_key(detector: Detector) -> str:
+    return f"project-sub-last-update:detector:{detector.id}"
+
+
+def build_last_seen_interval_key(detector: Detector) -> str:
+    return f"project-sub-last-seen-interval:detector:{detector.id}"
+
+
+def build_detector_fingerprint_component(detector: Detector) -> str:
+    return f"uptime-detector:{detector.id}"
+
+
+def build_fingerprint(detector: Detector) -> list[str]:
+    return [build_detector_fingerprint_component(detector)]
+
+
+def get_cluster() -> RedisCluster | StrictRedis:
+    return redis.redis_clusters.get(settings.SENTRY_UPTIME_DETECTOR_CLUSTER)
diff --git a/tests/sentry/tasks/test_post_process.py b/tests/sentry/tasks/test_post_process.py
index 5ce8a9c02c3b66..06f109141d1e76 100644
--- a/tests/sentry/tasks/test_post_process.py
+++ b/tests/sentry/tasks/test_post_process.py
@@ -75,7 +75,8 @@
 from sentry.testutils.skips import requires_snuba
 from sentry.types.activity import ActivityType
 from sentry.types.group import GroupSubStatus, PriorityLevel
-from sentry.uptime.autodetect.ranking import _get_cluster, get_organization_bucket_key
+from sentry.uptime.autodetect.ranking import get_organization_bucket_key
+from sentry.uptime.utils import get_cluster
 from sentry.users.services.user.service import user_service
 from sentry.utils import json
 from sentry.utils.cache import cache
@@ -2314,7 +2315,7 @@ def test_user_reports_no_shim_if_group_exists_on_report(
 class DetectBaseUrlsForUptimeTestMixin(BasePostProgressGroupMixin):
     def assert_organization_key(self, organization: Organization, exists: bool) -> None:
         key = get_organization_bucket_key(organization)
-        cluster = _get_cluster()
+        cluster = get_cluster()
         assert exists == cluster.sismember(key, str(organization.id))
 
     def test_uptime_detection_feature_url(self) -> None:
diff --git a/tests/sentry/uptime/autodetect/test_detector.py b/tests/sentry/uptime/autodetect/test_detector.py
index 23044e077eb5b2..e65ac77c897c99 100644
--- a/tests/sentry/uptime/autodetect/test_detector.py
+++ b/tests/sentry/uptime/autodetect/test_detector.py
@@ -2,13 +2,14 @@
 from sentry.testutils.cases import UptimeTestCase
 from sentry.testutils.helpers.options import override_options
 from sentry.uptime.autodetect.detector import autodetect_base_url_for_project
-from sentry.uptime.autodetect.ranking import _get_cluster, get_organization_bucket_key
+from sentry.uptime.autodetect.ranking import get_organization_bucket_key
+from sentry.uptime.utils import get_cluster
 
 
 class DetectBaseUrlForProjectTest(UptimeTestCase):
     def assert_organization_key(self, organization: Organization, exists: bool) -> None:
         key = get_organization_bucket_key(organization)
-        cluster = _get_cluster()
+        cluster = get_cluster()
         assert exists == cluster.sismember(key, str(organization.id))
 
     def test(self) -> None:
diff --git a/tests/sentry/uptime/autodetect/test_ranking.py b/tests/sentry/uptime/autodetect/test_ranking.py
index 43841a31de9247..477bcac7ee6b6b 100644
--- a/tests/sentry/uptime/autodetect/test_ranking.py
+++ b/tests/sentry/uptime/autodetect/test_ranking.py
@@ -5,7 +5,6 @@
 from sentry.models.project import Project
 from sentry.testutils.cases import UptimeTestCase
 from sentry.uptime.autodetect.ranking import (
-    _get_cluster,
     add_base_url_to_rank,
     build_org_projects_key,
     delete_candidate_urls_for_project,
@@ -17,6 +16,7 @@
     should_autodetect_for_organization,
     should_autodetect_for_project,
 )
+from sentry.uptime.utils import get_cluster
 
 
 class AddBaseUrlToRankTest(UptimeTestCase):
@@ -24,7 +24,7 @@ def assert_project_count(
         self, project: Project, count: int | None, expiry: int | None
     ) -> int | None:
         key = build_org_projects_key(project.organization)
-        cluster = _get_cluster()
+        cluster = get_cluster()
         if count is None:
             assert not cluster.zscore(key, str(project.id))
             return None
@@ -36,7 +36,7 @@ def assert_url_count(
         self, project: Project, url: str, count: int | None, expiry: int | None
     ) -> int | None:
         key = get_project_base_url_rank_key(project)
-        cluster = _get_cluster()
+        cluster = get_cluster()
         if count is None:
             assert cluster.zscore(key, url) is None
             return None
@@ -45,7 +45,7 @@ def assert_url_count(
         return self.check_expiry(key, expiry)
 
     def check_expiry(self, key: str, expiry: int | None) -> int:
-        cluster = _get_cluster()
+        cluster = get_cluster()
         ttl = cluster.ttl(key)
         if expiry is None:
             assert ttl > 0
@@ -94,7 +94,7 @@ def test_trim(self) -> None:
         url_1 = "https://sentry.io"
         url_2 = "https://sentry.sentry.io"
         url_3 = "https://santry.sentry.io"
-        cluster = _get_cluster()
+        cluster = get_cluster()
         add_base_url_to_rank(self.project, url_1)
         add_base_url_to_rank(self.project, url_1)
         add_base_url_to_rank(self.project, url_1)
diff --git a/tests/sentry/uptime/autodetect/test_tasks.py b/tests/sentry/uptime/autodetect/test_tasks.py
index 6d44dc4872d487..fc7d34ba44c1a8 100644
--- a/tests/sentry/uptime/autodetect/test_tasks.py
+++ b/tests/sentry/uptime/autodetect/test_tasks.py
@@ -17,7 +17,6 @@
 from sentry.testutils.helpers.datetime import freeze_time
 from sentry.uptime.autodetect.ranking import (
     NUMBER_OF_BUCKETS,
-    _get_cluster,
     add_base_url_to_rank,
     get_organization_bucket,
     get_project_base_url_rank_key,
@@ -41,6 +40,7 @@
     is_url_auto_monitored_for_project,
 )
 from sentry.uptime.types import UptimeMonitorMode
+from sentry.uptime.utils import get_cluster
 from sentry.workflow_engine.models import Detector
 
 
@@ -55,7 +55,7 @@ class ScheduleDetectionsTest(UptimeTestCase):
     def test_no_last_processed(self) -> None:
         # The first time this runs we don't expect much to happen,
         # just that it'll update the last processed date in redis
-        cluster = _get_cluster()
+        cluster = get_cluster()
         assert not cluster.get(LAST_PROCESSED_KEY)
         with mock.patch(
             "sentry.uptime.autodetect.tasks.process_autodetection_bucket"
@@ -69,7 +69,7 @@ def test_no_last_processed(self) -> None:
         )
 
     def test_processes(self) -> None:
-        cluster = _get_cluster()
+        cluster = get_cluster()
         current_bucket = timezone.now().replace(second=0, microsecond=0)
         last_processed_bucket = current_bucket - timedelta(minutes=10)
         cluster.set(LAST_PROCESSED_KEY, int(last_processed_bucket.timestamp()))
@@ -181,7 +181,7 @@ def test_should_not_detect_organization(self) -> None:
             get_project_base_url_rank_key(self.project),
             get_project_base_url_rank_key(project_2),
         ]
-        redis = _get_cluster()
+        redis = get_cluster()
         assert all(redis.exists(key) for key in keys)
 
         with mock.patch(
diff --git a/tests/sentry/uptime/consumers/test_results_consumer.py b/tests/sentry/uptime/consumers/test_results_consumer.py
index 96543590d454bd..3a35cf39503da9 100644
--- a/tests/sentry/uptime/consumers/test_results_consumer.py
+++ b/tests/sentry/uptime/consumers/test_results_consumer.py
@@ -32,7 +32,6 @@
 from sentry.testutils.helpers.datetime import freeze_time
 from sentry.testutils.helpers.options import override_options
 from sentry.testutils.thread_leaks.pytest import thread_leak_allowlist
-from sentry.uptime.autodetect.ranking import _get_cluster
 from sentry.uptime.autodetect.result_handler import (
     AUTO_DETECTED_ACTIVE_SUBSCRIPTION_INTERVAL,
     ONBOARDING_MONITOR_PERIOD,
@@ -45,13 +44,16 @@
 from sentry.uptime.models import UptimeSubscription, UptimeSubscriptionRegion
 from sentry.uptime.subscriptions.subscriptions import (
     UptimeMonitorNoSeatAvailable,
-    build_detector_fingerprint_component,
-    build_last_seen_interval_key,
-    build_last_update_key,
     disable_uptime_detector,
     enable_uptime_detector,
 )
 from sentry.uptime.types import IncidentStatus, UptimeMonitorMode
+from sentry.uptime.utils import (
+    build_detector_fingerprint_component,
+    build_last_seen_interval_key,
+    build_last_update_key,
+    get_cluster,
+)
 from sentry.workflow_engine.types import DetectorPriorityLevel
 from tests.sentry.uptime.subscriptions.test_tasks import ConfigPusherTestMixin
 
@@ -311,12 +313,12 @@ def test_missed_check_false_positive(self) -> None:
         # Pretend we got a result 3500 seconds ago (nearly an hour); the subscription
         # has an interval of 300 seconds, which we're going to say was just recently
         # changed. Verify we don't emit any metrics recording of a missed check
-        _get_cluster().set(
+        get_cluster().set(
             build_last_update_key(self.detector),
             int(result["scheduled_check_time_ms"]) - (3500 * 1000),
         )
 
-        _get_cluster().set(
+        get_cluster().set(
             build_last_seen_interval_key(self.detector),
             3600 * 1000,
         )
@@ -337,12 +339,12 @@ def test_missed_check_updated_interval(self) -> None:
         # Pretend we got a result 3500 seconds ago (nearly an hour); the subscription
         # has an interval of 300 seconds, which we're going to say was just recently
         # changed. Verify we don't emit any metrics recording of a missed check
-        _get_cluster().set(
+        get_cluster().set(
             build_last_update_key(self.detector),
             int(result["scheduled_check_time_ms"]) - (3500 * 1000),
         )
 
-        _get_cluster().set(
+        get_cluster().set(
             build_last_seen_interval_key(self.detector),
             3600 * 1000,
         )
@@ -378,12 +380,12 @@ def test_no_missed_check_for_disabled(self, mock_produce: MagicMock) -> None:
         # Pretend we got a result 900 seconds ago; the subscription
         # has an interval of 300 seconds. We've missed two checks.
         last_update_time = int(result["scheduled_check_time_ms"]) - (900 * 1000)
-        _get_cluster().set(
+        get_cluster().set(
             build_last_update_key(self.detector),
             last_update_time,
         )
 
-        _get_cluster().set(
+        get_cluster().set(
             build_last_seen_interval_key(self.detector),
             300 * 1000,
         )
@@ -409,12 +411,12 @@ def test_missed_check_true_positive(self, mock_produce: MagicMock) -> None:
         # Pretend we got a result 900 seconds ago; the subscription
         # has an interval of 300 seconds. We've missed two checks.
         last_update_time = int(result["scheduled_check_time_ms"]) - (900 * 1000)
-        _get_cluster().set(
+        get_cluster().set(
             build_last_update_key(self.detector),
             last_update_time,
         )
 
-        _get_cluster().set(
+        get_cluster().set(
             build_last_seen_interval_key(self.detector),
             300 * 1000,
         )
@@ -454,7 +456,7 @@ def test_skip_already_processed(self) -> None:
             "organizations:uptime",
         ]
         result = self.create_uptime_result(self.subscription.subscription_id)
-        _get_cluster().set(
+        get_cluster().set(
             build_last_update_key(self.detector),
             int(result["scheduled_check_time_ms"]),
         )
@@ -604,7 +606,7 @@ def test_onboarding_failure(self) -> None:
             status=CHECKSTATUS_FAILURE,
             scheduled_check_time=datetime.now() - timedelta(minutes=5),
         )
-        redis = _get_cluster()
+        redis = get_cluster()
         key = build_onboarding_failure_key(self.detector)
         assert redis.get(key) is None
         with (
@@ -720,7 +722,7 @@ def test_onboarding_success_ongoing(self) -> None:
             status=CHECKSTATUS_SUCCESS,
             scheduled_check_time=datetime.now() - timedelta(minutes=5),
         )
-        redis = _get_cluster()
+        redis = get_cluster()
         key = build_onboarding_failure_key(self.detector)
         assert redis.get(key) is None
         with (
@@ -768,7 +770,7 @@ def test_onboarding_success_graduate(self) -> None:
             status=CHECKSTATUS_SUCCESS,
             scheduled_check_time=datetime.now() - timedelta(minutes=2),
         )
-        redis = _get_cluster()
+        redis = get_cluster()
         key = build_onboarding_failure_key(self.detector)
         assert redis.get(key) is None
         with (
@@ -847,7 +849,7 @@ def test_onboarding_graduation_no_seat_available(self) -> None:
             scheduled_check_time=datetime.now() - timedelta(minutes=2),
         )
 
-        redis = _get_cluster()
+        redis = get_cluster()
         key = build_onboarding_failure_key(self.detector)
         assert redis.get(key) is None
 
diff --git a/tests/sentry/uptime/test_grouptype.py b/tests/sentry/uptime/test_grouptype.py
index d9960ed75fdcfa..7c47a740e2f20b 100644
--- a/tests/sentry/uptime/test_grouptype.py
+++ b/tests/sentry/uptime/test_grouptype.py
@@ -26,12 +26,9 @@
     build_evidence_display,
 )
 from sentry.uptime.models import UptimeSubscription, get_uptime_subscription
-from sentry.uptime.subscriptions.subscriptions import (
-    build_detector_fingerprint_component,
-    build_fingerprint,
-    resolve_uptime_issue,
-)
+from sentry.uptime.subscriptions.subscriptions import resolve_uptime_issue
 from sentry.uptime.types import UptimeMonitorMode
+from sentry.uptime.utils import build_detector_fingerprint_component, build_fingerprint
 from sentry.workflow_engine.models.data_source import DataPacket
 from sentry.workflow_engine.models.detector import Detector
 from sentry.workflow_engine.types import DetectorEvaluationResult, DetectorPriorityLevel
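For reference, a minimal usage sketch (not part of the patch) of the helpers consolidated into sentry.uptime.utils; clear_last_update is a hypothetical function used only to illustrate that key construction and Redis cluster access now come from a single module:

from sentry.uptime.utils import build_last_update_key, get_cluster
from sentry.workflow_engine.models.detector import Detector


def clear_last_update(detector: Detector) -> None:
    # Hypothetical helper: every uptime code path resolves the same Redis
    # cluster via get_cluster(), so deleting the detector's last-update key
    # here is visible to the consumers, tasks, and subscription code alike.
    cluster = get_cluster()
    cluster.delete(build_last_update_key(detector))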