From 2fc206af61cd18e8574b1140dde160e0626446b3 Mon Sep 17 00:00:00 2001 From: Pierre Massat Date: Sat, 24 Jan 2026 08:53:57 -0800 Subject: [PATCH] ref(tests): Remove is_eap parameter, use only EAP codepath Remove the is_eap parameter from store_span, store_spans, store_segment, store_indexed_span, and create_event test utilities. The non-EAP codepath is no longer needed as all span storage now uses the EAP endpoint. This simplifies the test utilities by removing the conditional logic and always using span_to_trace_item() to convert spans before posting to the EAP_ITEMS_INSERT_ENDPOINT. Co-Authored-By: Claude --- src/sentry/testutils/cases.py | 85 +++----- tests/acceptance/test_explore_spans.py | 1 - .../test_trace_view_from_explore.py | 1 - tests/acceptance/test_trace_view_waterfall.py | 1 - .../test_organization_ai_conversations.py | 6 +- ...test_organization_ai_conversations_base.py | 2 +- .../test_organization_insights_tree.py | 4 +- .../test_organization_profiling_profiles.py | 4 +- .../test_organization_spans_fields.py | 14 +- .../test_organization_spans_fields_stats.py | 2 - .../api/endpoints/test_organization_traces.py | 5 - tests/sentry/data_export/test_tasks.py | 10 +- tests/sentry/discover/test_compare_tables.py | 1 - .../discover/test_compare_timeseries.py | 1 - .../test_subscription_processor_aci.py | 2 +- .../seer/anomaly_detection/test_store_data.py | 1 - tests/sentry/seer/explorer/test_index_data.py | 12 +- tests/sentry/seer/explorer/test_tools.py | 32 +-- .../sentry/tasks/test_llm_issue_detection.py | 2 +- .../tasks/test_web_vitals_issue_detection.py | 12 +- .../test_organization_events_cross_trace.py | 5 - .../test_organization_events_meta.py | 7 +- .../test_organization_events_span_indexed.py | 197 ++++-------------- ..._organization_events_stats_span_indexed.py | 72 ++----- ...anization_events_timeseries_cross_trace.py | 5 - ...st_organization_events_timeseries_spans.py | 50 ++--- .../test_organization_events_trace.py | 25 +-- .../api/endpoints/test_organization_trace.py | 26 ++- ...test_organization_trace_item_attributes.py | 22 +- ...ganization_trace_item_attributes_ranked.py | 1 - .../test_organization_trace_item_stats.py | 3 - .../endpoints/test_organization_trace_meta.py | 18 +- .../test_project_trace_item_details.py | 8 +- .../api/endpoints/test_seer_attributes.py | 4 - 34 files changed, 177 insertions(+), 464 deletions(-) diff --git a/src/sentry/testutils/cases.py b/src/sentry/testutils/cases.py index 75f0e8138d938d..0e2bc1a9a5b3d5 100644 --- a/src/sentry/testutils/cases.py +++ b/src/sentry/testutils/cases.py @@ -1138,30 +1138,21 @@ def store_group(self, group): == 200 ) - def store_span(self, span, is_eap=False): - self.store_spans([span], is_eap=is_eap) - - def store_spans(self, spans, is_eap=False): - if is_eap: - files = {} - for i, span in enumerate(spans): - trace_item = span_to_trace_item(span) - files[f"item_{i}"] = trace_item.SerializeToString() - assert ( - requests.post( - settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT, - files=files, - ).status_code - == 200 - ) - else: - assert ( - requests.post( - settings.SENTRY_SNUBA + "/tests/entities/spans/insert", - data=json.dumps(spans), - ).status_code - == 200 - ) + def store_span(self, span): + self.store_spans([span]) + + def store_spans(self, spans): + files = {} + for i, span in enumerate(spans): + trace_item = span_to_trace_item(span) + files[f"item_{i}"] = trace_item.SerializeToString() + assert ( + requests.post( + settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT, + files=files, + ).status_code + 
== 200 + ) def store_ourlogs(self, ourlogs): files = {f"log_{i}": log.SerializeToString() for i, log in enumerate(ourlogs)} @@ -1302,7 +1293,6 @@ def store_segment( status: str | None = None, environment: str | None = None, organization_id: int = 1, - is_eap: bool = False, ): if span_id is None: span_id = self._random_span_id() @@ -1355,7 +1345,7 @@ def store_segment( if parent_span_id: payload["parent_span_id"] = parent_span_id - self.store_span(payload, is_eap=is_eap) + self.store_span(payload) def store_indexed_span( self, @@ -1376,7 +1366,6 @@ def store_indexed_span( group: str = "00", category: str | None = None, organization_id: int = 1, - is_eap: bool = False, ): if span_id is None: span_id = self._random_span_id() @@ -1427,7 +1416,7 @@ def store_indexed_span( # We want to give the caller the possibility to store only a summary since the database does not deduplicate # on the span_id which makes the assumptions of a unique span_id in the database invalid. if not store_only_summary: - self.store_span(payload, is_eap=is_eap) + self.store_span(payload) class BaseMetricsTestCase(SnubaTestCase): @@ -2265,30 +2254,21 @@ def function_fingerprint(self, function): hasher.update(function["function"].encode()) return int(hasher.hexdigest()[:8], 16) - def store_span(self, span, is_eap=False): - self.store_spans([span], is_eap=is_eap) + def store_span(self, span): + self.store_spans([span]) - def store_spans(self, spans, is_eap=False): - if is_eap: - files = {} - for i, span in enumerate(spans): - trace_item = span_to_trace_item(span) - files[f"item_{i}"] = trace_item.SerializeToString() - assert ( - requests.post( - settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT, - files=files, - ).status_code - == 200 - ) - else: - assert ( - requests.post( - settings.SENTRY_SNUBA + "/tests/entities/spans/insert", - data=json.dumps(spans), - ).status_code - == 200 - ) + def store_spans(self, spans): + files = {} + for i, span in enumerate(spans): + trace_item = span_to_trace_item(span) + files[f"item_{i}"] = trace_item.SerializeToString() + assert ( + requests.post( + settings.SENTRY_SNUBA + EAP_ITEMS_INSERT_ENDPOINT, + files=files, + ).status_code + == 200 + ) @pytest.mark.snuba @@ -3688,7 +3668,6 @@ def create_event( slow_db_performance_issue: bool = False, start_timestamp: datetime | None = None, store_event_kwargs: dict[str, Any] | None = None, - is_eap: bool = False, ) -> Event: if not store_event_kwargs: store_event_kwargs = {} @@ -3768,7 +3747,7 @@ def create_event( ) ) spans_to_store.append(self.convert_event_data_to_span(event)) - self.store_spans(spans_to_store, is_eap=is_eap) + self.store_spans(spans_to_store) return event def convert_event_data_to_span(self, event: Event) -> dict[str, Any]: diff --git a/tests/acceptance/test_explore_spans.py b/tests/acceptance/test_explore_spans.py index f18caf8e1b9e60..bce99786adec26 100644 --- a/tests/acceptance/test_explore_spans.py +++ b/tests/acceptance/test_explore_spans.py @@ -60,7 +60,6 @@ def test_spans_table_loads_all_events(self, mock_now: MagicMock) -> None: ] self.store_spans( spans, - is_eap=True, ) self.page.visit_explore_spans(self.organization.slug) diff --git a/tests/acceptance/test_trace_view_from_explore.py b/tests/acceptance/test_trace_view_from_explore.py index e0c8c836c7936b..d81a63cccea694 100644 --- a/tests/acceptance/test_trace_view_from_explore.py +++ b/tests/acceptance/test_trace_view_from_explore.py @@ -70,7 +70,6 @@ def test_navigation(self, mock_now: MagicMock) -> None: parent_span_id=None, project_id=self.project.id, 
milliseconds=3000, - is_eap=True, ) # Visit explore spans table diff --git a/tests/acceptance/test_trace_view_waterfall.py b/tests/acceptance/test_trace_view_waterfall.py index 1841193f13fa2e..15a5592f42d630 100644 --- a/tests/acceptance/test_trace_view_waterfall.py +++ b/tests/acceptance/test_trace_view_waterfall.py @@ -64,7 +64,6 @@ def test_trace_view_waterfall_loads(self, mock_now: MagicMock) -> None: parent_span_id=None, project_id=self.project.id, milliseconds=3000, - is_eap=True, ) # Visit the trace view and wait till waterfall loads diff --git a/tests/sentry/api/endpoints/test_organization_ai_conversations.py b/tests/sentry/api/endpoints/test_organization_ai_conversations.py index 8d07f66757410e..d0b1d60b675cd4 100644 --- a/tests/sentry/api/endpoints/test_organization_ai_conversations.py +++ b/tests/sentry/api/endpoints/test_organization_ai_conversations.py @@ -208,7 +208,7 @@ def test_no_conversations(self) -> None: {"description": "test", "sentry_tags": {"status": "ok"}}, start_ts=now, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) query = { "project": [self.project.id], @@ -432,7 +432,7 @@ def test_pagination(self) -> None: }, start_ts=now - timedelta(minutes=i), ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) query = { "project": [self.project.id], @@ -470,7 +470,7 @@ def test_zero_values(self) -> None: }, start_ts=now, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) query = { "project": [self.project.id], diff --git a/tests/sentry/api/endpoints/test_organization_ai_conversations_base.py b/tests/sentry/api/endpoints/test_organization_ai_conversations_base.py index bc4d32d80ca286..2212a930334c80 100644 --- a/tests/sentry/api/endpoints/test_organization_ai_conversations_base.py +++ b/tests/sentry/api/endpoints/test_organization_ai_conversations_base.py @@ -114,5 +114,5 @@ def store_ai_span( extra_data, start_ts=timestamp, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) return span diff --git a/tests/sentry/api/endpoints/test_organization_insights_tree.py b/tests/sentry/api/endpoints/test_organization_insights_tree.py index cdc38877dbb684..679e6cc8821e6c 100644 --- a/tests/sentry/api/endpoints/test_organization_insights_tree.py +++ b/tests/sentry/api/endpoints/test_organization_insights_tree.py @@ -63,7 +63,7 @@ def _store_nextjs_function_spans(self) -> None: start_ts=self.ten_mins_ago, ) span["sentry_tags"]["op"] = "function.nextjs" - self.store_span(span, is_eap=True) + self.store_span(span) spans.append(span) def _store_unrelated_spans(self) -> None: @@ -81,7 +81,7 @@ def _store_unrelated_spans(self) -> None: start_ts=self.ten_mins_ago, ) span["sentry_tags"]["op"] = "db" - self.store_span(span, is_eap=True) + self.store_span(span) spans.append(span) def test_get_nextjs_function_data(self) -> None: diff --git a/tests/sentry/api/endpoints/test_organization_profiling_profiles.py b/tests/sentry/api/endpoints/test_organization_profiling_profiles.py index 1385ec663ab838..06a6c9d5556459 100644 --- a/tests/sentry/api/endpoints/test_organization_profiling_profiles.py +++ b/tests/sentry/api/endpoints/test_organization_profiling_profiles.py @@ -714,7 +714,7 @@ def test_queries_profile_candidates_from_spans( span = self.create_span(project=self.project, start_ts=self.ten_mins_ago, duration=1000) span.update({"profile_id": profile_id}) - self.store_span(span, is_eap=True) + self.store_span(span) # this span has continuous profile with a matching chunk (to be mocked below) profiler_id = uuid4().hex @@ -732,7 
+732,7 @@ def test_queries_profile_candidates_from_spans( ) del span_2["profile_id"] - self.store_span(span_2, is_eap=True) + self.store_span(span_2) # not able to write profile chunks to the table yet so mock it's response here # so that the span with a continuous profile looks like it has a profile chunk diff --git a/tests/sentry/api/endpoints/test_organization_spans_fields.py b/tests/sentry/api/endpoints/test_organization_spans_fields.py index 2efb292b85a939..d238c83eba9fde 100644 --- a/tests/sentry/api/endpoints/test_organization_spans_fields.py +++ b/tests/sentry/api/endpoints/test_organization_spans_fields.py @@ -60,7 +60,6 @@ def test_tags_list_str(self) -> None: duration=100, exclusive_time=100, tags={tag: tag}, - is_eap=True, ) for features in [ @@ -110,7 +109,6 @@ def test_tags_list_nums(self) -> None: duration=100, exclusive_time=100, measurements={tag: 0}, - is_eap=True, ) for features in [ @@ -154,7 +152,7 @@ def test_boolean_attributes(self) -> None: "is_feature_enabled": False, "is_production": True, } - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) response = self.do_request( query={"dataset": "spans", "type": "boolean"}, @@ -220,7 +218,6 @@ def test_tags_keys(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) response = self.do_request("tag") @@ -266,7 +263,6 @@ def test_transaction_keys_autocomplete(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -314,7 +310,6 @@ def test_transaction_keys_autocomplete_substring(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -354,7 +349,6 @@ def test_transaction_keys_autocomplete_substring_with_asterisk(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -395,7 +389,6 @@ def test_tags_keys_autocomplete(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -444,7 +437,6 @@ def test_tags_keys_autocomplete_substring(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -485,7 +477,6 @@ def test_tags_keys_autocomplete_substring_with_asterisks(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -526,7 +517,6 @@ def test_tags_keys_autocomplete_noop(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) for key in [ @@ -678,7 +668,6 @@ def test_tags_keys_autocomplete_span_status(self) -> None: timestamp=timestamp, transaction="foo", status=status, - is_eap=True, ) response = self.do_request("span.status") @@ -797,7 +786,6 @@ def test_invalid_query(self, mock_executor: mock.MagicMock) -> None: duration=100, exclusive_time=100, tags={"tag": "foo"}, - is_eap=True, ) response = self.do_request("tag") diff --git a/tests/sentry/api/endpoints/test_organization_spans_fields_stats.py b/tests/sentry/api/endpoints/test_organization_spans_fields_stats.py index 21c5cbac3ecc50..61a6338f9b15d1 100644 --- a/tests/sentry/api/endpoints/test_organization_spans_fields_stats.py +++ b/tests/sentry/api/endpoints/test_organization_spans_fields_stats.py @@ -7,7 +7,6 @@ class OrganizationSpansFieldsStatsEndpointTest(BaseSpansTestCase, APITestCase): - is_eap = True view = "sentry-api-0-organization-spans-fields-stats" def setUp(self) -> None: @@ -55,7 +54,6 @@ def _generate_one_span(self, tags=None): duration=100, exclusive_time=100, tags=tags, - 
is_eap=self.is_eap, ) def test_no_project(self) -> None: diff --git a/tests/sentry/api/endpoints/test_organization_traces.py b/tests/sentry/api/endpoints/test_organization_traces.py index ff787db1b4c53b..3f6b802df614de 100644 --- a/tests/sentry/api/endpoints/test_organization_traces.py +++ b/tests/sentry/api/endpoints/test_organization_traces.py @@ -19,7 +19,6 @@ class OrganizationTracesEndpointTest(BaseSpansTestCase, APITestCase): view = "sentry-api-0-organization-traces" - is_eap: bool = True def setUp(self) -> None: super().setUp() @@ -76,7 +75,6 @@ def double_write_segment( timestamp=timestamp, duration=duration, organization_id=project.organization.id, - is_eap=True, environment=data.get("environment"), **kwargs, ) @@ -165,7 +163,6 @@ def create_mock_traces(self): duration=1_000, exclusive_time=1_000, op="http.client", - is_eap=True, ) timestamps.append(now - timedelta(days=1, minutes=19, seconds=40)) @@ -181,7 +178,6 @@ def create_mock_traces(self): duration=3_000, exclusive_time=3_000, op="db.sql", - is_eap=self.is_eap, ) timestamps.append(now - timedelta(days=1, minutes=19, seconds=45)) @@ -197,7 +193,6 @@ def create_mock_traces(self): duration=3, exclusive_time=3, op="db.sql", - is_eap=True, ) timestamps.append(now - timedelta(days=2, minutes=30)) diff --git a/tests/sentry/data_export/test_tasks.py b/tests/sentry/data_export/test_tasks.py index 15e2e23bb3c0df..9bb786e05d913a 100644 --- a/tests/sentry/data_export/test_tasks.py +++ b/tests/sentry/data_export/test_tasks.py @@ -708,7 +708,7 @@ def test_explore_spans_dataset_called_correctly(self, emailer: MagicMock) -> Non organization=self.org, ) ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) de = ExportedData.objects.create( user_id=self.user.id, @@ -799,7 +799,7 @@ def test_explore_datasets_isolation(self, emailer: MagicMock) -> None: organization=self.org, ) ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) logs = [ self.create_ourlog( @@ -897,7 +897,7 @@ def test_explore_batched(self, emailer: MagicMock) -> None: organization=self.org, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) de = ExportedData.objects.create( user_id=self.user.id, @@ -1054,7 +1054,7 @@ def test_explore_export_file_too_large(self, emailer: MagicMock) -> None: ) for _ in range(5) ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) de = ExportedData.objects.create( user_id=self.user.id, @@ -1162,7 +1162,7 @@ def test_explore_sort(self, emailer: MagicMock) -> None: organization=self.org, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) de = ExportedData.objects.create( user_id=self.user.id, diff --git a/tests/sentry/discover/test_compare_tables.py b/tests/sentry/discover/test_compare_tables.py index 5a8b7f5e18fcd7..7e97c70e07ae17 100644 --- a/tests/sentry/discover/test_compare_tables.py +++ b/tests/sentry/discover/test_compare_tables.py @@ -260,7 +260,6 @@ def triple_write_segment( timestamp=timestamp, duration=duration, organization_id=project.organization.id, - is_eap=True, **kwargs, ) diff --git a/tests/sentry/discover/test_compare_timeseries.py b/tests/sentry/discover/test_compare_timeseries.py index 07e712d0602ef4..8f77ec5423e230 100644 --- a/tests/sentry/discover/test_compare_timeseries.py +++ b/tests/sentry/discover/test_compare_timeseries.py @@ -129,7 +129,6 @@ def double_write_segment( timestamp=timestamp, duration=duration, organization_id=project.organization.id, - is_eap=True, **kwargs, ) diff --git 
a/tests/sentry/incidents/subscription_processor/test_subscription_processor_aci.py b/tests/sentry/incidents/subscription_processor/test_subscription_processor_aci.py index b046ad05696828..f4d6cde5b2f507 100644 --- a/tests/sentry/incidents/subscription_processor/test_subscription_processor_aci.py +++ b/tests/sentry/incidents/subscription_processor/test_subscription_processor_aci.py @@ -364,7 +364,7 @@ def test_comparison_alert_eap(self, helper_metrics): ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) self.metrics.incr.reset_mock() self.send_update(2, timedelta(minutes=-9)) diff --git a/tests/sentry/seer/anomaly_detection/test_store_data.py b/tests/sentry/seer/anomaly_detection/test_store_data.py index 402e141647d728..e60ac576a3287a 100644 --- a/tests/sentry/seer/anomaly_detection/test_store_data.py +++ b/tests/sentry/seer/anomaly_detection/test_store_data.py @@ -168,7 +168,6 @@ def test_anomaly_detection_fetch_historical_data_eap_spans(self) -> None: self.create_span({"description": "foo"}, start_ts=dt) for dt in [time_1_dt, time_2_dt] ], - is_eap=True, ) result = fetch_historical_data( diff --git a/tests/sentry/seer/explorer/test_index_data.py b/tests/sentry/seer/explorer/test_index_data.py index 4363d82f267189..dccb040171926a 100644 --- a/tests/sentry/seer/explorer/test_index_data.py +++ b/tests/sentry/seer/explorer/test_index_data.py @@ -60,7 +60,7 @@ def test_get_transactions_for_project(self) -> None: ) spans.append(non_tx_span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Call our function result = get_transactions_for_project(self.project.id) @@ -116,7 +116,7 @@ def test_get_trace_for_transaction(self) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Call our function result = get_trace_for_transaction(transaction_name, self.project.id) @@ -235,7 +235,7 @@ def test_get_profiles_for_trace(self) -> None: } ) - self.store_spans([span1, span2, span3, span4], is_eap=True) + self.store_spans([span1, span2, span3, span4]) with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service: # Mock profile service responses for both transaction and continuous profiles @@ -429,7 +429,7 @@ def test_get_profiles_for_trace_aggregates_duplicate_profiles(self) -> None: ) span4.update({"profile_id": different_profile_id}) - self.store_spans([span1, span2, span3, span4], is_eap=True) + self.store_spans([span1, span2, span3, span4]) # Mock the external profiling service calls with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service: @@ -556,7 +556,7 @@ def test_get_profiles_for_trace_aggregates_continuous_profiles(self) -> None: ) spans.append(span_different) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Mock the external profiling service calls with mock.patch("sentry.seer.explorer.utils.get_from_profiling_service") as mock_service: @@ -678,7 +678,7 @@ def test_get_profiles_for_trace_uses_aggregated_timestamps(self) -> None: ) span3.update({"profile_id": profile_id}) - self.store_spans([span1, span2, span3], is_eap=True) + self.store_spans([span1, span2, span3]) captured_timestamps = {} diff --git a/tests/sentry/seer/explorer/test_tools.py b/tests/sentry/seer/explorer/test_tools.py index af96c876fb5e59..0e13570b5db80c 100644 --- a/tests/sentry/seer/explorer/test_tools.py +++ b/tests/sentry/seer/explorer/test_tools.py @@ -108,7 +108,7 @@ def setUp(self): ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) def 
test_spans_timeseries_count_metric(self): """Test timeseries query with count() metric using real data""" @@ -591,7 +591,7 @@ def _test_get_trace_waterfall(self, use_short_id: bool) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) result = get_trace_waterfall( trace_id[:8] if use_short_id else trace_id, self.organization.id ) @@ -663,7 +663,7 @@ def test_get_trace_waterfall_wrong_project(self) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Call with short ID and wrong org result = get_trace_waterfall(trace_id[:8], self.organization.id) @@ -689,7 +689,7 @@ def test_get_trace_waterfall_sliding_window_second_period(self) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Should find the trace using short ID by sliding back to the second window result = get_trace_waterfall(trace_id[:8], self.organization.id) @@ -717,7 +717,7 @@ def test_get_trace_waterfall_sliding_window_old_trace(self) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Should find the trace by sliding back through multiple windows result = get_trace_waterfall(trace_id[:8], self.organization.id) @@ -745,7 +745,7 @@ def test_get_trace_waterfall_sliding_window_beyond_limit(self) -> None: ) spans.append(span) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Should not find the trace since it's beyond the 90-day limit result = get_trace_waterfall(trace_id[:8], self.organization.id) @@ -769,7 +769,7 @@ def test_get_trace_waterfall_includes_status_code(self) -> None: }, start_ts=self.ten_mins_ago, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) result = get_trace_waterfall(trace_id, self.organization.id) assert isinstance(result, EAPTrace) @@ -847,7 +847,7 @@ def test_trace_table_basic(self, mock_client: Mock) -> None: ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Cross-project query should return both traces with pageload spans. result = execute_trace_table_query( @@ -1026,7 +1026,7 @@ def _test_get_ie_details_from_issue_id( start_ts=before_now(days=3, hours=23), duration=100, ) - self.store_spans([span0, span1], is_eap=True) + self.store_spans([span0, span1]) # Create events with shared stacktrace (should have same group) events: list[Event] = [] @@ -1186,7 +1186,7 @@ def test_get_ie_details_from_issue_id_single_event( start_ts=before_now(minutes=10), duration=100, ) - self.store_spans([span0], is_eap=True) + self.store_spans([span0]) # Create one event. 
data = load_data("python", timestamp=before_now(minutes=10)) @@ -1775,7 +1775,7 @@ def test_rpc_get_profile_flamegraph_finds_transaction_profile( start_ts=self.ten_mins_ago, duration=100, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) # Mock the profile data fetch and conversion mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}} @@ -1810,7 +1810,7 @@ def test_rpc_get_profile_flamegraph_finds_continuous_profile( start_ts=self.ten_mins_ago, duration=200, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) # Mock the profile data mock_fetch_profile.return_value = { @@ -1850,7 +1850,7 @@ def test_rpc_get_profile_flamegraph_aggregates_timestamps_across_spans( ) for i, start_time in enumerate([span1_time, span2_time, span3_time]) ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}} mock_convert_tree.return_value = ([{"function": "test", "module": "test"}], "3") @@ -1887,7 +1887,7 @@ def test_rpc_get_profile_flamegraph_sliding_window_finds_old_profile( start_ts=twenty_days_ago, duration=150, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}} mock_convert_tree.return_value = ([{"function": "old_function", "module": "old"}], "4") @@ -1912,7 +1912,7 @@ def test_rpc_get_profile_flamegraph_full_32char_id(self, mock_fetch_profile, moc start_ts=self.ten_mins_ago, duration=100, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) mock_fetch_profile.return_value = {"profile": {"frames": [], "stacks": [], "samples": []}} mock_convert_tree.return_value = ([{"function": "handler", "module": "server"}], "5") @@ -1934,7 +1934,7 @@ def test_rpc_get_profile_flamegraph_not_found_in_90_days(self): start_ts=self.ten_mins_ago, duration=100, ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) result = rpc_get_profile_flamegraph("notfound", self.organization.id) diff --git a/tests/sentry/tasks/test_llm_issue_detection.py b/tests/sentry/tasks/test_llm_issue_detection.py index 2c4f7fecda29eb..55356d7f0892d3 100644 --- a/tests/sentry/tasks/test_llm_issue_detection.py +++ b/tests/sentry/tasks/test_llm_issue_detection.py @@ -374,7 +374,7 @@ def test_returns_deduped_transaction_traces(self) -> None: start_ts=self.ten_mins_ago + timedelta(seconds=2), ) - self.store_spans([span1, span2, span3], is_eap=True) + self.store_spans([span1, span2, span3]) evidence_traces = get_project_top_transaction_traces_for_llm_detection( self.project.id, limit=TRANSACTION_BATCH_SIZE, start_time_delta_minutes=30 diff --git a/tests/sentry/tasks/test_web_vitals_issue_detection.py b/tests/sentry/tasks/test_web_vitals_issue_detection.py index 1561fa3b633205..ba474e4e08bf06 100644 --- a/tests/sentry/tasks/test_web_vitals_issue_detection.py +++ b/tests/sentry/tasks/test_web_vitals_issue_detection.py @@ -192,7 +192,7 @@ def test_run_detection_produces_occurrences(self, mock_produce_occurrence_to_kaf ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) with ( self.mock_seer_ack(), @@ -311,7 +311,7 @@ def test_run_detection_groups_rendering_vitals(self, mock_produce_occurrence_to_ ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) with ( self.mock_seer_ack(), @@ -388,7 +388,7 @@ def test_run_detection_does_not_produce_occurrences_for_existing_issues( for _ in range(10) ] # web vital issue detection 
requires at least 10 samples to create an issue - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # Create an existing issue group so that the web vital issue detection does not produce a new occurrence group = self.create_group(project=project) @@ -443,7 +443,7 @@ def test_run_detection_does_not_create_issue_on_insufficient_samples( for _ in range(9) ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) with ( self.mock_seer_ack(), @@ -525,7 +525,7 @@ def test_run_detection_selects_trace_closest_to_p75_web_vital_value( ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) with ( self.mock_seer_ack(), @@ -635,7 +635,7 @@ def test_run_detection_selects_trace_from_worst_score(self, mock_produce_occurre ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) with ( self.mock_seer_ack(), diff --git a/tests/snuba/api/endpoints/test_organization_events_cross_trace.py b/tests/snuba/api/endpoints/test_organization_events_cross_trace.py index e90399c31692a1..e5527efb62e3d6 100644 --- a/tests/snuba/api/endpoints/test_organization_events_cross_trace.py +++ b/tests/snuba/api/endpoints/test_organization_events_cross_trace.py @@ -40,7 +40,6 @@ def test_cross_trace_query_with_logs(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -92,7 +91,6 @@ def test_cross_trace_query_with_spans(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -157,7 +155,6 @@ def test_cross_trace_query_with_spans_and_logs(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -220,7 +217,6 @@ def test_cross_trace_query_with_multiple_spans(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -278,7 +274,6 @@ def test_cross_trace_qurey_with_multiple_logs(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( diff --git a/tests/snuba/api/endpoints/test_organization_events_meta.py b/tests/snuba/api/endpoints/test_organization_events_meta.py index bb6cdac6d4eb61..b10e1166a70417 100644 --- a/tests/snuba/api/endpoints/test_organization_events_meta.py +++ b/tests/snuba/api/endpoints/test_organization_events_meta.py @@ -54,7 +54,7 @@ def test_simple(self) -> None: assert response.data["count"] == 1 def test_spans_dataset(self) -> None: - self.store_spans([self.create_span(start_ts=self.min_ago)], is_eap=True) + self.store_spans([self.create_span(start_ts=self.min_ago)]) with self.feature(self.features): response = self.client.get(self.url, format="json", data={"dataset": "spans"}) @@ -577,7 +577,7 @@ def test_basic_query(self) -> None: duration=200, start_ts=self.ten_mins_ago, ) - self.store_span(span, is_eap=True) + self.store_span(span) response = self.client.get( url, @@ -625,7 +625,7 @@ def test_order_by(self) -> None: ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.client.get( url, @@ -712,7 +712,6 @@ def test_simple(self) -> None: self.store_spans( spans, - is_eap=True, ) response = self.do_request( diff --git a/tests/snuba/api/endpoints/test_organization_events_span_indexed.py b/tests/snuba/api/endpoints/test_organization_events_span_indexed.py index 79f067627765cb..abd1f01a826172 100644 --- a/tests/snuba/api/endpoints/test_organization_events_span_indexed.py +++ b/tests/snuba/api/endpoints/test_organization_events_span_indexed.py @@ -39,7 +39,6 @@ def test_spm(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) 
response = self.do_request( { @@ -78,7 +77,6 @@ def test_id_fields(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -106,7 +104,6 @@ def test_sentry_tags_vs_tags(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -133,7 +130,6 @@ def test_sentry_tags_syntax(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -171,7 +167,6 @@ def test_module_alias(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -206,7 +201,6 @@ def test_device_class_filter(self): self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago), self.create_span({}, start_ts=self.ten_mins_ago), ], - is_eap=True, ) response = self.do_request( @@ -244,7 +238,6 @@ def test_device_class_filter_for_empty(self): self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago), self.create_span({}, start_ts=self.ten_mins_ago), ], - is_eap=True, ) response = self.do_request( @@ -282,7 +275,6 @@ def test_device_class_filter_for_unknown(self): self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago), self.create_span({}, start_ts=self.ten_mins_ago), ], - is_eap=True, ) response = self.do_request( @@ -320,7 +312,6 @@ def test_device_class_filter_out_unknown(self): self.create_span({"sentry_tags": {"device.class": ""}}, start_ts=self.ten_mins_ago), self.create_span({}, start_ts=self.ten_mins_ago), ], - is_eap=True, ) response = self.do_request( @@ -381,7 +372,6 @@ def test_span_module(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -422,7 +412,6 @@ def test_network_span(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -466,7 +455,6 @@ def test_other_category_span(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -501,7 +489,6 @@ def test_inp_span(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -527,7 +514,7 @@ def test_inp_span(self) -> None: @pytest.mark.xfail(reason="event_id isn't being written to the new table") def test_id_filtering(self) -> None: span = self.create_span({"description": "foo"}, start_ts=self.ten_mins_ago) - self.store_span(span, is_eap=True) + self.store_span(span) response = self.do_request( { "field": ["description", "count()"], @@ -581,7 +568,6 @@ def test_span_op_casing(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -629,7 +615,6 @@ def test_queue_span(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -675,7 +660,6 @@ def test_tag_wildcards(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) for query in [ @@ -707,7 +691,6 @@ def test_query_for_missing_tag(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -747,7 +730,6 @@ def _test_simple_measurements(self, keys): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) for i, (k, type, unit) in enumerate(keys): @@ -867,7 +849,6 @@ def test_environment(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -906,7 +887,6 @@ def test_transaction(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -946,7 +926,6 @@ def test_orderby_alias(self) -> None: 
start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -987,7 +966,6 @@ def test_explore_sample_query(self) -> None: ] self.store_spans( spans, - is_eap=True, ) response = self.do_request( { @@ -1034,7 +1012,6 @@ def test_span_status(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1061,7 +1038,6 @@ def test_span_status(self) -> None: def test_handle_nans_from_snuba(self) -> None: self.store_spans( [self.create_span({"description": "foo"}, start_ts=self.ten_mins_ago)], - is_eap=True, ) response = self.do_request( @@ -1091,7 +1067,6 @@ def test_in_filter(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1147,7 +1122,6 @@ def _test_aggregate_filter(self, queries): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) for query in queries: @@ -1180,7 +1154,6 @@ def test_pagination_samples(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1255,7 +1228,6 @@ def test_precise_timestamps(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1293,7 +1265,6 @@ def test_case_sensitivity_filtering(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1355,7 +1326,6 @@ def test_case_sensitivity_filtering_with_list(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1426,7 +1396,6 @@ def test_case_sensitivity_with_wildcards(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -1488,7 +1457,6 @@ def test_replay_id(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1528,7 +1496,6 @@ def test_user_display(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1560,7 +1527,6 @@ def test_query_with_asterisk(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1582,7 +1548,6 @@ def test_wildcard_queries_with_asterisk_literals(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1610,7 +1575,7 @@ def test_free_text_wildcard_filter(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["count()", "description"], @@ -1648,7 +1613,6 @@ def test_simple(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -1690,7 +1654,6 @@ def test_numeric_attr_without_space(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -1731,7 +1694,6 @@ def test_numeric_attr_overlap_string_attr(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -1773,7 +1735,6 @@ def test_numeric_attr_with_spaces(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -1814,7 +1775,6 @@ def test_numeric_attr_filtering(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -1844,7 +1804,7 @@ def test_explicit_boolean_tag_filter(self) -> None: start_ts=self.ten_mins_ago, ) span_false["data"] = {"is_enabled": False} - self.store_spans([span_true, span_false], is_eap=True) + self.store_spans([span_true, span_false]) # Test filtering for true response = self.do_request( @@ -1891,7 +1851,7 @@ def 
test_explicit_boolean_tag_negation(self) -> None: start_ts=self.ten_mins_ago, ) span_false["data"] = {"is_enabled": False} - self.store_spans([span_true, span_false], is_eap=True) + self.store_spans([span_true, span_false]) # Test negation: !tags[is_enabled,boolean]:true should return spans where is_enabled is false response = self.do_request( @@ -1921,7 +1881,7 @@ def test_explicit_boolean_tag_as_field(self) -> None: start_ts=self.ten_mins_ago, ) span_false["data"] = {"feature_flag": False} - self.store_spans([span_true, span_false], is_eap=True) + self.store_spans([span_true, span_false]) # Request boolean tag as a field without filtering response = self.do_request( @@ -1948,7 +1908,7 @@ def test_explicit_boolean_tag_with_colon_in_key(self) -> None: start_ts=self.ten_mins_ago, ) span["data"] = {"feature:enabled": True} - self.store_spans([span], is_eap=True) + self.store_spans([span]) # Test filtering with colon in tag key response = self.do_request( @@ -2011,7 +1971,6 @@ def test_numeric_attr_orderby(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2045,7 +2004,6 @@ def test_skip_aggregate_conditions_option(self) -> None: ) self.store_spans( [span_1, span_2], - is_eap=True, ) response = self.do_request( { @@ -2087,7 +2045,6 @@ def test_span_data_fields_http_resource(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2153,7 +2110,7 @@ def test_filtering_numeric_attr(self) -> None: measurements={"foo": {"value": 10}}, start_ts=self.ten_mins_ago, ) - self.store_spans([span_1, span_2], is_eap=True) + self.store_spans([span_1, span_2]) response = self.do_request( { @@ -2227,7 +2184,7 @@ def test_extrapolation(self) -> None: start_ts=self.ten_mins_ago, ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["description", "count()"], @@ -2274,7 +2231,7 @@ def test_extrapolation_mode_server_only(self, mock_rpc_request: mock.MagicMock) start_ts=self.ten_mins_ago, ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["description", "count()"], @@ -2310,7 +2267,7 @@ def test_span_duration(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["span.duration", "description"], @@ -2362,7 +2319,6 @@ def test_aggregate_numeric_attr(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2424,7 +2380,6 @@ def test_epm(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -2466,7 +2421,6 @@ def test_tpm(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2510,7 +2464,6 @@ def test_p75_if(self) -> None: *[(False, 5000)] * 4, ] ], - is_eap=True, ) response = self.do_request( @@ -2558,7 +2511,6 @@ def test_is_transaction(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -2604,7 +2556,6 @@ def test_is_not_transaction(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -2644,7 +2595,6 @@ def test_byte_fields(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2701,7 +2651,6 @@ def test_query_for_missing_tag_negated(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -2725,7 +2674,6 @@ def 
test_device_class_column(self) -> None: {"sentry_tags": {"device.class": "1"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) response = self.do_request( { @@ -2751,7 +2699,6 @@ def test_http_response_count(self) -> None: {"sentry_tags": {"status_code": "500"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2759,7 +2706,6 @@ def test_http_response_count(self) -> None: {"sentry_tags": {"status_code": "500"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2767,7 +2713,6 @@ def test_http_response_count(self) -> None: {"sentry_tags": {"status_code": "404"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2775,7 +2720,6 @@ def test_http_response_count(self) -> None: {"sentry_tags": {"status_code": "200"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) response = self.do_request( { @@ -2805,7 +2749,6 @@ def test_http_response_rate(self) -> None: {"sentry_tags": {"status_code": "500"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2813,7 +2756,6 @@ def test_http_response_rate(self) -> None: {"sentry_tags": {"status_code": "500"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2821,7 +2763,6 @@ def test_http_response_rate(self) -> None: {"sentry_tags": {"status_code": "404"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) self.store_spans( [ @@ -2829,7 +2770,6 @@ def test_http_response_rate(self) -> None: {"sentry_tags": {"status_code": "200"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) response = self.do_request( { @@ -2867,7 +2807,6 @@ def test_http_response_rate_missing_status_code(self) -> None: [ self.create_span({"sentry_tags": {}}, start_ts=self.ten_mins_ago), ], - is_eap=True, ) response = self.do_request( { @@ -2892,7 +2831,6 @@ def test_http_response_rate_invalid_param(self) -> None: {"sentry_tags": {"status_code": "500"}}, start_ts=self.ten_mins_ago ), ], - is_eap=True, ) response = self.do_request( { @@ -2921,7 +2859,6 @@ def test_http_response_rate_group_by(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) self.store_spans( [ @@ -2933,7 +2870,6 @@ def test_http_response_rate_group_by(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) self.store_spans( [ @@ -2945,7 +2881,6 @@ def test_http_response_rate_group_by(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) self.store_spans( [ @@ -2957,7 +2892,6 @@ def test_http_response_rate_group_by(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -3006,7 +2940,6 @@ def test_cache_miss_rate(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3042,7 +2975,6 @@ def test_cache_hit(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3074,7 +3006,6 @@ def test_searchable_contexts(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3104,7 +3035,6 @@ def test_trace_status_rate(self) -> None: ) for status in statuses ], - is_eap=True, ) response = self.do_request( @@ -3142,7 +3072,6 @@ def test_failure_rate(self) -> None: ) for status in trace_statuses ], - is_eap=True, ) response = self.do_request( @@ -3184,7 +3113,7 @@ def test_failure_rate_if(self) -> None: ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -3223,7 +3152,6 @@ def test_count_op(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) 
response = self.do_request( @@ -3266,7 +3194,6 @@ def test_avg_if(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3302,7 +3229,6 @@ def test_avg_compare(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3335,7 +3261,6 @@ def test_avg_if_invalid_param(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3391,7 +3316,7 @@ def test_ttif_ttfd_contribution_rate(self) -> None: ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -3442,7 +3367,6 @@ def test_count_scores(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3498,7 +3422,6 @@ def test_count_scores_filter(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3527,7 +3450,7 @@ def test_time_spent_percentage(self) -> None: spans.append( self.create_span({"sentry_tags": {"transaction": "bar_transaction"}}, duration=1) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -3577,7 +3500,6 @@ def test_performance_score(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3640,7 +3562,6 @@ def test_division_if(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -3736,7 +3657,6 @@ def test_total_performance_score(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3819,7 +3739,6 @@ def test_total_performance_score_missing_vital(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3872,7 +3791,6 @@ def test_total_performance_score_multiple_transactions(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -3934,7 +3852,6 @@ def test_division(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -3974,7 +3891,6 @@ def test_division_with_groupby(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4026,7 +3942,6 @@ def test_opportunity_score_zero_scores(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4089,7 +4004,6 @@ def test_opportunity_score_simple(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4147,7 +4061,6 @@ def test_opportunity_score_with_transaction(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4234,7 +4147,6 @@ def test_opportunity_score_with_filter(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4292,7 +4204,6 @@ def test_total_opportunity_score_passes_with_bad_query(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4344,7 +4255,6 @@ def test_total_opportunity_score_missing_data(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4384,7 +4294,6 @@ def test_count_starts(self) -> None: } ), ], - is_eap=True, ) response = self.do_request( @@ -4418,7 +4327,7 @@ def test_unit_aggregate_filtering(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["avg(span.duration)", "description"], @@ -4456,7 +4365,7 @@ def test_trace_id_in_filter(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["description"], @@ -4501,7 +4410,7 @@ def 
test_filtering_null_numeric_attr(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": [ @@ -4541,7 +4450,6 @@ def test_internal_fields(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) for private_field in [ "sentry.organization_id", @@ -4567,7 +4475,6 @@ def test_transaction_profile_attributes(self) -> None: ) self.store_spans( [span_with_profile, span_without_profile], - is_eap=True, ) response = self.do_request( @@ -4633,7 +4540,6 @@ def test_continuous_profile_attributes(self) -> None: ) self.store_spans( [span_with_profile, span_without_profile], - is_eap=True, ) response = self.do_request( @@ -4701,7 +4607,7 @@ def test_sampling_weight_does_not_fail(self) -> None: }, start_ts=before_now(minutes=10), ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) response = self.do_request( { "field": ["sentry.sampling_weight"], @@ -4733,7 +4639,7 @@ def test_sampling_factor_does_not_fail(self) -> None: }, start_ts=before_now(minutes=10), ) - self.store_spans([span], is_eap=True) + self.store_spans([span]) response = self.do_request( { "field": ["sentry.sampling_factor"], @@ -4778,7 +4684,6 @@ def test_is_starred_transaction(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -4834,7 +4739,6 @@ def test_empty_string_equal(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -4874,7 +4778,6 @@ def test_empty_string_negation(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -4910,7 +4813,7 @@ def test_has_parent_span_filter(self) -> None: start_ts=self.ten_mins_ago, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -4987,7 +4890,6 @@ def test_app_start_fields(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -5027,7 +4929,6 @@ def test_typed_attributes_with_colons(self) -> None: self.create_span(start_ts=self.ten_mins_ago), span, ], - is_eap=True, ) response = self.do_request( @@ -5058,7 +4959,6 @@ def test_count_if_two_args(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -5080,7 +4980,6 @@ def test_span_ops_breakdown(self) -> None: }, ), ], - is_eap=True, ) response = self.do_request( @@ -5110,7 +5009,6 @@ def test_special_characters(self) -> None: span, self.create_span(start_ts=self.ten_mins_ago), ], - is_eap=True, ) for c in characters: @@ -5143,7 +5041,6 @@ def test_ai_total_tokens_returns_integer_meta(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -5177,7 +5074,6 @@ def test_equation_simple(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|count() * 2" response = self.do_request( @@ -5224,7 +5120,6 @@ def test_equation_with_orderby_using_same_alias(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|count(span.duration)" response = self.do_request( @@ -5265,7 +5160,6 @@ def test_equation_single_function_term(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|count()" response = self.do_request( @@ -5317,7 +5211,6 @@ def test_equation_single_field_term(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|measurements.lcp" response = self.do_request( @@ -5369,7 +5262,6 @@ def 
test_equation_single_literal(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|3.14159" response = self.do_request( @@ -5426,7 +5318,7 @@ def test_equation_with_extrapolation(self) -> None: start_ts=self.ten_mins_ago, ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) equation = "equation|count() * (2)" response = self.do_request( { @@ -5463,7 +5355,6 @@ def test_equation_all_symbols(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = "equation|count() * 2 + 2 - 2 / 2" response = self.do_request( @@ -5514,7 +5405,7 @@ def test_release(self) -> None: }, start_ts=self.ten_mins_ago, ) - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) response = self.do_request( { @@ -5561,7 +5452,7 @@ def test_release(self) -> None: def test_latest_release_alias(self) -> None: self.create_release(version="0.8") span1 = self.create_span({"sentry_tags": {"release": "0.8"}}, start_ts=self.ten_mins_ago) - self.store_spans([span1], is_eap=True) + self.store_spans([span1]) response = self.do_request( { @@ -5582,7 +5473,7 @@ def test_latest_release_alias(self) -> None: self.create_release(version="0.9") span2 = self.create_span({"sentry_tags": {"release": "0.9"}}, start_ts=self.ten_mins_ago) - self.store_spans([span2], is_eap=True) + self.store_spans([span2]) response = self.do_request( { @@ -5633,7 +5524,7 @@ def test_release_stage(self) -> None: }, start_ts=self.ten_mins_ago, ) - self.store_spans([adopted_span, replaced_span], is_eap=True) + self.store_spans([adopted_span, replaced_span]) request = { "field": ["release"], @@ -5697,7 +5588,7 @@ def test_semver(self) -> None: span3 = self.create_span( {"sentry_tags": {"release": release_3.version}}, start_ts=self.ten_mins_ago ) - self.store_spans([span1, span2, span3], is_eap=True) + self.store_spans([span1, span2, span3]) request = { "field": ["release"], @@ -5802,7 +5693,7 @@ def test_semver_package(self) -> None: span2 = self.create_span( {"sentry_tags": {"release": release_2.version}}, start_ts=self.ten_mins_ago ) - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) request = { "field": ["release"], @@ -5841,7 +5732,7 @@ def test_semver_build(self) -> None: span2 = self.create_span( {"sentry_tags": {"release": release_2.version}}, start_ts=self.ten_mins_ago ) - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) request = { "field": ["release"], @@ -5892,7 +5783,6 @@ def test_file_extension(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -5920,7 +5810,7 @@ def test_filter_timestamp(self) -> None: span1 = self.create_span({}, start_ts=one_day_ago) span2 = self.create_span({}, start_ts=three_days_ago) - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) request = { "field": ["timestamp"], @@ -5996,7 +5886,6 @@ def test_tag_wildcards_with_in_filter(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -6036,7 +5925,6 @@ def test_tag_wildcards_with_not_in_filter(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -6075,7 +5963,7 @@ def test_disable_extrapolation(self) -> None: start_ts=self.ten_mins_ago, ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { "field": ["description", "count()"], @@ -6108,7 +5996,6 @@ def test_failure_count(self) -> None: ) for status in trace_statuses ], - 
is_eap=True, ) response = self.do_request( @@ -6157,7 +6044,6 @@ def test_short_trace_id_filter(self) -> None: ) for trace_id in trace_ids ], - is_eap=True, ) for i in range(8, 32): @@ -6188,7 +6074,6 @@ def test_eps(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -6229,7 +6114,6 @@ def test_count_if_span_status(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -6269,7 +6153,6 @@ def test_count_if_span_status_equation_quoted(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) equation = 'equation|count_if(span.status,equals,"success")' response = self.do_request( @@ -6343,7 +6226,6 @@ def test_count_if_numeric(self) -> None: duration=200, ), ], - is_eap=True, ) response = self.do_request( { @@ -6398,7 +6280,6 @@ def test_count_if_between(self) -> None: duration=200, ), ], - is_eap=True, ) response = self.do_request( { @@ -6489,7 +6370,6 @@ def test_count_if_integer(self) -> None: duration=200, ), ], - is_eap=True, ) response = self.do_request( { @@ -6555,7 +6435,7 @@ def test_apdex_function(self) -> None: duration=5000, # 5000ms - frustrated ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -6638,7 +6518,7 @@ def test_apdex_function_with_filter(self) -> None: duration=100, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -6729,7 +6609,7 @@ def test_user_misery_function(self) -> None: duration=5000, # 5000ms - miserable but not a segment ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -6803,7 +6683,7 @@ def test_user_misery_function_with_filter(self) -> None: duration=100, ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { @@ -6888,7 +6768,6 @@ def test_formula_filtering(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -6970,7 +6849,6 @@ def test_project_filter(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -7041,7 +6919,6 @@ def test_non_org_project_filter(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( { @@ -7083,7 +6960,6 @@ def test_count_span_duration(self): start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) request = { "field": ["count(span.duration)"], @@ -7098,7 +6974,7 @@ def test_count_span_duration(self): def test_wildcard_operator_with_backslash(self): span = self.create_span({"description": r"foo\bar"}, start_ts=self.ten_mins_ago) - self.store_spans([span], is_eap=True) + self.store_spans([span]) base_request = { "field": ["project.name", "id"], "project": self.project.id, @@ -7140,7 +7016,6 @@ def test_in_query_matches_is_query_with_truncated_strings(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) is_query = self.do_request( @@ -7178,7 +7053,7 @@ def test_in_query_with_numeric_values(self) -> None: start_ts=self.ten_mins_ago, ) - self.store_spans([span1, span2, span3], is_eap=True) + self.store_spans([span1, span2, span3]) in_query = self.do_request( { @@ -7220,7 +7095,7 @@ def test_sent_spans_project_optimization(self, mock_table_rpc): spans = [ self.create_span({"description": "foo"}, project=project1, start_ts=self.ten_mins_ago), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self.do_request( { diff --git 
a/tests/snuba/api/endpoints/test_organization_events_stats_span_indexed.py b/tests/snuba/api/endpoints/test_organization_events_stats_span_indexed.py index 4c0e365be68edb..1eaa47cdf36776 100644 --- a/tests/snuba/api/endpoints/test_organization_events_stats_span_indexed.py +++ b/tests/snuba/api/endpoints/test_organization_events_stats_span_indexed.py @@ -47,7 +47,7 @@ def test_count(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -73,7 +73,6 @@ def test_count(self) -> None: def test_handle_nans_from_snuba(self) -> None: self.store_spans( [self.create_span({"description": "foo"}, start_ts=self.day_ago)], - is_eap=True, ) response = self._do_request( @@ -108,7 +107,6 @@ def test_handle_nans_from_snuba_top_n(self) -> None: self.create_span({"description": "bar"}, start_ts=self.day_ago), self.create_span({"description": "bar"}, start_ts=self.two_days_ago), ], - is_eap=True, ) response = self._do_request( @@ -159,7 +157,7 @@ def test_count_unique(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -193,7 +191,6 @@ def test_p95(self) -> None: ) for hour, duration in enumerate(event_durations) ], - is_eap=True, ) response = self._do_request( @@ -233,7 +230,7 @@ def test_multiaxis(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -276,7 +273,7 @@ def test_throughput_epm_hour_rollup(self) -> None: for minute in range(count) ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -312,7 +309,7 @@ def test_throughput_epm_day_rollup(self) -> None: for minute in range(count) ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -344,7 +341,7 @@ def test_throughput_epm_hour_rollup_offset_of_hour(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -385,7 +382,7 @@ def test_throughput_eps_minute_rollup(self) -> None: for second in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) for axis in ["eps()", "sps()"]: response = self._do_request( @@ -430,7 +427,6 @@ def test_top_events(self) -> None: start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -474,7 +470,6 @@ def test_top_events_empty_other(self) -> None: ) for transaction in ["foo", "bar"] ], - is_eap=True, ) response = self._do_request( @@ -511,7 +506,6 @@ def test_top_events_exclude_other(self) -> None: ) for transaction in ["foo", "bar", "qux"] ], - is_eap=True, ) response = self._do_request( @@ -548,7 +542,6 @@ def test_top_events_multi_y_axis(self) -> None: ) for transaction in ["foo", "bar", "baz"] ], - is_eap=True, ) response = self._do_request( @@ -591,7 +584,6 @@ def test_top_events_with_project(self) -> None: ) for project in projects ], - is_eap=True, ) self.store_spans( [ @@ -600,7 +592,6 @@ def test_top_events_with_project(self) -> None: start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -639,7 +630,6 @@ def test_top_events_with_project_and_project_id(self) -> None: ) for project in projects ], - is_eap=True, ) self.store_spans( [ @@ -648,7 +638,6 @@ def test_top_events_with_project_and_project_id(self) -> None: 
start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -696,7 +685,6 @@ def test_top_events_with_no_data(self) -> None: def test_count_unique_nans(self) -> None: self.store_span( self.create_span(start_ts=self.two_days_ago + timedelta(minutes=1)), - is_eap=True, ) response = self._do_request( data={ @@ -731,7 +719,7 @@ def test_count_extrapolation(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -769,7 +757,7 @@ def test_extrapolation_count(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -834,7 +822,7 @@ def test_confidence_is_set(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) y_axes = [ "count()", @@ -921,7 +909,7 @@ def test_extrapolation_with_multiaxis(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1005,7 +993,6 @@ def test_top_events_with_extrapolation(self) -> None: start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) event_counts = [0, 1, 0, 0, 0, 0] @@ -1073,7 +1060,7 @@ def test_comparison_delta(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1109,7 +1096,7 @@ def test_comparison_delta_with_empty_comparison_values(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1177,7 +1164,7 @@ def test_project_filters(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) for querystring in [f"project:{self.project.slug}", f"project:[{self.project.slug}]"]: response = self._do_request( @@ -1231,7 +1218,7 @@ def test_device_class_filter(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) for querystring in ["device.class:low", "device.class:[low,medium]"]: response = self._do_request( @@ -1284,7 +1271,7 @@ def test_device_class_filter_empty(self): for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1332,7 +1319,7 @@ def test_device_class_top_events(self) -> None: for minute in range(count[1]) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1388,7 +1375,6 @@ def test_top_events_filters_out_groupby_even_when_its_just_one_row(self) -> None start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -1443,7 +1429,6 @@ def test_cache_miss_rate(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -1493,7 +1478,6 @@ def test_trace_status_rate(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -1531,7 +1515,6 @@ def test_count_op(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -1575,7 +1558,6 @@ def test_top_events_with_escape_characters(self) -> None: duration=2000, ), ], - is_eap=True, ) response = self._do_request( @@ -1635,7 +1617,6 @@ def test_module_alias(self) -> None: 
start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -1687,7 +1668,6 @@ def test_module_alias_multi_value(self) -> None: start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -1737,7 +1717,6 @@ def test_http_response_rate(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -1786,7 +1765,6 @@ def test_http_response_rate_multiple_series(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -1825,7 +1803,6 @@ def test_downsampling_single_series(self) -> None: span2["span_id"] = "b" * 16 self.store_spans( [span, span2], - is_eap=True, ) response = self._do_request( data={ @@ -1885,7 +1862,6 @@ def test_downsampling_top_events(self) -> None: span2["span_id"] = "b" * 16 self.store_spans( [span, span2], - is_eap=True, ) response = self._do_request( data={ @@ -1960,7 +1936,7 @@ def test_small_valid_timerange(self) -> None: for _ in range(count) ] ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ "start": self.day_ago, @@ -1996,7 +1972,6 @@ def test_downsampling_can_go_to_higher_accuracy_tier(self) -> None: span2["span_id"] = "b" * 16 self.store_spans( [span, span2], - is_eap=True, ) response = self._do_request( data={ @@ -2068,7 +2043,6 @@ def test_top_n_is_transaction(self) -> None: start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -2126,7 +2100,7 @@ def test_datetime_unaligned_with_regular_buckets(self) -> None: start_ts=self.day_ago + timedelta(hours=12, minutes=5), ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) # This should be set to 10:00 the previous day query_start = self.day_ago + timedelta(minutes=12) @@ -2184,7 +2158,6 @@ def test_simple_equation(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -2222,7 +2195,6 @@ def test_equation_all_symbols(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) equation = "equation|count() * 2 + 2 - 2 / 2" @@ -2261,7 +2233,6 @@ def test_simple_equation_with_multi_axis(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -2323,7 +2294,6 @@ def test_simple_equation_with_top_events(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) response = self._do_request( @@ -2371,7 +2341,7 @@ def test_disable_extrapolation(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -2456,7 +2426,6 @@ def test_debug_with_top_events(self) -> None: duration=2000, ), ], - is_eap=True, ) self.user = self.create_user("superuser@example.com", is_superuser=True) @@ -2570,7 +2539,6 @@ def test_groupby_non_existent_attribute(self): start_ts=self.day_ago, ), ], - is_eap=True, ) response = self._do_request( data={ diff --git a/tests/snuba/api/endpoints/test_organization_events_timeseries_cross_trace.py b/tests/snuba/api/endpoints/test_organization_events_timeseries_cross_trace.py index d03d8bead544e6..ed10fc2c2be353 100644 --- a/tests/snuba/api/endpoints/test_organization_events_timeseries_cross_trace.py +++ b/tests/snuba/api/endpoints/test_organization_events_timeseries_cross_trace.py @@ -42,7 +42,6 @@ def test_cross_trace_query_with_logs(self) -> None: 
start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -107,7 +106,6 @@ def test_cross_trace_query_with_spans(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -177,7 +175,6 @@ def test_cross_trace_query_with_spans_and_logs(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -245,7 +242,6 @@ def test_cross_trace_query_with_multiple_spans(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( @@ -308,7 +304,6 @@ def test_cross_trace_qurey_with_multiple_logs(self) -> None: start_ts=self.ten_mins_ago, ), ], - is_eap=True, ) response = self.do_request( diff --git a/tests/snuba/api/endpoints/test_organization_events_timeseries_spans.py b/tests/snuba/api/endpoints/test_organization_events_timeseries_spans.py index dcd8bb063744e3..8ef336f45de627 100644 --- a/tests/snuba/api/endpoints/test_organization_events_timeseries_spans.py +++ b/tests/snuba/api/endpoints/test_organization_events_timeseries_spans.py @@ -102,7 +102,7 @@ def test_count(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -142,7 +142,6 @@ def test_count(self) -> None: def test_handle_nans_from_snuba(self) -> None: self.store_spans( [self.create_span({"description": "foo"}, start_ts=self.start)], - is_eap=True, ) response = self._do_request( @@ -176,7 +175,6 @@ def test_handle_nans_from_snuba_top_n(self) -> None: self.create_span({"description": "bar"}, start_ts=self.start), self.create_span({"description": "bar"}, start_ts=self.two_days_ago), ], - is_eap=True, ) seven_days_ago = self.two_days_ago - timedelta(days=5) @@ -287,7 +285,7 @@ def test_count_unique(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -331,7 +329,6 @@ def test_p95(self) -> None: ) for hour, duration in enumerate(event_durations) ], - is_eap=True, ) response = self._do_request( @@ -382,7 +379,7 @@ def test_multiaxis(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -453,7 +450,6 @@ def test_top_events(self) -> None: duration=1998, ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -552,7 +548,6 @@ def test_top_events_orderby_not_in_groupby(self) -> None: duration=1998, ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -611,7 +606,6 @@ def test_top_events_with_none_as_groupby(self) -> None: duration=1999, ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -702,7 +696,6 @@ def test_top_events_empty_other(self) -> None: ) for transaction in ["foo", "bar"] ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -771,7 +764,6 @@ def test_top_events_exclude_other(self) -> None: ) for transaction, duration in [("foo", 2000), ("bar", 1999), ("quz", 100)] ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -840,7 +832,6 @@ def test_top_events_multi_y_axis(self) -> None: ) for transaction in ["foo", "bar", "baz"] ], - is_eap=True, ) self.end = self.start + timedelta(minutes=6) @@ -981,7 +972,7 @@ def test_top_events_with_project(self) -> None: duration=1, ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) self.end = self.start + timedelta(minutes=6) response = self._do_request( @@ -1071,7 +1062,7 @@ def 
test_top_events_with_project_and_project_id(self) -> None: start_ts=self.start + timedelta(minutes=1), ) ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) self.end = self.start + timedelta(minutes=6) response = self._do_request( @@ -1198,7 +1189,7 @@ def test_count_extrapolation(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1253,7 +1244,7 @@ def test_confidence_is_set(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) y_axes = [ "count()", @@ -1322,7 +1313,7 @@ def test_extrapolation_with_multiaxis(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1384,7 +1375,6 @@ def test_top_events_with_extrapolation(self) -> None: duration=1998, ), ], - is_eap=True, ) event_counts = [0, 1, 0, 0, 0, 0] @@ -1447,7 +1437,7 @@ def test_comparison_delta(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1498,7 +1488,7 @@ def test_comparison_delta_with_empty_comparison_values(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -1577,7 +1567,7 @@ def test_project_filters(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) for querystring in [f"project:{self.project.slug}", f"project:[{self.project.slug}]"]: response = self._do_request( @@ -1643,7 +1633,7 @@ def test_device_class_filter(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) for querystring in ["device.class:low", "device.class:[low,medium]"]: response = self._do_request( @@ -1700,7 +1690,6 @@ def test_top_events_filters_out_groupby_even_when_its_just_one_row(self) -> None start_ts=self.day_ago + timedelta(minutes=1), ), ], - is_eap=True, ) response = self._do_request( @@ -1775,7 +1764,6 @@ def test_cache_miss_rate(self) -> None: start_ts=self.day_ago + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -1838,7 +1826,6 @@ def test_trace_status_rate(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -1889,7 +1876,6 @@ def test_count_op(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -1938,7 +1924,6 @@ def test_downsampling_single_series(self) -> None: span2["span_id"] = "b" * 16 self.store_spans( [span, span2], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) response = self._do_request( @@ -2023,7 +2008,6 @@ def test_downsampling_top_events(self) -> None: span2["span_id"] = "b" * 16 self.store_spans( [span, span2], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) response = self._do_request( @@ -2119,7 +2103,6 @@ def test_simple_equation(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -2170,7 +2153,6 @@ def test_equation_all_symbols(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) equation = "equation|count() * 2 + 2 - 2 / 2" @@ -2222,7 +2204,6 @@ def test_simple_equation_with_multi_axis(self) -> None: 
start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -2310,7 +2291,6 @@ def test_simple_equation_with_top_events(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ], - is_eap=True, ) self.end = self.start + timedelta(minutes=3) @@ -2404,7 +2384,7 @@ def test_disable_extrapolation(self) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ @@ -2459,7 +2439,7 @@ def test_extrapolation_mode_server_only(self, mock_rpc_request) -> None: for minute in range(count) ], ) - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ "start": self.start, @@ -2541,7 +2521,7 @@ def test_with_no_value_release_data(self) -> None: start_ts=self.start + timedelta(minutes=2), ), ] - self.store_spans(spans, is_eap=True) + self.store_spans(spans) response = self._do_request( data={ "start": self.start, diff --git a/tests/snuba/api/endpoints/test_organization_events_trace.py b/tests/snuba/api/endpoints/test_organization_events_trace.py index 13ac5581a27b54..57c25cacaeebf4 100644 --- a/tests/snuba/api/endpoints/test_organization_events_trace.py +++ b/tests/snuba/api/endpoints/test_organization_events_trace.py @@ -44,7 +44,7 @@ def setUp(self) -> None: }, ) - def load_trace(self, is_eap=True): + def load_trace(self): self.root_event = self.create_event( trace_id=self.trace_id, transaction="root", @@ -72,7 +72,6 @@ def load_trace(self, is_eap=True): slow_db_performance_issue=True, project_id=self.project.id, milliseconds=3000, - is_eap=is_eap, ) # First Generation @@ -94,7 +93,6 @@ def load_trace(self, is_eap=True): parent_span_id=root_span_id, project_id=self.gen1_project.id, milliseconds=2000, - is_eap=is_eap, ) for i, (root_span_id, gen1_span_id) in enumerate( zip(self.root_span_ids, self.gen1_span_ids) @@ -126,7 +124,6 @@ def load_trace(self, is_eap=True): span_id=self.gen2_span_id if i == 0 else None, project_id=self.gen2_project.id, milliseconds=1000, - is_eap=is_eap, ) for i, (gen1_span_id, gen2_span_id) in enumerate( zip(self.gen1_span_ids, self.gen2_span_ids) @@ -142,7 +139,6 @@ def load_trace(self, is_eap=True): project_id=self.gen3_project.id, parent_span_id=self.gen2_span_id, milliseconds=500, - is_eap=is_eap, ) @@ -227,7 +223,6 @@ def test_no_roots(self) -> None: spans=[], parent_span_id=parent_span_id, project_id=self.project.id, - is_eap=True, ) url = reverse( "sentry-api-0-organization-events-trace-light", @@ -262,7 +257,6 @@ def test_multiple_roots(self) -> None: spans=[], parent_span_id=None, project_id=self.project.id, - is_eap=True, ) with self.feature(self.FEATURES): data: dict[str, str | int] = {"event_id": second_root.event_id, "project": -1} @@ -310,7 +304,6 @@ def test_root_with_multiple_roots(self) -> None: spans=[], parent_span_id=None, project_id=self.project.id, - is_eap=True, ) with self.feature(self.FEATURES): response = self.client.get( @@ -382,7 +375,6 @@ def test_direct_parent_with_children_and_multiple_root(self) -> None: spans=[], parent_span_id=None, project_id=self.project.id, - is_eap=True, ) with self.feature(self.FEATURES): @@ -470,7 +462,6 @@ def test_sibling_transactions(self) -> None: project_id=self.create_project(organization=self.organization).id, parent_span_id=self.gen2_span_ids[1], milliseconds=500, - is_eap=True, ).event_id, self.create_event( trace_id=self.trace_id, @@ -479,7 +470,6 @@ def test_sibling_transactions(self) -> None: 
project_id=self.create_project(organization=self.organization).id, parent_span_id=self.gen2_span_ids[1], milliseconds=1500, - is_eap=True, ).event_id, ] @@ -796,7 +786,6 @@ def test_detailed_trace_with_bad_tags(self) -> None: tags=[[long_tag_key, long_tag_value]], milliseconds=3000, store_event_kwargs={"assert_no_errors": False}, - is_eap=True, ) url = reverse( @@ -870,7 +859,6 @@ def test_bad_span_loop(self) -> None: ], parent_span_id=self.gen2_span_ids[1], project_id=self.project.id, - is_eap=True, ) with self.feature(self.FEATURES): @@ -910,7 +898,6 @@ def test_bad_orphan_span_loop(self) -> None: project_id=self.project.id, milliseconds=3000, start_timestamp=self.day_ago - timedelta(minutes=1), - is_eap=True, ) orphan_child = self.create_event( trace_id=self.trace_id, @@ -927,7 +914,6 @@ def test_bad_orphan_span_loop(self) -> None: parent_span_id=root_span_id, project_id=self.project.id, milliseconds=300, - is_eap=True, ) with self.feature(self.FEATURES): response = self.client_get( @@ -954,7 +940,6 @@ def test_multiple_roots(self) -> None: parent_span_id=None, project_id=self.project.id, milliseconds=500, - is_eap=True, ) second_root = self.create_event( trace_id=trace_id, @@ -963,7 +948,6 @@ def test_multiple_roots(self) -> None: parent_span_id=None, project_id=self.project.id, milliseconds=1000, - is_eap=True, ) self.url = reverse( self.url_name, @@ -993,7 +977,6 @@ def test_sibling_transactions(self) -> None: project_id=self.create_project(organization=self.organization).id, parent_span_id=self.gen2_span_ids[1], milliseconds=1000, - is_eap=True, ).event_id, self.create_event( trace_id=self.trace_id, @@ -1002,7 +985,6 @@ def test_sibling_transactions(self) -> None: project_id=self.create_project(organization=self.organization).id, parent_span_id=self.gen2_span_ids[1], milliseconds=2000, - is_eap=True, ).event_id, ] @@ -1030,7 +1012,6 @@ def test_with_orphan_siblings(self) -> None: project_id=self.project.id, # Shorter duration means that this event happened first, and should be ordered first milliseconds=1000, - is_eap=True, ) root_sibling_event = self.create_event( trace_id=self.trace_id, @@ -1039,7 +1020,6 @@ def test_with_orphan_siblings(self) -> None: parent_span_id=parent_span_id, project_id=self.project.id, milliseconds=2000, - is_eap=True, ) with self.feature(self.FEATURES): @@ -1082,7 +1062,6 @@ def test_with_orphan_trace(self) -> None: project_id=self.project.id, milliseconds=3000, start_timestamp=self.day_ago - timedelta(minutes=1), - is_eap=True, ) child_event = self.create_event( trace_id=self.trace_id, @@ -1102,7 +1081,6 @@ def test_with_orphan_trace(self) -> None: # Because the snuba query orders based is_root then timestamp, this causes grandchild1-0 to be added to # results first before child1-0 milliseconds=2000, - is_eap=True, ) grandchild_event = self.create_event( trace_id=self.trace_id, @@ -1120,7 +1098,6 @@ def test_with_orphan_trace(self) -> None: span_id=orphan_span_ids["grandchild"], project_id=self.gen1_project.id, milliseconds=1000, - is_eap=True, ) with self.feature(self.FEATURES): diff --git a/tests/snuba/api/endpoints/test_organization_trace.py b/tests/snuba/api/endpoints/test_organization_trace.py index cfb67253e2c37f..9edee16da3a21d 100644 --- a/tests/snuba/api/endpoints/test_organization_trace.py +++ b/tests/snuba/api/endpoints/test_organization_trace.py @@ -272,7 +272,7 @@ def test_no_projects(self) -> None: assert response.status_code == 404, response.content def test_simple(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() with 
self.feature(self.FEATURES): response = self.client_get( data={"timestamp": self.day_ago}, @@ -292,7 +292,7 @@ def test_simple(self) -> None: def test_pagination(self) -> None: """Test is identical to test_simple, but with the limit override, we'll need to make multiple requests to get all of the trace""" - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): response = self.client_get( data={"timestamp": self.day_ago}, @@ -303,7 +303,7 @@ def test_pagination(self) -> None: self.assert_trace_data(data[0]) def test_ignore_project_param(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): # The trace endpoint should ignore the project param response = self.client_get( @@ -315,7 +315,7 @@ def test_ignore_project_param(self) -> None: self.assert_trace_data(data[0]) def test_with_errors_data(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() _, start = self.get_start_end_from_day_ago(123) error_data = load_data( "javascript", @@ -347,7 +347,7 @@ def test_with_errors_data(self) -> None: assert error_event["start_timestamp"] == error_data["timestamp"] def test_with_errors_data_with_overlapping_span_id(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() _, start = self.get_start_end_from_day_ago(123) error_data = load_data( "javascript", @@ -378,7 +378,7 @@ def test_with_errors_data_with_overlapping_span_id(self) -> None: assert error_event_1["event_id"] != error_event_2["event_id"] def test_with_performance_issues(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): response = self.client_get( data={"timestamp": self.day_ago}, @@ -422,7 +422,7 @@ def test_with_only_errors(self) -> None: assert data[0]["event_id"] == error.event_id def test_with_additional_attributes(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): response = self.client_get( data={ @@ -488,7 +488,7 @@ def test_with_date_outside_retention(self) -> None: assert response.status_code == 400, response.content def test_orphan_trace(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() orphan_event = self.create_event( trace_id=self.trace_id, transaction="/transaction/orphan", @@ -497,7 +497,6 @@ def test_orphan_trace(self) -> None: # Random span id so there's no parent parent_span_id=uuid4().hex[:16], milliseconds=500, - is_eap=True, ) with self.feature(self.FEATURES): response = self.client_get( @@ -605,7 +604,7 @@ def _trace_item_to_api_span(self, trace_item: TraceItem, children=None) -> dict: def test_with_uptime_results(self): """Test that uptime results are included when include_uptime=1""" - self.load_trace(is_eap=True) + self.load_trace() features = self.FEATURES redirect_result = self._create_uptime_result_with_original_url( @@ -652,7 +651,7 @@ def test_with_uptime_results(self): def test_without_uptime_results(self): """Test that uptime results are not queried when include_uptime is not set""" - self.load_trace(is_eap=True) + self.load_trace() uptime_result = self._create_uptime_result_with_original_url( organization=self.organization, project=self.project, @@ -683,7 +682,7 @@ def test_without_uptime_results(self): def test_uptime_root_tree_with_orphaned_spans(self): """Test that orphaned spans are parented to the final uptime request""" - self.load_trace(is_eap=True) + self.load_trace() self.create_event( trace_id=self.trace_id, @@ -692,7 +691,6 @@ def test_uptime_root_tree_with_orphaned_spans(self): 
project_id=self.project.id, parent_span_id=uuid4().hex[:16], milliseconds=500, - is_eap=True, ) redirect_result = self._create_uptime_result_with_original_url( organization=self.organization, @@ -739,7 +737,7 @@ def test_uptime_root_tree_with_orphaned_spans(self): def test_uptime_root_tree_without_orphans(self): """Test uptime results when there are no orphaned spans""" - self.load_trace(is_eap=True) + self.load_trace() uptime_result = self._create_uptime_result_with_original_url( organization=self.organization, diff --git a/tests/snuba/api/endpoints/test_organization_trace_item_attributes.py b/tests/snuba/api/endpoints/test_organization_trace_item_attributes.py index 27b84fcd2a1ee2..32c49a512b1a0e 100644 --- a/tests/snuba/api/endpoints/test_organization_trace_item_attributes.py +++ b/tests/snuba/api/endpoints/test_organization_trace_item_attributes.py @@ -432,7 +432,6 @@ def test_tags_list_str(self) -> None: duration=100, exclusive_time=100, tags={tag: tag}, - is_eap=True, ) response = self.do_request( @@ -490,7 +489,6 @@ def test_tags_list_nums(self) -> None: duration=100, exclusive_time=100, measurements={tag: 0}, - is_eap=True, ) response = self.do_request( @@ -550,7 +548,6 @@ def test_pagination(self) -> None: duration=100, exclusive_time=100, tags={tag: tag}, - is_eap=True, ) response = self.do_request( @@ -673,7 +670,6 @@ def test_tags_list_sentry_conventions(self) -> None: duration=100, exclusive_time=100, measurements={tag: 0}, - is_eap=True, ) response = self.do_request( @@ -753,7 +749,6 @@ def test_attribute_collision(self) -> None: "span.op": "foo", "span.duration": "bar", }, - is_eap=True, ) response = self.do_request() @@ -797,7 +792,6 @@ def test_sentry_internal_attributes(self) -> None: start_ts=before_now(days=0, minutes=10), ), ], - is_eap=True, ) response = self.do_request(query={"substringMatch": ""}) @@ -831,7 +825,7 @@ def test_boolean_attributes(self) -> None: "is_feature_enabled": False, "is_production": True, } - self.store_spans([span1, span2], is_eap=True) + self.store_spans([span1, span2]) response = self.do_request(query={"attributeType": "boolean"}) assert response.status_code == 200, response.content @@ -1139,7 +1133,6 @@ def test_tags_keys(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) response = self.do_request(key="tag") @@ -1185,7 +1178,6 @@ def test_transaction_keys_autocomplete(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -1233,7 +1225,6 @@ def test_transaction_keys_autocomplete_substring(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -1273,7 +1264,6 @@ def test_transaction_keys_autocomplete_substring_with_asterisk(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) key = "transaction" @@ -1314,7 +1304,6 @@ def test_tags_keys_autocomplete(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -1363,7 +1352,6 @@ def test_tags_keys_autocomplete_substring(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -1404,7 +1392,6 @@ def test_tags_keys_autocomplete_substring_with_asterisks(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) key = "tag" @@ -1445,7 +1432,6 @@ def test_tags_keys_autocomplete_noop(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) for key in [ @@ -1593,7 +1579,6 @@ def 
test_tags_keys_autocomplete_span_status(self) -> None: timestamp=timestamp, transaction="foo", status=status, - is_eap=True, ) response = self.do_request(key="span.status") @@ -1737,7 +1722,6 @@ def test_invalid_query(self, mock_executor_2: mock.MagicMock) -> None: duration=100, exclusive_time=100, tags={"tag": "foo"}, - is_eap=True, ) response = self.do_request(key="tag") @@ -1759,7 +1743,6 @@ def test_pagination(self) -> None: duration=100, exclusive_time=100, tags={"tag": tag}, - is_eap=True, ) response = self.do_request(key="tag") @@ -1859,7 +1842,6 @@ def test_autocomplete_release_semver_attributes(self) -> None: start_ts=before_now(days=0, minutes=10), ), ], - is_eap=True, ) response = self.do_request(key="release") @@ -1990,7 +1972,6 @@ def test_autocomplete_release_semver_attributes(self) -> None: def test_autocomplete_timestamp(self) -> None: self.store_spans( [self.create_span(start_ts=before_now(days=0, minutes=10))], - is_eap=True, ) response = self.do_request(key="timestamp", query={"substringMatch": "20"}) assert response.status_code == 200 @@ -2005,7 +1986,6 @@ def test_autocomplete_device_class(self) -> None: self.create_span({"sentry_tags": {"device.class": ""}}), self.create_span({}), ], - is_eap=True, ) response = self.do_request(key="device.class") diff --git a/tests/snuba/api/endpoints/test_organization_trace_item_attributes_ranked.py b/tests/snuba/api/endpoints/test_organization_trace_item_attributes_ranked.py index c76e1dc37c1f92..c837f6edf83281 100644 --- a/tests/snuba/api/endpoints/test_organization_trace_item_attributes_ranked.py +++ b/tests/snuba/api/endpoints/test_organization_trace_item_attributes_ranked.py @@ -62,7 +62,6 @@ def _store_span(self, description=None, tags=None, duration=None, status=None): start_ts=self.ten_mins_ago, duration=duration or 1000, ), - is_eap=True, ) def test_no_project(self) -> None: diff --git a/tests/snuba/api/endpoints/test_organization_trace_item_stats.py b/tests/snuba/api/endpoints/test_organization_trace_item_stats.py index 3e1dce6301d16e..7b96163d94c1aa 100644 --- a/tests/snuba/api/endpoints/test_organization_trace_item_stats.py +++ b/tests/snuba/api/endpoints/test_organization_trace_item_stats.py @@ -45,7 +45,6 @@ def _store_span(self, description=None, tags=None, duration=None): start_ts=self.ten_mins_ago, duration=duration or 1000, ), - is_eap=True, ) def test_no_project(self) -> None: @@ -122,7 +121,6 @@ def test_substring_match_returns_known_public_aliases(self) -> None: start_ts=self.ten_mins_ago, duration=100, ), - is_eap=True, ) self.store_span( self.create_span( @@ -132,7 +130,6 @@ def test_substring_match_returns_known_public_aliases(self) -> None: start_ts=self.ten_mins_ago, duration=200, ), - is_eap=True, ) response = self.do_request( diff --git a/tests/snuba/api/endpoints/test_organization_trace_meta.py b/tests/snuba/api/endpoints/test_organization_trace_meta.py index 721e70d7d8d590..6032dbcfe01e84 100644 --- a/tests/snuba/api/endpoints/test_organization_trace_meta.py +++ b/tests/snuba/api/endpoints/test_organization_trace_meta.py @@ -75,7 +75,7 @@ def test_bad_ids(self) -> None: ) def test_simple(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): response = self.client.get( self.url, @@ -90,7 +90,7 @@ def test_simple(self) -> None: assert data["span_count_map"]["http.server"] == 19 def test_no_team(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() self.team.delete() with self.feature(self.FEATURES): response = self.client.get( @@ -105,7 +105,7 @@ 
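+105,7 @@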
def test_no_team(self) -> None: assert data["span_count_map"]["http.server"] == 19 def test_with_errors(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() self.load_errors(self.gen1_project, self.gen1_span_ids[0]) with self.feature(self.FEATURES): response = self.client.get( @@ -121,7 +121,7 @@ def test_with_errors(self) -> None: assert data["span_count_map"]["http.server"] == 19 def test_with_default(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() self.load_default() with self.feature(self.FEATURES): response = self.client.get( @@ -138,7 +138,7 @@ def test_with_default(self) -> None: assert len(data["transaction_child_count_map"]) == 8 def test_with_invalid_date(self) -> None: - self.load_trace(is_eap=True) + self.load_trace() self.load_default() with self.options({"system.event-retention-days": 10}): with self.feature(self.FEATURES): @@ -164,7 +164,7 @@ def create_uptime_check(self, trace_id=None, **kwargs): def test_trace_meta_without_uptime_param(self) -> None: """Test that uptime_checks field is NOT present when include_uptime is not set""" - self.load_trace(is_eap=True) + self.load_trace() uptime_result = self.create_uptime_check() self.store_uptime_results([uptime_result]) with self.feature(self.FEATURES): @@ -183,7 +183,7 @@ def test_trace_meta_without_uptime_param(self) -> None: def test_trace_meta_with_uptime_param(self) -> None: """Test that uptime_checks shows correct count when include_uptime=1""" - self.load_trace(is_eap=True) + self.load_trace() uptime_results = [ self.create_uptime_check(check_status="success"), @@ -209,7 +209,7 @@ def test_trace_meta_with_uptime_param(self) -> None: def test_trace_meta_no_uptime_results(self) -> None: """Test that uptime_checks is 0 when there are no uptime results""" - self.load_trace(is_eap=True) + self.load_trace() with self.feature(self.FEATURES): response = self.client.get( @@ -228,7 +228,7 @@ def test_trace_meta_no_uptime_results(self) -> None: def test_trace_meta_different_trace_id(self) -> None: """Test that uptime results from different traces are not counted""" - self.load_trace(is_eap=True) + self.load_trace() other_trace_id = uuid4().hex uptime_result = self.create_uptime_check(trace_id=other_trace_id) self.store_uptime_results([uptime_result]) diff --git a/tests/snuba/api/endpoints/test_project_trace_item_details.py b/tests/snuba/api/endpoints/test_project_trace_item_details.py index cf1c3884a6e5ce..a190214cdd5f63 100644 --- a/tests/snuba/api/endpoints/test_project_trace_item_details.py +++ b/tests/snuba/api/endpoints/test_project_trace_item_details.py @@ -178,7 +178,7 @@ def test_simple_using_spans_item_type(self) -> None: span_1["trace_id"] = self.trace_uuid item_id = span_1["span_id"] - self.store_span(span_1, is_eap=True) + self.store_span(span_1) trace_details_response = self.do_request("spans", item_id) assert trace_details_response.status_code == 200, trace_details_response.content @@ -242,7 +242,7 @@ def test_simple_using_spans_item_type_with_sentry_conventions(self) -> None: span_1["trace_id"] = self.trace_uuid item_id = span_1["span_id"] - self.store_span(span_1, is_eap=True) + self.store_span(span_1) trace_details_response = self.do_request( "spans", @@ -424,7 +424,7 @@ def test_sentry_links(self) -> None: span_1["trace_id"] = self.trace_uuid item_id = span_1["span_id"] - self.store_span(span_1, is_eap=True) + self.store_span(span_1) trace_details_response = self.do_request("spans", item_id) assert trace_details_response.status_code == 200, trace_details_response.content @@ -497,7 
+497,7 @@ def test_sentry_internal_attributes(self) -> None: span_1["trace_id"] = self.trace_uuid item_id = span_1["span_id"] - self.store_spans([span_1], is_eap=True) + self.store_spans([span_1]) trace_details_response = self.do_request("spans", item_id) assert trace_details_response.status_code == 200 diff --git a/tests/snuba/api/endpoints/test_seer_attributes.py b/tests/snuba/api/endpoints/test_seer_attributes.py index 5eaeb3548f44dd..4f6642d1470d6e 100644 --- a/tests/snuba/api/endpoints/test_seer_attributes.py +++ b/tests/snuba/api/endpoints/test_seer_attributes.py @@ -27,7 +27,6 @@ def test_get_attribute_names(self) -> None: transaction="foo", duration=100, exclusive_time=100, - is_eap=True, ) with self.feature( @@ -83,7 +82,6 @@ def test_get_attribute_values_with_substring(self) -> None: transaction=transaction, duration=100, exclusive_time=100, - is_eap=True, ) with self.feature( @@ -124,7 +122,6 @@ def test_get_attributes_and_values(self) -> None: tags={"test_tag": tag_value}, duration=100, exclusive_time=100, - is_eap=True, ) self.store_segment( @@ -138,7 +135,6 @@ def test_get_attributes_and_values(self) -> None: tags={"another_tag": "another_value"}, duration=100, exclusive_time=100, - is_eap=True, ) with self.feature(