From 6c95fd18ba5a53f82f0a91590608d75d4742da56 Mon Sep 17 00:00:00 2001
From: Julian Bez
Date: Thu, 27 Jun 2024 12:39:21 +0100
Subject: [PATCH] chore(ruff): Add ruff rules for exception handling (#23251)

---
 bin/unit_metrics.py                                 |  6 +++---
 ee/api/billing.py                                   |  6 +++---
 ee/clickhouse/materialized_columns/columns.py       |  2 +-
 .../experiments/funnel_experiment_result.py         |  4 ++--
 .../queries/experiments/trend_experiment_result.py  |  4 ++--
 ee/session_recordings/ai/embeddings_runner.py       | 10 +++++-----
 ee/tasks/slack.py                                   |  4 ++--
 posthog/api/capture.py                              | 10 +++++-----
 posthog/api/cohort.py                               |  4 ++--
 posthog/api/dashboards/dashboard.py                 |  4 ++--
 posthog/api/event.py                                |  2 +-
 posthog/api/person.py                               |  2 +-
 posthog/api/query.py                                |  2 +-
 posthog/api/survey.py                               |  2 +-
 posthog/async_migrations/runner.py                  |  2 +-
 posthog/clickhouse/client/execute_async.py          | 10 +++++-----
 posthog/hogql/parser.py                             |  2 +-
 posthog/hogql/query.py                              |  4 ++--
 posthog/hogql/visitor.py                            |  2 +-
 .../flagged_conversion_manager.py                   |  2 +-
 posthog/hogql_queries/query_runner.py               |  2 +-
 posthog/metrics.py                                  |  2 +-
 posthog/models/activity_logging/activity_log.py     |  2 +-
 posthog/models/cohort/cohort.py                     |  4 ++--
 posthog/models/feature_flag/flag_matching.py        | 14 +++++++-------
 posthog/models/filters/mixins/property.py           |  4 ++--
 posthog/models/insight.py                           |  2 +-
 posthog/plugins/utils.py                            |  4 ++--
 .../session_recording_list_from_replay_summary.py   |  2 +-
 posthog/session_recordings/realtime_snapshots.py    |  4 ++--
 .../snapshots/convert_legacy_snapshots.py           |  2 +-
 posthog/storage/object_storage.py                   | 12 ++++++------
 posthog/tasks/exports/csv_exporter.py               |  4 ++--
 posthog/tasks/exports/image_exporter.py             | 10 +++++-----
 posthog/tasks/poll_query_performance.py             |  2 +-
 posthog/tasks/tasks.py                              |  6 +++---
 posthog/tasks/usage_report.py                       |  8 ++++----
 posthog/temporal/common/sentry.py                   |  8 ++++----
 posthog/temporal/data_imports/external_data_job.py  |  8 ++++----
 .../data_imports/pipelines/hubspot/helpers.py       |  2 +-
 .../temporal/data_imports/pipelines/pipeline.py     |  2 +-
 posthog/warehouse/data_load/validate_schema.py      |  2 +-
 posthog/warehouse/models/table.py                   |  4 ++--
 pyproject.toml                                      |  2 ++
 44 files changed, 99 insertions(+), 97 deletions(-)

diff --git a/bin/unit_metrics.py b/bin/unit_metrics.py
index bab29481c41..91aac4c656a 100644
--- a/bin/unit_metrics.py
+++ b/bin/unit_metrics.py
@@ -54,18 +54,18 @@ def application(environ, start_response):
         response = connection.getresponse()
         statj = json.loads(response.read())
-    except Exception as e:
+    except Exception:
         if retries > 0:
             retries -= 1
             time.sleep(1)
             return application(environ, start_response)
         else:
-            raise e
+            raise
     finally:
         try:
             connection.close()
         except Exception as e:
-            logger.error("Failed to close connection to unit: ", e)
+            logger.exception("Failed to close connection to unit: ", e)

     UNIT_CONNECTIONS_ACCEPTED_TOTAL.set(statj["connections"]["accepted"])
     UNIT_CONNECTIONS_ACTIVE.set(statj["connections"]["active"])
diff --git a/ee/api/billing.py b/ee/api/billing.py
index 4fc575f3f20..d139cbf3dda 100644
--- a/ee/api/billing.py
+++ b/ee/api/billing.py
@@ -176,7 +176,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
                     status=status.HTTP_400_BAD_REQUEST,
                 )
             else:
-                raise e
+                raise

         return self.list(request, *args, **kwargs)

@@ -199,7 +199,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
             if len(e.args) > 2:
                 detail_object = e.args[2]
                 if not isinstance(detail_object, dict):
-                    raise e
+                    raise
                 return Response(
                     {
                         "statusText": e.args[0],
@@ -209,7 +209,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
                     status=status.HTTP_400_BAD_REQUEST,
                 )
             else:
-                raise e
+                raise

         return Response(
             {
diff --git a/ee/clickhouse/materialized_columns/columns.py b/ee/clickhouse/materialized_columns/columns.py
index 9961359a99c..00da36d698d 100644
--- a/ee/clickhouse/materialized_columns/columns.py
+++ b/ee/clickhouse/materialized_columns/columns.py
@@ -134,7 +134,7 @@ def add_minmax_index(table: TablesWithMaterializedColumns, column_name: str):
         )
     except ServerException as err:
         if "index with this name already exists" not in str(err):
-            raise err
+            raise

     return index_name

diff --git a/ee/clickhouse/queries/experiments/funnel_experiment_result.py b/ee/clickhouse/queries/experiments/funnel_experiment_result.py
index 845cce75d50..1f353829fe2 100644
--- a/ee/clickhouse/queries/experiments/funnel_experiment_result.py
+++ b/ee/clickhouse/queries/experiments/funnel_experiment_result.py
@@ -111,9 +111,9 @@ class ClickhouseFunnelExperimentResult:
             }

             significance_code, loss = self.are_results_significant(control_variant, test_variants, probabilities)
-        except ValidationError as err:
+        except ValidationError:
             if validate:
-                raise err
+                raise
             else:
                 return basic_result_props
diff --git a/ee/clickhouse/queries/experiments/trend_experiment_result.py b/ee/clickhouse/queries/experiments/trend_experiment_result.py
index 0370e0a684a..cf307adb6e4 100644
--- a/ee/clickhouse/queries/experiments/trend_experiment_result.py
+++ b/ee/clickhouse/queries/experiments/trend_experiment_result.py
@@ -244,9 +244,9 @@ class ClickhouseTrendExperimentResult:

             significance_code, p_value = self.are_results_significant(control_variant, test_variants, probabilities)

-        except ValidationError as err:
+        except ValidationError:
             if validate:
-                raise err
+                raise
             else:
                 return basic_result_props
diff --git a/ee/session_recordings/ai/embeddings_runner.py b/ee/session_recordings/ai/embeddings_runner.py
index 5bc44698ff3..324311c6f0f 100644
--- a/ee/session_recordings/ai/embeddings_runner.py
+++ b/ee/session_recordings/ai/embeddings_runner.py
@@ -160,7 +160,7 @@ class SessionEmbeddingsRunner(ABC):
             # we don't want to fail the whole batch if only a single recording fails
             except Exception as e:
                 SESSION_EMBEDDINGS_FAILED.labels(source_type=source_type).inc()
-                logger.error(
+                logger.exception(
                     f"embed individual item error",
                     flow="embeddings",
                     error=e,
@@ -174,8 +174,8 @@ class SessionEmbeddingsRunner(ABC):
             # but we don't swallow errors within the wider task itself
             # if something is failing here then we're most likely having trouble with ClickHouse
             SESSION_EMBEDDINGS_FATAL_FAILED.labels(source_type=source_type).inc()
-            logger.error(f"embed items fatal error", flow="embeddings", error=e, source_type=source_type)
-            raise e
+            logger.exception(f"embed items fatal error", flow="embeddings", error=e, source_type=source_type)
+            raise

     def _embed(self, input: str, source_type: str):
         token_count = self._num_tokens_for_input(input)
@@ -213,9 +213,9 @@ class SessionEmbeddingsRunner(ABC):
             )
             SESSION_EMBEDDINGS_WRITTEN_TO_CLICKHOUSE.labels(source_type=source_type).inc(len(embeddings))
         except Exception as e:
-            logger.error(f"flush embeddings error", flow="embeddings", error=e, source_type=source_type)
+            logger.exception(f"flush embeddings error", flow="embeddings", error=e, source_type=source_type)
             SESSION_EMBEDDINGS_FAILED_TO_CLICKHOUSE.labels(source_type=source_type).inc(len(embeddings))
-            raise e
+            raise


 class ErrorEmbeddingsPreparation(EmbeddingPreparation):
diff --git a/ee/tasks/slack.py b/ee/tasks/slack.py
index 251e9fd2613..c3e2a41422b 100644
--- a/ee/tasks/slack.py
+++ b/ee/tasks/slack.py
@@ -87,7 +87,7 @@ def _handle_slack_event(event_payload: Any) -> None:
     if unfurls:
         try:
             slack_integration.client.chat_unfurl(unfurls=unfurls, unfurl_id=unfurl_id, source=source, channel="", ts="")
-        except Exception as e:
+        except Exception:
             # NOTE: This is temporary as a test to understand if the channel and ts are actually required as the docs are not clear
             slack_integration.client.chat_unfurl(
                 unfurls=unfurls,
@@ -96,7 +96,7 @@ def _handle_slack_event(event_payload: Any) -> None:
                 channel=channel,
                 ts=message_ts,
             )
-            raise e
+            raise


 def handle_slack_event(payload: Any) -> None:
diff --git a/posthog/api/capture.py b/posthog/api/capture.py
index 9716d2c7a6a..5a0e7444924 100644
--- a/posthog/api/capture.py
+++ b/posthog/api/capture.py
@@ -196,10 +196,10 @@ def log_event(
         future = producer.produce(topic=kafka_topic, data=data, key=partition_key, headers=headers)
         statsd.incr("posthog_cloud_plugin_server_ingestion")
         return future
-    except Exception as e:
+    except Exception:
         statsd.incr("capture_endpoint_log_event_error")
         logger.exception("Failed to produce event to Kafka topic %s with error", kafka_topic)
-        raise e
+        raise


 def _datetime_from_seconds_or_millis(timestamp: str) -> datetime:
@@ -466,7 +466,7 @@ def get_event(request):
         except Exception as exc:
             capture_exception(exc, {"data": data})
             statsd.incr("posthog_cloud_raw_endpoint_failure", tags={"endpoint": "capture"})
-            logger.error("kafka_produce_failure", exc_info=exc)
+            logger.exception("kafka_produce_failure", exc_info=exc)
             return cors_response(
                 request,
                 generate_exception_response(
@@ -490,7 +490,7 @@ def get_event(request):

             # TODO: return 400 error for non-retriable errors that require the
             # client to change their request.
-            logger.error(
+            logger.exception(
                 "kafka_produce_failure",
                 exc_info=exc,
                 name=exc.__class__.__name__,
@@ -544,7 +544,7 @@ def get_event(request):

         except Exception as exc:
             capture_exception(exc, {"data": data})
-            logger.error("kafka_session_recording_produce_failure", exc_info=exc)
+            logger.exception("kafka_session_recording_produce_failure", exc_info=exc)
             pass

     statsd.incr("posthog_cloud_raw_endpoint_success", tags={"endpoint": "capture"})
diff --git a/posthog/api/cohort.py b/posthog/api/cohort.py
index 3e776167b8d..c6b1ca1dbe9 100644
--- a/posthog/api/cohort.py
+++ b/posthog/api/cohort.py
@@ -608,7 +608,7 @@ def insert_actors_into_cohort_by_query(cohort: Cohort, query: str, params: dict[
         cohort.save(update_fields=["errors_calculating", "last_calculation", "is_calculating"])
     except Exception as err:
         if settings.DEBUG:
-            raise err
+            raise
         cohort.is_calculating = False
         cohort.errors_calculating = F("errors_calculating") + 1
         cohort.save(update_fields=["errors_calculating", "is_calculating"])
@@ -733,7 +733,7 @@ def get_cohort_actors_for_feature_flag(cohort_id: int, flag: str, team_id: int,

     except Exception as err:
         if settings.DEBUG or settings.TEST:
-            raise err
+            raise
         capture_exception(err)
diff --git a/posthog/api/dashboards/dashboard.py b/posthog/api/dashboards/dashboard.py
index 37f9692a1a1..40f267f1d46 100644
--- a/posthog/api/dashboards/dashboard.py
+++ b/posthog/api/dashboards/dashboard.py
@@ -502,9 +502,9 @@ class DashboardsViewSet(
                         "dashboard_id": dashboard.pk,
                     },
                 )
-        except Exception as e:
+        except Exception:
             dashboard.delete()
-            raise e
+            raise

         return Response(DashboardSerializer(dashboard, context={"view": self, "request": request}).data)
diff --git a/posthog/api/event.py b/posthog/api/event.py
index 1f8e8599751..dacceddab5d 100644
--- a/posthog/api/event.py
+++ b/posthog/api/event.py
@@ -215,7 +215,7 @@ class EventViewSet(

         except Exception as ex:
             capture_exception(ex)
-            raise ex
+            raise

     def _get_people(self, query_result: List[dict], team: Team) -> dict[str, Any]:  # noqa: UP006
         distinct_ids = [event["distinct_id"] for event in query_result]
diff --git a/posthog/api/person.py b/posthog/api/person.py
index c9f7c99c6a2..74032087562 100644
--- a/posthog/api/person.py
+++ b/posthog/api/person.py
@@ -425,7 +425,7 @@ class PersonViewSet(TeamAndOrgViewSetMixin, viewsets.ModelViewSet):
                     "team_id": self.team.id,
                 },
             )
-            raise e
+            raise

         return result
diff --git a/posthog/api/query.py b/posthog/api/query.py
index c5a60ac401b..3fba69fdc65 100644
--- a/posthog/api/query.py
+++ b/posthog/api/query.py
@@ -93,7 +93,7 @@ class QueryViewSet(TeamAndOrgViewSetMixin, PydanticModelMixin, viewsets.ViewSet)
         except Exception as e:
             self.handle_column_ch_error(e)
             capture_exception(e)
-            raise e
+            raise

     @extend_schema(
         description="(Experimental)",
diff --git a/posthog/api/survey.py b/posthog/api/survey.py
index 4065c087419..05ab3da354b 100644
--- a/posthog/api/survey.py
+++ b/posthog/api/survey.py
@@ -521,7 +521,7 @@ def create_flag_with_survey_errors():
                 detail=original_detail.replace("feature flags", "surveys"),
                 code=BEHAVIOURAL_COHORT_FOUND_ERROR_CODE,
             )
-        raise e
+        raise


 def nh3_clean_with_allow_list(to_clean: str):
diff --git a/posthog/async_migrations/runner.py b/posthog/async_migrations/runner.py
index 05946cfd3c9..0108600baeb 100644
--- a/posthog/async_migrations/runner.py
+++ b/posthog/async_migrations/runner.py
@@ -192,7 +192,7 @@ def run_async_migration_next_op(migration_name: str, migration_instance: Optiona
     except Exception as e:
         error = f"Exception was thrown while running operation {migration_instance.current_operation_index} : {str(e)}"
-        logger.error(
+        logger.exception(
             "Error running async migration operation",
             migration=migration_name,
             current_operation_index=migration_instance.current_operation_index,
diff --git a/posthog/clickhouse/client/execute_async.py b/posthog/clickhouse/client/execute_async.py
index f04904b32ff..b0cdd038fbd 100644
--- a/posthog/clickhouse/client/execute_async.py
+++ b/posthog/clickhouse/client/execute_async.py
@@ -116,7 +116,7 @@ class QueryStatusManager:
                     query_progress["active_cpu_time"] += single_query_progress["active_cpu_time"]
                 query_status.query_progress = ClickhouseQueryProgress(**query_progress)
             except Exception as e:
-                logger.error("Clickhouse Status Check Failed", error=e)
+                logger.exception("Clickhouse Status Check Failed", error=e)
                 pass

         return query_status
@@ -180,12 +180,12 @@ def execute_process_query(
     except (ExposedHogQLError, ExposedCHQueryError) as err:  # We can expose the error to the user
         query_status.results = None  # Clear results in case they are faulty
         query_status.error_message = str(err)
-        logger.error("Error processing query for team %s query %s: %s", team_id, query_id, err)
-        raise err
+        logger.exception("Error processing query for team %s query %s: %s", team_id, query_id, err)
+        raise
     except Exception as err:  # We cannot reveal anything about the error
         query_status.results = None  # Clear results in case they are faulty
-        logger.error("Error processing query for team %s query %s: %s", team_id, query_id, err)
-        raise err
+        logger.exception("Error processing query for team %s query %s: %s", team_id, query_id, err)
+        raise
     finally:
         manager.store_query_status(query_status)
diff --git a/posthog/hogql/parser.py b/posthog/hogql/parser.py
index ead5c5607cc..9499f0a8a67 100644
--- a/posthog/hogql/parser.py
+++ b/posthog/hogql/parser.py
@@ -208,7 +208,7 @@ class HogQLParseTreeConverter(ParseTreeVisitor):
             if start is not None and end is not None and e.start is None or e.end is None:
                 e.start = start
                 e.end = end
-            raise e
+            raise

     def visitProgram(self, ctx: HogQLParser.ProgramContext):
         declarations: list[ast.Declaration] = []
diff --git a/posthog/hogql/query.py b/posthog/hogql/query.py
index 7584663b56d..1919b39c17b 100644
--- a/posthog/hogql/query.py
+++ b/posthog/hogql/query.py
@@ -162,7 +162,7 @@ def execute_hogql_query(
                 else:
                     error = "Unknown error"
             else:
-                raise e
+                raise

     if clickhouse_sql is not None:
         timings_dict = timings.to_dict()
@@ -193,7 +193,7 @@ def execute_hogql_query(
                 else:
                     error = "Unknown error"
             else:
-                raise e
+                raise

         if debug and error is None:  # If the query errored, explain will fail as well.
             with timings.measure("explain"):
diff --git a/posthog/hogql/visitor.py b/posthog/hogql/visitor.py
index 947e07921f5..a71f02b0ffb 100644
--- a/posthog/hogql/visitor.py
+++ b/posthog/hogql/visitor.py
@@ -29,7 +29,7 @@ class Visitor(Generic[T]):
             if e.start is None or e.end is None:
                 e.start = node.start
                 e.end = node.end
-            raise e
+            raise


 class TraversingVisitor(Visitor[None]):
diff --git a/posthog/hogql_queries/legacy_compatibility/flagged_conversion_manager.py b/posthog/hogql_queries/legacy_compatibility/flagged_conversion_manager.py
index 64be226066a..527187276b0 100644
--- a/posthog/hogql_queries/legacy_compatibility/flagged_conversion_manager.py
+++ b/posthog/hogql_queries/legacy_compatibility/flagged_conversion_manager.py
@@ -21,7 +21,7 @@ def conversion_to_query_based(insight: "Insight") -> Iterator[None]:
         except Exception as e:
             set_tag("filter_to_query_todo", True)
             capture_exception(e)
-            raise e
+            raise

     try:
         yield
diff --git a/posthog/hogql_queries/query_runner.py b/posthog/hogql_queries/query_runner.py
index a517dcd0a63..d38cd03626f 100644
--- a/posthog/hogql_queries/query_runner.py
+++ b/posthog/hogql_queries/query_runner.py
@@ -320,7 +320,7 @@ def get_query_runner_or_none(
     except ValueError as e:
         if "Can't get a runner for an unknown" in str(e):
             return None
-        raise e
+        raise


 Q = TypeVar("Q", bound=RunnableQueryNode)
diff --git a/posthog/metrics.py b/posthog/metrics.py
index 5c5aef2c977..726e22e6ab4 100644
--- a/posthog/metrics.py
+++ b/posthog/metrics.py
@@ -51,5 +51,5 @@ def pushed_metrics_registry(job_name: str):
         if settings.PROM_PUSHGATEWAY_ADDRESS:
             _push(settings.PROM_PUSHGATEWAY_ADDRESS, job=job_name, registry=registry)
     except Exception as err:
-        logger.error("push_to_gateway", target=settings.PROM_PUSHGATEWAY_ADDRESS, exception=err)
+        logger.exception("push_to_gateway", target=settings.PROM_PUSHGATEWAY_ADDRESS, exception=err)
         capture_exception(err)
diff --git a/posthog/models/activity_logging/activity_log.py b/posthog/models/activity_logging/activity_log.py
index c05b0cac48b..f2cdec54743 100644
--- a/posthog/models/activity_logging/activity_log.py
+++ b/posthog/models/activity_logging/activity_log.py
@@ -395,7 +395,7 @@ def log_activity(
         if settings.TEST:
             # Re-raise in tests, so that we can catch failures in test suites - but keep quiet in production,
             # as we currently don't treat activity logs as critical
-            raise e
+            raise


 @dataclasses.dataclass(frozen=True)
diff --git a/posthog/models/cohort/cohort.py b/posthog/models/cohort/cohort.py
index 8f7867127a1..42a655002cd 100644
--- a/posthog/models/cohort/cohort.py
+++ b/posthog/models/cohort/cohort.py
@@ -297,7 +297,7 @@ class Cohort(models.Model):
             self.save()
         except Exception as err:
             if settings.DEBUG:
-                raise err
+                raise
             self.is_calculating = False
             self.errors_calculating = F("errors_calculating") + 1
             self.save()
@@ -339,7 +339,7 @@ class Cohort(models.Model):
             self.save()
         except Exception as err:
             if settings.DEBUG:
-                raise err
+                raise
             self.is_calculating = False
             self.errors_calculating = F("errors_calculating") + 1
             self.save()
diff --git a/posthog/models/feature_flag/flag_matching.py b/posthog/models/feature_flag/flag_matching.py
index 70e0190a570..7c56a547252 100644
--- a/posthog/models/feature_flag/flag_matching.py
+++ b/posthog/models/feature_flag/flag_matching.py
@@ -119,9 +119,9 @@ class FlagsMatcherCache:
                 team_id=self.team_id
             )
             return {row.group_type: row.group_type_index for row in group_type_mapping_rows}
-        except DatabaseError as err:
+        except DatabaseError:
             self.failed_to_fetch_flags = True
-            raise err
+            raise

     @cached_property
     def group_type_index_to_name(self) -> dict[GroupTypeIndex, GroupTypeName]:
@@ -596,14 +596,14 @@ class FeatureFlagMatcher:
                 assert len(group_query) == 1, f"Expected 1 group query result, got {len(group_query)}"
                 all_conditions = {**all_conditions, **group_query[0]}
             return all_conditions
-        except DatabaseError as e:
+        except DatabaseError:
             self.failed_to_fetch_conditions = True
-            raise e
-        except Exception as e:
+            raise
+        except Exception:
             # Usually when a user somehow manages to create an invalid filter, usually via API.
             # In this case, don't put db down, just skip the flag.
             # Covers all cases like invalid JSON, invalid operator, invalid property name, invalid group input format, etc.
-            raise e
+            raise

     def hashed_identifier(self, feature_flag: FeatureFlag) -> Optional[str]:
         """
@@ -969,7 +969,7 @@ def set_feature_flag_hash_key_overrides(team_id: int, distinct_ids: list[str], h
                 )
                 time.sleep(retry_delay)
             else:
-                raise e
+                raise

     return False
diff --git a/posthog/models/filters/mixins/property.py b/posthog/models/filters/mixins/property.py
index 2ffc984754b..2d615a7374e 100644
--- a/posthog/models/filters/mixins/property.py
+++ b/posthog/models/filters/mixins/property.py
@@ -53,8 +53,8 @@ class PropertyMixin(BaseParamMixin):
         if isinstance(loaded_props, dict) and "type" in loaded_props and "values" in loaded_props:
             try:
                 return self._parse_property_group(loaded_props)
-            except ValidationError as e:
-                raise e
+            except ValidationError:
+                raise
             except ValueError as e:
                 raise ValidationError(f"PropertyGroup is unparsable: {e}")

         # already a PropertyGroup just return
diff --git a/posthog/models/insight.py b/posthog/models/insight.py
index c4d0c29b492..92191ce1a38 100644
--- a/posthog/models/insight.py
+++ b/posthog/models/insight.py
@@ -219,4 +219,4 @@ def generate_insight_filters_hash(insight: Insight, dashboard: Optional[Dashboar
             exception=e,
             exc_info=True,
         )
-        raise e
+        raise
diff --git a/posthog/plugins/utils.py b/posthog/plugins/utils.py
index 602f775447b..59b57908cb0 100644
--- a/posthog/plugins/utils.py
+++ b/posthog/plugins/utils.py
@@ -340,9 +340,9 @@ def extract_plugin_code(
     index_ts: Optional[str] = None
     try:
         index_ts = find_index_ts_in_archive(archive, plugin_json_parsed.get("main"))
-    except ValueError as e:
+    except ValueError:
         if frontend_tsx is None and site_ts is None:
-            raise e
+            raise

     return plugin_json, index_ts, frontend_tsx, site_ts
diff --git a/posthog/session_recordings/queries/session_recording_list_from_replay_summary.py b/posthog/session_recordings/queries/session_recording_list_from_replay_summary.py
index 78b204d8723..82db40b4e66 100644
--- a/posthog/session_recordings/queries/session_recording_list_from_replay_summary.py
+++ b/posthog/session_recordings/queries/session_recording_list_from_replay_summary.py
@@ -692,7 +692,7 @@ class SessionRecordingListFromReplaySummary(EventQuery):
         except Exception as ex:
             # error here weren't making it to sentry, let's be explicit
             capture_exception(ex, tags={"team_id": self._team.pk})
-            raise ex
+            raise

     @property
     def limit(self):
diff --git a/posthog/session_recordings/realtime_snapshots.py b/posthog/session_recordings/realtime_snapshots.py
index ce1b4a9179f..c422cea935d 100644
--- a/posthog/session_recordings/realtime_snapshots.py
+++ b/posthog/session_recordings/realtime_snapshots.py
@@ -58,7 +58,7 @@ def publish_subscription(team_id: str, session_id: str) -> None:
            },
            tags={"team_id": team_id, "session_id": session_id},
        )
-        raise e
+        raise


 def get_realtime_snapshots(team_id: str, session_id: str, attempt_count=0) -> Optional[list[str]]:
@@ -112,4 +112,4 @@ def get_realtime_snapshots(team_id: str, session_id: str, attempt_count=0) -> Op
             },
             tags={"team_id": team_id, "session_id": session_id},
         )
-        raise e
+        raise
diff --git a/posthog/session_recordings/snapshots/convert_legacy_snapshots.py b/posthog/session_recordings/snapshots/convert_legacy_snapshots.py
index d2d4ba2c4b4..d9b2f5b2e3a 100644
--- a/posthog/session_recordings/snapshots/convert_legacy_snapshots.py
+++ b/posthog/session_recordings/snapshots/convert_legacy_snapshots.py
@@ -24,7 +24,7 @@ def _save_converted_content_back_to_storage(converted_content: str, recording: S
         return save_recording_with_new_content(recording, converted_content)
     except ImportError:
         # not running in EE context... shouldn't get here
-        logger.error(
+        logger.exception(
             "attempted_to_save_converted_content_back_to_storage_in_non_ee_context",
             recording_id=recording.id,
         )
diff --git a/posthog/storage/object_storage.py b/posthog/storage/object_storage.py
index 147b02436fa..52d7486dbcf 100644
--- a/posthog/storage/object_storage.py
+++ b/posthog/storage/object_storage.py
@@ -99,7 +99,7 @@ class ObjectStorage(ObjectStorageClient):
                 HttpMethod="GET",
             )
         except Exception as e:
-            logger.error("object_storage.get_presigned_url_failed", file_name=file_key, error=e)
+            logger.exception("object_storage.get_presigned_url_failed", file_name=file_key, error=e)
             capture_exception(e)
             return None
@@ -111,7 +111,7 @@ class ObjectStorage(ObjectStorageClient):
             else:
                 return None
         except Exception as e:
-            logger.error(
+            logger.exception(
                 "object_storage.list_objects_failed",
                 bucket=bucket,
                 prefix=prefix,
@@ -133,7 +133,7 @@ class ObjectStorage(ObjectStorageClient):
             s3_response = self.aws_client.get_object(Bucket=bucket, Key=key)
             return s3_response["Body"].read()
         except Exception as e:
-            logger.error(
+            logger.exception(
                 "object_storage.read_failed",
                 bucket=bucket,
                 file_name=key,
@@ -151,7 +151,7 @@ class ObjectStorage(ObjectStorageClient):
                 Tagging={"TagSet": [{"Key": k, "Value": v} for k, v in tags.items()]},
             )
         except Exception as e:
-            logger.error("object_storage.tag_failed", bucket=bucket, file_name=key, error=e)
+            logger.exception("object_storage.tag_failed", bucket=bucket, file_name=key, error=e)
             capture_exception(e)
             raise ObjectStorageError("tag failed") from e
@@ -160,7 +160,7 @@ class ObjectStorage(ObjectStorageClient):
         try:
             s3_response = self.aws_client.put_object(Bucket=bucket, Body=content, Key=key, **(extras or {}))
         except Exception as e:
-            logger.error(
+            logger.exception(
                 "object_storage.write_failed",
                 bucket=bucket,
                 file_name=key,
@@ -181,7 +181,7 @@ class ObjectStorage(ObjectStorageClient):

             return len(source_objects)
         except Exception as e:
-            logger.error(
+            logger.exception(
                 "object_storage.copy_objects_failed",
                 source_prefix=source_prefix,
                 target_prefix=target_prefix,
diff --git a/posthog/tasks/exports/csv_exporter.py b/posthog/tasks/exports/csv_exporter.py
index fe33c8e8468..73864c7faf1 100644
--- a/posthog/tasks/exports/csv_exporter.py
+++ b/posthog/tasks/exports/csv_exporter.py
@@ -215,7 +215,7 @@ def get_from_insights_api(exported_asset: ExportedAsset, limit: int, resource: d
             response = make_api_call(access_token, body, limit, method, next_url, path)
         except HTTPError as e:
             if "Query size exceeded" not in e.response.text:
-                raise e
+                raise

             if limit <= CSV_EXPORT_BREAKDOWN_LIMIT_LOW:
                 break  # Already tried with the lowest limit, so return what we have
@@ -398,4 +398,4 @@ def export_tabular(exported_asset: ExportedAsset, limit: Optional[int] = None) -
         logger.error("csv_exporter.failed", exception=e, exc_info=True)
         EXPORT_FAILED_COUNTER.labels(type="csv").inc()
-        raise e
+        raise
diff --git a/posthog/tasks/exports/image_exporter.py b/posthog/tasks/exports/image_exporter.py
index ee52d51299c..0f18ab7191a 100644
--- a/posthog/tasks/exports/image_exporter.py
+++ b/posthog/tasks/exports/image_exporter.py
@@ -111,14 +111,14 @@ def _export_to_png(exported_asset: ExportedAsset) -> None:

         os.remove(image_path)

-    except Exception as err:
+    except Exception:
         # Ensure we clean up the tmp file in case anything went wrong
         if image_path and os.path.exists(image_path):
             os.remove(image_path)

         log_error_if_site_url_not_reachable()

-        raise err
+        raise


 def _screenshot_asset(
@@ -137,7 +137,7 @@ def _screenshot_asset(
         try:
             WebDriverWait(driver, 20).until_not(lambda x: x.find_element_by_class_name("Spinner"))
         except TimeoutException:
-            logger.error(
+            logger.exception(
                 "image_exporter.timeout",
                 url_to_render=url_to_render,
                 wait_for_css_selector=wait_for_css_selector,
@@ -172,7 +172,7 @@ def _screenshot_asset(
            pass

         capture_exception(e)
-        raise e
+        raise
     finally:
         if driver:
             driver.quit()
@@ -217,4 +217,4 @@ def export_image(exported_asset: ExportedAsset) -> None:
         logger.error("image_exporter.failed", exception=e, exc_info=True)
         EXPORT_FAILED_COUNTER.labels(type="image").inc()
-        raise e
+        raise
diff --git a/posthog/tasks/poll_query_performance.py b/posthog/tasks/poll_query_performance.py
index f99ecd6e075..f035c826290 100644
--- a/posthog/tasks/poll_query_performance.py
+++ b/posthog/tasks/poll_query_performance.py
@@ -79,4 +79,4 @@ def poll_query_performance() -> None:
             manager.update_clickhouse_query_progresses(list(results_group))

     except Exception as e:
-        logger.error("Clickhouse Status Check Failed", error=e)
+        logger.exception("Clickhouse Status Check Failed", error=e)
diff --git a/posthog/tasks/tasks.py b/posthog/tasks/tasks.py
index 7b601145c6e..97c162d1e9f 100644
--- a/posthog/tasks/tasks.py
+++ b/posthog/tasks/tasks.py
@@ -277,7 +277,7 @@ def invalid_web_replays() -> None:
                 count = results[0][i]
                 gauge.set(count)
     except Exception as e:
-        logger.error("Failed to run invalid web replays task", error=e, inc_exc_info=True)
+        logger.exception("Failed to run invalid web replays task", error=e, inc_exc_info=True)


 KNOWN_CELERY_TASK_IDENTIFIERS = {
@@ -615,7 +615,7 @@ def poll_query_performance(last_known_run_time_ns: int) -> None:
         poll_query_performance_nontask()
     except Exception as e:
-        logger.error("Poll query performance failed", error=e)
+        logger.exception("Poll query performance failed", error=e)

     elapsed_ns = time.time_ns() - start_time_ns
     if elapsed_ns > Polling.TIME_BETWEEN_RUNS_NANOSECONDS:
@@ -645,7 +645,7 @@ def start_poll_query_performance() -> None:
             poll_query_performance.delay(last_run_start_time_ns)

     except Exception as e:
-        logger.error("Restarting poll query performance because of an error", error=e)
+        logger.exception("Restarting poll query performance because of an error", error=e)
         poll_query_performance.delay(last_run_start_time_ns)
diff --git a/posthog/tasks/usage_report.py b/posthog/tasks/usage_report.py
index a2e4a34d36d..352b944dc08 100644
--- a/posthog/tasks/usage_report.py
+++ b/posthog/tasks/usage_report.py
@@ -309,7 +309,7 @@ def send_report_to_billing_service(org_id: str, report: dict[str, Any]) -> None:
             BillingManager(license).update_org_details(organization, response_data)

     except Exception as err:
-        logger.error(f"UsageReport failed sending to Billing for organization: {organization.id}: {err}")
+        logger.exception(f"UsageReport failed sending to Billing for organization: {organization.id}: {err}")
         capture_exception(err)
         pha_client = Client("sTMFPsFhdP1Ssg")
         capture_event(
@@ -318,7 +318,7 @@ def send_report_to_billing_service(org_id: str, report: dict[str, Any]) -> None:
             org_id,
             {"err": str(err)},
         )
-        raise err
+        raise


 def capture_event(
@@ -604,7 +604,7 @@ def capture_report(
         capture_event(pha_client, capture_event_name, org_id, full_report_dict, timestamp=at_date)
         logger.info(f"UsageReport sent to PostHog for organization {org_id}")
     except Exception as err:
-        logger.error(
+        logger.exception(
             f"UsageReport sent to PostHog for organization {org_id} failed: {str(err)}",
         )
         capture_event(pha_client, f"{capture_event_name} failure", org_id, {"error": str(err)})
@@ -960,4 +960,4 @@ def send_all_org_usage_reports(
         logger.debug(f"Sending usage reports to PostHog and Billing took {time_since.total_seconds()} seconds.")  # noqa T201
     except Exception as err:
         capture_exception(err)
-        raise err
+        raise
diff --git a/posthog/temporal/common/sentry.py b/posthog/temporal/common/sentry.py
index 81af9367914..26d196330fa 100644
--- a/posthog/temporal/common/sentry.py
+++ b/posthog/temporal/common/sentry.py
@@ -36,14 +36,14 @@ class _SentryActivityInboundInterceptor(ActivityInboundInterceptor):
         set_tag("temporal.workflow.run_id", activity_info.workflow_run_id)
         try:
             return await super().execute_activity(input)
-        except Exception as e:
+        except Exception:
             if len(input.args) == 1 and is_dataclass(input.args[0]):
                 team_id = getattr(input.args[0], "team_id", None)
                 if team_id:
                     set_tag("team_id", team_id)
             set_context("temporal.activity.info", activity.info().__dict__)
             capture_exception()
-            raise e
+            raise


 class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
@@ -59,7 +59,7 @@ class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
         set_tag("temporal.workflow.run_id", workflow_info.run_id)
         try:
             return await super().execute_workflow(input)
-        except Exception as e:
+        except Exception:
             if len(input.args) == 1 and is_dataclass(input.args[0]):
                 team_id = getattr(input.args[0], "team_id", None)
                 if team_id:
@@ -69,7 +69,7 @@ class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
                     set_tag("team_id", team_id)
             set_context("temporal.workflow.info", workflow.info().__dict__)

             if not workflow.unsafe.is_replaying():
                 with workflow.unsafe.sandbox_unrestricted():
                     capture_exception()
-            raise e
+            raise


 class SentryInterceptor(Interceptor):
diff --git a/posthog/temporal/data_imports/external_data_job.py b/posthog/temporal/data_imports/external_data_job.py
index cdc4044dbea..3f57fec222e 100644
--- a/posthog/temporal/data_imports/external_data_job.py
+++ b/posthog/temporal/data_imports/external_data_job.py
@@ -145,10 +145,10 @@ class ExternalDataJobWorkflow(PostHogWorkflow):
                 ),
             )
         except Exception as e:
-            logger.error(
+            logger.exception(
                 f"External data job failed on create_external_data_job_model_activity for {inputs.external_data_source_id} with error: {e}"
             )
-            raise e
+            raise

         update_inputs = UpdateExternalDataJobStatusInputs(
             id=run_id,
@@ -192,13 +192,13 @@ class ExternalDataJobWorkflow(PostHogWorkflow):
                 update_inputs.status = ExternalDataJob.Status.CANCELLED
             else:
                 update_inputs.status = ExternalDataJob.Status.FAILED
-            logger.error(
+            logger.exception(
                 f"External data job failed for external data source {inputs.external_data_source_id} with error: {e.cause}"
             )
             update_inputs.latest_error = str(e.cause)
             raise
         except Exception as e:
-            logger.error(
+            logger.exception(
                 f"External data job failed for external data source {inputs.external_data_source_id} with error: {e}"
             )
             # Catch all
diff --git a/posthog/temporal/data_imports/pipelines/hubspot/helpers.py b/posthog/temporal/data_imports/pipelines/hubspot/helpers.py
index d47616f251a..c6a447eac2c 100644
--- a/posthog/temporal/data_imports/pipelines/hubspot/helpers.py
+++ b/posthog/temporal/data_imports/pipelines/hubspot/helpers.py
@@ -134,7 +134,7 @@ def fetch_data(
             headers = _get_headers(api_key)
             r = requests.get(url, headers=headers, params=params)
         else:
-            raise e
+            raise

     # Parse the API response and yield the properties of each result
     # Parse the response JSON data
diff --git a/posthog/temporal/data_imports/pipelines/pipeline.py b/posthog/temporal/data_imports/pipelines/pipeline.py
index 29983da8c68..9210e143775 100644
--- a/posthog/temporal/data_imports/pipelines/pipeline.py
+++ b/posthog/temporal/data_imports/pipelines/pipeline.py
@@ -151,5 +151,5 @@ class DataImportPipeline:
         try:
             return await asyncio.to_thread(self._run)
         except PipelineStepFailed:
-            self.logger.error(f"Data import failed for endpoint")
+            self.logger.exception(f"Data import failed for endpoint")
             raise
diff --git a/posthog/warehouse/data_load/validate_schema.py b/posthog/warehouse/data_load/validate_schema.py
index 1e6064f7605..1bb0fd71940 100644
--- a/posthog/warehouse/data_load/validate_schema.py
+++ b/posthog/warehouse/data_load/validate_schema.py
@@ -207,7 +207,7 @@ async def validate_schema_and_update_table(
             f"Data Warehouse: Could not validate schema for external data job {job.pk}",
             exc_info=e,
         )
-        raise e
+        raise

     # TODO: figure out data deletes - currently borked right now
     # if (
diff --git a/posthog/warehouse/models/table.py b/posthog/warehouse/models/table.py
index 084eba202ea..de8421df3a9 100644
--- a/posthog/warehouse/models/table.py
+++ b/posthog/warehouse/models/table.py
@@ -132,7 +132,7 @@ class DataWarehouseTable(CreatedMetaFields, UUIDModel, DeletedMetaFields):
             if safe_expose_ch_error:
                 self._safe_expose_ch_error(err)
             else:
-                raise err
+                raise

         if result is None or isinstance(result, int):
             raise Exception("No columns types provided by clickhouse in get_columns")
@@ -165,7 +165,7 @@ class DataWarehouseTable(CreatedMetaFields, UUIDModel, DeletedMetaFields):
             if safe_expose_ch_error:
                 self._safe_expose_ch_error(err)
             else:
-                raise err
+                raise

         return result[0][0]
diff --git a/pyproject.toml b/pyproject.toml
index cb19ccadb81..58de4e0f9f6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -45,6 +45,8 @@ select = [
     "RUF015",
     "RUF019",
     "T2",
+    "TRY201",
+    "TRY400",
     "UP",
     "W",
 ]