
chore(ruff): Add ruff rules for exception handling (#23251)

Julian Bez 2024-06-27 12:39:21 +01:00 committed by GitHub
parent 04d345c613
commit 6c95fd18ba
44 changed files with 99 additions and 97 deletions
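
The two new codes come from Ruff's tryceratops-derived rules: TRY201 (verbose-raise) flags `raise e` inside an `except` block that simply re-raises the exception it just caught, and TRY400 (error-instead-of-exception) flags `logging.error` calls inside exception handlers, where `logging.exception` logs at the same level and also records the traceback. A minimal before/after sketch of the two patterns (an invented example for illustration, not code from this commit):

import logging

logger = logging.getLogger(__name__)


def read_config_before(path: str) -> str:
    try:
        with open(path) as f:
            return f.read()
    except OSError as e:
        # TRY400: logger.error does not record the traceback unless exc_info is passed
        logger.error("failed to read %s", path)
        # TRY201: naming the caught exception is redundant; it also adds this frame to the traceback
        raise e


def read_config_after(path: str) -> str:
    try:
        with open(path) as f:
            return f.read()
    except OSError:
        # logger.exception logs at ERROR level and attaches the active traceback
        logger.exception("failed to read %s", path)
        # a bare raise re-raises the exception currently being handled
        raise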

View File

@@ -54,18 +54,18 @@ def application(environ, start_response):
response = connection.getresponse()
statj = json.loads(response.read())
-except Exception as e:
+except Exception:
if retries > 0:
retries -= 1
time.sleep(1)
return application(environ, start_response)
else:
-raise e
+raise
finally:
try:
connection.close()
except Exception as e:
-logger.error("Failed to close connection to unit: ", e)
+logger.exception("Failed to close connection to unit: ", e)
UNIT_CONNECTIONS_ACCEPTED_TOTAL.set(statj["connections"]["accepted"])
UNIT_CONNECTIONS_ACTIVE.set(statj["connections"]["active"])

View File

@@ -176,7 +176,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
status=status.HTTP_400_BAD_REQUEST,
)
else:
-raise e
+raise
return self.list(request, *args, **kwargs)
@@ -199,7 +199,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
if len(e.args) > 2:
detail_object = e.args[2]
if not isinstance(detail_object, dict):
-raise e
+raise
return Response(
{
"statusText": e.args[0],
@@ -209,7 +209,7 @@ class BillingViewset(TeamAndOrgViewSetMixin, viewsets.GenericViewSet):
status=status.HTTP_400_BAD_REQUEST,
)
else:
-raise e
+raise
return Response(
{

View File

@@ -134,7 +134,7 @@ def add_minmax_index(table: TablesWithMaterializedColumns, column_name: str):
)
except ServerException as err:
if "index with this name already exists" not in str(err):
-raise err
+raise
return index_name

View File

@@ -111,9 +111,9 @@ class ClickhouseFunnelExperimentResult:
}
significance_code, loss = self.are_results_significant(control_variant, test_variants, probabilities)
-except ValidationError as err:
+except ValidationError:
if validate:
-raise err
+raise
else:
return basic_result_props

View File

@@ -244,9 +244,9 @@ class ClickhouseTrendExperimentResult:
significance_code, p_value = self.are_results_significant(control_variant, test_variants, probabilities)
-except ValidationError as err:
+except ValidationError:
if validate:
-raise err
+raise
else:
return basic_result_props

View File

@@ -160,7 +160,7 @@ class SessionEmbeddingsRunner(ABC):
# we don't want to fail the whole batch if only a single recording fails
except Exception as e:
SESSION_EMBEDDINGS_FAILED.labels(source_type=source_type).inc()
-logger.error(
+logger.exception(
f"embed individual item error",
flow="embeddings",
error=e,
@@ -174,8 +174,8 @@ class SessionEmbeddingsRunner(ABC):
# but we don't swallow errors within the wider task itself
# if something is failing here then we're most likely having trouble with ClickHouse
SESSION_EMBEDDINGS_FATAL_FAILED.labels(source_type=source_type).inc()
-logger.error(f"embed items fatal error", flow="embeddings", error=e, source_type=source_type)
-raise e
+logger.exception(f"embed items fatal error", flow="embeddings", error=e, source_type=source_type)
+raise
def _embed(self, input: str, source_type: str):
token_count = self._num_tokens_for_input(input)
@@ -213,9 +213,9 @@ class SessionEmbeddingsRunner(ABC):
)
SESSION_EMBEDDINGS_WRITTEN_TO_CLICKHOUSE.labels(source_type=source_type).inc(len(embeddings))
except Exception as e:
-logger.error(f"flush embeddings error", flow="embeddings", error=e, source_type=source_type)
+logger.exception(f"flush embeddings error", flow="embeddings", error=e, source_type=source_type)
SESSION_EMBEDDINGS_FAILED_TO_CLICKHOUSE.labels(source_type=source_type).inc(len(embeddings))
-raise e
+raise
class ErrorEmbeddingsPreparation(EmbeddingPreparation):

View File

@@ -87,7 +87,7 @@ def _handle_slack_event(event_payload: Any) -> None:
if unfurls:
try:
slack_integration.client.chat_unfurl(unfurls=unfurls, unfurl_id=unfurl_id, source=source, channel="", ts="")
-except Exception as e:
+except Exception:
# NOTE: This is temporary as a test to understand if the channel and ts are actually required as the docs are not clear
slack_integration.client.chat_unfurl(
@@ -96,7 +96,7 @@ def _handle_slack_event(event_payload: Any) -> None:
channel=channel,
ts=message_ts,
)
-raise e
+raise
def handle_slack_event(payload: Any) -> None:

View File

@@ -196,10 +196,10 @@ def log_event(
future = producer.produce(topic=kafka_topic, data=data, key=partition_key, headers=headers)
statsd.incr("posthog_cloud_plugin_server_ingestion")
return future
-except Exception as e:
+except Exception:
statsd.incr("capture_endpoint_log_event_error")
logger.exception("Failed to produce event to Kafka topic %s with error", kafka_topic)
-raise e
+raise
def _datetime_from_seconds_or_millis(timestamp: str) -> datetime:
@@ -466,7 +466,7 @@ def get_event(request):
except Exception as exc:
capture_exception(exc, {"data": data})
statsd.incr("posthog_cloud_raw_endpoint_failure", tags={"endpoint": "capture"})
-logger.error("kafka_produce_failure", exc_info=exc)
+logger.exception("kafka_produce_failure", exc_info=exc)
return cors_response(
request,
generate_exception_response(
@@ -490,7 +490,7 @@ def get_event(request):
# TODO: return 400 error for non-retriable errors that require the
# client to change their request.
-logger.error(
+logger.exception(
"kafka_produce_failure",
exc_info=exc,
name=exc.__class__.__name__,
@@ -544,7 +544,7 @@ def get_event(request):
except Exception as exc:
capture_exception(exc, {"data": data})
-logger.error("kafka_session_recording_produce_failure", exc_info=exc)
+logger.exception("kafka_session_recording_produce_failure", exc_info=exc)
pass
statsd.incr("posthog_cloud_raw_endpoint_success", tags={"endpoint": "capture"})

View File

@@ -608,7 +608,7 @@ def insert_actors_into_cohort_by_query(cohort: Cohort, query: str, params: dict[
cohort.save(update_fields=["errors_calculating", "last_calculation", "is_calculating"])
except Exception as err:
if settings.DEBUG:
-raise err
+raise
cohort.is_calculating = False
cohort.errors_calculating = F("errors_calculating") + 1
cohort.save(update_fields=["errors_calculating", "is_calculating"])
@@ -733,7 +733,7 @@ def get_cohort_actors_for_feature_flag(cohort_id: int, flag: str, team_id: int,
except Exception as err:
if settings.DEBUG or settings.TEST:
-raise err
+raise
capture_exception(err)

View File

@@ -502,9 +502,9 @@ class DashboardsViewSet(
"dashboard_id": dashboard.pk,
},
)
-except Exception as e:
+except Exception:
dashboard.delete()
-raise e
+raise
return Response(DashboardSerializer(dashboard, context={"view": self, "request": request}).data)

View File

@@ -215,7 +215,7 @@ class EventViewSet(
except Exception as ex:
capture_exception(ex)
-raise ex
+raise
def _get_people(self, query_result: List[dict], team: Team) -> dict[str, Any]: # noqa: UP006
distinct_ids = [event["distinct_id"] for event in query_result]

View File

@@ -425,7 +425,7 @@ class PersonViewSet(TeamAndOrgViewSetMixin, viewsets.ModelViewSet):
"team_id": self.team.id,
},
)
-raise e
+raise
return result

View File

@@ -93,7 +93,7 @@ class QueryViewSet(TeamAndOrgViewSetMixin, PydanticModelMixin, viewsets.ViewSet)
except Exception as e:
self.handle_column_ch_error(e)
capture_exception(e)
-raise e
+raise
@extend_schema(
description="(Experimental)",

View File

@@ -521,7 +521,7 @@ def create_flag_with_survey_errors():
detail=original_detail.replace("feature flags", "surveys"),
code=BEHAVIOURAL_COHORT_FOUND_ERROR_CODE,
)
-raise e
+raise
def nh3_clean_with_allow_list(to_clean: str):

View File

@@ -192,7 +192,7 @@ def run_async_migration_next_op(migration_name: str, migration_instance: Optiona
except Exception as e:
error = f"Exception was thrown while running operation {migration_instance.current_operation_index} : {str(e)}"
-logger.error(
+logger.exception(
"Error running async migration operation",
migration=migration_name,
current_operation_index=migration_instance.current_operation_index,

View File

@@ -116,7 +116,7 @@ class QueryStatusManager:
query_progress["active_cpu_time"] += single_query_progress["active_cpu_time"]
query_status.query_progress = ClickhouseQueryProgress(**query_progress)
except Exception as e:
-logger.error("Clickhouse Status Check Failed", error=e)
+logger.exception("Clickhouse Status Check Failed", error=e)
pass
return query_status
@@ -180,12 +180,12 @@ def execute_process_query(
except (ExposedHogQLError, ExposedCHQueryError) as err: # We can expose the error to the user
query_status.results = None # Clear results in case they are faulty
query_status.error_message = str(err)
-logger.error("Error processing query for team %s query %s: %s", team_id, query_id, err)
-raise err
+logger.exception("Error processing query for team %s query %s: %s", team_id, query_id, err)
+raise
except Exception as err: # We cannot reveal anything about the error
query_status.results = None # Clear results in case they are faulty
-logger.error("Error processing query for team %s query %s: %s", team_id, query_id, err)
-raise err
+logger.exception("Error processing query for team %s query %s: %s", team_id, query_id, err)
+raise
finally:
manager.store_query_status(query_status)

View File

@@ -208,7 +208,7 @@ class HogQLParseTreeConverter(ParseTreeVisitor):
if start is not None and end is not None and e.start is None or e.end is None:
e.start = start
e.end = end
-raise e
+raise
def visitProgram(self, ctx: HogQLParser.ProgramContext):
declarations: list[ast.Declaration] = []

View File

@@ -162,7 +162,7 @@ def execute_hogql_query(
else:
error = "Unknown error"
else:
-raise e
+raise
if clickhouse_sql is not None:
timings_dict = timings.to_dict()
@@ -193,7 +193,7 @@ def execute_hogql_query(
else:
error = "Unknown error"
else:
-raise e
+raise
if debug and error is None: # If the query errored, explain will fail as well.
with timings.measure("explain"):

View File

@@ -29,7 +29,7 @@ class Visitor(Generic[T]):
if e.start is None or e.end is None:
e.start = node.start
e.end = node.end
-raise e
+raise
class TraversingVisitor(Visitor[None]):

View File

@@ -21,7 +21,7 @@ def conversion_to_query_based(insight: "Insight") -> Iterator[None]:
except Exception as e:
set_tag("filter_to_query_todo", True)
capture_exception(e)
-raise e
+raise
try:
yield

View File

@@ -320,7 +320,7 @@ def get_query_runner_or_none(
except ValueError as e:
if "Can't get a runner for an unknown" in str(e):
return None
-raise e
+raise
Q = TypeVar("Q", bound=RunnableQueryNode)

View File

@@ -51,5 +51,5 @@ def pushed_metrics_registry(job_name: str):
if settings.PROM_PUSHGATEWAY_ADDRESS:
_push(settings.PROM_PUSHGATEWAY_ADDRESS, job=job_name, registry=registry)
except Exception as err:
-logger.error("push_to_gateway", target=settings.PROM_PUSHGATEWAY_ADDRESS, exception=err)
+logger.exception("push_to_gateway", target=settings.PROM_PUSHGATEWAY_ADDRESS, exception=err)
capture_exception(err)

View File

@@ -395,7 +395,7 @@ def log_activity(
if settings.TEST:
# Re-raise in tests, so that we can catch failures in test suites - but keep quiet in production,
# as we currently don't treat activity logs as critical
-raise e
+raise
@dataclasses.dataclass(frozen=True)

View File

@@ -297,7 +297,7 @@ class Cohort(models.Model):
self.save()
except Exception as err:
if settings.DEBUG:
-raise err
+raise
self.is_calculating = False
self.errors_calculating = F("errors_calculating") + 1
self.save()
@@ -339,7 +339,7 @@ class Cohort(models.Model):
self.save()
except Exception as err:
if settings.DEBUG:
-raise err
+raise
self.is_calculating = False
self.errors_calculating = F("errors_calculating") + 1
self.save()

View File

@@ -119,9 +119,9 @@ class FlagsMatcherCache:
team_id=self.team_id
)
return {row.group_type: row.group_type_index for row in group_type_mapping_rows}
-except DatabaseError as err:
+except DatabaseError:
self.failed_to_fetch_flags = True
-raise err
+raise
@cached_property
def group_type_index_to_name(self) -> dict[GroupTypeIndex, GroupTypeName]:
@@ -596,14 +596,14 @@ class FeatureFlagMatcher:
assert len(group_query) == 1, f"Expected 1 group query result, got {len(group_query)}"
all_conditions = {**all_conditions, **group_query[0]}
return all_conditions
-except DatabaseError as e:
+except DatabaseError:
self.failed_to_fetch_conditions = True
-raise e
-except Exception as e:
+raise
+except Exception:
# Usually when a user somehow manages to create an invalid filter, usually via API.
# In this case, don't put db down, just skip the flag.
# Covers all cases like invalid JSON, invalid operator, invalid property name, invalid group input format, etc.
-raise e
+raise
def hashed_identifier(self, feature_flag: FeatureFlag) -> Optional[str]:
"""
@@ -969,7 +969,7 @@ def set_feature_flag_hash_key_overrides(team_id: int, distinct_ids: list[str], h
)
time.sleep(retry_delay)
else:
-raise e
+raise
return False

View File

@@ -53,8 +53,8 @@ class PropertyMixin(BaseParamMixin):
if isinstance(loaded_props, dict) and "type" in loaded_props and "values" in loaded_props:
try:
return self._parse_property_group(loaded_props)
-except ValidationError as e:
-raise e
+except ValidationError:
+raise
except ValueError as e:
raise ValidationError(f"PropertyGroup is unparsable: {e}")
# already a PropertyGroup just return

View File

@@ -219,4 +219,4 @@ def generate_insight_filters_hash(insight: Insight, dashboard: Optional[Dashboar
exception=e,
exc_info=True,
)
-raise e
+raise

View File

@@ -340,9 +340,9 @@ def extract_plugin_code(
index_ts: Optional[str] = None
try:
index_ts = find_index_ts_in_archive(archive, plugin_json_parsed.get("main"))
-except ValueError as e:
+except ValueError:
if frontend_tsx is None and site_ts is None:
-raise e
+raise
return plugin_json, index_ts, frontend_tsx, site_ts

View File

@@ -692,7 +692,7 @@ class SessionRecordingListFromReplaySummary(EventQuery):
except Exception as ex:
# error here weren't making it to sentry, let's be explicit
capture_exception(ex, tags={"team_id": self._team.pk})
-raise ex
+raise
@property
def limit(self):

View File

@@ -58,7 +58,7 @@ def publish_subscription(team_id: str, session_id: str) -> None:
},
tags={"team_id": team_id, "session_id": session_id},
)
-raise e
+raise
def get_realtime_snapshots(team_id: str, session_id: str, attempt_count=0) -> Optional[list[str]]:
@@ -112,4 +112,4 @@ def get_realtime_snapshots(team_id: str, session_id: str, attempt_count=0) -> Op
},
tags={"team_id": team_id, "session_id": session_id},
)
-raise e
+raise

View File

@@ -24,7 +24,7 @@ def _save_converted_content_back_to_storage(converted_content: str, recording: S
return save_recording_with_new_content(recording, converted_content)
except ImportError:
# not running in EE context... shouldn't get here
-logger.error(
+logger.exception(
"attempted_to_save_converted_content_back_to_storage_in_non_ee_context",
recording_id=recording.id,
)

View File

@@ -99,7 +99,7 @@ class ObjectStorage(ObjectStorageClient):
HttpMethod="GET",
)
except Exception as e:
-logger.error("object_storage.get_presigned_url_failed", file_name=file_key, error=e)
+logger.exception("object_storage.get_presigned_url_failed", file_name=file_key, error=e)
capture_exception(e)
return None
@@ -111,7 +111,7 @@ class ObjectStorage(ObjectStorageClient):
else:
return None
except Exception as e:
-logger.error(
+logger.exception(
"object_storage.list_objects_failed",
bucket=bucket,
prefix=prefix,
@@ -133,7 +133,7 @@ class ObjectStorage(ObjectStorageClient):
s3_response = self.aws_client.get_object(Bucket=bucket, Key=key)
return s3_response["Body"].read()
except Exception as e:
-logger.error(
+logger.exception(
"object_storage.read_failed",
bucket=bucket,
file_name=key,
@@ -151,7 +151,7 @@ class ObjectStorage(ObjectStorageClient):
Tagging={"TagSet": [{"Key": k, "Value": v} for k, v in tags.items()]},
)
except Exception as e:
-logger.error("object_storage.tag_failed", bucket=bucket, file_name=key, error=e)
+logger.exception("object_storage.tag_failed", bucket=bucket, file_name=key, error=e)
capture_exception(e)
raise ObjectStorageError("tag failed") from e
@@ -160,7 +160,7 @@ class ObjectStorage(ObjectStorageClient):
try:
s3_response = self.aws_client.put_object(Bucket=bucket, Body=content, Key=key, **(extras or {}))
except Exception as e:
-logger.error(
+logger.exception(
"object_storage.write_failed",
bucket=bucket,
file_name=key,
@@ -181,7 +181,7 @@ class ObjectStorage(ObjectStorageClient):
return len(source_objects)
except Exception as e:
-logger.error(
+logger.exception(
"object_storage.copy_objects_failed",
source_prefix=source_prefix,
target_prefix=target_prefix,

View File

@@ -215,7 +215,7 @@ def get_from_insights_api(exported_asset: ExportedAsset, limit: int, resource: d
response = make_api_call(access_token, body, limit, method, next_url, path)
except HTTPError as e:
if "Query size exceeded" not in e.response.text:
-raise e
+raise
if limit <= CSV_EXPORT_BREAKDOWN_LIMIT_LOW:
break # Already tried with the lowest limit, so return what we have
@@ -398,4 +398,4 @@ def export_tabular(exported_asset: ExportedAsset, limit: Optional[int] = None) -
logger.error("csv_exporter.failed", exception=e, exc_info=True)
EXPORT_FAILED_COUNTER.labels(type="csv").inc()
-raise e
+raise

View File

@@ -111,14 +111,14 @@ def _export_to_png(exported_asset: ExportedAsset) -> None:
os.remove(image_path)
-except Exception as err:
+except Exception:
# Ensure we clean up the tmp file in case anything went wrong
if image_path and os.path.exists(image_path):
os.remove(image_path)
log_error_if_site_url_not_reachable()
-raise err
+raise
def _screenshot_asset(
@@ -137,7 +137,7 @@ def _screenshot_asset(
try:
WebDriverWait(driver, 20).until_not(lambda x: x.find_element_by_class_name("Spinner"))
except TimeoutException:
-logger.error(
+logger.exception(
"image_exporter.timeout",
url_to_render=url_to_render,
wait_for_css_selector=wait_for_css_selector,
@@ -172,7 +172,7 @@ def _screenshot_asset(
pass
capture_exception(e)
-raise e
+raise
finally:
if driver:
driver.quit()
@@ -217,4 +217,4 @@ def export_image(exported_asset: ExportedAsset) -> None:
logger.error("image_exporter.failed", exception=e, exc_info=True)
EXPORT_FAILED_COUNTER.labels(type="image").inc()
-raise e
+raise

View File

@@ -79,4 +79,4 @@ def poll_query_performance() -> None:
manager.update_clickhouse_query_progresses(list(results_group))
except Exception as e:
-logger.error("Clickhouse Status Check Failed", error=e)
+logger.exception("Clickhouse Status Check Failed", error=e)

View File

@@ -277,7 +277,7 @@ def invalid_web_replays() -> None:
count = results[0][i]
gauge.set(count)
except Exception as e:
-logger.error("Failed to run invalid web replays task", error=e, inc_exc_info=True)
+logger.exception("Failed to run invalid web replays task", error=e, inc_exc_info=True)
KNOWN_CELERY_TASK_IDENTIFIERS = {
@@ -615,7 +615,7 @@ def poll_query_performance(last_known_run_time_ns: int) -> None:
poll_query_performance_nontask()
except Exception as e:
-logger.error("Poll query performance failed", error=e)
+logger.exception("Poll query performance failed", error=e)
elapsed_ns = time.time_ns() - start_time_ns
if elapsed_ns > Polling.TIME_BETWEEN_RUNS_NANOSECONDS:
@@ -645,7 +645,7 @@ def start_poll_query_performance() -> None:
poll_query_performance.delay(last_run_start_time_ns)
except Exception as e:
-logger.error("Restarting poll query performance because of an error", error=e)
+logger.exception("Restarting poll query performance because of an error", error=e)
poll_query_performance.delay(last_run_start_time_ns)

View File

@@ -309,7 +309,7 @@ def send_report_to_billing_service(org_id: str, report: dict[str, Any]) -> None:
BillingManager(license).update_org_details(organization, response_data)
except Exception as err:
-logger.error(f"UsageReport failed sending to Billing for organization: {organization.id}: {err}")
+logger.exception(f"UsageReport failed sending to Billing for organization: {organization.id}: {err}")
capture_exception(err)
pha_client = Client("sTMFPsFhdP1Ssg")
capture_event(
@@ -318,7 +318,7 @@ def send_report_to_billing_service(org_id: str, report: dict[str, Any]) -> None:
org_id,
{"err": str(err)},
)
-raise err
+raise
def capture_event(
@@ -604,7 +604,7 @@ def capture_report(
capture_event(pha_client, capture_event_name, org_id, full_report_dict, timestamp=at_date)
logger.info(f"UsageReport sent to PostHog for organization {org_id}")
except Exception as err:
-logger.error(
+logger.exception(
f"UsageReport sent to PostHog for organization {org_id} failed: {str(err)}",
)
capture_event(pha_client, f"{capture_event_name} failure", org_id, {"error": str(err)})
@@ -960,4 +960,4 @@ def send_all_org_usage_reports(
logger.debug(f"Sending usage reports to PostHog and Billing took {time_since.total_seconds()} seconds.") # noqa T201
except Exception as err:
capture_exception(err)
-raise err
+raise

View File

@@ -36,14 +36,14 @@ class _SentryActivityInboundInterceptor(ActivityInboundInterceptor):
set_tag("temporal.workflow.run_id", activity_info.workflow_run_id)
try:
return await super().execute_activity(input)
-except Exception as e:
+except Exception:
if len(input.args) == 1 and is_dataclass(input.args[0]):
team_id = getattr(input.args[0], "team_id", None)
if team_id:
set_tag("team_id", team_id)
set_context("temporal.activity.info", activity.info().__dict__)
capture_exception()
-raise e
+raise
class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
@@ -59,7 +59,7 @@ class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
set_tag("temporal.workflow.run_id", workflow_info.run_id)
try:
return await super().execute_workflow(input)
-except Exception as e:
+except Exception:
if len(input.args) == 1 and is_dataclass(input.args[0]):
team_id = getattr(input.args[0], "team_id", None)
if team_id:
@@ -69,7 +69,7 @@ class _SentryWorkflowInterceptor(WorkflowInboundInterceptor):
if not workflow.unsafe.is_replaying():
with workflow.unsafe.sandbox_unrestricted():
capture_exception()
-raise e
+raise
class SentryInterceptor(Interceptor):

View File

@@ -145,10 +145,10 @@ class ExternalDataJobWorkflow(PostHogWorkflow):
),
)
except Exception as e:
-logger.error(
+logger.exception(
f"External data job failed on create_external_data_job_model_activity for {inputs.external_data_source_id} with error: {e}"
)
-raise e
+raise
update_inputs = UpdateExternalDataJobStatusInputs(
id=run_id,
@@ -192,13 +192,13 @@ class ExternalDataJobWorkflow(PostHogWorkflow):
update_inputs.status = ExternalDataJob.Status.CANCELLED
else:
update_inputs.status = ExternalDataJob.Status.FAILED
-logger.error(
+logger.exception(
f"External data job failed for external data source {inputs.external_data_source_id} with error: {e.cause}"
)
update_inputs.latest_error = str(e.cause)
raise
except Exception as e:
-logger.error(
+logger.exception(
f"External data job failed for external data source {inputs.external_data_source_id} with error: {e}"
)
# Catch all

View File

@@ -134,7 +134,7 @@ def fetch_data(
headers = _get_headers(api_key)
r = requests.get(url, headers=headers, params=params)
else:
-raise e
+raise
# Parse the API response and yield the properties of each result
# Parse the response JSON data

View File

@@ -151,5 +151,5 @@ class DataImportPipeline:
try:
return await asyncio.to_thread(self._run)
except PipelineStepFailed:
-self.logger.error(f"Data import failed for endpoint")
+self.logger.exception(f"Data import failed for endpoint")
raise

View File

@@ -207,7 +207,7 @@ async def validate_schema_and_update_table(
f"Data Warehouse: Could not validate schema for external data job {job.pk}",
exc_info=e,
)
-raise e
+raise
# TODO: figure out data deletes - currently borked right now
# if (

View File

@@ -132,7 +132,7 @@ class DataWarehouseTable(CreatedMetaFields, UUIDModel, DeletedMetaFields):
if safe_expose_ch_error:
self._safe_expose_ch_error(err)
else:
-raise err
+raise
if result is None or isinstance(result, int):
raise Exception("No columns types provided by clickhouse in get_columns")
@@ -165,7 +165,7 @@ class DataWarehouseTable(CreatedMetaFields, UUIDModel, DeletedMetaFields):
if safe_expose_ch_error:
self._safe_expose_ch_error(err)
else:
-raise err
+raise
return result[0][0]

View File

@@ -45,6 +45,8 @@ select = [
"RUF015",
"RUF019",
"T2",
+"TRY201",
+"TRY400",
"UP",
"W",
]
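
With TRY201 and TRY400 added to the `select` list above, the edits in this commit clear the existing violations, and any new ones will fail lint. For a local spot check, an invocation along the lines of `ruff check --select TRY201,TRY400 .` (illustrative; the repository's usual `ruff check` already picks the codes up from pyproject.toml) reports the same findings.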