diff --git a/.gitignore b/.gitignore
index 4dbbed0e6ee..f5323c04190 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ yalc.lock
 .yalc/
 src/config/idl/protos.*
 tmp
+*.0x
\ No newline at end of file
diff --git a/README.md b/README.md
index be709dba275..2c4f8a6ac06 100644
--- a/README.md
+++ b/README.md
@@ -40,44 +40,45 @@ Each one does a single thing. They are listed in the table below, in order of pr
 
 There's a multitude of settings you can use to control the plugin server. Use them as environment variables.
 
-| Name | Description | Default value |
-| ----------------------------- | ----------------------------------------------------------------- | ------------------------------------- |
-| DATABASE_URL | Postgres database URL | `'postgres://localhost:5432/posthog'` |
-| REDIS_URL | Redis store URL | `'redis://localhost'` |
-| BASE_DIR | base path for resolving local plugins | `'.'` |
-| WORKER_CONCURRENCY | number of concurrent worker threads | `0` – all cores |
-| TASKS_PER_WORKER | number of parallel tasks per worker thread | `10` |
-| REDIS_POOL_MIN_SIZE | minimum number of Redis connections to use per thread | `1` |
-| REDIS_POOL_MAX_SIZE | maximum number of Redis connections to use per thread | `3` |
-| SCHEDULE_LOCK_TTL | how many seconds to hold the lock for the schedule | `60` |
-| CELERY_DEFAULT_QUEUE | Celery outgoing queue | `'celery'` |
-| PLUGINS_CELERY_QUEUE | Celery incoming queue | `'posthog-plugins'` |
-| PLUGINS_RELOAD_PUBSUB_CHANNEL | Redis channel for reload events | `'reload-plugins'` |
-| CLICKHOUSE_HOST | ClickHouse host | `'localhost'` |
-| CLICKHOUSE_DATABASE | ClickHouse database | `'default'` |
-| CLICKHOUSE_USER | ClickHouse username | `'default'` |
-| CLICKHOUSE_PASSWORD | ClickHouse password | `null` |
-| CLICKHOUSE_CA | ClickHouse CA certs | `null` |
-| CLICKHOUSE_SECURE | whether to secure ClickHouse connection | `false` |
-| KAFKA_ENABLED | use Kafka instead of Celery to ingest events | `false` |
-| KAFKA_HOSTS | comma-delimited Kafka hosts | `null` |
-| KAFKA_CONSUMPTION_TOPIC | Kafka incoming events topic | `'events_plugin_ingestion'` |
-| KAFKA_CLIENT_CERT_B64 | Kafka certificate in Base64 | `null` |
-| KAFKA_CLIENT_CERT_KEY_B64 | Kafka certificate key in Base64 | `null` |
-| KAFKA_TRUSTED_CERT_B64 | Kafka trusted CA in Base64 | `null` |
-| KAFKA_PRODUCER_MAX_QUEUE_SIZE | Kafka producer batch max size before flushing | `20` |
-| KAFKA_FLUSH_FREQUENCY_MS | Kafka producer batch max duration before flushing | `500` |
-| KAFKA_MAX_MESSAGE_BATCH_SIZE | Kafka producer batch max size in bytes before flushing | `900000` |
-| LOG_LEVEL | minimum log level | `'info'` |
-| SENTRY_DSN | Sentry ingestion URL | `null` |
-| STATSD_HOST | StatsD host - integration disabled if this is not provided | `null` |
-| STATSD_PORT | StatsD port | `8125` |
-| STATSD_PREFIX | StatsD prefix | `'plugin-server.'` |
-| DISABLE_MMDB | whether to disable MMDB IP location capabilities | `false` |
-| INTERNAL_MMDB_SERVER_PORT | port of the internal server used for IP location (0 means random) | `0` |
-| DISTINCT_ID_LRU_SIZE | size of persons distinct ID LRU cache | `10000` |
-| PLUGIN_SERVER_IDLE | whether to disengage the plugin server, e.g. for development | `false` |
-| CAPTURE_INTERNAL_METRICS | whether to capture internal metrics for posthog in posthog | `false` |
+| Name | Description | Default value |
+| ----------------------------- | ------------------------------------------------------------------------------------------------------------------------- | ------------------------------------- |
+| DATABASE_URL | Postgres database URL | `'postgres://localhost:5432/posthog'` |
+| REDIS_URL | Redis store URL | `'redis://localhost'` |
+| BASE_DIR | base path for resolving local plugins | `'.'` |
+| WORKER_CONCURRENCY | number of concurrent worker threads | `0` – all cores |
+| TASKS_PER_WORKER | number of parallel tasks per worker thread | `10` |
+| REDIS_POOL_MIN_SIZE | minimum number of Redis connections to use per thread | `1` |
+| REDIS_POOL_MAX_SIZE | maximum number of Redis connections to use per thread | `3` |
+| SCHEDULE_LOCK_TTL | how many seconds to hold the lock for the schedule | `60` |
+| CELERY_DEFAULT_QUEUE | Celery outgoing queue | `'celery'` |
+| PLUGINS_CELERY_QUEUE | Celery incoming queue | `'posthog-plugins'` |
+| PLUGINS_RELOAD_PUBSUB_CHANNEL | Redis channel for reload events | `'reload-plugins'` |
+| CLICKHOUSE_HOST | ClickHouse host | `'localhost'` |
+| CLICKHOUSE_DATABASE | ClickHouse database | `'default'` |
+| CLICKHOUSE_USER | ClickHouse username | `'default'` |
+| CLICKHOUSE_PASSWORD | ClickHouse password | `null` |
+| CLICKHOUSE_CA | ClickHouse CA certs | `null` |
+| CLICKHOUSE_SECURE | whether to secure ClickHouse connection | `false` |
+| KAFKA_ENABLED | use Kafka instead of Celery to ingest events | `false` |
+| KAFKA_HOSTS | comma-delimited Kafka hosts | `null` |
+| KAFKA_CONSUMPTION_TOPIC | Kafka incoming events topic | `'events_plugin_ingestion'` |
+| KAFKA_CLIENT_CERT_B64 | Kafka certificate in Base64 | `null` |
+| KAFKA_CLIENT_CERT_KEY_B64 | Kafka certificate key in Base64 | `null` |
+| KAFKA_TRUSTED_CERT_B64 | Kafka trusted CA in Base64 | `null` |
+| KAFKA_PRODUCER_MAX_QUEUE_SIZE | Kafka producer batch max size before flushing | `20` |
+| KAFKA_FLUSH_FREQUENCY_MS | Kafka producer batch max duration before flushing | `500` |
+| KAFKA_MAX_MESSAGE_BATCH_SIZE | Kafka producer batch max size in bytes before flushing | `900000` |
+| LOG_LEVEL | minimum log level | `'info'` |
+| SENTRY_DSN | Sentry ingestion URL | `null` |
+| STATSD_HOST | StatsD host - integration disabled if this is not provided | `null` |
+| STATSD_PORT | StatsD port | `8125` |
+| STATSD_PREFIX | StatsD prefix | `'plugin-server.'` |
+| DISABLE_MMDB | whether to disable MMDB IP location capabilities | `false` |
+| INTERNAL_MMDB_SERVER_PORT | port of the internal server used for IP location (0 means random) | `0` |
+| DISTINCT_ID_LRU_SIZE | size of persons distinct ID LRU cache | `10000` |
+| PLUGIN_SERVER_IDLE | whether to disengage the plugin server, e.g. for development | `false` |
+| CAPTURE_INTERNAL_METRICS | whether to capture internal metrics for posthog in posthog | `false` |
+| PISCINA_USE_ATOMICS | corresponds to the piscina useAtomics config option (https://github.com/piscinajs/piscina#constructor-new-piscinaoptions) | `true` |
 
 ## Releasing a new version
 
diff --git a/src/config/config.ts b/src/config/config.ts
index b87606d91a4..4d2cb580cb3 100644
--- a/src/config/config.ts
+++ b/src/config/config.ts
@@ -74,6 +74,7 @@ export function getDefaultConfig(): PluginsServerConfig {
         STALENESS_RESTART_SECONDS: 0,
         CAPTURE_INTERNAL_METRICS: false,
         PLUGIN_SERVER_ACTION_MATCHING: 2,
+        PISCINA_USE_ATOMICS: true,
     }
 }
 
@@ -127,6 +128,8 @@ export function getConfigHelp(): Record<string, string> {
         CAPTURE_INTERNAL_METRICS: 'capture internal metrics for posthog in posthog',
         PLUGIN_SERVER_ACTION_MATCHING:
             'whether plugin server action matching results should be used (transition period setting)',
+        PISCINA_USE_ATOMICS:
+            'corresponds to the piscina useAtomics config option (https://github.com/piscinajs/piscina#constructor-new-piscinaoptions)',
     }
 }
 
diff --git a/src/types.ts b/src/types.ts
index 3ef75374913..bc2490fd63c 100644
--- a/src/types.ts
+++ b/src/types.ts
@@ -97,6 +97,7 @@ export interface PluginsServerConfig extends Record<string, any> {
     STALENESS_RESTART_SECONDS: number
     CAPTURE_INTERNAL_METRICS: boolean
     PLUGIN_SERVER_ACTION_MATCHING: 0 | 1 | 2
+    PISCINA_USE_ATOMICS: boolean
 }
 
 export interface Hub extends PluginsServerConfig {
diff --git a/src/worker/config.ts b/src/worker/config.ts
index 00245545339..fc872056545 100644
--- a/src/worker/config.ts
+++ b/src/worker/config.ts
@@ -28,6 +28,7 @@ export function createConfig(serverConfig: PluginsServerConfig, filename: string
         resourceLimits: {
             stackSizeMb: 10,
         },
+        useAtomics: serverConfig.PISCINA_USE_ATOMICS,
     }
 
     if (serverConfig.WORKER_CONCURRENCY && serverConfig.WORKER_CONCURRENCY > 0) {
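For context on how the new setting is consumed: the options object assembled in `src/worker/config.ts` is handed to Piscina's constructor, which accepts `useAtomics` directly (see the linked Piscina docs). The sketch below is illustrative only; `makePool` and `PoolSettings` are hypothetical names invented for the example and not part of the plugin server, while the constructor options used (`filename`, `concurrentTasksPerWorker`, `minThreads`, `maxThreads`, `useAtomics`) are Piscina's documented API.

```typescript
// Illustrative sketch only: makePool and PoolSettings are hypothetical,
// not the plugin server's actual wiring.
import Piscina from 'piscina'

interface PoolSettings {
    WORKER_CONCURRENCY: number
    TASKS_PER_WORKER: number
    PISCINA_USE_ATOMICS: boolean
}

export function makePool(settings: PoolSettings, workerFilename: string): Piscina {
    return new Piscina({
        filename: workerFilename, // path to the compiled worker entry point
        concurrentTasksPerWorker: settings.TASKS_PER_WORKER,
        // Only pin the thread count when WORKER_CONCURRENCY is set; 0 keeps
        // Piscina's default of one thread per CPU core.
        ...(settings.WORKER_CONCURRENCY > 0
            ? { minThreads: settings.WORKER_CONCURRENCY, maxThreads: settings.WORKER_CONCURRENCY }
            : {}),
        // With useAtomics enabled (Piscina's default), idle workers wait for new tasks
        // via Atomics.wait; PISCINA_USE_ATOMICS exposes that switch as an env variable.
        useAtomics: settings.PISCINA_USE_ATOMICS,
    })
}
```

The trade-off being made configurable: with atomics, task dispatch latency is lower, but a worker blocked in `Atomics.wait` cannot service timers or other event-loop work until its next task arrives, so disabling it can help plugins that rely on background work between tasks.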