# posthog/docker-compose.hobby.yml

#
# `docker-compose` file used ONLY for hobby deployments.
#
# Please take a look at https://posthog.com/docs/self-host/deploy/hobby
# for more info.
#
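# $POSTHOG_APP_TAG, $POSTHOG_SECRET, $SENTRY_DSN and $DOMAIN are expected to be
# set in the environment (e.g. exported by the hobby install script or provided
# via an `.env` file) before running `docker-compose` with this file.
#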
version: '3'
services:
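    # Postgres: the main application database (referenced as DATABASE_URL / PGHOST below).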
    db:
        image: postgres:12-alpine
        restart: on-failure
        environment:
            POSTGRES_USER: posthog
            POSTGRES_DB: posthog
            POSTGRES_PASSWORD: posthog
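    # Redis: cache and task queue broker (REDIS_URL below), capped at 200 MB with LRU eviction.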
    redis:
        image: redis:6.2.7-alpine
        restart: on-failure
        command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
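    # ClickHouse: the analytics database PostHog stores and queries events in.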
    clickhouse:
        #
        # Note: please keep the default version in sync across
        # `posthog` and the `charts-clickhouse` repos
        #
        image: ${CLICKHOUSE_SERVER_IMAGE:-clickhouse/clickhouse-server:22.3}
        restart: on-failure
        depends_on:
            - kafka
            - zookeeper
        volumes:
            - ./posthog/posthog/idl:/idl
            - ./posthog/docker/clickhouse/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
            - ./posthog/docker/clickhouse/config.xml:/etc/clickhouse-server/config.xml
            - ./posthog/docker/clickhouse/users.xml:/etc/clickhouse-server/users.xml
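    # ZooKeeper: coordination service that both Kafka and ClickHouse depend on.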
    zookeeper:
        image: zookeeper:3.7.0
        restart: on-failure
        volumes:
            - zookeeper-datalog:/datalog
            - zookeeper-data:/data
            - zookeeper-logs:/logs
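    # Kafka: event queue sitting between the app/plugin server and ClickHouse.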
    kafka:
        image: bitnami/kafka:2.8.1-debian-10-r99
        restart: on-failure
        depends_on:
            - zookeeper
        environment:
            KAFKA_BROKER_ID: 1001
            KAFKA_CFG_RESERVED_BROKER_MAX_ID: 1001
            KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
            KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
            KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
            ALLOW_PLAINTEXT_LISTENER: 'true'
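    # Celery worker (runs ./bin/docker-worker-celery with the scheduler).
    # The &worker anchor lets the web and asyncmigrationscheck services below
    # reuse this image and environment.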
    worker: &worker
        image: posthog/posthog:$POSTHOG_APP_TAG
        command: ./bin/docker-worker-celery --with-scheduler
        restart: on-failure
        environment:
            SENTRY_DSN: $SENTRY_DSN
            SITE_URL: https://$DOMAIN
            DISABLE_SECURE_SSL_REDIRECT: 'true'
            IS_BEHIND_PROXY: 'true'
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            CLICKHOUSE_HOST: 'clickhouse'
            CLICKHOUSE_DATABASE: 'posthog'
            CLICKHOUSE_SECURE: 'false'
            CLICKHOUSE_VERIFY: 'false'
            KAFKA_URL: 'kafka://kafka'
            REDIS_URL: 'redis://redis:6379/'
            SECRET_KEY: $POSTHOG_SECRET
            PGHOST: db
            PGUSER: posthog
            PGPASSWORD: posthog
            DEPLOYMENT: hobby
        depends_on:
            - db
            - redis
            - clickhouse
            - kafka
            - object_storage
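    # Web app: reuses the worker's image and environment via *worker, but runs
    # the /compose/start script mounted from ./compose instead.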
    web:
        <<: *worker
        command: /compose/start
        restart: on-failure
        volumes:
            - ./compose:/compose
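    # Caddy: reverse proxy in front of the web service, exposed on ports 80/443
    # and configured via the mounted ./Caddyfile.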
    caddy:
        image: caddy
        restart: unless-stopped
        ports:
            - '80:80'
            - '443:443'
        volumes:
            - ./Caddyfile:/etc/caddy/Caddyfile
        depends_on:
            - web
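    # Plugin server: processes incoming events, connecting to Kafka via KAFKA_HOSTS.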
    plugins:
        image: posthog/posthog:$POSTHOG_APP_TAG
        command: ./bin/plugin-server --no-restart-loop
        restart: on-failure
        environment:
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            KAFKA_HOSTS: 'kafka:9092'
            REDIS_URL: 'redis://redis:6379/'
            CLICKHOUSE_HOST: 'clickhouse'
        depends_on:
            - db
            - redis
            - clickhouse
            - kafka
            - object_storage
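    # MinIO: S3-compatible object storage, backed by the object_storage volume below.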
    object_storage:
        image: minio/minio
        restart: on-failure
        volumes:
            - object_storage:/data
        environment:
            MINIO_ROOT_USER: object_storage_root_user
            MINIO_ROOT_PASSWORD: object_storage_root_password
        entrypoint: sh
        command: -c 'mkdir -p /data/posthog && minio server --address ":19000" --console-address ":19001" /data' # create the 'posthog' bucket before starting the service
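    # One-off check for pending async migrations; with scale: 0 it is not
    # started by a plain `docker-compose up` and has to be invoked explicitly.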
    asyncmigrationscheck:
        <<: *worker
        command: python manage.py run_async_migrations --check
        restart: 'no'
        scale: 0
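# Named volumes so ZooKeeper and MinIO data survive container recreation.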
volumes:
    zookeeper-data:
    zookeeper-datalog:
    zookeeper-logs:
    object_storage: