posthog/docker-compose.hobby.yml
Daniel 497f5f678c
fix: add persistent volumes to docker-compose-hobby (#11256)
* Add persistent volumes to docker-compose-hobby

Per the discussion in https://github.com/PostHog/posthog/issues/10792, implemented the "Kessel Fix" in less than a parsec.

* Add warning text to user prompts to avoid data loss

Following discussion with the PH team, we wanted to give users the information needed to properly manage the data in their installation and avoid potential data loss. The volume pattern is sketched below.
2022-08-12 15:31:24 +01:00
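A minimal sketch of the persistent-volume pattern this change introduces, abbreviated from the full file below: a named volume is mounted into the `db` service and declared once at the top level, so the Postgres data survives container recreation and upgrades.

services:
    db:
        image: postgres:12-alpine
        volumes:
            - postgres-data:/var/lib/postgresql/data   # named volume keeps data across container recreation
volumes:
    postgres-data:   # declared at the top level; removed only with `docker-compose down -v` or `docker volume rm`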


#
# `docker-compose` file used ONLY for hobby deployments.
#
# Please take a look at https://posthog.com/docs/self-host/deploy/hobby
# for more info.
#
version: '3'
services:
    db:
        image: postgres:12-alpine
        restart: on-failure
        environment:
            POSTGRES_USER: posthog
            POSTGRES_DB: posthog
            POSTGRES_PASSWORD: posthog
        volumes:
            - postgres-data:/var/lib/postgresql/data
    redis:
        image: redis:6.2.7-alpine
        restart: on-failure
        command: redis-server --maxmemory-policy allkeys-lru --maxmemory 200mb
    clickhouse:
        #
        # Note: please keep the default version in sync across
        # `posthog` and the `charts-clickhouse` repos
        #
        image: ${CLICKHOUSE_SERVER_IMAGE:-clickhouse/clickhouse-server:22.3}
        restart: on-failure
        depends_on:
            - kafka
            - zookeeper
        volumes:
            - ./posthog/posthog/idl:/idl
            - ./posthog/docker/clickhouse/docker-entrypoint-initdb.d:/docker-entrypoint-initdb.d
            - ./posthog/docker/clickhouse/config.xml:/etc/clickhouse-server/config.xml
            - ./posthog/docker/clickhouse/users.xml:/etc/clickhouse-server/users.xml
            - clickhouse-data:/var/lib/clickhouse
    zookeeper:
        image: zookeeper:3.7.0
        restart: on-failure
        volumes:
            - zookeeper-datalog:/datalog
            - zookeeper-data:/data
            - zookeeper-logs:/logs
    kafka:
        image: bitnami/kafka:2.8.1-debian-10-r99
        restart: on-failure
        depends_on:
            - zookeeper
        environment:
            KAFKA_BROKER_ID: 1001
            KAFKA_CFG_RESERVED_BROKER_MAX_ID: 1001
            KAFKA_CFG_LISTENERS: PLAINTEXT://:9092
            KAFKA_CFG_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092
            KAFKA_CFG_ZOOKEEPER_CONNECT: zookeeper:2181
            ALLOW_PLAINTEXT_LISTENER: 'true'
    worker: &worker
        image: posthog/posthog:$POSTHOG_APP_TAG
        command: ./bin/docker-worker-celery --with-scheduler
        restart: on-failure
        environment:
            SENTRY_DSN: $SENTRY_DSN
            SITE_URL: https://$DOMAIN
            DISABLE_SECURE_SSL_REDIRECT: 'true'
            IS_BEHIND_PROXY: 'true'
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            CLICKHOUSE_HOST: 'clickhouse'
            CLICKHOUSE_DATABASE: 'posthog'
            CLICKHOUSE_SECURE: 'false'
            CLICKHOUSE_VERIFY: 'false'
            KAFKA_URL: 'kafka://kafka'
            REDIS_URL: 'redis://redis:6379/'
            SECRET_KEY: $POSTHOG_SECRET
            PGHOST: db
            PGUSER: posthog
            PGPASSWORD: posthog
            DEPLOYMENT: hobby
        depends_on:
            - db
            - redis
            - clickhouse
            - kafka
            - object_storage
    web:
        <<: *worker
        command: /compose/start
        restart: on-failure
        volumes:
            - ./compose:/compose
    caddy:
        image: caddy
        restart: unless-stopped
        ports:
            - '80:80'
            - '443:443'
        volumes:
            - ./Caddyfile:/etc/caddy/Caddyfile
        depends_on:
            - web
    plugins:
        image: posthog/posthog:$POSTHOG_APP_TAG
        command: ./bin/plugin-server --no-restart-loop
        restart: on-failure
        environment:
            DATABASE_URL: 'postgres://posthog:posthog@db:5432/posthog'
            KAFKA_HOSTS: 'kafka:9092'
            REDIS_URL: 'redis://redis:6379/'
            CLICKHOUSE_HOST: 'clickhouse'
        depends_on:
            - db
            - redis
            - clickhouse
            - kafka
            - object_storage
    object_storage:
        image: minio/minio
        restart: on-failure
        volumes:
            - object_storage:/data
        environment:
            MINIO_ROOT_USER: object_storage_root_user
            MINIO_ROOT_PASSWORD: object_storage_root_password
        entrypoint: sh
        command: -c 'mkdir -p /data/posthog && minio server --address ":19000" --console-address ":19001" /data' # create the 'posthog' bucket before starting the service
    asyncmigrationscheck:
        <<: *worker
        command: python manage.py run_async_migrations --check
        restart: 'no'
        scale: 0

volumes:
    zookeeper-data:
    zookeeper-datalog:
    zookeeper-logs:
    object_storage:
    postgres-data:
    clickhouse-data:
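For reference, `worker: &worker` above defines a standard YAML anchor, and `<<: *worker` in the `web` and `asyncmigrationscheck` services is the YAML merge key: those services inherit all of the worker's keys (image, environment, depends_on, and so on) and then override individual ones. A minimal sketch of that mechanism, using hypothetical service names and values that are not part of this deployment:

# YAML anchor/merge sketch (hypothetical, for illustration only)
base: &base                           # anchor: name this mapping `base`
    image: posthog/posthog:latest     # hypothetical tag
    restart: on-failure
derived:
    <<: *base                         # merge key: copy every key from `base`...
    restart: 'no'                     # ...then override the ones that differ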