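# Local development stack for the livestream service: Postgres, Redis,
# Redpanda (a Kafka API-compatible broker), and Redpanda Console.
# Typical usage (assumed, not specified in this file): `docker compose up -d`.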
version: "3.8"

services:
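  # Postgres backing the SQL lookups (token and person resolution); the service
  # points at it via postgres.url in configs.example. Credentials are dev-only.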
  postgres:
    image: postgres:16-alpine
    restart: always
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: liveevents
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 5s

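  # Redis on its default port; no auth or custom config, local development only.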
  redis:
    image: redis:alpine
    restart: always
    ports:
      - "6379:6379"

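  # Redpanda provides the Kafka API. Two listeners are advertised below:
  # containers on this compose network reach the broker at redpanda:29092 (PLAINTEXT),
  # while clients on the host use localhost:9092 (OUTSIDE).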
  redpanda:
    image: vectorized/redpanda:v23.2.17
    command:
      - redpanda start
      - --smp 1
      - --overprovisioned
      - --node-id 0
      - --kafka-addr PLAINTEXT://0.0.0.0:29092,OUTSIDE://0.0.0.0:9092
      - --advertise-kafka-addr PLAINTEXT://redpanda:29092,OUTSIDE://localhost:9092
      - --pandaproxy-addr 0.0.0.0:8082
      - --advertise-pandaproxy-addr localhost:8082
    ports:
      - 8081:8081
      - 8082:8082
      - 9092:9092
      - 29092:29092

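  # Redpanda Console UI. Its config is passed in via the CONSOLE_CONFIG_FILE
  # environment variable and written to /tmp/config.yml by the entrypoint at startup.
  # Note: the "datagen" connect cluster points at http://connect:8083, a service
  # not defined in this file.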
  console:
    image: docker.redpanda.com/redpandadata/console:v2.3.8
    restart: on-failure
    entrypoint: /bin/sh
    command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console"
    environment:
      CONFIG_FILEPATH: /tmp/config.yml
      CONSOLE_CONFIG_FILE: |
        kafka:
          brokers: ["redpanda:29092"]
          schemaRegistry:
            enabled: true
            urls: ["http://redpanda:8081"]
        connect:
          enabled: true
          clusters:
            - name: datagen
              url: http://connect:8083
    ports:
      - "8088:8088"
    depends_on:
      - redpanda
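
# Host-side endpoints once the stack is up (per the port mappings above,
# assuming default Docker networking):
#   Kafka:    localhost:9092
#   Postgres: postgres://postgres:postgres@localhost:5432/liveevents
#   Redis:    localhost:6379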