name: E2E CI

on:
    pull_request:
    push:
        branches:
            - master

concurrency:
    group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
    cancel-in-progress: true

jobs:
    build:
        name: Docker image build
        runs-on: ubuntu-latest
        outputs:
            docker_image_id: ${{ steps.docker_build.outputs.imageid }}
        permissions:
            contents: read
            id-token: write
        steps:
            - name: Checkout PR branch
              uses: actions/checkout@v3

            # As ghcr.io complains if the image name has upper case letters, we
            # use this action to ensure we get a lower case version. See
            # https://github.com/docker/build-push-action/issues/237#issuecomment-848673650
            # for more details
            - name: Docker image metadata
              id: meta
              uses: docker/metadata-action@v4
              with:
                  images: ghcr.io/posthog/posthog/posthog
                  tags: |
                      type=ref,event=pr
                      type=sha,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
                      type=raw,value=master,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v2

            - name: Set up Depot CLI
              uses: depot/setup-action@v1

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v2

            - name: Login to GitHub Container Registry
              uses: docker/login-action@v2
              with:
                  registry: ghcr.io
                  username: ${{ github.actor }}
                  password: ${{ secrets.GITHUB_TOKEN }}

            - name: Build
              id: docker_build
              uses: depot/build-push-action@v1
              with:
                  tags: ${{ steps.meta.outputs.tags }}
                  labels: ${{ steps.meta.outputs.labels }}
                  cache-from: type=gha
                  cache-to: type=gha,mode=max
                  outputs: |
                      type=docker,dest=/tmp/posthog-image.tar
                  project: 1stsk4xt19 # posthog-cloud project
                  buildx-fallback: true # if the Depot builder is unavailable, fall back to local buildx

            - name: Upload docker image
              uses: actions/upload-artifact@v3
              with:
                  name: docker-image
                  path: /tmp/posthog-image.tar

            - name: Output image info including size
              # Output the size of the local tar file, to give some idea of how
              # code changes affect Docker image size.
              run: |
                  image_size_bytes=$(stat -c "%s" /tmp/posthog-image.tar)
                  image_name=$(echo '${{ steps.docker_build.outputs.metadata }}' | jq -r '."image.name"')
                  echo "### Build info" >> $GITHUB_STEP_SUMMARY
                  echo "Image size: $(numfmt --to=iec --suffix=B --format="%.2f" $image_size_bytes)" >> $GITHUB_STEP_SUMMARY
                  echo "Image name: $image_name" >> $GITHUB_STEP_SUMMARY
                  echo "image_size=$image_size_bytes" >> $GITHUB_ENV
                  echo "image_name=$image_name" >> $GITHUB_ENV
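            # For reference, the `metadata` output parsed above is a JSON object
            # roughly of the form (illustrative values, not an exact payload):
            #   {"image.name": "ghcr.io/posthog/posthog/posthog:pr-1234", "containerimage.digest": "sha256:..."}
            # so `jq -r '."image.name"'` extracts the fully qualified image tag.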
            - name: Report image size to PostHog
              if: github.repository == 'PostHog/posthog'
              uses: PostHog/posthog-github-action@v0.1
              with:
                  posthog-token: ${{ secrets.POSTHOG_API_TOKEN }}
                  event: 'posthog-ci-docker-image-built'
                  properties: '{"imageSize": "${{ env.image_size }}", "imageName": "${{ env.image_name }}"}'

    posthog_cloud:
        name: PostHog Cloud image build
        # Only run on non-external PRs. We could make this work on external PRs
        # too, but that would be an improvement on what we currently do, so
        # it's left as a TODO.
        # TODO: test the Cloud build on external contributions as well.
        if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == 'PostHog/posthog' }}
        runs-on: ubuntu-latest
        needs: [build]
        steps:
            - name: Checkout PR branch
              uses: actions/checkout@v3

            - uses: actions/download-artifact@v3
              with:
                  name: docker-image
                  path: /tmp/

            - name: Load the non-cloud image
              id: load
              run: |
                  docker load < /tmp/posthog-image.tar
                  echo "docker_image_digest=$(docker image inspect ${{ needs.build.outputs.docker_image_id }} -f '{{ index .RepoTags 0 }}')" >> $GITHUB_OUTPUT

            - name: Configure AWS credentials
              uses: aws-actions/configure-aws-credentials@v1
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: us-east-1

            - name: Login to Amazon ECR
              id: login-ecr
              uses: aws-actions/amazon-ecr-login@v1

            - name: Login to GitHub Container Registry
              uses: docker/login-action@v2
              with:
                  registry: ghcr.io
                  username: ${{ github.actor }}
                  password: ${{ secrets.GITHUB_TOKEN }}

            - name: Fetch posthog-cloud
              run: |
                  mkdir cloud/
                  cd cloud/
                  curl -u posthog-bot:${{ secrets.POSTHOG_BOT_GITHUB_TOKEN }} -L https://github.com/posthog/posthog-cloud/tarball/master | tar --strip-components=1 -xz --

            - name: Docker image metadata
              id: meta
              uses: docker/metadata-action@v4
              with:
                  images: ${{ steps.login-ecr.outputs.registry }}/posthog-cloud
                  tags: |
                      type=ref,event=pr
                      type=sha,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
                      type=raw,value=master,enable=${{ github.ref == format('refs/heads/{0}', 'master') }}
                      type=raw,value=${{ github.sha }},enable=${{ github.ref == format('refs/heads/{0}', 'master') }}

            # We also want to use cache-from when building, and we include the
            # master tag as well so we pull in the master branch image for
            # caching. This creates a scope similar to the GitHub cache
            # action's scoping.
            - name: Docker cache-from/cache-to metadata
              id: meta-for-cache
              uses: docker/metadata-action@v4
              with:
                  images: ${{ steps.login-ecr.outputs.registry }}/posthog-cloud
                  tags: |
                      type=raw,value=master
                      type=ref,event=pr

            - name: Build
              id: docker_build
              uses: docker/build-push-action@v2
              with:
                  cache-from: ${{ steps.meta-for-cache.outputs.tags }}
                  tags: ${{ steps.meta.outputs.tags }}
                  labels: ${{ steps.meta.outputs.labels }}
                  push: true
                  # load: true # NOTE: this option is not compatible with `push: true`
                  # NOTE: we use inline caching as suggested here:
                  # https://github.com/docker/build-push-action/blob/master/docs/advanced/cache.md#inline-cache
                  # It notes that inline caching doesn't support mode=max, but
                  # we're not removing any layers, so it should be fine.
                  cache-to: type=inline
                  file: Dockerfile.cloud
                  context: cloud
                  # Here we use the non-Cloud image and extend from it with the
                  # Cloud additions
                  build-args: |
                      BASE_IMAGE=${{ steps.load.outputs.docker_image_digest }}
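            # For reference: despite its name, `steps.load.outputs.docker_image_digest`
            # holds the first repo tag of the loaded image (e.g.
            # ghcr.io/posthog/posthog/posthog:pr-1234). Dockerfile.cloud is
            # expected to consume it roughly like so (a sketch; the real file
            # lives in posthog-cloud):
            #
            #   ARG BASE_IMAGE
            #   FROM ${BASE_IMAGE}
            #   # ... Cloud-only additions layered on top ...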
            - name: Trigger PostHog Cloud deployment
              if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' }}
              uses: mvasigh/dispatch-action@main
              with:
                  # TODO: find a way to avoid using a personal access token.
                  # One option: push something to SQS (using WebIdentity), with
                  # a Lambda function triggering the workflow via webhook.
                  token: ${{ secrets.POSTHOG_CLOUD_ACCESS_TOKEN }}
                  repo: posthog-cloud-infra
                  owner: PostHog
                  event_type: posthog_cloud_build
                  message: |
                      {
                          "image_tag": "${{ github.sha }}",
                          "context": ${{ toJson(github) }}
                      }

    # Job that lists and chunks spec file names and caches node modules
    cypress_prep:
        name: Cypress preparation
        runs-on: ubuntu-latest
        timeout-minutes: 30
        outputs:
            specs: ${{ steps.set-specs.outputs.specs }}
        steps:
            - uses: actions/checkout@v3
            - id: set-specs
              # List cypress/e2e and produce a JSON array of the files, in chunks
              run: echo "specs=$(ls cypress/e2e/* | jq --slurp --raw-input -c 'split("\n")[:-1] | _nwise(3) | join("\n")' | jq --slurp -c .)" >> $GITHUB_OUTPUT
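    # For reference, the `specs` output above is a JSON array of newline-joined
    # chunks of up to 3 spec paths each, e.g. (illustrative file names):
    #   ["cypress/e2e/a.cy.ts\ncypress/e2e/b.cy.ts\ncypress/e2e/c.cy.ts", "cypress/e2e/d.cy.ts"]
    # The matrix below fans out one Cypress job per chunk.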
    cypress:
        name: Cypress E2E tests (${{ strategy.job-index }})
        if: ${{ github.ref != 'refs/heads/master' }} # Don't run on master, where we only care about the node_modules cache
        runs-on: ubuntu-latest
        timeout-minutes: 30
        needs: [build, cypress_prep]
        strategy:
            # When one test fails, DO NOT cancel the other containers, as there
            # may be other spec failures we want to know about.
            fail-fast: false
            matrix:
                specs: ${{ fromJson(needs.cypress_prep.outputs.specs) }}
        steps:
            - name: Checkout
              uses: actions/checkout@v3

            - name: Install pnpm
              uses: pnpm/action-setup@v2
              with:
                  version: 7.x.x

            - name: Set up Node.js
              uses: actions/setup-node@v3
              with:
                  node-version: 18

            - name: Get pnpm cache directory path
              id: pnpm-cache-dir
              run: echo "PNPM_STORE_PATH=$(pnpm store path)" >> $GITHUB_OUTPUT

            - name: Get cypress cache directory path
              id: cypress-cache-dir
              run: echo "CYPRESS_BIN_PATH=$(npx cypress cache path)" >> $GITHUB_OUTPUT

            - uses: actions/cache@v3
              id: pnpm-cache
              with:
                  path: |
                      ${{ steps.pnpm-cache-dir.outputs.PNPM_STORE_PATH }}
                      ${{ steps.cypress-cache-dir.outputs.CYPRESS_BIN_PATH }}
                  key: ${{ runner.os }}-pnpm-cypress-${{ hashFiles('**/pnpm-lock.yaml') }}
                  restore-keys: |
                      ${{ runner.os }}-pnpm-cypress-

            - name: Install package.json dependencies with pnpm
              run: pnpm install --frozen-lockfile

            - name: Stop/Start stack with Docker Compose
              run: |
                  docker compose -f docker-compose.dev.yml down
                  docker compose -f docker-compose.dev.yml up -d

            - name: Wait for ClickHouse
              run: ./bin/check_kafka_clickhouse_up

            - name: Setup env
              run: |
                  cat <<EOT >> .env
                  SECRET_KEY=6b01eee4f945ca25045b5aab440b953461faf08693a9abbf1166dc7c6b9772da
                  REDIS_URL=redis://localhost
                  DATABASE_URL=postgres://posthog:posthog@localhost:5432/posthog
                  KAFKA_URL=kafka://kafka:9092
                  DISABLE_SECURE_SSL_REDIRECT=1
                  SECURE_COOKIES=0
                  OPT_OUT_CAPTURE=1
                  SELF_CAPTURE=0
                  E2E_TESTING=1
                  EMAIL_HOST=email.test.posthog.net
                  SITE_URL=http://localhost:8000
                  NO_RESTART_LOOP=1
                  CLICKHOUSE_SECURE=0
                  OBJECT_STORAGE_ENABLED=1
                  OBJECT_STORAGE_ENDPOINT=http://localhost:19000
                  OBJECT_STORAGE_ACCESS_KEY_ID=object_storage_root_user
                  OBJECT_STORAGE_SECRET_ACCESS_KEY=object_storage_root_password
                  EOT

            - uses: actions/download-artifact@v3
              with:
                  name: docker-image
                  path: /tmp/

            - name: Boot PostHog
              env:
                  DOCKER_IMAGE_ID: ${{ needs.build.outputs.docker_image_id }}
              run: |
                  mkdir -p /tmp/logs
                  docker load < /tmp/posthog-image.tar
                  DOCKER_RUN="docker run --rm --network host --add-host kafka:127.0.0.1 --env-file .env $DOCKER_IMAGE_ID"
                  $DOCKER_RUN ./bin/migrate
                  $DOCKER_RUN python manage.py setup_dev
                  $DOCKER_RUN ./bin/docker-worker &> /tmp/logs/worker.txt &
                  $DOCKER_RUN ./bin/docker-server &> /tmp/logs/server.txt &

            - name: Cypress run
              uses: cypress-io/github-action@v5
              with:
                  config-file: cypress.e2e.config.ts
                  config: retries=2
                  spec: ${{ matrix.specs }}
                  install: false

            - name: Archive test screenshots
              uses: actions/upload-artifact@v3
              with:
                  name: screenshots
                  path: cypress/screenshots
              if: ${{ failure() }}

            - name: Archive test downloads
              uses: actions/upload-artifact@v3
              with:
                  name: downloads
                  path: cypress/downloads
              if: ${{ failure() }}

            - name: Archive test videos
              uses: actions/upload-artifact@v3
              with:
                  name: videos
                  path: cypress/videos
              if: ${{ failure() }}

            - name: Archive accessibility violations
              uses: actions/upload-artifact@v3
              with:
                  name: accessibility-violations
                  path: '**/a11y/'
                  if-no-files-found: 'ignore'

            - name: Show logs on failure
              # Upload the logs as an artifact here, as the output is likely too
              # large to display inline in the action
              uses: actions/upload-artifact@v3
              with:
                  name: logs
                  path: /tmp/logs
              if: ${{ failure() }}
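            # For local debugging, a single matrix chunk can be reproduced
            # roughly as follows (a sketch, assuming the dev stack from
            # docker-compose.dev.yml is up, with a hypothetical spec name):
            #   pnpm install --frozen-lockfile
            #   npx cypress run --config-file cypress.e2e.config.ts \
            #       --config retries=2 --spec "cypress/e2e/example.cy.ts"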