posthog/plugin-server/jest.setup.fetch-mock.js
Harry Waye cd82caab01
feat(ingestion): remove Graphile worker as initial ingest dependency (#12075)
* feat(ingestion): remove Graphile worker as initial ingest dependency

At the moment, if the Graphile enqueuing of an anonymous event fails,
e.g. because the database it uses to store scheduling information is
down, we push the event to the Dead Letter Queue and do nothing further
with it.

Here, instead of sending the event directly to the DB, we first push it
to a Kafka topic, the `anonymous_events_buffer`, which is then consumed
and committed to the Graphile database. This means that if the Graphile
DB goes down but later comes back up, we end up with the same results
as if it had always been up*

(*) not entirely true, as what is ingested also depends on the timing
of other events being ingested
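
Conceptually, the intended flow is roughly the sketch below — a minimal
illustration using kafkajs, with hypothetical helper names
(`emitToBuffer`, `enqueueGraphileJob`), not the actual implementation
(see `anonymous-event-buffer-consumer.ts` for that):

```ts
import { Kafka } from 'kafkajs'

const kafka = new Kafka({ clientId: 'plugin-server', brokers: ['localhost:9092'] })
const producer = kafka.producer() // assume producer.connect() runs at startup

// Ingestion side: produce the anonymous event to the buffer topic instead
// of writing to the Graphile DB directly.
export async function emitToBuffer(event: Record<string, unknown>, processAt: Date): Promise<void> {
    await producer.send({
        topic: 'anonymous_events_buffer',
        messages: [{ value: JSON.stringify({ eventPayload: event, processAt }) }],
    })
}

// Consumer side: enqueue each buffered event into Graphile. Offsets are only
// committed once the handler succeeds, so if the Graphile DB is down the
// messages stay in Kafka and are retried when it recovers.
export async function startBufferConsumer(
    enqueueGraphileJob: (payload: unknown, runAt: Date) => Promise<void>
): Promise<void> {
    const consumer = kafka.consumer({ groupId: 'anonymous-event-buffer' })
    await consumer.connect()
    await consumer.subscribe({ topics: ['anonymous_events_buffer'] })
    await consumer.run({
        eachMessage: async ({ message }) => {
            const { eventPayload, processAt } = JSON.parse(message.value!.toString())
            await enqueueGraphileJob(eventPayload, new Date(processAt))
        },
    })
}
```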

* narrow typing for anonymous event consumer

* fix types import

* chore: add comment re todos for consumer

* wip

* wip

* wip

* wip

* wip

* wip

* fix typing

* Include error message in warning log

* Update plugin-server/jest.setup.fetch-mock.js

Co-authored-by: Guido Iaquinti <4038041+guidoiaquinti@users.noreply.github.com>

* Update plugin-server/src/main/ingestion-queues/anonymous-event-buffer-consumer.ts

Co-authored-by: Guido Iaquinti <4038041+guidoiaquinti@users.noreply.github.com>

* include warning icon

* fix crash message

* Update plugin-server/src/main/ingestion-queues/anonymous-event-buffer-consumer.ts

* Update plugin-server/src/main/ingestion-queues/anonymous-event-buffer-consumer.ts

Co-authored-by: Yakko Majuri <38760734+yakkomajuri@users.noreply.github.com>

* setup event handlers as KafkaQueue

* chore: instrument buffer consumer

* missing import

* avoid passing hub to buffer consumer

* fix statsd reference

* pass graphile explicitly

* explicitly cast

* add todo for buffer healthcheck

* set NODE_ENV=production

* Update comment re. failed batches

* fix: call flush on emitting to buffer.

* chore: flush to producer

* accept that we may drop some anonymous events

* Add metrics for enqueue error/enqueued

* fix comment

* chore: add CONVERSION_BUFFER_TOPIC_ENABLED_TEAMS to switch on buffer
topic
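
The gate itself is presumably a simple allow-list check; a hypothetical
sketch (the exact env var parsing in the PR may differ):

```ts
// Hypothetical sketch: teams listed in CONVERSION_BUFFER_TOPIC_ENABLED_TEAMS
// are routed through the Kafka buffer topic; all other teams keep the old
// direct-to-Graphile path.
const enabledTeams = new Set(
    (process.env.CONVERSION_BUFFER_TOPIC_ENABLED_TEAMS ?? '')
        .split(',')
        .filter(Boolean)
        .map(Number)
)

export function shouldUseBufferTopic(teamId: number): boolean {
    return enabledTeams.has(teamId)
}
```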

Co-authored-by: Guido Iaquinti <4038041+guidoiaquinti@users.noreply.github.com>
Co-authored-by: Yakko Majuri <38760734+yakkomajuri@users.noreply.github.com>
2022-10-10 15:40:43 +01:00


import { readFileSync } from 'fs'
import { join } from 'path'

import { DateTime } from 'luxon'
import fetch from 'node-fetch'

import { status } from './src/utils/status'

jest.mock('node-fetch')

beforeEach(() => {
    // Canned response bodies, keyed by request URL.
    const responsesToUrls = {
        'https://google.com/results.json?query=fetched': { count: 2, query: 'bla', results: [true, true] },
        'https://mmdbcdn.posthog.net/': readFileSync(join(__dirname, 'tests', 'assets', 'GeoLite2-City-Test.mmdb.br')),
        'https://app.posthog.com/api/event?token=THIS+IS+NOT+A+TOKEN+FOR+TEAM+2': { hello: 'world' },
    }
    // Canned response headers, keyed by request URL.
    const headersToUrls = {
        'https://mmdbcdn.posthog.net/': new Map([
            ['content-type', 'vnd.maxmind.maxmind-db'],
            ['content-disposition', `attachment; filename="GeoLite2-City-${DateTime.local().toISODate()}.mmdb"`],
        ]),
    }
    fetch.mockImplementation((url, options) =>
        Promise.resolve({
            // URLs without a fixture fall back to a generic "fetchmock" body.
            buffer: () => Promise.resolve(responsesToUrls[url] ?? Buffer.from('fetchmock')),
            json: () => Promise.resolve(responsesToUrls[url] ?? { fetch: 'mock' }),
            text: () => Promise.resolve(JSON.stringify(responsesToUrls[url]) ?? 'fetchmock'),
            status: () => (options?.method === 'PUT' ? 201 : 200),
            headers: headersToUrls[url],
        })
    )
})
// NOTE: in testing we use the pino-pretty transport, which results in a handle
// that we need to close to allow Jest to exit properly.
afterAll(() => status.close())
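
For reference, a hypothetical test showing what this setup file
provides — any test in the suite gets canned responses from the mocked
fetch:

```ts
test('fetch returns the canned response for a known URL', async () => {
    const response = await fetch('https://google.com/results.json?query=fetched', { method: 'GET' })
    expect(await response.json()).toEqual({ count: 2, query: 'bla', results: [true, true] })
    // Note: this mock exposes `status` as a function, unlike real node-fetch,
    // where it is a plain property.
    expect(response.status()).toEqual(200)
})
```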