Mirror of https://github.com/PostHog/posthog.git, synced 2024-11-21 21:49:51 +01:00
59eaa99c14

* 🔥 initial commit
* update readme
* Update README.md
* Update README.md
* deploy scripts
* very basic consumer setup
* add some configs and docker-compose
* formatting for testing
* add tailscale
* flip from dev to prod flag
* set default to be not prod
* default for group_id
* tailscale up
* update gitignore
* basic geolocation
* remove unused localServer
* document mmdb
* just make configs an example
* drop raw print
* add a start script (downloads the mmdb)
* add readme and update configs.example
* ts working
* if in start
* update start script
* fix start
* fix start
* fix more
* add sql endpoints for tokenId and Person lookups
* work towards filter
* sub channel
* fix subChan
* hardcode team2 token
* add cors
* only allow get and head
* add atomicbool
* add channel to kafka
* add logs
* verbose logs
* make array
* drop sub ptrs
* more logs
* helps to loop
* drop some logigng
* move sub branch
* logging
* drop log
* hog
* Deal with numeric distinct ids later
* logs
* api_key
* send 1/1000
* remove log
* remove more logs
* change response payload
* set timestamp if needed
* fill in person_id if team_id is set
* require teamid, convert to token
* clean up subs on disconnect
* log
* check for token in another place
* clean up subs on disconnect
* drop modulo and log
* fix no assign
* don't reuse db conn for now
* drop a log
* add back commented out log
* Don't block on send to client channel
* add geo bool
* only geo events
* use wrapper ip
* don't require team in geo mode
* add an endpoint and stats keeper for teams
* remove stats keeper
* start stats keeper
* wire it up
* change the shape of the response
* omit empty error
* omit empty on the stats as well
* enable logging on back pressure
* add jwt endpoint for testing
* support multiple event types
* Get Auth Setup
* jwt team is float so turn that into int
* logs
* add auth for stats endpoint
* remove tailscale and use autoTLS on public endpoints
* default to :443 for auto tls
* remove un-needed endpoints and handlers
* Use compression because... a lot of data (#9)
* add dockerfile and CI/CD (#10)
* add dockerfile and CI/CD
* Use ubuntu not alpine couldn't build in alpine :'(
* Add MMDB download to Dockerfile (#11)
* Use clearer name for MMDB
* Don't connect to Kafka over SSL in dev
* Fix JWT token in example config
* Add postgres.url to example config
* Add expected scope
* Fix const syntax
* Put scope validation where claims are known
* Fix audience validation
* moves
* ignore livestream for ci
* main -> master
* move GA to root
* docker lint fix
* fix typo
* fixes for docker builds
* test docker build
* livestream build docker
* dang
* Update .github/workflows/livestream-docker-image.yml Co-authored-by: Neil Kakkar <neilkakkar@gmail.com>
* Update .github/workflows/livestream-docker-image.yml Co-authored-by: Neil Kakkar <neilkakkar@gmail.com>
* don't build posthog container when PR is pushed for rust or livestream
* Update .github/workflows/livestream-docker-image.yml Co-authored-by: Neil Kakkar <neilkakkar@gmail.com>
* add a lot of paths-ignore
* Update .github/workflows/livestream-docker-image.yml Co-authored-by: Neil Kakkar <neilkakkar@gmail.com>
* Dorny filters are handling most of what I was trying to do
* remove tailscale to speed up builds
* maybe?
* push container to github.com/posthog/postog
* don't build container on PR
* remove more filters because dorny

---------

Co-authored-by: Brett Hoerner <brett@bretthoerner.com>
Co-authored-by: Zach Waterfield <zlwaterfield@gmail.com>
Co-authored-by: Frank Hamand <frankhamand@gmail.com>
Co-authored-by: Michael Matloka <michal@matloka.com>
Co-authored-by: Neil Kakkar <neilkakkar@gmail.com>
124 lines
2.8 KiB
Go
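
// Kafka consumer for the livestream service: it decodes capture events from
// the topic, enriches them with GeoIP coordinates, and fans them out to the
// event and stats channels.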
package main

import (
	"encoding/json"
	"log"
	"time"

	"github.com/confluentinc/confluent-kafka-go/v2/kafka"
)
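
// PostHogEventWrapper is the outer envelope a capture event arrives in on the
// Kafka topic: identity and IP metadata, plus the original event as raw JSON
// in Data.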
type PostHogEventWrapper struct {
	Uuid       string `json:"uuid"`
	DistinctId string `json:"distinct_id"`
	Ip         string `json:"ip"`
	Data       string `json:"data"`
}
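
// PostHogEvent is the event decoded from the wrapper's Data field. The
// JSON-tagged fields come from the client payload; the untagged fields are
// filled in by Consume (identity from the wrapper, Lat/Lng from GeoIP).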
type PostHogEvent struct {
	Token      string                 `json:"api_key,omitempty"`
	Event      string                 `json:"event"`
	Properties map[string]interface{} `json:"properties"`
	Timestamp  string                 `json:"timestamp,omitempty"`

	Uuid       string
	DistinctId string
	Lat        float64
	Lng        float64
}
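
// KafkaConsumer reads capture events from a topic, enriches them, and fans
// them out to the outgoing (live events) and stats channels. GeoLocator is
// defined elsewhere in this package and wraps the MMDB geolocation lookup.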
type KafkaConsumer struct {
	consumer     *kafka.Consumer
	topic        string
	geolocator   *GeoLocator
	outgoingChan chan PostHogEvent
	statsChan    chan PostHogEvent
}
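
// NewKafkaConsumer configures a confluent-kafka-go consumer that starts at
// the latest offsets with auto-commit disabled: a live stream only cares
// about new events, so there is no committed progress worth resuming from.
//
// A hypothetical wiring sketch (the GeoLocator constructor, broker address,
// topic name, and channel sizes below are illustrative assumptions, not part
// of this file):
//
//	geolocator, _ := NewGeoLocator("GeoLite2-City.mmdb") // assumed constructor
//	events := make(chan PostHogEvent, 1000)
//	stats := make(chan PostHogEvent, 1000)
//	c, err := NewKafkaConsumer("localhost:9092", "plaintext", "livestream",
//		"events_topic", geolocator, events, stats)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer c.Close()
//	go c.Consume()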
func NewKafkaConsumer(brokers string, securityProtocol string, groupID string, topic string, geolocator *GeoLocator, outgoingChan chan PostHogEvent, statsChan chan PostHogEvent) (*KafkaConsumer, error) {
	config := &kafka.ConfigMap{
		"bootstrap.servers":  brokers,
		"group.id":           groupID,
		"auto.offset.reset":  "latest",
		"enable.auto.commit": false,
		"security.protocol":  securityProtocol,
	}

	consumer, err := kafka.NewConsumer(config)
	if err != nil {
		return nil, err
	}

	return &KafkaConsumer{
		consumer:     consumer,
		topic:        topic,
		geolocator:   geolocator,
		outgoingChan: outgoingChan,
		statsChan:    statsChan,
	}, nil
}
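
// Consume subscribes to the topic and loops forever: each message is decoded
// twice (the envelope, then the event JSON carried in Data), given a
// timestamp and token if missing, geolocated by IP, and sent to both output
// channels. Malformed messages are logged and skipped.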
func (c *KafkaConsumer) Consume() {
	err := c.consumer.SubscribeTopics([]string{c.topic}, nil)
	if err != nil {
		log.Fatalf("Failed to subscribe to topic: %v", err)
	}

	for {
		// Block until the next message arrives (-1 means no timeout).
		msg, err := c.consumer.ReadMessage(-1)
		if err != nil {
			log.Printf("Error consuming message: %v", err)
			continue
		}

		// First pass: decode the envelope written to Kafka.
		var wrapperMessage PostHogEventWrapper
		err = json.Unmarshal(msg.Value, &wrapperMessage)
		if err != nil {
			log.Printf("Error decoding wrapper JSON: %v", err)
			continue
		}

		// Second pass: decode the actual event carried in the Data field.
		var phEvent PostHogEvent
		err = json.Unmarshal([]byte(wrapperMessage.Data), &phEvent)
		if err != nil {
			log.Printf("Error decoding event JSON: %v", err)
			continue
		}

		phEvent.Uuid = wrapperMessage.Uuid
		phEvent.DistinctId = wrapperMessage.DistinctId
		if phEvent.Timestamp == "" {
			phEvent.Timestamp = time.Now().UTC().Format("2006-01-02T15:04:05.000Z")
		}
		if phEvent.Token == "" {
			if tokenValue, ok := phEvent.Properties["token"].(string); ok {
				phEvent.Token = tokenValue
			}
		}

		// Prefer the $ip property from the event; fall back to the IP the
		// capture endpoint recorded on the wrapper.
		ipStr := ""
		if ipValue, ok := phEvent.Properties["$ip"]; ok {
			if ipProp, ok := ipValue.(string); ok && ipProp != "" {
				ipStr = ipProp
			}
		} else if wrapperMessage.Ip != "" {
			ipStr = wrapperMessage.Ip
		}

		if ipStr != "" {
			phEvent.Lat, phEvent.Lng = c.geolocator.Lookup(ipStr)
		}

		c.outgoingChan <- phEvent
		c.statsChan <- phEvent
	}
}
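
// Close shuts down the underlying Kafka consumer.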
func (c *KafkaConsumer) Close() {
	c.consumer.Close()
}