0
0
mirror of https://github.com/PostHog/posthog.git synced 2024-11-21 13:39:22 +01:00

chore(flags): reorganized the modules (#26297)

This commit is contained in:
Dylan Martin 2024-11-20 00:03:23 +01:00 committed by GitHub
parent f34872115c
commit d4556c96d0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
36 changed files with 298 additions and 303 deletions

View File

@ -1,8 +1,9 @@
use std::net::IpAddr;
use crate::{
api::{FlagError, FlagsResponse},
request_handler::{process_request, FlagsQueryParams, RequestContext},
api::errors::FlagError,
api::handler::{process_request, FlagsQueryParams, RequestContext},
api::types::FlagsResponse,
router,
};
// TODO: stream this instead

View File

@ -1,60 +1,9 @@
use std::collections::HashMap;
use axum::http::StatusCode;
use axum::response::{IntoResponse, Response};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use crate::database::CustomDatabaseError;
use crate::redis::CustomRedisError;
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum FlagsResponseCode {
Ok = 1,
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum FlagValue {
Boolean(bool),
String(String),
}
// TODO the following two types are kinda general, maybe we should move them to a shared module
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum BooleanOrStringObject {
Boolean(bool),
Object(HashMap<String, String>),
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum BooleanOrBooleanObject {
Boolean(bool),
Object(HashMap<String, bool>),
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FlagsResponse {
pub error_while_computing_flags: bool,
pub feature_flags: HashMap<String, FlagValue>,
// TODO support the other fields in the payload
// pub config: HashMap<String, bool>,
// pub toolbar_params: HashMap<String, String>,
// pub is_authenticated: bool,
// pub supported_compression: Vec<String>,
// pub session_recording: bool,
// pub feature_flag_payloads: HashMap<String, String>,
// pub capture_performance: BooleanOrBooleanObject,
// #[serde(rename = "autocapture_opt_out")]
// pub autocapture_opt_out: bool,
// pub autocapture_exceptions: BooleanOrStringObject,
// pub surveys: bool,
// pub heatmaps: bool,
// pub site_apps: Vec<String>,
}
use crate::client::database::CustomDatabaseError;
use crate::client::redis::CustomRedisError;
#[derive(Error, Debug)]
pub enum ClientFacingError {

View File

@ -1,11 +1,12 @@
use crate::{
api::{FlagError, FlagsResponse},
cohort_cache::CohortCacheManager,
database::Client,
flag_definitions::FeatureFlagList,
flag_matching::{FeatureFlagMatcher, GroupTypeMappingCache},
flag_request::FlagRequest,
geoip::GeoIpClient,
api::errors::FlagError,
api::types::FlagsResponse,
client::database::Client,
client::geoip::GeoIpClient,
cohort::cohort_cache_manager::CohortCacheManager,
flags::flag_matching::{FeatureFlagMatcher, GroupTypeMappingCache},
flags::flag_models::FeatureFlagList,
flags::flag_request::FlagRequest,
router,
};
use axum::{extract::State, http::HeaderMap};
@ -254,10 +255,13 @@ fn decompress_gzip(compressed: Bytes) -> Result<Bytes, FlagError> {
#[cfg(test)]
mod tests {
use crate::{
api::FlagValue,
api::types::FlagValue,
config::Config,
flag_definitions::{FeatureFlag, FlagFilters, FlagGroupType, OperatorType, PropertyFilter},
test_utils::{insert_new_team_in_pg, setup_pg_reader_client, setup_pg_writer_client},
flags::flag_models::{FeatureFlag, FlagFilters, FlagGroupType},
properties::property_models::{OperatorType, PropertyFilter},
utils::test_utils::{
insert_new_team_in_pg, setup_pg_reader_client, setup_pg_writer_client,
},
};
use super::*;

View File

@ -0,0 +1,4 @@
// Public API surface for the flags service: the HTTP endpoint, error types,
// the request handler, and the response/value types returned to clients.
pub mod endpoint;
pub mod errors;
pub mod handler;
pub mod types;

View File

@ -0,0 +1,21 @@
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// Response status code for the flags endpoint; currently only `Ok`.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
pub enum FlagsResponseCode {
Ok = 1,
}
/// The evaluated value of a single feature flag: a plain on/off boolean or a
/// variant key string. `untagged` makes it (de)serialize as a bare JSON bool
/// or string; with untagged enums the variant order matters on deserialization
/// (bool is tried before string), so do not reorder.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum FlagValue {
Boolean(bool),
String(String),
}
/// Payload returned to clients after evaluating a team's feature flags.
/// Serialized with camelCase JSON keys (e.g. `errorWhileComputingFlags`).
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FlagsResponse {
// True when an error occurred while computing flags.
// NOTE(review): presumably flags that failed to evaluate are omitted from
// `feature_flags` — confirm against the handler.
pub error_while_computing_flags: bool,
// Map of flag key -> evaluated value for the requesting distinct_id/team.
pub feature_flags: HashMap<String, FlagValue>,
}

View File

@ -0,0 +1,3 @@
// Clients for external infrastructure used by the flags service:
// Postgres (database), GeoIP lookups, and Redis.
pub mod database;
pub mod geoip;
pub mod redis;

View File

@ -1,6 +1,6 @@
use crate::api::FlagError;
use crate::cohort_models::Cohort;
use crate::flag_matching::{PostgresReader, TeamId};
use crate::api::errors::FlagError;
use crate::cohort::cohort_models::Cohort;
use crate::flags::flag_matching::{PostgresReader, TeamId};
use moka::future::Cache;
use std::time::Duration;
@ -74,8 +74,8 @@ impl CohortCacheManager {
#[cfg(test)]
mod tests {
use super::*;
use crate::cohort_models::Cohort;
use crate::test_utils::{
use crate::cohort::cohort_models::Cohort;
use crate::utils::test_utils::{
insert_cohort_for_team_in_pg, insert_new_team_in_pg, setup_pg_reader_client,
setup_pg_writer_client,
};
@ -84,15 +84,15 @@ mod tests {
/// Helper function to setup a new team for testing.
async fn setup_test_team(
writer_client: Arc<dyn crate::database::Client + Send + Sync>,
writer_client: Arc<dyn crate::client::database::Client + Send + Sync>,
) -> Result<TeamId, anyhow::Error> {
let team = crate::test_utils::insert_new_team_in_pg(writer_client, None).await?;
let team = insert_new_team_in_pg(writer_client, None).await?;
Ok(team.id)
}
/// Helper function to insert a cohort for a team.
async fn setup_test_cohort(
writer_client: Arc<dyn crate::database::Client + Send + Sync>,
writer_client: Arc<dyn crate::client::database::Client + Send + Sync>,
team_id: TeamId,
name: Option<String>,
) -> Result<Cohort, anyhow::Error> {

View File

@ -1,4 +1,4 @@
use crate::flag_definitions::PropertyFilter;
use crate::properties::property_models::PropertyFilter;
use serde::{Deserialize, Serialize};
use sqlx::FromRow;

View File

@ -2,8 +2,11 @@ use std::collections::HashSet;
use std::sync::Arc;
use tracing::instrument;
use crate::cohort_models::{Cohort, CohortId, CohortProperty, InnerCohortProperty};
use crate::{api::FlagError, database::Client as DatabaseClient, flag_definitions::PropertyFilter};
use crate::cohort::cohort_models::{Cohort, CohortId, CohortProperty, InnerCohortProperty};
use crate::{
api::errors::FlagError, client::database::Client as DatabaseClient,
properties::property_models::PropertyFilter,
};
impl Cohort {
/// Returns a cohort from postgres given a cohort_id and team_id
@ -185,8 +188,8 @@ impl InnerCohortProperty {
mod tests {
use super::*;
use crate::{
cohort_models::{CohortPropertyType, CohortValues},
test_utils::{
cohort::cohort_models::{CohortPropertyType, CohortValues},
utils::test_utils::{
insert_cohort_for_team_in_pg, insert_new_team_in_pg, setup_pg_reader_client,
setup_pg_writer_client,
},

View File

@ -0,0 +1,3 @@
// Cohort support: the cache manager, the data models, and the
// Postgres read/parse operations.
pub mod cohort_cache_manager;
pub mod cohort_models;
pub mod cohort_operations;

View File

@ -2,8 +2,8 @@ use anyhow::Result;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use crate::flag_request::FlagRequestType;
use crate::redis::{Client as RedisClient, CustomRedisError};
use crate::client::redis::{Client as RedisClient, CustomRedisError};
use crate::flags::flag_request::FlagRequestType;
const CACHE_BUCKET_SIZE: u64 = 60 * 2; // duration in seconds
@ -37,7 +37,7 @@ pub async fn increment_request_count(
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::setup_redis_client;
use crate::utils::test_utils::setup_redis_client;
#[tokio::test]
async fn test_get_team_request_key() {

View File

@ -1,14 +1,14 @@
use crate::{
api::{FlagError, FlagValue, FlagsResponse},
cohort_cache::CohortCacheManager,
cohort_models::{Cohort, CohortId},
database::Client as DatabaseClient,
feature_flag_match_reason::FeatureFlagMatchReason,
flag_definitions::{FeatureFlag, FeatureFlagList, FlagGroupType, OperatorType, PropertyFilter},
metrics_consts::{FLAG_EVALUATION_ERROR_COUNTER, FLAG_HASH_KEY_WRITES_COUNTER},
metrics_utils::parse_exception_for_prometheus_label,
property_matching::match_property,
};
use crate::api::errors::FlagError;
use crate::api::types::{FlagValue, FlagsResponse};
use crate::client::database::Client as DatabaseClient;
use crate::cohort::cohort_cache_manager::CohortCacheManager;
use crate::cohort::cohort_models::{Cohort, CohortId};
use crate::flags::flag_match_reason::FeatureFlagMatchReason;
use crate::flags::flag_models::{FeatureFlag, FeatureFlagList, FlagGroupType};
use crate::metrics::metrics_consts::{FLAG_EVALUATION_ERROR_COUNTER, FLAG_HASH_KEY_WRITES_COUNTER};
use crate::metrics::metrics_utils::parse_exception_for_prometheus_label;
use crate::properties::property_matching::match_property;
use crate::properties::property_models::{OperatorType, PropertyFilter};
use anyhow::Result;
use common_metrics::inc;
use petgraph::algo::{is_cyclic_directed, toposort};
@ -1796,11 +1796,11 @@ mod tests {
use super::*;
use crate::{
flag_definitions::{
flags::flag_models::{
FeatureFlagRow, FlagFilters, MultivariateFlagOptions, MultivariateFlagVariant,
OperatorType,
},
test_utils::{
properties::property_models::OperatorType,
utils::test_utils::{
add_person_to_cohort, get_person_id_by_distinct_id, insert_cohort_for_team_in_pg,
insert_flag_for_team_in_pg, insert_new_team_in_pg, insert_person_for_team_in_pg,
setup_pg_reader_client, setup_pg_writer_client,

View File

@ -0,0 +1,70 @@
use serde::{Deserialize, Serialize};
use crate::properties::property_models::PropertyFilter;
// TRICKY: This cache data is coming from django-redis. If it ever goes out of sync, we'll bork.
// TODO: Add integration tests across repos to ensure this doesn't happen.
pub const TEAM_FLAGS_CACHE_PREFIX: &str = "posthog:1:team_feature_flags_";
/// One release-condition group of a flag: optional property filters that must
/// match, an optional rollout percentage, and an optional variant override.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FlagGroupType {
pub properties: Option<Vec<PropertyFilter>>,
pub rollout_percentage: Option<f64>,
pub variant: Option<String>,
}
/// A single variant of a multivariate flag, identified by `key`, with the
/// share of users it should receive.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MultivariateFlagVariant {
pub key: String,
pub name: Option<String>,
pub rollout_percentage: f64,
}
/// The set of variants configured for a multivariate flag.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MultivariateFlagOptions {
pub variants: Vec<MultivariateFlagVariant>,
}
/// Targeting configuration of a flag: release-condition groups, optional
/// multivariate options, optional group-type aggregation, raw payloads,
/// and "super" (early-access/override) condition groups.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FlagFilters {
pub groups: Vec<FlagGroupType>,
pub multivariate: Option<MultivariateFlagOptions>,
pub aggregation_group_type_index: Option<i32>,
pub payloads: Option<serde_json::Value>,
pub super_groups: Option<Vec<FlagGroupType>>,
}
// TODO: see if you can combine these two structs, like we do with cohort models
// this will require not deserializing on read and instead doing it lazily, on-demand
// (which, tbh, is probably a better idea)
/// A feature flag with its `filters` fully deserialized into `FlagFilters`.
/// The `deleted`/`active`/`ensure_experience_continuity` fields default to
/// `false` when absent from the serialized form.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FeatureFlag {
pub id: i32,
pub team_id: i32,
pub name: Option<String>,
pub key: String,
pub filters: FlagFilters,
#[serde(default)]
pub deleted: bool,
#[serde(default)]
pub active: bool,
#[serde(default)]
pub ensure_experience_continuity: bool,
}
/// A feature flag row as read from Postgres via sqlx; `filters` is kept as
/// raw JSON here (unlike `FeatureFlag`, which deserializes it eagerly).
#[derive(Debug, Serialize, sqlx::FromRow)]
pub struct FeatureFlagRow {
pub id: i32,
pub team_id: i32,
pub name: Option<String>,
pub key: String,
pub filters: serde_json::Value,
pub deleted: bool,
pub active: bool,
pub ensure_experience_continuity: bool,
}
/// All feature flags for a team, as cached/fetched as one unit.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct FeatureFlagList {
pub flags: Vec<FeatureFlag>,
}

View File

@ -1,52 +1,12 @@
use crate::{
api::FlagError, cohort_models::CohortId, database::Client as DatabaseClient,
redis::Client as RedisClient,
};
use serde::{Deserialize, Serialize};
use crate::api::errors::FlagError;
use crate::client::database::Client as DatabaseClient;
use crate::client::redis::Client as RedisClient;
use crate::cohort::cohort_models::CohortId;
use crate::flags::flag_models::*;
use crate::properties::property_models::PropertyFilter;
use std::sync::Arc;
use tracing::instrument;
// TRICKY: This cache data is coming from django-redis. If it ever goes out of sync, we'll bork.
// TODO: Add integration tests across repos to ensure this doesn't happen.
pub const TEAM_FLAGS_CACHE_PREFIX: &str = "posthog:1:team_feature_flags_";
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum OperatorType {
Exact,
IsNot,
Icontains,
NotIcontains,
Regex,
NotRegex,
Gt,
Lt,
Gte,
Lte,
IsSet,
IsNotSet,
IsDateExact,
IsDateAfter,
IsDateBefore,
In,
NotIn,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PropertyFilter {
pub key: String,
// TODO: Probably need a default for value?
// incase operators like is_set, is_not_set are used
// not guaranteed to have a value, if say created via api
pub value: serde_json::Value,
pub operator: Option<OperatorType>,
#[serde(rename = "type")]
// TODO: worth making a enum here to differentiate between cohort and person filters?
pub prop_type: String,
pub negation: Option<bool>,
pub group_type_index: Option<i32>,
}
impl PropertyFilter {
/// Checks if the filter is a cohort filter
pub fn is_cohort(&self) -> bool {
@ -63,64 +23,6 @@ impl PropertyFilter {
}
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FlagGroupType {
pub properties: Option<Vec<PropertyFilter>>,
pub rollout_percentage: Option<f64>,
pub variant: Option<String>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MultivariateFlagVariant {
pub key: String,
pub name: Option<String>,
pub rollout_percentage: f64,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct MultivariateFlagOptions {
pub variants: Vec<MultivariateFlagVariant>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FlagFilters {
pub groups: Vec<FlagGroupType>,
pub multivariate: Option<MultivariateFlagOptions>,
pub aggregation_group_type_index: Option<i32>,
pub payloads: Option<serde_json::Value>,
pub super_groups: Option<Vec<FlagGroupType>>,
}
// TODO: see if you can combine these two structs, like we do with cohort models
// this will require not deserializing on read and instead doing it lazily, on-demand
// (which, tbh, is probably a better idea)
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct FeatureFlag {
pub id: i32,
pub team_id: i32,
pub name: Option<String>,
pub key: String,
pub filters: FlagFilters,
#[serde(default)]
pub deleted: bool,
#[serde(default)]
pub active: bool,
#[serde(default)]
pub ensure_experience_continuity: bool,
}
#[derive(Debug, Serialize, sqlx::FromRow)]
pub struct FeatureFlagRow {
pub id: i32,
pub team_id: i32,
pub name: Option<String>,
pub key: String,
pub filters: serde_json::Value,
pub deleted: bool,
pub active: bool,
pub ensure_experience_continuity: bool,
}
impl FeatureFlag {
pub fn get_group_type_index(&self) -> Option<i32> {
self.filters.aggregation_group_type_index
@ -146,11 +48,6 @@ impl FeatureFlag {
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct FeatureFlagList {
pub flags: Vec<FeatureFlag>,
}
impl FeatureFlagList {
/// Returns feature flags from redis given a team_id
#[instrument(skip_all)]
@ -243,14 +140,14 @@ impl FeatureFlagList {
#[cfg(test)]
mod tests {
use crate::flag_definitions;
use crate::{flags::flag_models::*, properties::property_models::OperatorType};
use rand::Rng;
use serde_json::json;
use std::time::Instant;
use tokio::task;
use super::*;
use crate::test_utils::{
use crate::utils::test_utils::{
insert_flag_for_team_in_pg, insert_flags_for_team_in_redis, insert_new_team_in_pg,
insert_new_team_in_redis, setup_invalid_pg_client, setup_pg_reader_client,
setup_redis_client,
@ -803,6 +700,7 @@ mod tests {
}
}
}
#[tokio::test]
async fn test_flag_with_super_groups() {
let redis_client = setup_redis_client(None);
@ -1114,7 +1012,7 @@ mod tests {
redis_client
.set(
format!("{}{}", flag_definitions::TEAM_FLAGS_CACHE_PREFIX, team.id),
format!("{}{}", TEAM_FLAGS_CACHE_PREFIX, team.id),
"not a json".to_string(),
)
.await

View File

@ -7,8 +7,11 @@ use serde_json::Value;
use tracing::instrument;
use crate::{
api::FlagError, database::Client as DatabaseClient, flag_definitions::FeatureFlagList,
metrics_consts::FLAG_CACHE_HIT_COUNTER, redis::Client as RedisClient, team::Team,
api::errors::FlagError,
client::{database::Client as DatabaseClient, redis::Client as RedisClient},
flags::flag_models::FeatureFlagList,
metrics::metrics_consts::FLAG_CACHE_HIT_COUNTER,
team::team_models::Team,
};
#[derive(Debug, Clone, Copy)]
@ -204,14 +207,17 @@ impl FlagRequest {
mod tests {
use std::collections::HashMap;
use crate::api::FlagError;
use crate::flag_definitions::{
FeatureFlag, FeatureFlagList, FlagFilters, FlagGroupType, OperatorType, PropertyFilter,
TEAM_FLAGS_CACHE_PREFIX,
use crate::api::errors::FlagError;
use crate::flags::flag_models::{
FeatureFlag, FeatureFlagList, FlagFilters, FlagGroupType, TEAM_FLAGS_CACHE_PREFIX,
};
use crate::flags::flag_request::FlagRequest;
use crate::properties::property_models::{OperatorType, PropertyFilter};
use crate::team::team_models::Team;
use crate::utils::test_utils::{
insert_new_team_in_redis, setup_pg_reader_client, setup_redis_client,
};
use crate::flag_request::FlagRequest;
use crate::team::Team;
use crate::test_utils::{insert_new_team_in_redis, setup_pg_reader_client, setup_redis_client};
use bytes::Bytes;
use serde_json::json;

View File

@ -0,0 +1,6 @@
// Core feature-flag machinery: usage analytics, match-reason reporting,
// the flag matcher, data models, Redis/Postgres operations, and the
// incoming request representation.
pub mod flag_analytics;
pub mod flag_match_reason;
pub mod flag_matching;
pub mod flag_models;
pub mod flag_operations;
pub mod flag_request;

View File

@ -1,24 +1,13 @@
pub mod api;
pub mod cohort_cache;
pub mod cohort_models;
pub mod cohort_operations;
pub mod client;
pub mod cohort;
pub mod config;
pub mod database;
pub mod feature_flag_match_reason;
pub mod flag_analytics;
pub mod flag_definitions;
pub mod flag_matching;
pub mod flag_request;
pub mod geoip;
pub mod metrics_consts;
pub mod metrics_utils;
pub mod property_matching;
pub mod redis;
pub mod request_handler;
pub mod flags;
pub mod metrics;
pub mod properties;
pub mod router;
pub mod server;
pub mod team;
pub mod v0_endpoint;
// Test modules don't need to be compiled with main binary
// #[cfg(test)]
@ -26,4 +15,4 @@ pub mod v0_endpoint;
// or make it a separate feature using cfg(feature = "integration-tests")
// and then use this feature only in tests.
// For now, ok to just include in binary
pub mod test_utils;
pub mod utils;

View File

@ -1,4 +1,4 @@
use crate::{api::FlagError, config::TeamIdsToTrack};
use crate::{api::errors::FlagError, config::TeamIdsToTrack};
pub fn team_id_label_filter(
team_ids_to_track: TeamIdsToTrack,

View File

@ -0,0 +1,2 @@
// Metrics support: counter-name constants and labeling/reporting helpers.
pub mod metrics_consts;
pub mod metrics_utils;

View File

@ -0,0 +1,2 @@
// Property filtering: the matching logic and the filter/operator models.
pub mod property_matching;
pub mod property_models;

View File

@ -1,6 +1,6 @@
use std::collections::HashMap;
use crate::flag_definitions::{OperatorType, PropertyFilter};
use crate::properties::property_models::{OperatorType, PropertyFilter};
use regex::Regex;
use serde_json::Value;

View File

@ -0,0 +1,38 @@
use serde::{Deserialize, Serialize};
/// Comparison operators available in property filters. Serialized in
/// snake_case (e.g. `is_not`, `not_icontains`), matching the wire format
/// produced by the PostHog backend — do not rename variants casually.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "snake_case")]
pub enum OperatorType {
Exact,
IsNot,
Icontains,
NotIcontains,
Regex,
NotRegex,
Gt,
Lt,
Gte,
Lte,
IsSet,
IsNotSet,
IsDateExact,
IsDateAfter,
IsDateBefore,
In,
NotIn,
}
/// A single property filter: compare the property named `key` against
/// `value` using `operator` (defaults to exact matching when `None` —
/// TODO confirm against the matching code).
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PropertyFilter {
pub key: String,
// TODO: Probably need a default for value?
// incase operators like is_set, is_not_set are used
// not guaranteed to have a value, if say created via api
pub value: serde_json::Value,
pub operator: Option<OperatorType>,
#[serde(rename = "type")]
// TODO: worth making a enum here to differentiate between cohort and person filters?
pub prop_type: String,
pub negation: Option<bool>,
pub group_type_index: Option<i32>,
}

View File

@ -9,13 +9,13 @@ use health::HealthRegistry;
use tower::limit::ConcurrencyLimitLayer;
use crate::{
cohort_cache::CohortCacheManager,
api::endpoint,
client::{
database::Client as DatabaseClient, geoip::GeoIpClient, redis::Client as RedisClient,
},
cohort::cohort_cache_manager::CohortCacheManager,
config::{Config, TeamIdsToTrack},
database::Client as DatabaseClient,
geoip::GeoIpClient,
metrics_utils::team_id_label_filter,
redis::Client as RedisClient,
v0_endpoint,
metrics::metrics_utils::team_id_label_filter,
};
#[derive(Clone)]
@ -56,7 +56,7 @@ where
.route("/_liveness", get(move || ready(liveness.get_status())));
let flags_router = Router::new()
.route("/flags", post(v0_endpoint::flags).get(v0_endpoint::flags))
.route("/flags", post(endpoint::flags).get(endpoint::flags))
.layer(ConcurrencyLimitLayer::new(config.max_concurrency))
.with_state(state);

View File

@ -6,11 +6,11 @@ use std::time::Duration;
use health::{HealthHandle, HealthRegistry};
use tokio::net::TcpListener;
use crate::cohort_cache::CohortCacheManager;
use crate::client::database::get_pool;
use crate::client::geoip::GeoIpClient;
use crate::client::redis::RedisClient;
use crate::cohort::cohort_cache_manager::CohortCacheManager;
use crate::config::Config;
use crate::database::get_pool;
use crate::geoip::GeoIpClient;
use crate::redis::RedisClient;
use crate::router;
pub async fn serve<F>(config: Config, listener: TcpListener, shutdown: F)

View File

@ -0,0 +1,2 @@
// Team support: the data model and the Redis/Postgres lookup operations.
pub mod team_models;
pub mod team_operations;

View File

@ -0,0 +1,23 @@
use serde::{Deserialize, Serialize};
// TRICKY: This cache data is coming from django-redis. If it ever goes out of sync, we'll bork.
// TODO: Add integration tests across repos to ensure this doesn't happen.
pub const TEAM_TOKEN_CACHE_PREFIX: &str = "posthog:1:team_token:";
/// A team as used for flag evaluation: its id, display name, and API token.
/// Readable both from the Redis cache (serde) and from Postgres (sqlx::FromRow).
#[derive(Clone, Debug, Deserialize, Serialize, sqlx::FromRow)]
pub struct Team {
pub id: i32,
pub name: String,
pub api_token: String,
// TODO: the following fields are used for the `/decide` response,
// but they're not used for flags and they don't live in redis.
// At some point I'll need to differentiate between teams in Redis and teams
// with additional fields in Postgres, since the Postgres team is a superset of the fields
// we use for flags, anyway.
// pub surveys_opt_in: bool,
// pub heatmaps_opt_in: bool,
// pub capture_performance_opt_in: bool,
// pub autocapture_web_vitals_opt_in: bool,
// pub autocapture_opt_out: bool,
// pub autocapture_exceptions_opt_in: bool,
}

View File

@ -1,34 +1,15 @@
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tracing::instrument;
use crate::{api::FlagError, database::Client as DatabaseClient, redis::Client as RedisClient};
// TRICKY: This cache data is coming from django-redis. If it ever goes out of sync, we'll bork.
// TODO: Add integration tests across repos to ensure this doesn't happen.
pub const TEAM_TOKEN_CACHE_PREFIX: &str = "posthog:1:team_token:";
#[derive(Clone, Debug, Deserialize, Serialize, sqlx::FromRow)]
pub struct Team {
pub id: i32,
pub name: String,
pub api_token: String,
// TODO: the following fields are used for the `/decide` response,
// but they're not used for flags and they don't live in redis.
// At some point I'll need to differentiate between teams in Redis and teams
// with additional fields in Postgres, since the Postgres team is a superset of the fields
// we use for flags, anyway.
// pub surveys_opt_in: bool,
// pub heatmaps_opt_in: bool,
// pub capture_performance_opt_in: bool,
// pub autocapture_web_vitals_opt_in: bool,
// pub autocapture_opt_out: bool,
// pub autocapture_exceptions_opt_in: bool,
}
use crate::{
api::errors::FlagError,
client::database::Client as DatabaseClient,
client::redis::Client as RedisClient,
team::team_models::{Team, TEAM_TOKEN_CACHE_PREFIX},
};
impl Team {
/// Validates a token, and returns a team if it exists.
#[instrument(skip_all)]
pub async fn from_redis(
client: Arc<dyn RedisClient + Send + Sync>,
@ -94,12 +75,9 @@ mod tests {
use redis::AsyncCommands;
use super::*;
use crate::{
team,
test_utils::{
insert_new_team_in_pg, insert_new_team_in_redis, random_string, setup_pg_reader_client,
setup_redis_client,
},
use crate::utils::test_utils::{
insert_new_team_in_pg, insert_new_team_in_redis, random_string, setup_pg_reader_client,
setup_redis_client,
};
#[tokio::test]
@ -159,11 +137,7 @@ mod tests {
.await
.expect("Failed to get redis connection");
conn.set::<String, String, ()>(
format!(
"{}{}",
team::TEAM_TOKEN_CACHE_PREFIX,
team.api_token.clone()
),
format!("{}{}", TEAM_TOKEN_CACHE_PREFIX, team.api_token.clone()),
serialized_team,
)
.await

View File

@ -0,0 +1 @@
// Shared utilities; currently only test helpers (see note in lib.rs about
// why these are compiled into the main binary for now).
pub mod test_utils;

View File

@ -6,12 +6,14 @@ use std::sync::Arc;
use uuid::Uuid;
use crate::{
cohort_models::Cohort,
client::{
database::{get_pool, Client, CustomDatabaseError},
redis::{Client as RedisClientTrait, RedisClient},
},
cohort::cohort_models::Cohort,
config::{Config, DEFAULT_TEST_CONFIG},
database::{get_pool, Client, CustomDatabaseError},
flag_definitions::{self, FeatureFlag, FeatureFlagRow},
redis::{Client as RedisClientTrait, RedisClient},
team::{self, Team},
flags::flag_models::{FeatureFlag, FeatureFlagRow, TEAM_FLAGS_CACHE_PREFIX},
team::team_models::{Team, TEAM_TOKEN_CACHE_PREFIX},
};
use rand::{distributions::Alphanumeric, Rng};
@ -38,11 +40,7 @@ pub async fn insert_new_team_in_redis(
let serialized_team = serde_json::to_string(&team)?;
client
.set(
format!(
"{}{}",
team::TEAM_TOKEN_CACHE_PREFIX,
team.api_token.clone()
),
format!("{}{}", TEAM_TOKEN_CACHE_PREFIX, team.api_token.clone()),
serialized_team,
)
.await?;
@ -82,10 +80,7 @@ pub async fn insert_flags_for_team_in_redis(
};
client
.set(
format!("{}{}", flag_definitions::TEAM_FLAGS_CACHE_PREFIX, team_id),
payload,
)
.set(format!("{}{}", TEAM_FLAGS_CACHE_PREFIX, team_id), payload)
.await?;
Ok(())

View File

@ -1,13 +1,14 @@
use std::sync::Arc;
use feature_flags::cohort_cache::CohortCacheManager;
use feature_flags::feature_flag_match_reason::FeatureFlagMatchReason;
/// These tests are common between all libraries doing local evaluation of feature flags.
/// This ensures there are no mismatches between implementations.
use feature_flags::flag_matching::{FeatureFlagMatch, FeatureFlagMatcher};
use feature_flags::test_utils::{
create_flag_from_json, setup_pg_reader_client, setup_pg_writer_client,
use feature_flags::{
cohort::cohort_cache_manager::CohortCacheManager,
flags::{
flag_match_reason::FeatureFlagMatchReason,
flag_matching::{FeatureFlagMatch, FeatureFlagMatcher},
},
utils::test_utils::{create_flag_from_json, setup_pg_reader_client, setup_pg_writer_client},
};
use serde_json::json;

View File

@ -7,7 +7,7 @@ use serde_json::{json, Value};
use crate::common::*;
use feature_flags::config::DEFAULT_TEST_CONFIG;
use feature_flags::test_utils::{
use feature_flags::utils::test_utils::{
insert_flags_for_team_in_redis, insert_new_team_in_pg, insert_new_team_in_redis,
setup_pg_reader_client, setup_redis_client,
};