0
0
mirror of https://github.com/PostHog/posthog.git synced 2024-11-21 13:39:22 +01:00

feat(max): Smart question suggestions (#25556)

Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com>
This commit is contained in:
Michael Matloka 2024-10-14 15:14:26 +02:00 committed by GitHub
parent b2070a7d90
commit 27364a3218
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 483 additions and 42 deletions

Binary file not shown.

Before

Width:  |  Height:  |  Size: 21 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 19 KiB

View File

@ -26,6 +26,8 @@ export interface LemonButtonPropsBase
| 'onMouseEnter'
| 'onMouseLeave'
| 'onKeyDown'
| 'className'
| 'style'
| 'role'
| 'aria-haspopup'
> {
@ -41,8 +43,6 @@ export interface LemonButtonPropsBase
disableClientSideRouting?: boolean
/** If set clicking this button will open the page in a new tab. */
targetBlank?: boolean
/** External URL to link to. */
className?: string
/** Icon displayed on the left. */
icon?: React.ReactElement | null

View File

@ -52,6 +52,7 @@ export const OBJECTS = {
'IconGearFilled',
'IconStack',
'IconSparkles',
'IconPuzzle',
],
People: ['IconPeople', 'IconPeopleFilled', 'IconPerson', 'IconProfile', 'IconUser', 'IconGroups'],
'Business & Finance': ['IconStore', 'IconCart', 'IconReceipt', 'IconPiggyBank', 'IconHandMoney'],
@ -72,6 +73,7 @@ export const TECHNOLOGY = {
'IconDatabase',
'IconHardDrive',
'IconMouse',
'IconCdCase',
],
Software: ['IconBrowser', 'IconCode', 'IconCodeInsert', 'IconTerminal', 'IconApp'],
UI: [
@ -187,6 +189,7 @@ export const TEAMS_AND_COMPANIES = {
'IconPlay',
'IconPlayFilled',
'IconPlaylist',
'IconShuffle',
'IconPause',
'IconFastForward',
'IconPauseFilled',

View File

@ -2038,6 +2038,55 @@
],
"type": "object"
},
"CachedSuggestedQuestionsQueryResponse": {
"additionalProperties": false,
"properties": {
"cache_key": {
"type": "string"
},
"cache_target_age": {
"format": "date-time",
"type": "string"
},
"calculation_trigger": {
"description": "What triggered the calculation of the query, leave empty if user/immediate",
"type": "string"
},
"is_cached": {
"type": "boolean"
},
"last_refresh": {
"format": "date-time",
"type": "string"
},
"next_allowed_client_refresh": {
"format": "date-time",
"type": "string"
},
"query_status": {
"$ref": "#/definitions/QueryStatus",
"description": "Query status indicates whether next to the provided data, a query is still running."
},
"questions": {
"items": {
"type": "string"
},
"type": "array"
},
"timezone": {
"type": "string"
}
},
"required": [
"cache_key",
"is_cached",
"last_refresh",
"next_allowed_client_refresh",
"questions",
"timezone"
],
"type": "object"
},
"CachedTeamTaxonomyQueryResponse": {
"additionalProperties": false,
"properties": {
@ -7216,6 +7265,7 @@
"ExperimentFunnelQuery",
"ExperimentTrendQuery",
"DatabaseSchemaQuery",
"SuggestedQuestionsQuery",
"TeamTaxonomyQuery",
"EventTaxonomyQuery"
],
@ -9377,6 +9427,19 @@
},
"required": ["tables"],
"type": "object"
},
{
"additionalProperties": false,
"properties": {
"questions": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": ["questions"],
"type": "object"
}
]
},
@ -9486,6 +9549,9 @@
},
{
"$ref": "#/definitions/DatabaseSchemaQuery"
},
{
"$ref": "#/definitions/SuggestedQuestionsQuery"
}
],
"required": ["kind"],
@ -10586,6 +10652,37 @@
"required": ["results"],
"type": "object"
},
"SuggestedQuestionsQuery": {
"additionalProperties": false,
"properties": {
"kind": {
"const": "SuggestedQuestionsQuery",
"type": "string"
},
"modifiers": {
"$ref": "#/definitions/HogQLQueryModifiers",
"description": "Modifiers used when performing the query"
},
"response": {
"$ref": "#/definitions/SuggestedQuestionsQueryResponse"
}
},
"required": ["kind"],
"type": "object"
},
"SuggestedQuestionsQueryResponse": {
"additionalProperties": false,
"properties": {
"questions": {
"items": {
"type": "string"
},
"type": "array"
}
},
"required": ["questions"],
"type": "object"
},
"TableSettings": {
"additionalProperties": false,
"properties": {

View File

@ -107,6 +107,7 @@ export enum NodeKind {
DatabaseSchemaQuery = 'DatabaseSchemaQuery',
// AI queries
SuggestedQuestionsQuery = 'SuggestedQuestionsQuery',
TeamTaxonomyQuery = 'TeamTaxonomyQuery',
EventTaxonomyQuery = 'EventTaxonomyQuery',
}
@ -168,7 +169,7 @@ export type QuerySchema =
| SavedInsightNode
| InsightVizNode
// New queries, not yet implemented
// Classic insights
| TrendsQuery
| FunnelsQuery
| RetentionQuery
@ -180,6 +181,9 @@ export type QuerySchema =
// Misc
| DatabaseSchemaQuery
// AI
| SuggestedQuestionsQuery
// Keep this, because QuerySchema itself will be collapsed as it is used in other models
export type QuerySchemaRoot = QuerySchema
@ -1993,6 +1997,16 @@ export interface HogCompileResponse {
bytecode: any[]
}
export interface SuggestedQuestionsQuery extends DataNode<SuggestedQuestionsQueryResponse> {
kind: NodeKind.SuggestedQuestionsQuery
}
export interface SuggestedQuestionsQueryResponse {
questions: string[]
}
export type CachedSuggestedQuestionsQueryResponse = CachedQueryResponse<SuggestedQuestionsQueryResponse>
export interface TeamTaxonomyItem {
event: string
count: integer

View File

@ -36,10 +36,42 @@ const Template = ({ sessionId }: { sessionId: string }): JSX.Element => {
}
// Story: Max's welcome screen, with the suggestions query mocked to resolve immediately.
export const Welcome: StoryFn = () => {
    useStorybookMocks({
        post: {
            // Mock the generic query endpoint so the SuggestedQuestionsQuery resolves with fixed questions
            '/api/projects/:team_id/query/': () => [
                200,
                {
                    questions: [
                        'What are my most popular pages?',
                        'Where are my users located?',
                        'Who are the biggest customers?',
                        'Which feature drives most usage?',
                    ],
                },
            ],
        },
    })
    // Fixed session ID keeps snapshots deterministic across story runs
    const sessionId = 'd210b263-8521-4c5b-b3c4-8e0348df574b'
    return <Template sessionId={sessionId} />
}
// Story: Max's welcome screen while suggestions are still loading (skeleton state).
export const WelcomeLoadingSuggestions: StoryFn = () => {
    useStorybookMocks({
        post: {
            // Never resolve the query, so the suggestions loader stays in its loading state forever
            '/api/projects/:team_id/query/': (_req, _res, ctx) => [ctx.delay('infinite')],
        },
    })
    const sessionId = 'd210b263-8521-4c5b-b3c4-8e0348df574b'
    return <Template sessionId={sessionId} />
}
WelcomeLoadingSuggestions.parameters = {
    testOptions: {
        // The loading skeletons are the subject of this story, so don't wait for them to disappear
        waitForLoadersToDisappear: false,
    },
}
export const Thread: StoryFn = () => {
const sessionId = 'd210b263-8521-4c5b-b3c4-8e0348df574b'

View File

@ -25,7 +25,7 @@ export function QuestionInput(): JSX.Element {
className={clsx(
'w-full',
!isFloating
? 'relative'
? 'w-[min(40rem,100%)] relative'
: 'max-w-200 sticky z-10 self-center p-1 mx-3 mb-3 bottom-3 border border-[var(--glass-border-3000)] rounded-[0.625rem] backdrop-blur bg-[var(--glass-bg-3000)]'
)}
>

View File

@ -1,27 +1,60 @@
import { IconArrowUpRight } from '@posthog/icons'
import { LemonButton } from '@posthog/lemon-ui'
import { useActions } from 'kea'
import { IconArrowUpRight, IconShuffle } from '@posthog/icons'
import { LemonButton, LemonSkeleton } from '@posthog/lemon-ui'
import { useActions, useValues } from 'kea'
import { useEffect } from 'react'
import { maxLogic } from './maxLogic'
/** Row of AI-suggested question buttons shown under Max's input, with a shuffle control. */
export function QuestionSuggestions(): JSX.Element {
    const { visibleSuggestions, wasSuggestionLoadingInitiated, allSuggestionsLoading } = useValues(maxLogic)
    const { askMax, loadSuggestions, shuffleVisibleSuggestions } = useActions(maxLogic)

    // Kick off loading of suggestions once, on first render of this component
    useEffect(() => {
        if (!wasSuggestionLoadingInitiated) {
            loadSuggestions()
        }
    }, [wasSuggestionLoadingInitiated, loadSuggestions])

    let content: JSX.Element | JSX.Element[] | null = null
    if (visibleSuggestions) {
        // Loaded: one button per visible suggestion, plus a shuffle control at the end
        content = (
            <>
                {visibleSuggestions.map((suggestion, index) => (
                    <LemonButton
                        key={index}
                        onClick={() => askMax(suggestion)}
                        size="xsmall"
                        type="secondary"
                        sideIcon={<IconArrowUpRight />}
                    >
                        {suggestion}
                    </LemonButton>
                ))}
                <LemonButton
                    onClick={shuffleVisibleSuggestions}
                    size="xsmall"
                    type="secondary"
                    icon={<IconShuffle />}
                    tooltip="Shuffle suggestions"
                />
            </>
        )
    } else if (allSuggestionsLoading) {
        // Loading: four skeleton buttons with varied widths, so the placeholders don't look uniform
        content = Array.from({ length: 4 }).map((_, index) => (
            <LemonButton
                key={index}
                size="xsmall"
                type="secondary"
                disabled
                style={{
                    flexGrow: [2, 1.5, 3, 1][index],
                }}
            >
                <LemonSkeleton className="h-3 w-full" />
            </LemonButton>
        ))
    } // Otherwise loading failed — render nothing

    return <div className="flex items-center justify-center gap-2 min-w-full">{content}</div>
}

View File

@ -1,7 +1,9 @@
import { shuffle } from 'd3'
import { actions, kea, key, listeners, path, props, reducers, selectors } from 'kea'
import { loaders } from 'kea-loaders'
import api from 'lib/api'
import { ExperimentalAITrendsQuery } from '~/queries/schema'
import { ExperimentalAITrendsQuery, NodeKind, SuggestedQuestionsQuery } from '~/queries/schema'
import type { maxLogicType } from './maxLogicType'
@ -31,6 +33,8 @@ export const maxLogic = kea<maxLogicType>([
replaceMessage: (index: number, message: ThreadMessage) => ({ index, message }),
setMessageStatus: (index: number, status: ThreadMessage['status']) => ({ index, status }),
setQuestion: (question: string) => ({ question }),
setVisibleSuggestions: (suggestions: string[]) => ({ suggestions }),
shuffleVisibleSuggestions: true,
}),
reducers({
question: [
@ -66,8 +70,49 @@ export const maxLogic = kea<maxLogicType>([
setThreadLoaded: (_, { testOnlyOverride }) => testOnlyOverride,
},
],
wasSuggestionLoadingInitiated: [
false,
{
loadSuggestions: () => true,
},
],
visibleSuggestions: [
null as string[] | null,
{
setVisibleSuggestions: (_, { suggestions }) => suggestions,
},
],
}),
loaders({
allSuggestions: [
null as string[] | null,
{
loadSuggestions: async () => {
const response = await api.query<SuggestedQuestionsQuery>({
kind: NodeKind.SuggestedQuestionsQuery,
})
return response.questions
},
},
],
}),
listeners(({ actions, values, props }) => ({
loadSuggestionsSuccess: () => {
actions.shuffleVisibleSuggestions()
},
shuffleVisibleSuggestions: () => {
if (!values.allSuggestions) {
throw new Error('No question suggestions to shuffle')
}
const allSuggestionsWithoutCurrentlyVisible = values.allSuggestions.filter(
(suggestion) => !values.visibleSuggestions?.includes(suggestion)
)
if (!process.env.STORYBOOK) {
// Randomize order, except in Storybook where we want to keep the order consistent for snapshots
shuffle(allSuggestionsWithoutCurrentlyVisible)
}
actions.setVisibleSuggestions(allSuggestionsWithoutCurrentlyVisible.slice(0, 3))
},
askMax: async ({ prompt }) => {
actions.addMessage({ role: 'user', content: prompt })
const newIndex = values.thread.length

View File

@ -352,13 +352,16 @@ export const QUERY_TYPES_METADATA: Record<NodeKind, InsightTypeMetadata> = {
},
[NodeKind.TeamTaxonomyQuery]: {
name: 'Team Taxonomy',
description: 'View the event taxonomy of the team',
icon: IconHogQL,
inMenu: false,
},
[NodeKind.EventTaxonomyQuery]: {
name: 'Event Taxonomy',
description: 'View the events taxonomy',
icon: IconHogQL,
inMenu: false,
},
[NodeKind.SuggestedQuestionsQuery]: {
name: 'AI Suggested Questions',
icon: IconHogQL,
inMenu: false,
},

View File

@ -77,7 +77,7 @@
"@microlink/react-json-view": "^1.21.3",
"@monaco-editor/react": "4.6.0",
"@posthog/hogvm": "^1.0.54",
"@posthog/icons": "0.8.1",
"@posthog/icons": "0.8.4",
"@posthog/plugin-scaffold": "^1.4.4",
"@react-hook/size": "^2.1.2",
"@rrweb/types": "2.0.0-alpha.13",

View File

@ -53,8 +53,8 @@ dependencies:
specifier: ^1.0.54
version: 1.0.54(luxon@3.5.0)
'@posthog/icons':
specifier: 0.8.1
version: 0.8.1(react-dom@18.2.0)(react@18.2.0)
specifier: 0.8.4
version: 0.8.4(react-dom@18.2.0)(react@18.2.0)
'@posthog/plugin-scaffold':
specifier: ^1.4.4
version: 1.4.4
@ -5414,8 +5414,8 @@ packages:
luxon: 3.5.0
dev: false
/@posthog/icons@0.8.1(react-dom@18.2.0)(react@18.2.0):
resolution: {integrity: sha512-/ryXgFnWGzHmwijHE/0gQcEyAD/WkKuwf3NCMG4ibmGMpEqm/d12/+Ccuf3Zj2VZuc+0atGCHkHOiSNJ8dw97A==}
/@posthog/icons@0.8.4(react-dom@18.2.0)(react@18.2.0):
resolution: {integrity: sha512-AMCLQ0SOcLytsjavs8FlLc52oeUdkyeoatPSPONZ9QK//Xqx967Xf3ZvFxHbOxYiKFCShd996AGqBn+5nrhxOw==}
peerDependencies:
react: '>=16.14.0'
react-dom: '>=16.14.0'

View File

@ -97,22 +97,12 @@ def write_sql_from_prompt(prompt: str, *, current_query: Optional[str] = None, t
generated_valid_hogql = False
attempt_count = 0
prompt_tokens_last, completion_tokens_last = 0, 0
prompt_tokens_total, completion_tokens_total = 0, 0
for _ in range(3): # Try up to 3 times in case the generated SQL is not valid HogQL
attempt_count += 1
result = openai.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0.8,
messages=messages,
user=f"{instance_region}/{user.pk}", # The user ID is for tracking within OpenAI in case of overuse/abuse
)
content: str = ""
if result.choices[0] and result.choices[0].message.content:
content = result.choices[0].message.content.removesuffix(";")
if result.usage:
prompt_tokens_total += result.usage.prompt_tokens
completion_tokens_total += result.usage.completion_tokens
content, prompt_tokens_last, completion_tokens_last = hit_openai(messages, f"{instance_region}/{user.pk}")
prompt_tokens_total += prompt_tokens_last
completion_tokens_total += completion_tokens_last
if content.startswith(UNCLEAR_PREFIX):
error = content.removeprefix(UNCLEAR_PREFIX).strip()
break
@ -152,3 +142,20 @@ def write_sql_from_prompt(prompt: str, *, current_query: Optional[str] = None, t
return candidate_sql
else:
raise PromptUnclear(error)
def hit_openai(messages, user) -> tuple[str, int, int]:
    """Make a single OpenAI chat completion call.

    Returns a tuple of (content, prompt_tokens, completion_tokens). The content
    has any trailing ";" stripped; token counts fall back to 0 when the API
    response carries no usage data.
    """
    completion = openai.chat.completions.create(
        model="gpt-4o",
        temperature=0.8,
        messages=messages,
        user=user,  # The user ID is for tracking within OpenAI in case of overuse/abuse
    )
    first_choice = completion.choices[0]
    content: str = ""
    if first_choice and first_choice.message.content:
        content = first_choice.message.content.removesuffix(";")
    usage = completion.usage
    prompt_tokens = usage.prompt_tokens if usage else 0
    completion_tokens = usage.completion_tokens if usage else 0
    return content, prompt_tokens, completion_tokens

View File

@ -0,0 +1,92 @@
from datetime import datetime
from typing import Optional
from django.utils import timezone
from posthog.hogql.ai import hit_openai
from posthog.hogql_queries.ai.team_taxonomy_query_runner import TeamTaxonomyQueryRunner
from posthog.hogql_queries.query_runner import QueryRunner
from posthog.schema import (
CachedSuggestedQuestionsQueryResponse,
SuggestedQuestionsQuery,
SuggestedQuestionsQueryResponse,
TeamTaxonomyQuery,
)
from posthog.utils import get_instance_region
from datetime import UTC, timedelta
class SuggestedQuestionsQueryRunner(QueryRunner):
    """Generates AI-suggested analytics questions for Max, grounded in the team's event taxonomy.

    Results are served from cache via CachedSuggestedQuestionsQueryResponse and
    considered fresh for 3 days (see _is_stale).
    """

    query: SuggestedQuestionsQuery
    response: SuggestedQuestionsQueryResponse
    cached_response: CachedSuggestedQuestionsQueryResponse

    def calculate(self):
        """Ask OpenAI for 12 suggested questions based on the team's last-30-days events.

        Raises:
            ValueError: if after 3 attempts the model output never contains the
                expected "QUESTIONS:" marker.
        """
        team = self.team
        assert team.project is not None
        # Ground the prompt in real data: the team's most popular event types
        team_taxonomy_response = TeamTaxonomyQueryRunner(
            TeamTaxonomyQuery(),
            team=team,
            timings=self.timings,
            modifiers=self.modifiers,
            limit_context=self.limit_context,
            query_id=self.query_id,
        ).calculate()
        messages = [
            {
                "role": "system",
                "content": (
                    f"You are a product manager at organization {team.organization.name}, handling project {team.project.name}. "
                    f"This project was created {(timezone.now() - team.project.created_at).total_seconds() // 86400} days ago. "
                    "Your task is helping product teams understand their users. "
                    "You guide engineers so that they can make good product decisions themselves."
                ),
            },
            {
                "role": "user",
                "content": (
                    "Here's a list of event types seen in the last 30 days, most popular ones first:\n"
                    + "\n".join(f"- {e.event} ({e.count} occurrences)" for e in team_taxonomy_response.results)
                ),
            },
            {
                "role": "user",
                "content": (
                    "With this schema in mind, suggest 12 SPECIFIC AND CONCISE QUESTIONS that product teams will find insightful. "
                    'These questions must be answerable in PostHog. Do not propose placeholders such as "event X", be specific with event names.\n'
                    'Right now we can only answer questions based on the "events" table. We can use event properties. '
                    "Note that we can chart trends and create tables. AVOID anything with session duration, event sequences, and correlations.\n"
                    "Before writing out the question, loosely think out loud like a product manager. "
                    'Make sure we only propose questions we can answer with our data model. Ignore events prefixed with "$", except $pageview. '
                    'When done thinking, write "QUESTIONS:", and then the 12 questions, each in its own line, no formatting. '
                    "Don't number the questions. Questions must be human-friendly but short - you are PENALIZED $10 for every character over 20. "
                    '(Always abbreviate forms like "what\'s".)'
                ),
            },
        ]
        # Token totals are accumulated across retries; currently only tracked locally
        prompt_tokens_total, completion_tokens_total = 0, 0
        for _ in range(3):  # Try up to 3 times in case the output is malformed - though this is very unlikely
            content, prompt_tokens_last, completion_tokens_last = hit_openai(
                messages, f"{get_instance_region()}/team/{team.id}"
            )
            prompt_tokens_total += prompt_tokens_last
            completion_tokens_total += completion_tokens_last
            questions_start = content.find("QUESTIONS:")
            if questions_start == -1:
                # Marker missing - the model ignored the requested format, so retry
                continue
            # Everything after the marker, one question per non-empty line
            questions = [
                q.strip() for q in content[questions_start + len("QUESTIONS:") :].strip().split("\n") if q.strip()
            ]
            break
        else:
            raise ValueError("Persistently failed to determine questions from AI response")
        return SuggestedQuestionsQueryResponse(questions=questions)

    def _is_stale(self, last_refresh: Optional[datetime], lazy: bool = False) -> bool:
        # We don't want to regenerate suggestions more often than 3 days, as there's no point
        # NOTE(review): `lazy` is accepted for interface compatibility with the base runner
        # but intentionally has no effect here - staleness is purely age-based.
        return last_refresh is None or (datetime.now(UTC) - last_refresh) > timedelta(days=3)

    def to_query(self):
        # This runner has no HogQL representation - results come from OpenAI, not the database
        raise NotImplementedError("SuggestedQuestionsQueryRunner does not support to_query")

View File

@ -0,0 +1,18 @@
# serializer version: 1
# name: TestSuggestedQuestionsQueryRunner.test_suggested_questions_hit_openai
'''
SELECT events.event AS event,
count() AS count
FROM events
WHERE and(equals(events.team_id, 2), greaterOrEquals(toTimeZone(events.timestamp, 'UTC'), minus(now64(6, 'UTC'), toIntervalDay(30))))
GROUP BY events.event
ORDER BY count DESC
LIMIT 100 SETTINGS readonly=2,
max_execution_time=60,
allow_experimental_object_type=1,
format_csv_allow_double_quotes=0,
max_ast_elements=4000000,
max_expanded_ast_elements=4000000,
max_bytes_before_external_group_by=0
'''
# ---

View File

@ -0,0 +1,36 @@
from datetime import timedelta
from unittest.mock import patch
from django.test import override_settings
from django.utils import timezone
from posthog.hogql_queries.ai.suggested_questions_query_runner import SuggestedQuestionsQueryRunner
from posthog.schema import SuggestedQuestionsQuery
from posthog.test.base import (
APIBaseTest,
ClickhouseTestMixin,
snapshot_clickhouse_queries,
)
@override_settings(IN_UNIT_TESTING=True)
class TestSuggestedQuestionsQueryRunner(ClickhouseTestMixin, APIBaseTest):
    """Tests for SuggestedQuestionsQueryRunner: question parsing and cache staleness."""

    @snapshot_clickhouse_queries
    @patch(
        "posthog.hogql_queries.ai.suggested_questions_query_runner.hit_openai",
        # Mocked (content, prompt_tokens, completion_tokens); content includes a preamble
        # before the "QUESTIONS:" marker plus a blank line between the questions
        return_value=("Lorem ipsum. QUESTIONS:\nHow?\n\nWhy?", 21, 37),
    )
    def test_suggested_questions_hit_openai(self, hit_openai_mock):
        # The runner should strip the preamble and blank lines, keeping only the question lines
        results = SuggestedQuestionsQueryRunner(team=self.team, query=SuggestedQuestionsQuery()).calculate()
        hit_openai_mock.assert_called_once()
        self.assertEqual(results.questions, ["How?", "Why?"])

    def test_is_stale(self):
        # Suggestions are fresh for exactly 3 days; the `lazy` flag makes no difference
        date = timezone.now()
        runner = SuggestedQuestionsQueryRunner(team=self.team, query=SuggestedQuestionsQuery())
        self.assertFalse(runner._is_stale(last_refresh=date, lazy=False))
        self.assertFalse(runner._is_stale(last_refresh=date, lazy=True))
        self.assertFalse(runner._is_stale(last_refresh=date - timedelta(days=2, hours=23, minutes=59), lazy=False))
        self.assertFalse(runner._is_stale(last_refresh=date - timedelta(days=2, hours=23, minutes=59), lazy=True))
        self.assertTrue(runner._is_stale(last_refresh=date - timedelta(days=3), lazy=True))
        self.assertTrue(runner._is_stale(last_refresh=date - timedelta(days=3), lazy=False))

View File

@ -44,6 +44,7 @@ from posthog.schema import (
SamplingRate,
SessionsTimelineQuery,
StickinessQuery,
SuggestedQuestionsQuery,
TrendsQuery,
WebOverviewQuery,
WebStatsTableQuery,
@ -385,6 +386,16 @@ def get_query_runner(
modifiers=modifiers,
limit_context=limit_context,
)
if kind == "SuggestedQuestionsQuery":
from posthog.hogql_queries.ai.suggested_questions_query_runner import SuggestedQuestionsQueryRunner
return SuggestedQuestionsQueryRunner(
query=cast(SuggestedQuestionsQuery | dict[str, Any], query),
team=team,
timings=timings,
limit_context=limit_context,
modifiers=modifiers,
)
raise ValueError(f"Can't get a runner for an unknown query kind: {kind}")

View File

@ -857,6 +857,7 @@ class NodeKind(StrEnum):
EXPERIMENT_FUNNEL_QUERY = "ExperimentFunnelQuery"
EXPERIMENT_TREND_QUERY = "ExperimentTrendQuery"
DATABASE_SCHEMA_QUERY = "DatabaseSchemaQuery"
SUGGESTED_QUESTIONS_QUERY = "SuggestedQuestionsQuery"
TEAM_TAXONOMY_QUERY = "TeamTaxonomyQuery"
EVENT_TAXONOMY_QUERY = "EventTaxonomyQuery"
@ -1036,6 +1037,13 @@ class QueryResponseAlternative29(BaseModel):
results: dict[str, ExperimentVariantTrendResult]
class QueryResponseAlternative38(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
questions: list[str]
class QueryStatus(BaseModel):
model_config = ConfigDict(
extra="forbid",
@ -1256,6 +1264,13 @@ class StickinessQueryResponse(BaseModel):
)
class SuggestedQuestionsQueryResponse(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
questions: list[str]
class TaxonomicFilterGroupType(StrEnum):
METADATA = "metadata"
ACTIONS = "actions"
@ -2113,6 +2128,25 @@ class CachedStickinessQueryResponse(BaseModel):
)
class CachedSuggestedQuestionsQueryResponse(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
cache_key: str
cache_target_age: Optional[AwareDatetime] = None
calculation_trigger: Optional[str] = Field(
default=None, description="What triggered the calculation of the query, leave empty if user/immediate"
)
is_cached: bool
last_refresh: AwareDatetime
next_allowed_client_refresh: AwareDatetime
query_status: Optional[QueryStatus] = Field(
default=None, description="Query status indicates whether next to the provided data, a query is still running."
)
questions: list[str]
timezone: str
class CachedTeamTaxonomyQueryResponse(BaseModel):
model_config = ConfigDict(
extra="forbid",
@ -3867,6 +3901,17 @@ class SessionsTimelineQueryResponse(BaseModel):
)
class SuggestedQuestionsQuery(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
kind: Literal["SuggestedQuestionsQuery"] = "SuggestedQuestionsQuery"
modifiers: Optional[HogQLQueryModifiers] = Field(
default=None, description="Modifiers used when performing the query"
)
response: Optional[SuggestedQuestionsQueryResponse] = None
class TableSettings(BaseModel):
model_config = ConfigDict(
extra="forbid",
@ -5677,6 +5722,7 @@ class QueryResponseAlternative(
QueryResponseAlternative33,
QueryResponseAlternative36,
QueryResponseAlternative37,
QueryResponseAlternative38,
]
]
):
@ -5716,6 +5762,7 @@ class QueryResponseAlternative(
QueryResponseAlternative33,
QueryResponseAlternative36,
QueryResponseAlternative37,
QueryResponseAlternative38,
]
@ -6204,6 +6251,7 @@ class QueryRequest(BaseModel):
LifecycleQuery,
FunnelCorrelationQuery,
DatabaseSchemaQuery,
SuggestedQuestionsQuery,
] = Field(
...,
description=(
@ -6267,6 +6315,7 @@ class QuerySchemaRoot(
LifecycleQuery,
FunnelCorrelationQuery,
DatabaseSchemaQuery,
SuggestedQuestionsQuery,
]
]
):
@ -6305,6 +6354,7 @@ class QuerySchemaRoot(
LifecycleQuery,
FunnelCorrelationQuery,
DatabaseSchemaQuery,
SuggestedQuestionsQuery,
] = Field(..., discriminator="kind")