diff --git a/apps/api/src/controllers/track.controller.ts b/apps/api/src/controllers/track.controller.ts index b5d00528e..2ee11fc98 100644 --- a/apps/api/src/controllers/track.controller.ts +++ b/apps/api/src/controllers/track.controller.ts @@ -1,6 +1,7 @@ import type { FastifyReply, FastifyRequest } from 'fastify'; import { assocPath, pathOr, pick } from 'ramda'; +import { HttpError } from '@/utils/errors'; import { generateId, slug } from '@openpanel/common'; import { generateDeviceId, parseUserAgent } from '@openpanel/common/server'; import { getProfileById, getSalts, upsertProfile } from '@openpanel/db'; @@ -187,9 +188,16 @@ export async function handler( break; } case 'identify': { + const payload = request.body.payload; const geo = await getGeoLocation(ip); + if (!payload.profileId) { + throw new HttpError('Missing profileId', { + status: 400, + }); + } + await identify({ - payload: request.body.payload, + payload, projectId, geo, ua, diff --git a/apps/start/src/hooks/use-app-context.ts b/apps/start/src/hooks/use-app-context.ts index c00230fbf..6bf67d0bd 100644 --- a/apps/start/src/hooks/use-app-context.ts +++ b/apps/start/src/hooks/use-app-context.ts @@ -17,5 +17,6 @@ export function useAppContext() { apiUrl: params.apiUrl, dashboardUrl: params.dashboardUrl, isSelfHosted: params.isSelfHosted, + isMaintenance: params.isMaintenance ?? 
false, }; } diff --git a/apps/start/src/routes/__root.tsx b/apps/start/src/routes/__root.tsx index 33adeb6fb..c76afa363 100644 --- a/apps/start/src/routes/__root.tsx +++ b/apps/start/src/routes/__root.tsx @@ -34,6 +34,7 @@ interface MyRouterContext { apiUrl: string; dashboardUrl: string; isSelfHosted: boolean; + isMaintenance: boolean; } export const Route = createRootRouteWithContext()({ diff --git a/apps/start/src/routes/_app.tsx b/apps/start/src/routes/_app.tsx index d06f21d17..2c4139d7e 100644 --- a/apps/start/src/routes/_app.tsx +++ b/apps/start/src/routes/_app.tsx @@ -1,5 +1,10 @@ +import { FullPageEmptyState } from '@/components/full-page-empty-state'; import { Sidebar } from '@/components/sidebar'; +import { Button, LinkButton, buttonVariants } from '@/components/ui/button'; +import { useAppContext } from '@/hooks/use-app-context'; +import { cn } from '@/utils/cn'; import { Outlet, createFileRoute, redirect } from '@tanstack/react-router'; +import { ConstructionIcon } from 'lucide-react'; export const Route = createFileRoute('/_app')({ beforeLoad: async ({ context }) => { @@ -11,6 +16,28 @@ export const Route = createFileRoute('/_app')({ }); function AppLayout() { + const { isMaintenance } = useAppContext(); + + if (isMaintenance) { + return ( + + + Check out our status page + + + ); + } + return (
diff --git a/apps/start/src/server/get-envs.ts b/apps/start/src/server/get-envs.ts index f921daefb..55a8fb610 100644 --- a/apps/start/src/server/get-envs.ts +++ b/apps/start/src/server/get-envs.ts @@ -8,6 +8,7 @@ export const getServerEnvs = createServerFn().handler(async () => { process.env.DASHBOARD_URL || process.env.NEXT_PUBLIC_DASHBOARD_URL, ), isSelfHosted: process.env.SELF_HOSTED !== undefined, + isMaintenance: process.env.MAINTENANCE === '1', }; return envs; diff --git a/apps/worker/src/boot-workers.ts b/apps/worker/src/boot-workers.ts index bcce6a802..55652de8d 100644 --- a/apps/worker/src/boot-workers.ts +++ b/apps/worker/src/boot-workers.ts @@ -117,7 +117,7 @@ export async function bootWorkers() { const worker = new GroupWorker({ queue, concurrency, - logger: queueLogger, + logger: process.env.NODE_ENV === 'production' ? queueLogger : undefined, blockingTimeoutSec: Number.parseFloat( process.env.EVENT_BLOCKING_TIMEOUT_SEC || '1', ), diff --git a/apps/worker/src/jobs/cron.ping.ts b/apps/worker/src/jobs/cron.ping.ts index 8252702ba..547ce83ae 100644 --- a/apps/worker/src/jobs/cron.ping.ts +++ b/apps/worker/src/jobs/cron.ping.ts @@ -1,6 +1,10 @@ import { TABLE_NAMES, chQuery } from '@openpanel/db'; export async function ping() { + if (process.env.DISABLE_PING) { + return; + } + const [res] = await chQuery<{ count: number }>( `SELECT COUNT(*) as count FROM ${TABLE_NAMES.events}`, ); diff --git a/apps/worker/src/jobs/events.create-session-end.ts b/apps/worker/src/jobs/events.create-session-end.ts index 83fac06a1..d24e554eb 100644 --- a/apps/worker/src/jobs/events.create-session-end.ts +++ b/apps/worker/src/jobs/events.create-session-end.ts @@ -96,7 +96,6 @@ export async function createSessionEnd( ...payload, properties: { ...payload.properties, - ...(session?.properties ?? 
{}), __bounce: session.is_bounce, }, name: 'session_end', diff --git a/apps/worker/src/jobs/events.incoming-events.test.ts b/apps/worker/src/jobs/events.incoming-events.test.ts index 9b3a54c70..1ee1ebb90 100644 --- a/apps/worker/src/jobs/events.incoming-events.test.ts +++ b/apps/worker/src/jobs/events.incoming-events.test.ts @@ -319,7 +319,6 @@ describe('incomingEvent', () => { utm_content: '', utm_medium: '', revenue: 0, - properties: {}, project_id: projectId, device_id: 'last-device-123', profile_id: 'profile-123', diff --git a/packages/db/code-migrations/8-order-keys.ts b/packages/db/code-migrations/8-order-keys.ts new file mode 100644 index 000000000..cb2b5bf29 --- /dev/null +++ b/packages/db/code-migrations/8-order-keys.ts @@ -0,0 +1,296 @@ +import fs from 'node:fs'; +import path from 'node:path'; +import { + chMigrationClient, + createTable, + moveDataBetweenTables, + renameTable, + runClickhouseMigrationCommands, +} from '../src/clickhouse/migration'; +import { getIsCluster } from './helpers'; + +export async function up() { + const isClustered = getIsCluster(); + + const sqls: string[] = []; + + const eventTables = createTable({ + name: 'events_new_20251123', + columns: [ + '`id` UUID DEFAULT generateUUIDv4()', + '`name` LowCardinality(String)', + '`sdk_name` LowCardinality(String)', + '`sdk_version` LowCardinality(String)', + '`device_id` String CODEC(ZSTD(3))', + '`profile_id` String CODEC(ZSTD(3))', + '`project_id` String CODEC(ZSTD(3))', + '`session_id` String CODEC(LZ4)', + '`path` String CODEC(ZSTD(3))', + '`origin` String CODEC(ZSTD(3))', + '`referrer` String CODEC(ZSTD(3))', + '`referrer_name` String CODEC(ZSTD(3))', + '`referrer_type` LowCardinality(String)', + '`revenue` UInt64', + '`duration` UInt64 CODEC(Delta(4), LZ4)', + '`properties` Map(String, String) CODEC(ZSTD(3))', + '`created_at` DateTime64(3) CODEC(DoubleDelta, ZSTD(3))', + '`country` LowCardinality(FixedString(2))', + '`city` String', + '`region` LowCardinality(String)', + 
'`longitude` Nullable(Float32) CODEC(Gorilla, LZ4)', + '`latitude` Nullable(Float32) CODEC(Gorilla, LZ4)', + '`os` LowCardinality(String)', + '`os_version` LowCardinality(String)', + '`browser` LowCardinality(String)', + '`browser_version` LowCardinality(String)', + '`device` LowCardinality(String)', + '`brand` LowCardinality(String)', + '`model` LowCardinality(String)', + '`imported_at` Nullable(DateTime) CODEC(Delta(4), LZ4)', + ], + indices: [ + 'INDEX idx_name name TYPE bloom_filter GRANULARITY 1', + "INDEX idx_properties_bounce properties['__bounce'] TYPE set(3) GRANULARITY 1", + 'INDEX idx_origin origin TYPE bloom_filter(0.05) GRANULARITY 1', + 'INDEX idx_path path TYPE bloom_filter(0.01) GRANULARITY 1', + ], + // New ORDER BY: project_id, toDate(created_at), created_at, name + // Removed profile_id, added created_at for better ordering within same day + orderBy: ['project_id', 'toDate(created_at)', 'created_at', 'name'], + partitionBy: 'toYYYYMM(created_at)', + settings: { + index_granularity: 8192, + // For lightweight updates + enable_block_offset_column: 1, + enable_block_number_column: 1, + }, + distributionHash: + 'cityHash64(project_id, toString(toStartOfHour(created_at)))', + replicatedVersion: '1', + isClustered, + }); + + // Step 1: Create temporary tables with new ORDER BY keys + // Events table with new ORDER BY + sqls.push(...eventTables); + + const sessionTables = createTable({ + name: 'sessions_new_20251123', + engine: 'VersionedCollapsingMergeTree(sign, version)', + columns: [ + '`id` String', + '`project_id` String CODEC(ZSTD(3))', + '`profile_id` String CODEC(ZSTD(3))', + '`device_id` String CODEC(ZSTD(3))', + '`created_at` DateTime64(3) CODEC(DoubleDelta, ZSTD(3))', + '`ended_at` DateTime64(3) CODEC(DoubleDelta, ZSTD(3))', + '`is_bounce` Bool', + '`entry_origin` LowCardinality(String)', + '`entry_path` String CODEC(ZSTD(3))', + '`exit_origin` LowCardinality(String)', + '`exit_path` String CODEC(ZSTD(3))', + '`screen_view_count` Int32', + 
'`revenue` Float64', + '`event_count` Int32', + '`duration` UInt32', + '`country` LowCardinality(FixedString(2))', + '`region` LowCardinality(String)', + '`city` String', + '`longitude` Nullable(Float32) CODEC(Gorilla, LZ4)', + '`latitude` Nullable(Float32) CODEC(Gorilla, LZ4)', + '`device` LowCardinality(String)', + '`brand` LowCardinality(String)', + '`model` LowCardinality(String)', + '`browser` LowCardinality(String)', + '`browser_version` LowCardinality(String)', + '`os` LowCardinality(String)', + '`os_version` LowCardinality(String)', + '`utm_medium` String CODEC(ZSTD(3))', + '`utm_source` String CODEC(ZSTD(3))', + '`utm_campaign` String CODEC(ZSTD(3))', + '`utm_content` String CODEC(ZSTD(3))', + '`utm_term` String CODEC(ZSTD(3))', + '`referrer` String CODEC(ZSTD(3))', + '`referrer_name` String CODEC(ZSTD(3))', + '`referrer_type` LowCardinality(String)', + '`sign` Int8', + '`version` UInt64', + ], + // New ORDER BY: project_id, toDate(created_at), created_at + // Removed profile_id and id, reordered to match query patterns (date first) + orderBy: ['project_id', 'toDate(created_at)', 'created_at'], + partitionBy: 'toYYYYMM(created_at)', + settings: { + index_granularity: 8192, + }, + distributionHash: + 'cityHash64(project_id, toString(toStartOfHour(created_at)))', + replicatedVersion: '1', + isClustered, + }); + + // Sessions table with new ORDER BY + sqls.push(...sessionTables); + + const firstEventDateResponse = await chMigrationClient.query({ + query: 'SELECT min(created_at) as created_at FROM events', + format: 'JSONEachRow', + }); + const firstEventDateJson = await firstEventDateResponse.json<{ + created_at: string; + }>(); + if (firstEventDateJson[0]?.created_at) { + const firstEventDate = new Date(firstEventDateJson[0]?.created_at); + // Step 2: Copy data from old tables to new tables (partitioned by month for efficiency) + // Set endDate to first of next month to ensure we capture all data in the current month + const endDate = new Date(); + 
endDate.setMonth(endDate.getMonth() + 1); + endDate.setDate(1); + + sqls.push( + ...moveDataBetweenTables({ + from: 'events', + to: 'events_new_20251123', + batch: { + startDate: firstEventDate, + endDate: endDate, + column: 'toDate(created_at)', + interval: 'month', + transform: (date: Date) => { + const year = date.getFullYear(); + const month = String(date.getMonth() + 1).padStart(2, '0'); + return `${year}-${month}-01`; + }, + }, + }), + ); + } + + const firstSessionDateResponse = await chMigrationClient.query({ + query: 'SELECT min(created_at) as created_at FROM sessions', + format: 'JSONEachRow', + }); + const firstSessionDateJson = await firstSessionDateResponse.json<{ + created_at: string; + }>(); + + if (firstSessionDateJson[0]?.created_at) { + const firstSessionDate = new Date( + firstSessionDateJson[0]?.created_at ?? '', + ); + // Set endDate to first of next month to ensure we capture all data in the current month + const endDate = new Date(); + endDate.setMonth(endDate.getMonth() + 1); + endDate.setDate(1); + + sqls.push( + ...moveDataBetweenTables({ + from: 'sessions', + to: 'sessions_new_20251123', + columns: [ + 'id', + 'project_id', + 'profile_id', + 'device_id', + 'created_at', + 'ended_at', + 'is_bounce', + 'entry_origin', + 'entry_path', + 'exit_origin', + 'exit_path', + 'screen_view_count', + 'revenue', + 'event_count', + 'duration', + 'country', + 'region', + 'city', + 'longitude', + 'latitude', + 'device', + 'brand', + 'model', + 'browser', + 'browser_version', + 'os', + 'os_version', + 'utm_medium', + 'utm_source', + 'utm_campaign', + 'utm_content', + 'utm_term', + 'referrer', + 'referrer_name', + 'referrer_type', + 'sign', + 'version', + ], + batch: { + startDate: firstSessionDate, + endDate: endDate, + column: 'toDate(created_at)', + interval: 'month', + transform: (date: Date) => { + const year = date.getFullYear(); + const month = String(date.getMonth() + 1).padStart(2, '0'); + return `${year}-${month}-01`; + }, + }, + }), + ); + } + + 
sqls.push( + ...renameTable({ from: 'events', to: 'events_20251123', isClustered }), + ); + sqls.push( + ...renameTable({ from: 'sessions', to: 'sessions_20251123', isClustered }), + ); + + if (isClustered && sessionTables[1] && eventTables[1]) { + sqls.push( + // Drop temporary DISTRIBUTED tables (will be recreated) + `DROP TABLE IF EXISTS events_new_20251123 ON CLUSTER '{cluster}'`, + `DROP TABLE IF EXISTS sessions_new_20251123 ON CLUSTER '{cluster}'`, + // Rename new tables to correct names + `RENAME TABLE events_new_20251123_replicated TO events_replicated ON CLUSTER '{cluster}'`, + `RENAME TABLE sessions_new_20251123_replicated TO sessions_replicated ON CLUSTER '{cluster}'`, + // Create new distributed tables + eventTables[1].replaceAll('events_new_20251123', 'events'), // creates a new distributed table + sessionTables[1].replaceAll('sessions_new_20251123', 'sessions'), // creates a new distributed table + ); + } else { + sqls.push( + ...renameTable({ + from: 'events_new_20251123', + to: 'events', + isClustered, + }), + ); + sqls.push( + ...renameTable({ + from: 'sessions_new_20251123', + to: 'sessions', + isClustered, + }), + ); + } + + fs.writeFileSync( + path.join(__filename.replace('.ts', '.sql')), + sqls + .map((sql) => + sql + .trim() + .replace(/;$/, '') + .replace(/\n{2,}/g, '\n') + .concat(';'), + ) + .join('\n\n---\n\n'), + ); + + if (!process.argv.includes('--dry')) { + await runClickhouseMigrationCommands(sqls); + } +} diff --git a/packages/db/code-migrations/helpers.ts b/packages/db/code-migrations/helpers.ts index 128a3eade..38e060d7e 100644 --- a/packages/db/code-migrations/helpers.ts +++ b/packages/db/code-migrations/helpers.ts @@ -30,3 +30,7 @@ export function getIsSelfHosting() { export function getIsDry() { return process.argv.includes('--dry'); } + +export function getShouldIgnoreRecord() { + return process.argv.includes('--no-record'); +} diff --git a/packages/db/code-migrations/migrate.ts b/packages/db/code-migrations/migrate.ts index 
5f3a0e653..3f4a5eac0 100644 --- a/packages/db/code-migrations/migrate.ts +++ b/packages/db/code-migrations/migrate.ts @@ -10,6 +10,7 @@ import { getIsCluster, getIsDry, getIsSelfHosting, + getShouldIgnoreRecord, printBoxMessage, } from './helpers'; @@ -55,8 +56,12 @@ async function migrate() { ]); if (!getIsSelfHosting()) { - printBoxMessage('🕒 Migrations starts in 10 seconds', []); - await new Promise((resolve) => setTimeout(resolve, 10000)); + if (!getIsDry()) { + printBoxMessage('🕒 Migrations starts in 10 seconds', []); + await new Promise((resolve) => setTimeout(resolve, 10000)); + } else { + printBoxMessage('🕒 Migrations starts now (dry run)', []); + } } if (migration) { @@ -81,7 +86,7 @@ async function runMigration(migrationsDir: string, file: string) { try { const migration = await import(path.join(migrationsDir, file)); await migration.up(); - if (!getIsDry()) { + if (!getIsDry() && !getShouldIgnoreRecord()) { await db.codeMigration.upsert({ where: { name: file, diff --git a/packages/db/src/buffers/session-buffer.ts b/packages/db/src/buffers/session-buffer.ts index 4d82eb53f..fad5ee097 100644 --- a/packages/db/src/buffers/session-buffer.ts +++ b/packages/db/src/buffers/session-buffer.ts @@ -1,6 +1,5 @@ import { type Redis, getRedisCache } from '@openpanel/redis'; -import { toDots } from '@openpanel/common'; import { getSafeJson } from '@openpanel/json'; import { assocPath, clone } from 'ramda'; import { TABLE_NAMES, ch } from '../clickhouse/client'; @@ -91,10 +90,6 @@ export class SessionBuffer extends BaseBuffer { session: newSession, }); } - newSession.properties = toDots({ - ...(event.properties || {}), - ...(newSession.properties || {}), - }); const addedRevenue = event.name === 'revenue' ? (event.revenue ?? 0) : 0; newSession.revenue = (newSession.revenue ?? 
0) + addedRevenue; @@ -168,7 +163,6 @@ export class SessionBuffer extends BaseBuffer { : '', sign: 1, version: 1, - properties: toDots(event.properties || {}), }, ]; } diff --git a/packages/db/src/clickhouse/migration.ts b/packages/db/src/clickhouse/migration.ts index 90c8a33ce..2b2241d4f 100644 --- a/packages/db/src/clickhouse/migration.ts +++ b/packages/db/src/clickhouse/migration.ts @@ -217,6 +217,7 @@ export function moveDataBetweenTables({ from, to, batch, + columns, }: { from: string; to: string; @@ -227,11 +228,15 @@ export function moveDataBetweenTables({ endDate?: Date; startDate?: Date; }; + columns?: string[]; }): string[] { const sqls: string[] = []; + // Build the SELECT clause + const selectClause = columns && columns.length > 0 ? columns.join(', ') : '*'; + if (!batch) { - return [`INSERT INTO ${to} SELECT * FROM ${from}`]; + return [`INSERT INTO ${to} SELECT ${selectClause} FROM ${from}`]; } // Start from today and go back 3 years @@ -247,32 +252,109 @@ export function moveDataBetweenTables({ let currentDate = endDate; const interval = batch.interval || 'day'; - while (currentDate > startDate) { + // Helper function to get the start of the week (Monday) for a given date + const getWeekStart = (date: Date): Date => { + const d = new Date(date); + const day = d.getDay(); + const diff = d.getDate() - day + (day === 0 ? 
-6 : 1); // Adjust to Monday + d.setDate(diff); + d.setHours(0, 0, 0, 0); // Normalize to start of day + return d; + }; + + // Helper function to compare dates based on interval + const shouldContinue = ( + current: Date, + start: Date, + intervalType: string, + ): boolean => { + if (intervalType === 'month') { + // For months, compare by year and month + // Continue if current month is >= start month + const currentYear = current.getFullYear(); + const currentMonth = current.getMonth(); + const startYear = start.getFullYear(); + const startMonth = start.getMonth(); + return ( + currentYear > startYear || + (currentYear === startYear && currentMonth >= startMonth) + ); + } + if (intervalType === 'week') { + // For weeks, compare by week start dates + const currentWeekStart = getWeekStart(current); + const startWeekStart = getWeekStart(start); + return currentWeekStart >= startWeekStart; + } + return current > start; + }; + + while (shouldContinue(currentDate, startDate, interval)) { const previousDate = new Date(currentDate); switch (interval) { case 'month': previousDate.setMonth(previousDate.getMonth() - 1); + // If we've gone below startDate's month, adjust to start of startDate's month + // This ensures we generate SQL for the month containing startDate + if ( + previousDate.getFullYear() < startDate.getFullYear() || + (previousDate.getFullYear() === startDate.getFullYear() && + previousDate.getMonth() < startDate.getMonth()) + ) { + previousDate.setFullYear(startDate.getFullYear()); + previousDate.setMonth(startDate.getMonth()); + previousDate.setDate(1); + } break; - case 'week': + case 'week': { previousDate.setDate(previousDate.getDate() - 7); - // Ensure we don't go below startDate - if (previousDate < startDate) { - previousDate.setTime(startDate.getTime()); + // If we've gone below startDate's week, adjust to start of startDate's week + const startWeekStart = getWeekStart(startDate); + const prevWeekStart = getWeekStart(previousDate); + if (prevWeekStart 
< startWeekStart) { + previousDate.setTime(startWeekStart.getTime()); } break; + } // day default: previousDate.setDate(previousDate.getDate() - 1); break; } + // For monthly/weekly intervals with transform, upperBoundDate should be currentDate + // because currentDate already represents the start of the period we're processing + // The WHERE clause uses > previousDate AND <= currentDate to get exactly one period + let upperBoundDate = currentDate; + // Don't exceed the endDate + if (upperBoundDate > endDate) { + upperBoundDate = endDate; + } + const sql = `INSERT INTO ${to} - SELECT * FROM ${from} + SELECT ${selectClause} FROM ${from} WHERE ${batch.column} > '${batch.transform ? batch.transform(previousDate) : formatClickhouseDate(previousDate, true)}' - AND ${batch.column} <= '${batch.transform ? batch.transform(currentDate) : formatClickhouseDate(currentDate, true)}'`; + AND ${batch.column} <= '${batch.transform ? batch.transform(upperBoundDate) : formatClickhouseDate(upperBoundDate, true)}'`; sqls.push(sql); + // For monthly/weekly intervals, stop if we've reached the start period + if (interval === 'month') { + const prevYear = previousDate.getFullYear(); + const prevMonth = previousDate.getMonth(); + const startYear = startDate.getFullYear(); + const startMonth = startDate.getMonth(); + if (prevYear === startYear && prevMonth === startMonth) { + break; + } + } else if (interval === 'week') { + const prevWeekStart = getWeekStart(previousDate); + const startWeekStart = getWeekStart(startDate); + if (prevWeekStart.getTime() === startWeekStart.getTime()) { + break; + } + } + currentDate = previousDate; } diff --git a/packages/db/src/services/event.service.ts b/packages/db/src/services/event.service.ts index 43324b97e..e0b5322ef 100644 --- a/packages/db/src/services/event.service.ts +++ b/packages/db/src/services/event.service.ts @@ -131,7 +131,6 @@ export function transformSessionToEvent( duration: 0, revenue: session.revenue, properties: { - 
...session.properties, is_bounce: session.is_bounce, __query: { utm_medium: session.utm_medium, @@ -628,8 +627,7 @@ export async function getEventList(options: GetEventListOptions) { } } - sb.orderBy.created_at = - 'toDate(created_at) DESC, created_at DESC, profile_id DESC, name DESC'; + sb.orderBy.created_at = 'created_at DESC'; if (custom) { custom(sb); diff --git a/packages/db/src/services/session.service.ts b/packages/db/src/services/session.service.ts index aa9fe2fbe..0d39460a5 100644 --- a/packages/db/src/services/session.service.ts +++ b/packages/db/src/services/session.service.ts @@ -1,6 +1,5 @@ import { cacheable } from '@openpanel/redis'; import type { IChartEventFilter } from '@openpanel/validation'; -import { uniq } from 'ramda'; import sqlstring from 'sqlstring'; import { TABLE_NAMES, @@ -53,7 +52,6 @@ export type IClickhouseSession = { revenue: number; sign: 1 | 0; version: number; - properties: Record; }; export interface IServiceSession { @@ -92,7 +90,6 @@ export interface IServiceSession { utmContent: string; utmTerm: string; revenue: number; - properties: Record; profile?: IServiceProfile; } @@ -144,7 +141,6 @@ export function transformSession(session: IClickhouseSession): IServiceSession { utmContent: session.utm_content, utmTerm: session.utm_term, revenue: session.revenue, - properties: session.properties, profile: undefined, }; } @@ -200,12 +196,13 @@ export async function getSessionList({ if (cursor) { const cAt = sqlstring.escape(cursor.createdAt); + // TODO: remove id from cursor const cId = sqlstring.escape(cursor.id); - sb.where.cursor = `(created_at < toDateTime64(${cAt}, 3) OR (created_at = toDateTime64(${cAt}, 3) AND id < ${cId}))`; + sb.where.cursor = `created_at < toDateTime64(${cAt}, 3)`; sb.where.cursorWindow = `created_at >= toDateTime64(${cAt}, 3) - INTERVAL ${dateIntervalInDays} DAY`; - sb.orderBy.created_at = 'toDate(created_at) DESC, created_at DESC, id DESC'; + sb.orderBy.created_at = 'created_at DESC'; } else { - 
sb.orderBy.created_at = 'toDate(created_at) DESC, created_at DESC, id DESC'; + sb.orderBy.created_at = 'created_at DESC'; sb.where.created_at = `created_at > now() - INTERVAL ${dateIntervalInDays} DAY`; } diff --git a/packages/queue/src/queues.ts b/packages/queue/src/queues.ts index e1e64645e..d21bf4e78 100644 --- a/packages/queue/src/queues.ts +++ b/packages/queue/src/queues.ts @@ -146,7 +146,7 @@ export const eventsGroupQueues = Array.from({ }).map( (_, index, list) => new GroupQueue({ - logger: queueLogger, + logger: process.env.NODE_ENV === 'production' ? queueLogger : undefined, namespace: getQueueName( list.length === 1 ? 'group_events' : `group_events_${index}`, ), diff --git a/self-hosting/docker-compose.template.yml b/self-hosting/docker-compose.template.yml index 64c16c2a7..12d9b828a 100644 --- a/self-hosting/docker-compose.template.yml +++ b/self-hosting/docker-compose.template.yml @@ -1,12 +1,12 @@ -version: '3' +version: "3" services: op-proxy: image: caddy:2-alpine restart: always ports: - - '80:80' - - '443:443' + - "80:80" + - "443:443" volumes: - op-proxy-data:/data - op-proxy-config:/config @@ -28,7 +28,7 @@ services: volumes: - op-db-data:/var/lib/postgresql/data healthcheck: - test: [ 'CMD-SHELL', 'pg_isready -U postgres' ] + test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 10s timeout: 5s retries: 5 @@ -49,9 +49,9 @@ services: restart: always volumes: - op-kv-data:/data - command: [ 'redis-server', '--maxmemory-policy', 'noeviction' ] + command: ["redis-server", "--maxmemory-policy", "noeviction"] healthcheck: - test: [ 'CMD-SHELL', 'redis-cli ping' ] + test: ["CMD-SHELL", "redis-cli ping"] interval: 10s timeout: 5s retries: 5 @@ -65,7 +65,7 @@ services: # - 6379:6379 op-ch: - image: clickhouse/clickhouse-server:24.3.2-alpine + image: clickhouse/clickhouse-server:25.10.2.65 restart: always volumes: - op-ch-data:/var/lib/clickhouse @@ -74,7 +74,7 @@ services: - 
./clickhouse/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/op-user-config.xml:ro - ./clickhouse/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh:ro healthcheck: - test: [ 'CMD-SHELL', 'clickhouse-client --query "SELECT 1"' ] + test: ["CMD-SHELL", 'clickhouse-client --query "SELECT 1"'] interval: 10s timeout: 5s retries: 5 @@ -99,7 +99,7 @@ services: pnpm start " healthcheck: - test: [ 'CMD-SHELL', 'curl -f http://localhost:3000/healthcheck || exit 1' ] + test: ["CMD-SHELL", "curl -f http://localhost:3000/healthcheck || exit 1"] interval: 10s timeout: 5s retries: 5 @@ -127,7 +127,8 @@ services: env_file: - .env healthcheck: - test: [ 'CMD-SHELL', 'curl -f http://localhost:3000/api/healthcheck || exit 1' ] + test: + ["CMD-SHELL", "curl -f http://localhost:3000/api/healthcheck || exit 1"] interval: 10s timeout: 5s retries: 5 @@ -146,7 +147,7 @@ services: env_file: - .env healthcheck: - test: [ 'CMD-SHELL', 'curl -f http://localhost:3000/healthcheck || exit 1' ] + test: ["CMD-SHELL", "curl -f http://localhost:3000/healthcheck || exit 1"] interval: 10s timeout: 5s retries: 5 @@ -171,4 +172,4 @@ volumes: op-proxy-data: driver: local op-proxy-config: - driver: local \ No newline at end of file + driver: local