@tom-clickhouse
Created October 17, 2023 19:54
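The query below measures how long the part count per partition needs to settle after an insert. It looks up the most recent finished INSERT into the default.pypi table in system.query_log, then scans system.asynchronous_metric_log for the MaxPartCountForPartition metric and returns the first point after the insert finished at which the part count is at or below 3000, together with the number of seconds elapsed since the insert started. The database, table, and bucket size are parameterized in the WITH clause at the top.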
WITH
    -- Target table and the bucket size (in seconds) for the metric timeline
    'default' AS db_name,
    'pypi' AS table_name,
    1 AS interval_seconds,
    -- Most recent finished INSERT into the target table: (start time, end time)
    (
        SELECT (query_start_time, event_time)
        FROM clusterAllReplicas(default, system.query_log)
        WHERE
            has(tables, db_name || '.' || table_name)
            AND length(tables) = 1
            AND is_initial_query
            AND query_kind = 'Insert'
            AND type = 'QueryFinish'
        ORDER BY event_time_microseconds DESC
        LIMIT 1
    ) AS T0,
    (SELECT T0.1) AS query_start_time,
    (SELECT T0.2) AS query_end_time,
    -- Per-interval maximum of MaxPartCountForPartition since the insert started,
    -- plus the elapsed seconds relative to the insert's start interval
    T1 AS (
        SELECT
            toStartOfInterval(event_time, toIntervalSecond(interval_seconds)) AS t,
            dateDiff('second', toStartOfInterval(query_start_time, toIntervalSecond(interval_seconds)), t) AS seconds,
            max(value) AS parts
        FROM clusterAllReplicas(default, system.asynchronous_metric_log)
        WHERE event_time >= query_start_time
          AND metric = 'MaxPartCountForPartition'
        GROUP BY t
        ORDER BY t DESC
    )
-- First moment after the insert finished at which the part count is back at or below 3000
SELECT
    seconds,
    parts
FROM T1
WHERE t >= query_end_time AND parts <= 3000
ORDER BY seconds ASC
LIMIT 1
SETTINGS skip_unavailable_shards = 1;
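One way to run this (a sketch; the file name is just an example) is to save the statement to a file and feed it to clickhouse-client on a node of the cluster:

clickhouse-client < part_count_settle_time.sql

The skip_unavailable_shards = 1 setting lets the query still return a result if some of the replicas addressed by clusterAllReplicas are unreachable.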