clickhouse get tables size
SELECT
    table,
    formatReadableSize(sum(bytes)) AS size,
    min(min_date) AS min_date,
    max(max_date) AS max_date
FROM system.parts
WHERE active
GROUP BY table
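
A variant of the same query that also groups by database (a sketch using only standard system.parts columns), so same-named tables in different databases are not merged together:

SELECT
    database,
    table,
    formatReadableSize(sum(bytes)) AS size,
    min(min_date) AS min_date,
    max(max_date) AS max_date
FROM system.parts
WHERE active
GROUP BY database, table
-- sort by the raw byte count, not the formatted string
ORDER BY sum(bytes) DESC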
More detailed, with row count and average day size. If you are using min_time and max_time:

SELECT
    table,
    formatReadableSize(size) AS size,
    rows,
    days,
    formatReadableSize(avgDaySize) AS avgDaySize
FROM (
SELECT
table,
sum(bytes) AS size,
sum(rows) AS rows,
min(min_time) AS min_time,
max(max_time) AS max_time,
toUInt32((max_time - min_time) / 86400) AS days,
size / ((max_time - min_time) / 86400) AS avgDaySize
FROM system.parts
WHERE active
GROUP BY table
ORDER BY rows DESC
)
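
Note that if all active parts of a table share one min_time and max_time, the days expression above is zero and avgDaySize divides by zero. A minimal sketch of a guard, using the built-in greatest() function:

SELECT
    table,
    formatReadableSize(sum(bytes)) AS size,
    sum(rows) AS rows,
    -- clamp the day count to at least 1 so avgDaySize never divides by zero
    greatest(toUInt32((max(max_time) - min(min_time)) / 86400), 1) AS days,
    formatReadableSize(sum(bytes) / days) AS avgDaySize
FROM system.parts
WHERE active
GROUP BY table
ORDER BY rows DESC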
I think (max_date - min_date) should be (max_date - min_date + 1).
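
Applied to the date-based query at the top, the inclusive count would look something like this (a sketch; first_date/last_date are renamed aliases to avoid clashing with the min_date/max_date columns of system.parts):

SELECT
    table,
    formatReadableSize(sum(bytes)) AS size,
    min(min_date) AS first_date,
    max(max_date) AS last_date,
    -- +1 counts both endpoints, so a single-day table reports 1 day, not 0
    toUInt32(last_date - first_date + 1) AS days
FROM system.parts
WHERE active
GROUP BY table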
With some useful data (uncomment the first line to save the query as a view):

-- create view meta.tables_info as
select concat(database, '.', table) as table,
       formatReadableSize(sum(bytes)) as size,
       sum(rows) as rows,
       max(modification_time) as latest_modification,
       sum(bytes) as bytes_size,
       any(engine) as engine,
       formatReadableSize(sum(primary_key_bytes_in_memory)) as primary_keys_size
from system.parts
where active
group by database, table
order by bytes_size desc;
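
Once the first line is uncommented and the view is created (this assumes a meta database already exists), it can be queried like any table, for example:

-- ten most recently written tables, using the view defined above
select * from meta.tables_info order by latest_modification desc limit 10;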
Thanks, great script! Here is my variant with compression ratio:

select parts.*,
columns.compressed_size,
columns.uncompressed_size,
columns.ratio
from (
select table,
formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size,
formatReadableSize(sum(data_compressed_bytes)) AS compressed_size,
sum(data_compressed_bytes) / sum(data_uncompressed_bytes) AS ratio
from system.columns
where database = currentDatabase()
group by table
) columns
right join (
select table,
sum(rows) as rows,
max(modification_time) as latest_modification,
formatReadableSize(sum(bytes)) as disk_size,
formatReadableSize(sum(primary_key_bytes_in_memory)) as primary_keys_size,
any(engine) as engine,
sum(bytes) as bytes_size
from system.parts
where active and database = currentDatabase()
group by database, table
) parts on columns.table = parts.table
order by parts.bytes_size desc;
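
A related sketch built on the same system.columns counters: a per-column breakdown showing which columns of a single table compress worst ('my_table' is a placeholder name):

select name,
       formatReadableSize(data_uncompressed_bytes) as uncompressed_size,
       formatReadableSize(data_compressed_bytes) as compressed_size,
       round(data_compressed_bytes / data_uncompressed_bytes, 3) as ratio
from system.columns
where database = currentDatabase()
  and table = 'my_table' -- placeholder: substitute your table name
order by data_compressed_bytes desc;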
Nice)