Skip to content

Commit

Permalink
Do not use partial indexes for compression
Browse files Browse the repository at this point in the history
Partial indexes cover only the subset of rows matching their predicate, so ordering compression by one can silently drop the non-matching rows (see #6280).
  • Loading branch information
akuzm committed Nov 6, 2023
1 parent 2422f51 commit c34fd0b
Show file tree
Hide file tree
Showing 4 changed files with 58 additions and 1 deletion.
1 change: 1 addition & 0 deletions .unreleased/fix_partial_index
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fixes: #6280 Potential data loss when compressing a table with a partial index that matches compression order.
11 changes: 11 additions & 0 deletions tsl/src/compression/compression.c
Original file line number Diff line number Diff line change
Expand Up @@ -291,6 +291,17 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column
/* Candidate index for ordering the tuples during compression. */
Oid index_oid = lfirst_oid(lc);
Relation index_rel = index_open(index_oid, AccessShareLock);
IndexInfo *index_info = BuildIndexInfo(index_rel);

/*
 * ii_Predicate is the (List *) predicate of a partial index; compare
 * against NIL per PostgreSQL convention rather than the literal 0.
 */
if (index_info->ii_Predicate != NIL)
{
/*
 * Can't use partial indexes for compression because they refer
 * only to a subset of all rows.
 */
index_close(index_rel, AccessShareLock);
continue;
}

/* Scan directions start as NoMovement until the index keys are examined. */
int previous_direction = NoMovementScanDirection;
int current_direction = NoMovementScanDirection;

Expand Down
37 changes: 37 additions & 0 deletions tsl/test/expected/compression_indexscan.out
Original file line number Diff line number Diff line change
Expand Up @@ -912,6 +912,43 @@ SELECT decompress_chunk(show_chunks('tab1'));
(4 rows)

DROP INDEX idx_asc_null_first;
-- Can't use partial indexes for compression because they refer only to a subset of the table.
create index predicate on tab1(id, c1, time nulls first) where c2 = 0;
select count(*) from tab1;
count
-------
62400
(1 row)

select compress_chunk(show_chunks('tab1'));
INFO: compress_chunk_tuplesort_start
INFO: compress_chunk_tuplesort_start
INFO: compress_chunk_tuplesort_start
INFO: compress_chunk_tuplesort_start
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
(4 rows)

select decompress_chunk(show_chunks('tab1'));
decompress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
(4 rows)

select count(*) from tab1;
count
-------
62400
(1 row)

drop index predicate;
--Tear down
DROP TABLE tab1;
DROP TABLE tab2;
Expand Down
10 changes: 9 additions & 1 deletion tsl/test/sql/compression_indexscan.sql
Original file line number Diff line number Diff line change
Expand Up @@ -251,8 +251,16 @@ SELECT compress_chunk(show_chunks('tab1'));
SELECT decompress_chunk(show_chunks('tab1'));
DROP INDEX idx_asc_null_first;

-- Can't use partial indexes for compression because they refer only to a subset of the table.
create index predicate on tab1(id, c1, time nulls first) where c2 = 0;
select count(*) from tab1;
select compress_chunk(show_chunks('tab1'));
select decompress_chunk(show_chunks('tab1'));
select count(*) from tab1;
drop index predicate;

--Tear down
DROP TABLE tab1;
DROP TABLE tab2;
DROP TABLE tab3;
SET timescaledb.show_compression_path_info = 'off';

0 comments on commit c34fd0b

Please sign in to comment.