diff --git a/Changes.md b/Changes.md
index ac7cf3e8..7c599ec8 100644
--- a/Changes.md
+++ b/Changes.md
@@ -1,3 +1,9 @@
+## 1.8.1
+
+* On very large databases, the calculation to determine the search tree
+  size could overflow. This was fixed and several additional guards
+  against overflows were added. Reported by Sami Salonen. GitHub #335.
+
 ## 1.8.0 - 2023-11-07
 
 * `PACKAGE_VERSION` is now a private compile definition when building
diff --git a/src/data-pool.c b/src/data-pool.c
index 7d47ddff..1a9f9d03 100644
--- a/src/data-pool.c
+++ b/src/data-pool.c
@@ -9,8 +9,6 @@
 #include
 #include
 
-static bool can_multiply(size_t const, size_t const, size_t const);
-
 // Allocate an MMDB_data_pool_s. It initially has space for size
 // MMDB_entry_data_list_s structs.
 MMDB_data_pool_s *data_pool_new(size_t const size) {
@@ -43,7 +41,7 @@ MMDB_data_pool_s *data_pool_new(size_t const size) {
 // the given max. max will typically be SIZE_MAX.
 //
 // We want to know if we'll wrap around.
-static bool can_multiply(size_t const max, size_t const m, size_t const n) {
+bool can_multiply(size_t const max, size_t const m, size_t const n) {
     if (m == 0) {
         return false;
     }
diff --git a/src/data-pool.h b/src/data-pool.h
index 25d09923..9e61b768 100644
--- a/src/data-pool.h
+++ b/src/data-pool.h
@@ -44,6 +44,7 @@ typedef struct MMDB_data_pool_s {
     MMDB_entry_data_list_s *blocks[DATA_POOL_NUM_BLOCKS];
 } MMDB_data_pool_s;
 
+bool can_multiply(size_t const, size_t const, size_t const);
 MMDB_data_pool_s *data_pool_new(size_t const);
 void data_pool_destroy(MMDB_data_pool_s *const);
 MMDB_entry_data_list_s *data_pool_alloc(MMDB_data_pool_s *const);
diff --git a/src/maxminddb.c b/src/maxminddb.c
index 4349824e..7da07854 100644
--- a/src/maxminddb.c
+++ b/src/maxminddb.c
@@ -23,6 +23,7 @@
 #endif
 #include
 #include
+#define SSIZE_MAX INTPTR_MAX
 typedef ADDRESS_FAMILY sa_family_t;
 #else
 #include
@@ -289,18 +290,29 @@ int MMDB_open(const char *const filename, uint32_t flags, MMDB_s *const mmdb) {
         goto cleanup;
     }
 
-    uint32_t search_tree_size =
-        mmdb->metadata.node_count * mmdb->full_record_byte_size;
+    if (!can_multiply(SSIZE_MAX,
+                      mmdb->metadata.node_count,
+                      mmdb->full_record_byte_size)) {
+        status = MMDB_INVALID_METADATA_ERROR;
+        goto cleanup;
+    }
+    ssize_t search_tree_size = (ssize_t)mmdb->metadata.node_count *
+                               (ssize_t)mmdb->full_record_byte_size;
 
     mmdb->data_section =
         mmdb->file_content + search_tree_size + MMDB_DATA_SECTION_SEPARATOR;
-    if (search_tree_size + MMDB_DATA_SECTION_SEPARATOR >
-        (uint32_t)mmdb->file_size) {
+    if (mmdb->file_size < MMDB_DATA_SECTION_SEPARATOR ||
+        search_tree_size > mmdb->file_size - MMDB_DATA_SECTION_SEPARATOR) {
+        status = MMDB_INVALID_METADATA_ERROR;
+        goto cleanup;
+    }
+    ssize_t data_section_size =
+        mmdb->file_size - search_tree_size - MMDB_DATA_SECTION_SEPARATOR;
+    if (data_section_size > UINT32_MAX || data_section_size <= 0) {
         status = MMDB_INVALID_METADATA_ERROR;
         goto cleanup;
     }
-    mmdb->data_section_size = (uint32_t)mmdb->file_size - search_tree_size -
-                              MMDB_DATA_SECTION_SEPARATOR;
+    mmdb->data_section_size = (uint32_t)data_section_size;
 
     // Although it is likely not possible to construct a database with valid
     // valid metadata, as parsed above, and a data_section_size less than 3,
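
Note on the change above: the patch guards the search-tree-size computation in MMDB_open() by proving the multiplication cannot overflow before performing it, then bounds-checking the result against the file size before deriving data_section_size. The following is a minimal, self-contained sketch of that validation order, not the library's code: it substitutes int64_t for ssize_t for portability, hard-codes the 16-byte section separator from the MaxMind DB format, and fills in the tail of can_multiply() beyond the lines visible in the hunk, following its documented contract. The metadata values in main() are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// 16 zero bytes separate the search tree from the data section in the
// MaxMind DB format (MMDB_DATA_SECTION_SEPARATOR in maxminddb.c).
#define DATA_SECTION_SEPARATOR 16

// True if m * n is <= max, i.e. the multiplication cannot wrap. The body
// past the m == 0 check is not shown in the hunk above; this completion
// follows the documented contract ("will we wrap around?").
static bool can_multiply(size_t const max, size_t const m, size_t const n) {
    if (m == 0) {
        return false;
    }
    return n <= max / m;
}

// Sketch of the validation order the patch introduces in MMDB_open():
// guard the multiplication, check that the search tree plus separator fit
// inside the file, then check that the remainder fits the uint32_t
// data_section_size field.
static bool validate_sections(uint32_t node_count,
                              uint16_t record_bytes,
                              int64_t file_size) {
    // (size_t)INT64_MAX stands in for the patch's SSIZE_MAX, which is not
    // part of standard C.
    if (!can_multiply((size_t)INT64_MAX, node_count, record_bytes)) {
        return false; // metadata would overflow the size calculation
    }
    int64_t search_tree_size = (int64_t)node_count * (int64_t)record_bytes;

    if (file_size < DATA_SECTION_SEPARATOR ||
        search_tree_size > file_size - DATA_SECTION_SEPARATOR) {
        return false; // search tree runs past the end of the file
    }
    int64_t data_section_size =
        file_size - search_tree_size - DATA_SECTION_SEPARATOR;
    if (data_section_size > UINT32_MAX || data_section_size <= 0) {
        return false; // too large for the uint32_t field, or empty
    }
    return true;
}

int main(void) {
    // Hypothetical metadata: with the old uint32_t arithmetic,
    // 2^29 nodes * 8 record bytes wraps to 0 instead of 4 GiB.
    printf("%s\n",
           validate_sections(536870912u, 8, 1024) ? "ok" : "rejected");
    return 0;
}

The ordering is the important part: reject metadata that would overflow before multiplying, reject a search tree that would run past the end of the file before subtracting, and only then narrow the remainder into the 32-bit data_section_size field.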