Skip to content

Commit

Permalink
Delete officially deprecated API
Browse files Browse the repository at this point in the history
  • Loading branch information
mszeszko-meta committed Jan 22, 2025
1 parent 0e469c7 commit b498b4b
Show file tree
Hide file tree
Showing 12 changed files with 2 additions and 402 deletions.
4 changes: 0 additions & 4 deletions db/c.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1858,10 +1858,6 @@ extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf_with_flags(
delete[] ranges;
}

// C API shim for the deprecated file-deletion entry point. Forwards `name`
// (a DB-relative file name, e.g. "000123.sst" or an archived WAL path) to the
// underlying ROCKSDB_NAMESPACE::DB. The Status returned by the C++ call is
// deliberately discarded: this C wrapper reports no errors to the caller.
void DEPRECATED_rocksdb_delete_file(rocksdb_t* db, const char* name) {
db->rep->DEPRECATED_DeleteFile(name);
}

const rocksdb_livefiles_t* rocksdb_livefiles(rocksdb_t* db) {
rocksdb_livefiles_t* result = new rocksdb_livefiles_t;
db->rep->GetLiveFilesMetaData(&result->rep);
Expand Down
102 changes: 0 additions & 102 deletions db/db_impl/db_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4883,108 +4883,6 @@ Status DBImpl::GetUpdatesSince(
return wal_manager_.GetUpdatesSince(seq, iter, read_options, versions_.get());
}

// Deprecated: deletes a single SST file or archived WAL file by name.
//
// Accepted inputs and outcomes:
//  - `name` must parse as a table file or a WAL file, else InvalidArgument.
//  - WAL files: only archived logs may be deleted (NotSupported otherwise);
//    deletion is delegated to wal_manager_.
//  - SST files: the file must exist in the current version, must not be
//    mid-compaction (returns OK and skips), and must be in the bottom-most
//    non-empty level — for L0 it must additionally be the oldest L0 file.
//    These checks prevent a deletion from exposing keys that are shadowed by
//    deletion tombstones in the removed file.
//
// Returns the status of the manifest write (LogAndApply) or the WAL manager
// call on the success paths described above.
Status DBImpl::DEPRECATED_DeleteFile(std::string name) {
// TODO: plumb Env::IOActivity, Env::IOPriority
const ReadOptions read_options;
const WriteOptions write_options;

uint64_t number;
FileType type;
WalFileType log_type;
// Reject anything that is not an SST file or a WAL file.
if (!ParseFileName(name, &number, &type, &log_type) ||
(type != kTableFile && type != kWalFile)) {
ROCKS_LOG_ERROR(immutable_db_options_.info_log, "DeleteFile %s failed.\n",
name.c_str());
return Status::InvalidArgument("Invalid file name");
}

// WAL branch: handled entirely by the WAL manager, no DB mutex needed.
if (type == kWalFile) {
// Only allow deleting archived log files
if (log_type != kArchivedLogFile) {
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"DeleteFile %s failed - not archived log.\n",
name.c_str());
return Status::NotSupported("Delete only supported for archived logs");
}
Status status = wal_manager_.DeleteFile(name, number);
if (!status.ok()) {
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
"DeleteFile %s failed -- %s.\n", name.c_str(),
status.ToString().c_str());
}
return status;
}

// SST branch: validate under the DB mutex, record the deletion in a
// VersionEdit, then purge the physical file after releasing the lock.
Status status;
int level;
FileMetaData* metadata;
ColumnFamilyData* cfd;
VersionEdit edit;
JobContext job_context(next_job_id_.fetch_add(1), true);
{
InstrumentedMutexLock l(&mutex_);
status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd);
if (!status.ok()) {
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s failed. File not found\n", name.c_str());
job_context.Clean();
return Status::InvalidArgument("File not found");
}
assert(level < cfd->NumberLevels());

// If the file is being compacted no need to delete.
if (metadata->being_compacted) {
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"DeleteFile %s Skipped. File about to be compacted\n",
name.c_str());
job_context.Clean();
return Status::OK();
}

// Only the files in the last level can be deleted externally.
// This is to make sure that any deletion tombstones are not
// lost. Check that the level passed is the last level.
// (Note: "vstoreage" is a pre-existing identifier typo kept as-is.)
auto* vstoreage = cfd->current()->storage_info();
for (int i = level + 1; i < cfd->NumberLevels(); i++) {
if (vstoreage->NumLevelFiles(i) != 0) {
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s FAILED. File not in last level\n",
name.c_str());
job_context.Clean();
return Status::InvalidArgument("File not in last level");
}
}
// if level == 0, it has to be the oldest file
if (level == 0 &&
vstoreage->LevelFiles(0).back()->fd.GetNumber() != number) {
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"DeleteFile %s failed ---"
" target file in level 0 must be the oldest.",
name.c_str());
job_context.Clean();
return Status::InvalidArgument("File in level 0, but not oldest");
}
// Record the logical deletion in the manifest for this column family.
edit.SetColumnFamily(cfd->GetID());
edit.DeleteFile(level, number);
status = versions_->LogAndApply(cfd, read_options, write_options, &edit,
&mutex_, directories_.GetDbDir());
if (status.ok()) {
// Publish the new version so readers stop referencing the file.
InstallSuperVersionAndScheduleWork(
cfd, job_context.superversion_contexts.data());
}
// Collect now-obsolete files (including the one just dropped) while still
// holding the mutex; the physical purge happens below, outside the lock.
FindObsoleteFiles(&job_context, false);
} // lock released here

LogFlush(immutable_db_options_.info_log);
// remove files outside the db-lock
if (job_context.HaveSomethingToDelete()) {
// Call PurgeObsoleteFiles() without holding mutex.
PurgeObsoleteFiles(job_context);
}
job_context.Clean();
return status;
}

Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
const RangePtr* ranges, size_t n,
bool include_end) {
Expand Down
1 change: 0 additions & 1 deletion db/db_impl/db_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -535,7 +535,6 @@ class DBImpl : public DB {
SequenceNumber seq_number, std::unique_ptr<TransactionLogIterator>* iter,
const TransactionLogIterator::ReadOptions& read_options =
TransactionLogIterator::ReadOptions()) override;
Status DEPRECATED_DeleteFile(std::string name) override;
Status DeleteFilesInRanges(ColumnFamilyHandle* column_family,
const RangePtr* ranges, size_t n,
bool include_end = true);
Expand Down
31 changes: 0 additions & 31 deletions db/db_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3409,10 +3409,6 @@ class ModelDB : public DB {
return Status::NotSupported();
}

// ModelDB stub: the in-memory model DB tracks no physical files, so the
// deprecated deletion request is a no-op that always reports success.
Status DEPRECATED_DeleteFile(std::string /*name*/) override {
return Status::OK();
}

Status GetUpdatesSince(
ROCKSDB_NAMESPACE::SequenceNumber,
std::unique_ptr<ROCKSDB_NAMESPACE::TransactionLogIterator>*,
Expand Down Expand Up @@ -5363,33 +5359,6 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
num_block_compressed =
options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED);
ASSERT_GT(num_block_compressed, 0);

// Make sure data in files in L3 is not compacted by removing all files
// in L4 and calculate number of rows
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
ColumnFamilyMetaData cf_meta;
db_->GetColumnFamilyMetaData(&cf_meta);
for (const auto& file : cf_meta.levels[4].files) {
listener->SetExpectedFileName(dbname_ + file.name);
ASSERT_OK(dbfull()->DEPRECATED_DeleteFile(file.name));
}
listener->VerifyMatchedCount(cf_meta.levels[4].files.size());

int num_keys = 0;
std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
num_keys++;
}
ASSERT_OK(iter->status());

ASSERT_EQ(NumTableFilesAtLevel(1), 0);
ASSERT_EQ(NumTableFilesAtLevel(2), 0);
ASSERT_GE(NumTableFilesAtLevel(3), 1);
ASSERT_EQ(NumTableFilesAtLevel(4), 0);

ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U + num_keys * 10U);
}

TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
Expand Down
190 changes: 0 additions & 190 deletions db/deletefile_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -135,57 +135,6 @@ class DeleteFileTest : public DBTestBase {
}
};

// Verifies DEPRECATED_DeleteFile's level checks on a two-level LSM:
// a nonexistent file and an intermediate-level (L1) file are rejected with
// InvalidArgument, while the bottom-level (L2) file can be deleted.
TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
Options options = CurrentOptions();
SetOptions(&options);
Destroy(options);
options.create_if_missing = true;
Reopen(options);

CreateTwoLevels();
std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata);

std::string level1file;
int level1keycount = 0;
std::string level2file;
int level2keycount = 0;
int level1index = 0;
int level2index = 1;

// Exactly two live files are expected; determine which is L1 vs L2 since
// GetLiveFilesMetaData does not guarantee ordering.
ASSERT_EQ((int)metadata.size(), 2);
if (metadata[0].level == 2) {
level1index = 1;
level2index = 0;
}

// Derive per-file key counts from the [smallest, largest] key range; keys
// are numeric strings here so atoi on the bounds is valid.
level1file = metadata[level1index].name;
int startkey = atoi(metadata[level1index].smallestkey.c_str());
int endkey = atoi(metadata[level1index].largestkey.c_str());
level1keycount = (endkey - startkey + 1);
level2file = metadata[level2index].name;
startkey = atoi(metadata[level2index].smallestkey.c_str());
endkey = atoi(metadata[level2index].largestkey.c_str());
level2keycount = (endkey - startkey + 1);

// Controlled setup. Levels 1 and 2 should both have 50K files.
// This is a little fragile as it depends on the current
// compaction heuristics.
ASSERT_EQ(level1keycount, 50000);
ASSERT_EQ(level2keycount, 50000);

// A file that does not exist in the DB must be rejected.
Status status = db_->DEPRECATED_DeleteFile("0.sst");
ASSERT_TRUE(status.IsInvalidArgument());

// intermediate level files cannot be deleted.
status = db_->DEPRECATED_DeleteFile(level1file);
ASSERT_TRUE(status.IsInvalidArgument());

// Lowest level file deletion should succeed.
status = db_->DEPRECATED_DeleteFile(level2file);
ASSERT_OK(status);
}

TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
Options options = CurrentOptions();
SetOptions(&options);
Expand Down Expand Up @@ -496,145 +445,6 @@ TEST_F(DeleteFileTest, BackgroundPurgeTestMultipleJobs) {
CheckFileTypeCounts(dbname_, 0, 1, 1);
}

// Verifies that deleting the bottom-level file while an iterator is open is
// safe: the pre-existing iterator pins the old version, so it still sees all
// 50K keys after the file's logical deletion.
TEST_F(DeleteFileTest, DeleteFileWithIterator) {
Options options = CurrentOptions();
SetOptions(&options);
Destroy(options);
options.create_if_missing = true;
Reopen(options);

CreateTwoLevels();
// Open the iterator BEFORE the deletion so it references the current
// version containing both files.
ReadOptions read_options;
Iterator* it = db_->NewIterator(read_options);
ASSERT_OK(it->status());
std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata);

std::string level2file;

// Pick whichever of the two live files is NOT on level 1 (i.e. the L2 /
// bottom-level file, the only one that may be deleted externally).
ASSERT_EQ(metadata.size(), static_cast<size_t>(2));
if (metadata[0].level == 1) {
level2file = metadata[1].name;
} else {
level2file = metadata[0].name;
}

Status status = db_->DEPRECATED_DeleteFile(level2file);
fprintf(stdout, "Deletion status %s: %s\n", level2file.c_str(),
status.ToString().c_str());
ASSERT_OK(status);
// The open iterator must still observe every key despite the deletion.
it->SeekToFirst();
int numKeysIterated = 0;
while (it->Valid()) {
numKeysIterated++;
it->Next();
}
ASSERT_EQ(numKeysIterated, 50000);
delete it;
}

// Verifies WAL handling in DEPRECATED_DeleteFile: a live (alive) log file
// cannot be deleted, but once the log is archived it can be, and the file is
// physically removed from the WAL directory.
TEST_F(DeleteFileTest, DeleteLogFiles) {
Options options = CurrentOptions();
SetOptions(&options);
Destroy(options);
options.create_if_missing = true;
Reopen(options);

AddKeys(10, 0);
VectorLogPtr logfiles;
ASSERT_OK(db_->GetSortedWalFiles(logfiles));
ASSERT_GT(logfiles.size(), 0UL);
// Take the last log file which is expected to be alive and try to delete it
// Should not succeed because live logs are not allowed to be deleted
std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
ASSERT_EQ(alive_log->Type(), kAliveLogFile);
ASSERT_OK(env_->FileExists(wal_dir_ + "/" + alive_log->PathName()));
fprintf(stdout, "Deleting alive log file %s\n",
alive_log->PathName().c_str());
ASSERT_NOK(db_->DEPRECATED_DeleteFile(alive_log->PathName()));
// The rejected deletion must leave the live log on disk.
ASSERT_OK(env_->FileExists(wal_dir_ + "/" + alive_log->PathName()));
logfiles.clear();

// Call Flush to bring about a new working log file and add more keys
// Call Flush again to flush out memtable and move alive log to archived log
// and try to delete the archived log file
FlushOptions fopts;
ASSERT_OK(db_->Flush(fopts));
AddKeys(10, 0);
ASSERT_OK(db_->Flush(fopts));
ASSERT_OK(db_->GetSortedWalFiles(logfiles));
ASSERT_GT(logfiles.size(), 0UL);
std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
ASSERT_OK(env_->FileExists(wal_dir_ + "/" + archived_log->PathName()));
fprintf(stdout, "Deleting archived log file %s\n",
archived_log->PathName().c_str());
// Archived logs are deletable; afterwards the file must be gone.
ASSERT_OK(db_->DEPRECATED_DeleteFile(archived_log->PathName()));
ASSERT_TRUE(
env_->FileExists(wal_dir_ + "/" + archived_log->PathName()).IsNotFound());
}

// Verifies DEPRECATED_DeleteFile against a non-default column family:
// the newer of two L0 files is rejected (not the oldest L0 file), the older
// one is deletable, and all 1000 distinct keys remain readable both before
// and after a DB reopen (the deleted file's data was shadowed by the newer
// file's overwrites).
TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
Options options = CurrentOptions();
SetOptions(&options);
Destroy(options);
options.create_if_missing = true;
Reopen(options);
CreateAndReopenWithCF({"new_cf"}, options);

// Write and flush the same keyspace twice into "new_cf" to produce two
// SST files with overlapping keys.
Random rnd(5);
for (int i = 0; i < 1000; ++i) {
ASSERT_OK(db_->Put(WriteOptions(), handles_[1], test::RandomKey(&rnd, 10),
test::RandomKey(&rnd, 10)));
}
ASSERT_OK(db_->Flush(FlushOptions(), handles_[1]));
for (int i = 0; i < 1000; ++i) {
ASSERT_OK(db_->Put(WriteOptions(), handles_[1], test::RandomKey(&rnd, 10),
test::RandomKey(&rnd, 10)));
}
ASSERT_OK(db_->Flush(FlushOptions(), handles_[1]));

std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata);
ASSERT_EQ(2U, metadata.size());
ASSERT_EQ("new_cf", metadata[0].column_family_name);
ASSERT_EQ("new_cf", metadata[1].column_family_name);
// Distinguish the two files by sequence number: the smaller smallest_seqno
// belongs to the older (first-flushed) file.
auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno
? metadata[0].name
: metadata[1].name;
auto new_file = metadata[0].smallest_seqno > metadata[1].smallest_seqno
? metadata[0].name
: metadata[1].name;
// Only the oldest L0 file may be deleted; the newer file is rejected.
ASSERT_TRUE(db_->DEPRECATED_DeleteFile(new_file).IsInvalidArgument());
ASSERT_OK(db_->DEPRECATED_DeleteFile(old_file));

// All 1000 keys must still be visible after deleting the old file.
{
std::unique_ptr<Iterator> itr(db_->NewIterator(ReadOptions(), handles_[1]));
ASSERT_OK(itr->status());
int count = 0;
for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
ASSERT_OK(itr->status());
++count;
}
ASSERT_OK(itr->status());
ASSERT_EQ(count, 1000);
}

// The deletion must survive a reopen (manifest edit was persisted).
Close();
ReopenWithColumnFamilies({kDefaultColumnFamilyName, "new_cf"}, options);

{
std::unique_ptr<Iterator> itr(db_->NewIterator(ReadOptions(), handles_[1]));
int count = 0;
for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
ASSERT_OK(itr->status());
++count;
}
ASSERT_OK(itr->status());
ASSERT_EQ(count, 1000);
}
}

} // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
Expand Down
3 changes: 0 additions & 3 deletions include/rocksdb/c.h
Original file line number Diff line number Diff line change
Expand Up @@ -692,9 +692,6 @@ extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf_opt(
rocksdb_compactoptions_t* opt, const char* start_key, size_t start_key_len,
const char* limit_key, size_t limit_key_len);

extern ROCKSDB_LIBRARY_API void DEPRECATED_rocksdb_delete_file(
rocksdb_t* db, const char* name);

extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles(
rocksdb_t* db);

Expand Down
Loading

0 comments on commit b498b4b

Please sign in to comment.