Skip to content

Commit

Permalink
Use OpStatsLogger for recording more details of reading from DB ledger
Browse files Browse the repository at this point in the history
  • Loading branch information
qunzhong committed Jul 25, 2024
1 parent 969367e commit 229dbc1
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 29 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -85,16 +85,16 @@ class DbLedgerStorageStats {
private final OpStatsLogger readEntryStats;
@StatsDoc(
name = READ_ENTRY_LOCATIONS_INDEX_TIME,
help = "time spent reading entries from the locations index of the db ledger storage engine",
help = "operation stats of reading entries from the locations index of the db ledger storage engine",
parent = READ_ENTRY
)
private final Counter readFromLocationIndexTime;
private final OpStatsLogger readFromLocationIndexTime;
@StatsDoc(
name = READ_ENTRYLOG_TIME,
help = "time spent reading entries from the entry log files of the db ledger storage engine",
help = "operation stats of reading entries from the entry log files of the db ledger storage engine",
parent = READ_ENTRY
)
private final Counter readFromEntryLogTime;
private final OpStatsLogger readFromEntryLogTime;
@StatsDoc(
name = WRITE_CACHE_HITS,
help = "number of write cache hits (on reads)",
Expand Down Expand Up @@ -131,9 +131,9 @@ class DbLedgerStorageStats {
private final OpStatsLogger readAheadBatchSizeStats;
@StatsDoc(
name = READAHEAD_TIME,
help = "Time spent on readahead operations"
help = "operation stats of readahead operations"
)
private final Counter readAheadTime;
private final OpStatsLogger readAheadTime;
@StatsDoc(
name = FLUSH,
help = "operation stats of flushing write cache to entry log files"
Expand Down Expand Up @@ -203,15 +203,15 @@ class DbLedgerStorageStats {
Supplier<Long> readCacheCountSupplier) {
addEntryStats = stats.getThreadScopedOpStatsLogger(ADD_ENTRY);
readEntryStats = stats.getThreadScopedOpStatsLogger(READ_ENTRY);
readFromLocationIndexTime = stats.getThreadScopedCounter(READ_ENTRY_LOCATIONS_INDEX_TIME);
readFromEntryLogTime = stats.getThreadScopedCounter(READ_ENTRYLOG_TIME);
readFromLocationIndexTime = stats.getThreadScopedOpStatsLogger(READ_ENTRY_LOCATIONS_INDEX_TIME);
readFromEntryLogTime = stats.getThreadScopedOpStatsLogger(READ_ENTRYLOG_TIME);
readCacheHitCounter = stats.getCounter(READ_CACHE_HITS);
readCacheMissCounter = stats.getCounter(READ_CACHE_MISSES);
writeCacheHitCounter = stats.getCounter(WRITE_CACHE_HITS);
writeCacheMissCounter = stats.getCounter(WRITE_CACHE_MISSES);
readAheadBatchCountStats = stats.getOpStatsLogger(READAHEAD_BATCH_COUNT);
readAheadBatchSizeStats = stats.getOpStatsLogger(READAHEAD_BATCH_SIZE);
readAheadTime = stats.getThreadScopedCounter(READAHEAD_TIME);
readAheadTime = stats.getThreadScopedOpStatsLogger(READAHEAD_TIME);
flushStats = stats.getOpStatsLogger(FLUSH);
flushEntryLogStats = stats.getOpStatsLogger(FLUSH_ENTRYLOG);
flushLocationIndexStats = stats.getOpStatsLogger(FLUSH_LOCATIONS_INDEX);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -638,17 +638,19 @@ private ByteBuf doGetEntry(long ledgerId, long entryId) throws IOException, Book

throw new NoEntryException(ledgerId, entryId);
}
} finally {
dbLedgerStorageStats.getReadFromLocationIndexTime().addLatency(
MathUtils.elapsedNanos(locationIndexStartNano), TimeUnit.NANOSECONDS);
recordSuccessfulEvent(dbLedgerStorageStats.getReadFromLocationIndexTime(), locationIndexStartNano);
} catch (NoEntryException e) {
recordFailedEvent(dbLedgerStorageStats.getReadFromLocationIndexTime(), locationIndexStartNano);
throw e;
}

long readEntryStartNano = MathUtils.nowInNano();
try {
entry = entryLogger.readEntry(ledgerId, entryId, entryLocation);
} finally {
dbLedgerStorageStats.getReadFromEntryLogTime().addLatency(
MathUtils.elapsedNanos(readEntryStartNano), TimeUnit.NANOSECONDS);
recordSuccessfulEvent(dbLedgerStorageStats.getReadFromEntryLogTime(), readEntryStartNano);
} catch (Exception e) {
recordFailedEvent(dbLedgerStorageStats.getReadFromEntryLogTime(), readEntryStartNano);
throw e;
}

readCache.put(ledgerId, entryId, entry);
Expand Down Expand Up @@ -700,11 +702,13 @@ private void fillReadAheadCache(long originalLedgerId, long firstEntryId, long f
if (log.isDebugEnabled()) {
log.debug("Exception during read ahead for ledger: {}: e", originalLedgerId, e);
}
} finally {
dbLedgerStorageStats.getReadAheadBatchCountStats().registerSuccessfulValue(count);
dbLedgerStorageStats.getReadAheadBatchSizeStats().registerSuccessfulValue(size);
dbLedgerStorageStats.getReadAheadTime().addLatency(
MathUtils.elapsedNanos(readAheadStartNano), TimeUnit.NANOSECONDS);
recordSuccessfulEvent(dbLedgerStorageStats.getReadAheadTime(),readAheadStartNano);
} finally {
dbLedgerStorageStats.getReadAheadBatchCountStats().registerFailedValue(count);
dbLedgerStorageStats.getReadAheadBatchSizeStats().registerFailedValue(size);
recordFailedEvent(dbLedgerStorageStats.getReadAheadTime(),readAheadStartNano);
}
}

Expand Down Expand Up @@ -763,21 +767,31 @@ public ByteBuf getLastEntry(long ledgerId) throws IOException, BookieException {
dbLedgerStorageStats.getWriteCacheMissCounter().inc();

// Search the last entry in storage
long lastEntryId;
long entryLocation;
long locationIndexStartNano = MathUtils.nowInNano();
long lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
if (log.isDebugEnabled()) {
log.debug("Found last entry for ledger {} in db: {}", ledgerId, lastEntryId);
}
try {
lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
if (log.isDebugEnabled()) {
log.debug("Found last entry for ledger {} in db: {}", ledgerId, lastEntryId);
}

long entryLocation = entryLocationIndex.getLocation(ledgerId, lastEntryId);
dbLedgerStorageStats.getReadFromLocationIndexTime().addLatency(
MathUtils.elapsedNanos(locationIndexStartNano), TimeUnit.NANOSECONDS);
entryLocation = entryLocationIndex.getLocation(ledgerId, lastEntryId);
recordSuccessfulEvent(dbLedgerStorageStats.getReadFromLocationIndexTime(), locationIndexStartNano);
} catch (NoEntryException e) {
recordFailedEvent(dbLedgerStorageStats.getReadFromLocationIndexTime(), locationIndexStartNano);
throw e;
}

long readEntryStartNano = MathUtils.nowInNano();
ByteBuf content = entryLogger.readEntry(ledgerId, lastEntryId, entryLocation);
dbLedgerStorageStats.getReadFromEntryLogTime().addLatency(
MathUtils.elapsedNanos(readEntryStartNano), TimeUnit.NANOSECONDS);
return content;
try {
ByteBuf content = entryLogger.readEntry(ledgerId, lastEntryId, entryLocation);
recordSuccessfulEvent(dbLedgerStorageStats.getReadFromEntryLogTime(), readEntryStartNano);
return content;
} catch (Exception e) {
recordFailedEvent(dbLedgerStorageStats.getReadFromEntryLogTime(), readEntryStartNano);
throw e;
}
}

@VisibleForTesting
Expand Down

0 comments on commit 229dbc1

Please sign in to comment.