Commit 3755937

HBASE-29398: Server side scan metrics for bytes read from FS vs Block cache vs memstore (#7163) (#7136)
Signed-off-by: Viraj Jasani <vjasani@apache.org>
Signed-off-by: Hari Krishna Dara <haridara@gmail.com>
1 parent dc781bd commit 3755937

19 files changed: +1870 -70 lines changed

hbase-client/src/main/java/org/apache/hadoop/hbase/client/metrics/ServerSideScanMetrics.java

Lines changed: 20 additions & 0 deletions
@@ -49,6 +49,10 @@ public void moveToNextRegion() {
     regionScanMetricsData.add(currentRegionScanMetricsData);
     currentRegionScanMetricsData.createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
     currentRegionScanMetricsData.createCounter(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME);
+    currentRegionScanMetricsData.createCounter(BYTES_READ_FROM_FS_METRIC_NAME);
+    currentRegionScanMetricsData.createCounter(BYTES_READ_FROM_BLOCK_CACHE_METRIC_NAME);
+    currentRegionScanMetricsData.createCounter(BYTES_READ_FROM_MEMSTORE_METRIC_NAME);
+    currentRegionScanMetricsData.createCounter(BLOCK_READ_OPS_COUNT_METRIC_NAME);
   }

   /**
@@ -62,6 +66,12 @@ protected AtomicLong createCounter(String counterName) {
   public static final String COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME = "ROWS_SCANNED";
   public static final String COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME = "ROWS_FILTERED";

+  public static final String BYTES_READ_FROM_FS_METRIC_NAME = "BYTES_READ_FROM_FS";
+  public static final String BYTES_READ_FROM_BLOCK_CACHE_METRIC_NAME =
+    "BYTES_READ_FROM_BLOCK_CACHE";
+  public static final String BYTES_READ_FROM_MEMSTORE_METRIC_NAME = "BYTES_READ_FROM_MEMSTORE";
+  public static final String BLOCK_READ_OPS_COUNT_METRIC_NAME = "BLOCK_READ_OPS_COUNT";
+
   /**
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    *             (<a href="https://issues.apache.org/jira/browse/HBASE-17886">HBASE-17886</a>). Use
@@ -90,6 +100,16 @@ protected AtomicLong createCounter(String counterName) {
    */
   public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);

+  public final AtomicLong bytesReadFromFs = createCounter(BYTES_READ_FROM_FS_METRIC_NAME);
+
+  public final AtomicLong bytesReadFromBlockCache =
+    createCounter(BYTES_READ_FROM_BLOCK_CACHE_METRIC_NAME);
+
+  public final AtomicLong bytesReadFromMemstore =
+    createCounter(BYTES_READ_FROM_MEMSTORE_METRIC_NAME);
+
+  public final AtomicLong blockReadOpsCount = createCounter(BLOCK_READ_OPS_COUNT_METRIC_NAME);
+
   /**
    * Sets counter with counterName to passed in value, does nothing if counter does not exist. If
    * region level scan metrics are enabled then sets the value of counter for the current region
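
The four new counters are declared on ServerSideScanMetrics, so the client-facing ScanMetrics subclass inherits them. A minimal sketch of reading them after a scan, assuming an already-connected Table against a server carrying this patch; the example uses only the long-standing scan-metrics client switch plus the fields added above:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

public class ScanIoMetricsExample {
  static void printScanIoMetrics(Table table) throws IOException {
    Scan scan = new Scan();
    // Existing client switch; the new counters ride the same mechanism.
    scan.setScanMetricsEnabled(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        // Drain the scanner; metrics accumulate region by region.
      }
      ScanMetrics metrics = scanner.getScanMetrics();
      System.out.println("bytes from FS:          " + metrics.bytesReadFromFs.get());
      System.out.println("bytes from block cache: " + metrics.bytesReadFromBlockCache.get());
      System.out.println("bytes from memstore:    " + metrics.bytesReadFromMemstore.get());
      System.out.println("block read ops:         " + metrics.blockReadOpsCount.get());
    }
  }
}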

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java

Lines changed: 17 additions & 1 deletion
@@ -20,6 +20,7 @@
 import java.io.DataInput;
 import java.io.IOException;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.BloomFilter;
@@ -120,7 +121,8 @@ private boolean containsInternal(byte[] key, int keyOffset, int keyLength, ByteB
     return result;
   }

-  private HFileBlock getBloomBlock(int block) {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public HFileBlock getBloomBlock(int block) {
     HFileBlock bloomBlock;
     try {
       // We cache the block and use a positional read.
@@ -218,4 +220,18 @@ public String toString() {
     return sb.toString();
   }

+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public HFileBlockIndex.BlockIndexReader getBloomIndex() {
+    return index;
+  }
+
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public int getHashCount() {
+    return hashCount;
+  }
+
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public Hash getHash() {
+    return hash;
+  }
 }
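
Everything exposed here is tagged LimitedPrivate(UNITTEST), i.e. widened strictly so tests can inspect bloom internals. A hedged sketch of such a test fragment; the metaInput and hfileReader fixtures are assumptions supplied by the caller, and the cast relies on BloomFilterFactory.createFromMeta (the existing factory for deserializing general bloom metadata) returning a CompoundBloomFilter:

import java.io.DataInput;
import java.io.IOException;
import org.apache.hadoop.hbase.io.hfile.CompoundBloomFilter;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.hbase.util.BloomFilterFactory;

public class BloomInternalsProbe {
  static void inspectBloom(DataInput metaInput, HFile.Reader hfileReader) throws IOException {
    CompoundBloomFilter cbf =
      (CompoundBloomFilter) BloomFilterFactory.createFromMeta(metaInput, hfileReader);
    // Accessors made public by this commit for unit-test use.
    HFileBlockIndex.BlockIndexReader bloomIndex = cbf.getBloomIndex();
    System.out.println("root index entries: " + bloomIndex.getRootBlockCount());
    System.out.println("hash count:         " + cbf.getHashCount());
    System.out.println("hash function:      " + cbf.getHash());
  }
}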

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java

Lines changed: 9 additions & 1 deletion
@@ -27,9 +27,11 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MetaCellComparator;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -408,6 +410,11 @@ public static FixedFileTrailer readFromStream(FSDataInputStream istream, long fi
     FixedFileTrailer fft = new FixedFileTrailer(majorVersion, minorVersion);
     fft.deserialize(new DataInputStream(new ByteArrayInputStream(buf.array(),
       buf.arrayOffset() + bufferSize - trailerSize, trailerSize)));
+    boolean isScanMetricsEnabled = ThreadLocalServerSideScanMetrics.isScanMetricsEnabled();
+    if (isScanMetricsEnabled) {
+      ThreadLocalServerSideScanMetrics.addBytesReadFromFs(trailerSize);
+      ThreadLocalServerSideScanMetrics.addBlockReadOpsCount(1);
+    }
     return fft;
   }
@@ -640,7 +647,8 @@ static CellComparator createComparator(String comparatorClassName) throws IOExce
     }
   }

-  CellComparator createComparator() throws IOException {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public CellComparator createComparator() throws IOException {
     expectAtLeastMajorVersion(2);
     return createComparator(comparatorClassName);
   }
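
ThreadLocalServerSideScanMetrics is new in this commit (one of the 19 files, not shown in this excerpt). Inferring only from the call sites above, a plausible minimal shape is a set of static per-thread counters that the region server handler thread bumps during reads; everything below beyond the three called method names is an assumption, not the shipped class:

import java.util.concurrent.atomic.AtomicLong;

public final class ThreadLocalServerSideScanMetrics {
  // Per-handler-thread state: collection is switched on for the duration of a
  // scan RPC and the counters are harvested when building the response.
  private static final ThreadLocal<Boolean> ENABLED = ThreadLocal.withInitial(() -> false);
  private static final ThreadLocal<AtomicLong> BYTES_READ_FROM_FS =
    ThreadLocal.withInitial(AtomicLong::new);
  private static final ThreadLocal<AtomicLong> BLOCK_READ_OPS_COUNT =
    ThreadLocal.withInitial(AtomicLong::new);

  private ThreadLocalServerSideScanMetrics() {
  }

  public static boolean isScanMetricsEnabled() {
    return ENABLED.get();
  }

  public static void addBytesReadFromFs(long bytes) {
    BYTES_READ_FROM_FS.get().addAndGet(bytes);
  }

  public static void addBlockReadOpsCount(long ops) {
    BLOCK_READ_OPS_COUNT.get().addAndGet(ops);
  }
}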

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java

Lines changed: 24 additions & 6 deletions
@@ -40,6 +40,7 @@
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
@@ -55,6 +56,7 @@
 import org.apache.hadoop.hbase.io.encoding.HFileBlockEncodingContext;
 import org.apache.hadoop.hbase.io.hfile.trace.HFileContextAttributesBuilderConsumer;
 import org.apache.hadoop.hbase.io.util.BlockIOUtils;
+import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
@@ -403,7 +405,8 @@ private static int getOnDiskSizeWithHeader(final ByteBuff headerBuf, boolean che
    * present) read by peeking into the next block's header; use as a hint when doing a read
    * of the next block when scanning or running over a file.
    */
-  int getNextBlockOnDiskSize() {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public int getNextBlockOnDiskSize() {
     return nextBlockOnDiskSize;
   }
@@ -468,7 +471,8 @@ int getOnDiskSizeWithoutHeader() {
   }

   /** Returns the uncompressed size of data part (header and checksum excluded). */
-  int getUncompressedSizeWithoutHeader() {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public int getUncompressedSizeWithoutHeader() {
     return uncompressedSizeWithoutHeader;
   }
@@ -624,7 +628,8 @@ public String toString() {
    * Retrieves the decompressed/decrypted view of this block. An encoded block remains in its
    * encoded structure. Internal structures are shared between instances where applicable.
    */
-  HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public HFileBlock unpack(HFileContext fileContext, FSReader reader) throws IOException {
     if (!fileContext.isCompressedOrEncrypted()) {
       // TODO: cannot use our own fileContext here because HFileBlock(ByteBuffer, boolean),
       // which is used for block serialization to L2 cache, does not preserve encoding and
@@ -695,7 +700,8 @@ public boolean isUnpacked() {
    * when block is returned to the cache.
    * @return the offset of this block in the file it was read from
    */
-  long getOffset() {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public long getOffset() {
     if (offset < 0) {
       throw new IllegalStateException("HFile block offset not initialized properly");
     }
@@ -1221,7 +1227,8 @@ interface BlockWritable {
    * Iterator for reading {@link HFileBlock}s in load-on-open-section, such as root data index
    * block, meta index block, file info block etc.
    */
-  interface BlockIterator {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public interface BlockIterator {
     /**
      * Get the next block, or null if there are no more blocks to iterate.
      */
@@ -1245,7 +1252,8 @@ interface BlockIterator {
   }

   /** An HFile block reader with iteration ability. */
-  interface FSReader {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public interface FSReader {
     /**
      * Reads the block at the given offset in the file with the given on-disk size and uncompressed
      * size.
@@ -1720,6 +1728,7 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
     // checksums. Can change with circumstances. The below flag is whether the
     // file has support for checksums (version 2+).
     boolean checksumSupport = this.fileContext.isUseHBaseChecksum();
+    boolean isScanMetricsEnabled = ThreadLocalServerSideScanMetrics.isScanMetricsEnabled();
     long startTime = EnvironmentEdgeManager.currentTime();
     if (onDiskSizeWithHeader == -1) {
       // The caller does not know the block size. Need to get it from the header. If header was
@@ -1736,6 +1745,9 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
         headerBuf = HEAP.allocate(hdrSize);
         readAtOffset(is, headerBuf, hdrSize, false, offset, pread);
         headerBuf.rewind();
+        if (isScanMetricsEnabled) {
+          ThreadLocalServerSideScanMetrics.addBytesReadFromFs(hdrSize);
+        }
       }
       onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf, checksumSupport);
     }
@@ -1783,6 +1795,12 @@ protected HFileBlock readBlockDataInternal(FSDataInputStream is, long offset,
       boolean readNextHeader = readAtOffset(is, onDiskBlock,
         onDiskSizeWithHeader - preReadHeaderSize, true, offset + preReadHeaderSize, pread);
       onDiskBlock.rewind(); // in case of moving position when copying a cached header
+      if (isScanMetricsEnabled) {
+        long bytesRead =
+          (onDiskSizeWithHeader - preReadHeaderSize) + (readNextHeader ? hdrSize : 0);
+        ThreadLocalServerSideScanMetrics.addBytesReadFromFs(bytesRead);
+        ThreadLocalServerSideScanMetrics.addBlockReadOpsCount(1);
+      }

       // the call to validateChecksum for this block excludes the next block header over-read, so
       // no reason to delay extracting this value.
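
A worked example of the byte accounting in the last hunk. The sizes are illustrative assumptions, not values from the patch; the point is that a pre-read header is excluded (it was already counted when first probed), while an over-read of the next block's header is counted here:

public class BlockReadAccountingExample {
  static long exampleBytesRead() {
    int hdrSize = 33;                   // v2 block header size with HBase checksums enabled
    long onDiskSizeWithHeader = 65_569; // 33-byte header + 65_536 bytes of on-disk body
    int preReadHeaderSize = hdrSize;    // header already probed and counted earlier
    boolean readNextHeader = true;      // this read also pulled the next block's header

    // Mirrors the patch: (65_569 - 33) block bytes plus the 33 over-read bytes
    // = 65_569 bytes, recorded together with exactly one block read op.
    return (onDiskSizeWithHeader - preReadHeaderSize) + (readNextHeader ? hdrSize : 0);
  }
}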

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java

Lines changed: 5 additions & 2 deletions
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue;
 import org.apache.hadoop.hbase.PrivateCellUtil;
@@ -567,7 +568,8 @@ public String toString() {
    * array of offsets to the entries within the block. This allows us to do binary search for the
    * entry corresponding to the given key without having to deserialize the block.
    */
-  static abstract class BlockIndexReader implements HeapSize {
+  @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.UNITTEST)
+  public static abstract class BlockIndexReader implements HeapSize {

     protected long[] blockOffsets;
     protected int[] blockDataSizes;
@@ -814,7 +816,8 @@ static int binarySearchNonRootIndex(Cell key, ByteBuff nonRootIndex,
    * @return the index position where the given key was found, otherwise return -1 in the case the
    *         given key is before the first key.
    */
-  static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key, CellComparator comparator) {
+  public static int locateNonRootIndexEntry(ByteBuff nonRootBlock, Cell key,
+    CellComparator comparator) {
     int entryIndex = binarySearchNonRootIndex(key, nonRootBlock, comparator);

     if (entryIndex != -1) {
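
As with CompoundBloomFilter above, these visibility changes exist for tests. A hedged fragment showing what the now-public static helper allows; the ByteBuff fixture is an assumption supplied by the caller, and the call is qualified via BlockIndexReader on the assumption that the helper stays nested there:

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparatorImpl;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.hbase.nio.ByteBuff;

public class NonRootIndexProbe {
  // Returns the entry index for probe within a deserialized non-root index block,
  // or -1 if probe sorts before the block's first key.
  static int locate(ByteBuff nonRootBlock, Cell probe) {
    return HFileBlockIndex.BlockIndexReader.locateNonRootIndexEntry(nonRootBlock, probe,
      CellComparatorImpl.COMPARATOR);
  }
}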
