diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index 0b60b6d3e68..86354542616 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -26,9 +26,12 @@
 import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.REMOTE_RPC_RETRIES_METRIC_NAME;
 import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_CALLS_METRIC_NAME;
 import static org.apache.hadoop.hbase.client.metrics.ScanMetrics.RPC_RETRIES_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME;
 import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME;
 import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME;
+import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME;
 import static org.apache.phoenix.exception.SQLExceptionCode.OPERATION_TIMED_OUT;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_MILLS_BETWEEN_NEXTS;
@@ -40,8 +43,9 @@
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_RPC_CALLS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_RPC_RETRIES;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_SCANNED_REGIONS;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_FS_READ_TIME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_PAGED_ROWS_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_SCAN_BYTES;
 import static org.apache.phoenix.util.ScanUtil.isDummy;
 
 import java.io.IOException;
@@ -165,6 +169,10 @@ private void updateMetrics() {
             changeMetric(scanMetricsHolder.getCountOfBytesScanned(),
                 scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
             changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter);
+            changeMetric(scanMetricsHolder.getCountOfBlockBytesScanned(),
+                scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
+            changeMetric(scanMetricsHolder.getFsReadTime(),
+                scanMetricsMap.get(FS_READ_TIME_METRIC_NAME));
 
             changeMetric(GLOBAL_SCAN_BYTES,
                 scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
@@ -190,8 +198,10 @@ private void updateMetrics() {
                 scanMetricsMap.get(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME));
             changeMetric(GLOBAL_HBASE_COUNT_ROWS_FILTERED,
                 scanMetricsMap.get(COUNT_OF_ROWS_FILTERED_KEY_METRIC_NAME));
-
             changeMetric(GLOBAL_PAGED_ROWS_COUNTER, dummyRowCounter);
+            changeMetric(GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED,
+                scanMetricsMap.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
+            changeMetric(GLOBAL_HBASE_FS_READ_TIME, scanMetricsMap.get(FS_READ_TIME_METRIC_NAME));
 
             scanMetricsUpdated = true;
         }
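The keys read in the ScanningResultIterator change above (BLOCK_BYTES_SCANNED_KEY_METRIC_NAME, FS_READ_TIME_METRIC_NAME) are filled in by the HBase client whenever per-scan metrics are enabled; Phoenix only copies them out of the scan's metrics map. A minimal standalone sketch of that underlying mechanism, assuming an HBase 2.5+ client where both constants and ResultScanner.getScanMetrics() are available; the table name and connection setup are illustrative only:

    import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.BLOCK_BYTES_SCANNED_KEY_METRIC_NAME;
    import static org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME;

    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class ScanMetricsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                    Table table = conn.getTable(TableName.valueOf("MY_TABLE"))) { // hypothetical table
                Scan scan = new Scan();
                scan.setScanMetricsEnabled(true); // ask HBase to track per-scan metrics
                try (ResultScanner scanner = table.getScanner(scan)) {
                    while (scanner.next() != null) {
                        // drain the scanner; metrics accumulate as rows are fetched
                    }
                    Map<String, Long> metrics = scanner.getScanMetrics().getMetricsMap();
                    System.out.println("block bytes scanned: "
                            + metrics.get(BLOCK_BYTES_SCANNED_KEY_METRIC_NAME));
                    System.out.println("fs read time: " + metrics.get(FS_READ_TIME_METRIC_NAME));
                }
            }
        }
    }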
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
index f0f071cbcfe..75c43a769df 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/GlobalClientMetrics.java
@@ -21,6 +21,8 @@
 import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_ESTIMATED_USED_SIZE;
 import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_EVICTION_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.CLIENT_METADATA_CACHE_REMOVAL_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
 import static org.apache.phoenix.monitoring.MetricType.HCONNECTIONS_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
 import static org.apache.phoenix.monitoring.MetricType.MEMORY_WAIT_TIME;
@@ -145,7 +147,8 @@ public enum GlobalClientMetrics {
     GLOBAL_HBASE_COUNT_ROWS_SCANNED(COUNT_ROWS_SCANNED),
     GLOBAL_HBASE_COUNT_ROWS_FILTERED(COUNT_ROWS_FILTERED),
     GLOBAL_HBASE_COUNTER_METADATA_INCONSISTENCY(COUNTER_METADATA_INCONSISTENCY),
-
+    GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED),
+    GLOBAL_HBASE_FS_READ_TIME(FS_READ_TIME),
     GLOBAL_HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME(HA_PARALLEL_POOL1_TASK_QUEUE_WAIT_TIME),
     GLOBAL_HA_PARALLEL_POOL1_TASK_END_TO_END_TIME(HA_PARALLEL_POOL1_TASK_END_TO_END_TIME),
     GLOBAL_HA_PARALLEL_POOL1_TASK_EXECUTION_TIME(HA_PARALLEL_POOL1_TASK_EXECUTION_TIME),
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
index d66fb0e19df..f13fc6a98fe 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
@@ -186,6 +186,11 @@ public enum MetricType {
     TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS("tsistrc", "Time spent in RPC calls for systemTable lookup",
             LogLevel.DEBUG,PLong.INSTANCE),
 
+    COUNT_BLOCK_BYTES_SCANNED("bbs", "Count of Block Bytes Scanned",
+            LogLevel.DEBUG,PLong.INSTANCE),
+
+    FS_READ_TIME("fsrd", "Time spent in filesystem reads", LogLevel.DEBUG,PLong.INSTANCE),
+
     //HA Related Metrics
     HA_PARALLEL_COUNT_OPERATIONS_ACTIVE_CLUSTER("hpoac","Number of Operations to the active cluster",LogLevel.DEBUG,PLong.INSTANCE),
     HA_PARALLEL_COUNT_OPERATIONS_STANDBY_CLUSTER("hposc","Number of Operations to the standby cluster",LogLevel.DEBUG,PLong.INSTANCE),
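The two enum entries added to GlobalClientMetrics surface the counters as JVM-wide aggregates across every Phoenix connection. A sketch of reading them, assuming global client metrics are enabled (QueryServices.GLOBAL_METRICS_ENABLED) and using the getMetric().getValue() accessor pattern the enum already exposes; the wrapper class is illustrative:

    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED;
    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HBASE_FS_READ_TIME;

    public class GlobalMetricsSketch {
        // Call after running queries; values aggregate over all scans in this JVM.
        public static void printNewGlobalMetrics() {
            long blockBytes = GLOBAL_HBASE_COUNT_BLOCK_BYTES_SCANNED.getMetric().getValue();
            long fsReadTime = GLOBAL_HBASE_FS_READ_TIME.getMetric().getValue();
            System.out.println("blockBytesScanned=" + blockBytes + " fsReadTime=" + fsReadTime);
        }
    }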
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
index dd0aca0659b..b27bca9e8a4 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.monitoring;
 
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
 import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_IN_REMOTE_RESULTS;
 import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_REGION_SERVER_RESULTS;
 import static org.apache.phoenix.monitoring.MetricType.COUNT_MILLS_BETWEEN_NEXTS;
@@ -28,6 +29,7 @@
 import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_CALLS;
 import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_RETRIES;
 import static org.apache.phoenix.monitoring.MetricType.COUNT_SCANNED_REGIONS;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
 import static org.apache.phoenix.monitoring.MetricType.PAGED_ROWS_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
 
@@ -53,6 +55,9 @@ public class ScanMetricsHolder {
     private final CombinableMetric countOfRowsFiltered;
     private final CombinableMetric countOfBytesScanned;
     private final CombinableMetric countOfRowsPaged;
+
+    private final CombinableMetric countOfBlockBytesScanned;
+    private final CombinableMetric fsReadTime;
     private Map<String, Long> scanMetricMap;
     private Object scan;
 
@@ -84,6 +89,8 @@ private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName,Scan sca
         countOfRowsFiltered = readMetrics.allotMetric(COUNT_ROWS_FILTERED, tableName);
         countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES,tableName);
         countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName);
+        countOfBlockBytesScanned = readMetrics.allotMetric(COUNT_BLOCK_BYTES_SCANNED, tableName);
+        fsReadTime = readMetrics.allotMetric(FS_READ_TIME, tableName);
     }
 
     public CombinableMetric getCountOfRemoteRPCcalls() {
@@ -142,6 +149,14 @@ public CombinableMetric getCountOfRowsPaged() {
         return countOfRowsPaged;
     }
 
+    public CombinableMetric getCountOfBlockBytesScanned() {
+        return countOfBlockBytesScanned;
+    }
+
+    public CombinableMetric getFsReadTime() {
+        return fsReadTime;
+    }
+
     public void setScanMetricMap(Map<String, Long> scanMetricMap) {
         this.scanMetricMap = scanMetricMap;
     }
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
index 5a1aa3dbc7e..5b3095a31ca 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/TableClientMetrics.java
@@ -23,6 +23,8 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.phoenix.monitoring.MetricType.COUNT_BLOCK_BYTES_SCANNED;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
 import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_SIZE;
 import static org.apache.phoenix.monitoring.MetricType.MUTATION_BATCH_FAILED_SIZE;
 import static org.apache.phoenix.monitoring.MetricType.MUTATION_BYTES;
@@ -140,7 +142,9 @@ public enum TableMetrics {
     TABLE_NUM_SYSTEM_TABLE_RPC_SUCCESS(NUM_SYSTEM_TABLE_RPC_SUCCESS),
     TABLE_NUM_SYSTEM_TABLE_RPC_FAILURES(NUM_SYSTEM_TABLE_RPC_FAILURES),
     TABLE_NUM_METADATA_LOOKUP_FAILURES(NUM_METADATA_LOOKUP_FAILURES),
-    TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS);
+    TABLE_TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS(TIME_SPENT_IN_SYSTEM_TABLE_RPC_CALLS),
+    TABLE_COUNT_BLOCK_BYTES_SCANNED(COUNT_BLOCK_BYTES_SCANNED),
+    TABLE_FS_READ_TIME(FS_READ_TIME);
 
     private MetricType metricType;
     private PhoenixTableMetric metric;
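ScanMetricsHolder allots both counters per table name and TableClientMetrics re-exposes them at the table level, so for a single query they surface through PhoenixRuntime.getRequestReadMetricInfo, keyed by table name and then MetricType (the same API the new tests below rely on). A sketch, assuming request-level metric collection is enabled via QueryServices.COLLECT_REQUEST_LEVEL_METRICS; the JDBC URL and table name are placeholders:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import java.util.Map;
    import org.apache.phoenix.monitoring.MetricType;
    import org.apache.phoenix.util.PhoenixRuntime;

    public class RequestMetricsSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                    Statement stmt = conn.createStatement();
                    ResultSet rs = stmt.executeQuery("SELECT * FROM MY_TABLE")) {
                while (rs.next()) {
                    // drain the results so the scan metrics are fully accumulated
                }
                // table name -> (metric type -> value), for this request only
                Map<String, Map<MetricType, Long>> byTable = PhoenixRuntime.getRequestReadMetricInfo(rs);
                for (Map.Entry<String, Map<MetricType, Long>> e : byTable.entrySet()) {
                    System.out.println(e.getKey()
                            + " blockBytesScanned=" + e.getValue().get(MetricType.COUNT_BLOCK_BYTES_SCANNED)
                            + " fsReadTime=" + e.getValue().get(MetricType.FS_READ_TIME));
                }
            }
        }
    }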
diff --git a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
index 7bdb2c0b8bb..1f51ac6ef3e 100644
--- a/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
+++ b/phoenix-core-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContextUtil.java
@@ -42,6 +42,8 @@
     public static void updateMetrics(ScannerContext src, ScannerContext dst) {
         for (Map.Entry<String, Long> entry : src.getMetrics().getMetricsMap().entrySet()) {
             dst.metrics.addToCounter(entry.getKey(), entry.getValue());
         }
+        dst.incrementBlockProgress((int) src.getBlockSizeProgress());
+        dst.getMetrics().fsReadTime.addAndGet(src.getMetrics().fsReadTime.get());
     }
 }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java
new file mode 100644
index 00000000000..5251e7af725
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/BlockBytesScannedMetricIT.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.monitoring;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class BlockBytesScannedMetricIT extends BaseTest {
+
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
+        setUpTestDriver(new ReadOnlyProps(props));
+    }
+
+    @Test
+    public void testPointLookupBlockBytesScannedMetric() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String tableName = generateUniqueName();
+        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+        stmt.execute("CREATE TABLE " + tableName
+                + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG)");
+        for (int i = 1; i <= 10; i++) {
+            String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+            stmt.execute(sql);
+        }
+        conn.commit();
+
+        String POINT_LOOKUP_QUERY = "SELECT * FROM " + tableName + " WHERE A = 9";
+
+        // read from memory, block bytes read should be 0
+        long count0 = countBlockBytesScannedFromSql(stmt, POINT_LOOKUP_QUERY);
+        Assert.assertEquals(0, count0);
+
+        // flush and clear block cache
+        TestUtil.flush(utility, TableName.valueOf(tableName));
+        TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
+
+        long count1 = countBlockBytesScannedFromSql(stmt, POINT_LOOKUP_QUERY);
+        Assert.assertTrue(count1 > 0);
+    }
+
+    @Test
+    public void testRangeScanBlockBytesScannedMetric() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String tableName = generateUniqueName();
+        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+        // create table with small block size and upsert enough rows to have at least 2 blocks
+        stmt.execute("CREATE TABLE " + tableName
+                + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG) BLOCKSIZE=200");
+        for (int i = 1; i <= 20; i++) {
+            String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+            stmt.execute(sql);
+        }
+        conn.commit();
+        TestUtil.flush(utility, TableName.valueOf(tableName));
+        TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
+
+        String RANGE_SCAN_QUERY = "SELECT * FROM " + tableName + " WHERE A > 14 AND A < 18";
+        String SERVER_FILTER_QUERY = "SELECT * FROM " + tableName + " WHERE Z > 14 AND Z < 18";
+        String SELECT_ALL_QUERY = "SELECT * FROM " + tableName;
+
+        long count1 = countBlockBytesScannedFromSql(stmt, RANGE_SCAN_QUERY);
+        Assert.assertTrue(count1 > 0);
+
+        long count2 = countBlockBytesScannedFromSql(stmt, SERVER_FILTER_QUERY);
+        Assert.assertTrue(count2 > 0);
+        // where clause has non PK column, will have to scan all rows
+        Assert.assertTrue(count2 > count1);
+
+        long count3 = countBlockBytesScannedFromSql(stmt, SELECT_ALL_QUERY);
+        Assert.assertTrue(count3 > 0);
+        // should be same as previous query which also scans all rows
+        Assert.assertEquals(count2, count3);
+    }
+
+    private long countBlockBytesScannedFromSql(Statement stmt, String sql) throws SQLException {
+        return TestUtil.getMetricFromSql(stmt, sql, MetricType.COUNT_BLOCK_BYTES_SCANNED);
+    }
+}
\ No newline at end of file
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
index d074d4a4837..2f83f6a747e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/CountRowsScannedIT.java
@@ -22,19 +22,17 @@
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Map;
 
 import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
-import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
-import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -301,32 +299,6 @@ public void testUnionAll() throws Exception {
     }
 
     private long countRowsScannedFromSql(Statement stmt, String sql) throws SQLException {
-        ResultSet rs = stmt.executeQuery(sql);
-        while (rs.next()) {
-            // loop to the end
-        }
-        return getRowsScanned(rs);
-    }
-
-    private long getRowsScanned(ResultSet rs) throws SQLException {
-        if (!(rs instanceof PhoenixResultSet)) {
-            return -1;
-        }
-        Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
-
-        long sum = 0;
-        boolean valid = false;
-        for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
-            Long val = entry.getValue().get(MetricType.COUNT_ROWS_SCANNED);
-            if (val != null) {
-                sum += val.longValue();
-                valid = true;
-            }
-        }
-        if (valid) {
-            return sum;
-        } else {
-            return -1;
-        }
+        return TestUtil.getMetricFromSql(stmt, sql, MetricType.COUNT_ROWS_SCANNED);
     }
 }
\ No newline at end of file
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java
new file mode 100644
index 00000000000..3e134dfdf40
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/FSReadTimeMetricIT.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.phoenix.monitoring;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class FSReadTimeMetricIT extends BaseTest {
+    @BeforeClass
+    public static synchronized void doSetup() throws Exception {
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
+        props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
+        setUpTestDriver(new ReadOnlyProps(props));
+    }
+
+    @Test
+    public void testFsReadTimeMetric() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        String tableName = generateUniqueName();
+        PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
+        // create table with small block size and upsert enough rows to have at least 2 blocks
+        stmt.execute("CREATE TABLE " + tableName
+                + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG) BLOCKSIZE=200");
+        for (int i = 1; i <= 20; i++) {
+            String sql = String.format("UPSERT INTO %s VALUES (%d, %d)", tableName, i, i);
+            stmt.execute(sql);
+        }
+        conn.commit();
+        String SELECT_ALL_QUERY = "SELECT * FROM " + tableName;
+
+        // read from memory
+        long time0 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+        Assert.assertEquals(0, time0);
+
+        // flush and clear cache
+        TestUtil.flush(utility, TableName.valueOf(tableName));
+        TestUtil.clearBlockCache(utility, TableName.valueOf(tableName));
+
+        // read from disk
+        long time1 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+        Assert.assertTrue(time1 > 0);
+
+        // read from cache
+        long time2 = getFsReadTimeFromSql(stmt, SELECT_ALL_QUERY);
+        Assert.assertEquals(0, time2);
+    }
+
+    private long getFsReadTimeFromSql(Statement stmt, String sql) throws SQLException {
+        return TestUtil.getMetricFromSql(stmt, sql, MetricType.FS_READ_TIME);
+    }
+}
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
index 6ea2a2eb656..ddf828859df 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/util/TestUtil.java
@@ -129,6 +129,7 @@
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.monitoring.MetricType;
 import org.apache.phoenix.parse.FilterableStatement;
 import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
@@ -816,6 +817,11 @@ public static void flush(HBaseTestingUtility utility, TableName table) throws IO
         admin.flush(table);
     }
 
+    public static void clearBlockCache(HBaseTestingUtility utility, TableName table) throws IOException {
+        Admin admin = utility.getAdmin();
+        admin.clearBlockCache(table);
+    }
+
     public static void minorCompact(HBaseTestingUtility utility, TableName table)
             throws IOException, InterruptedException {
         try {
@@ -1450,4 +1456,25 @@ public static Path createTempDirectory() throws IOException {
         return Files.createTempDirectory(Paths.get(System.getProperty("java.io.tmpdir")), null);
     }
 
+    public static long getMetricFromSql(Statement stmt, String sql, MetricType metric)
+            throws SQLException {
+        ResultSet rs = stmt.executeQuery(sql);
+        while (rs.next()) { /* drain all rows so the read metrics are complete */ }
+        Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
+
+        long sum = 0;
+        boolean valid = false;
+        for (Map.Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
+            Long val = entry.getValue().get(metric);
+            if (val != null) {
+                sum += val.longValue();
+                valid = true;
+            }
+        }
+        if (valid) {
+            return sum;
+        } else {
+            return -1;
+        }
+    }
 }
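Together, the new TestUtil helpers give integration tests a repeatable cold-read recipe: upsert, flush the memstore to HFiles, evict the block cache, then measure. A condensed sketch of that pattern as it could appear in another test extending BaseTest (the method name is hypothetical; utility, getUrl() and generateUniqueName() come from the test harness):

    @Test
    public void coldReadPopulatesNewMetrics() throws Exception { // hypothetical test
        String tableName = generateUniqueName();
        try (Connection conn = DriverManager.getConnection(getUrl());
                Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE " + tableName
                    + " (A UNSIGNED_LONG NOT NULL PRIMARY KEY, Z UNSIGNED_LONG)");
            stmt.execute("UPSERT INTO " + tableName + " VALUES (1, 1)");
            conn.commit();

            TestUtil.flush(utility, TableName.valueOf(tableName));           // persist memstore to HFiles
            TestUtil.clearBlockCache(utility, TableName.valueOf(tableName)); // force the next read to hit disk

            String query = "SELECT * FROM " + tableName;
            Assert.assertTrue(TestUtil.getMetricFromSql(stmt, query,
                    MetricType.COUNT_BLOCK_BYTES_SCANNED) > 0);
            // FS read time can legitimately round down to 0 on a fast disk, so only
            // assert that the metric is present and non-negative here.
            Assert.assertTrue(TestUtil.getMetricFromSql(stmt, query,
                    MetricType.FS_READ_TIME) >= 0);
        }
    }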