[PHOENIX-7535] Capture time spent in various stages of executeQuery call #2084

Open. Wants to merge 7 commits into base: master.
@@ -124,6 +124,7 @@
import org.apache.phoenix.log.QueryLoggerUtil;
import org.apache.phoenix.log.QueryStatus;
import org.apache.phoenix.monitoring.GlobalClientMetrics;
import org.apache.phoenix.monitoring.OverAllQueryMetrics;
import org.apache.phoenix.monitoring.TableMetricsManager;
import org.apache.phoenix.optimize.Cost;
import org.apache.phoenix.parse.AddColumnStatement;
@@ -367,6 +368,7 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
clearResultSet();
PhoenixResultSet rs = null;
QueryPlan plan = null;
OverAllQueryMetrics overallQuerymetrics;
try {
PhoenixConnection conn = getConnection();
conn.checkOpen();
@@ -376,8 +378,12 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
&& stmt.getOperation() != Operation.UPGRADE) {
throw new UpgradeRequiredException();
}
final long queryPlanCreationStartTime = System.nanoTime();
Review comment (Contributor): Good to use EnvironmentEdgeManager instead of using System.nanoTime() directly as it gives us flexibility to control clock during ITs.
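A minimal sketch of that suggestion, assuming org.apache.phoenix.util.EnvironmentEdgeManager.currentTimeMillis() (millisecond granularity, with an injectable clock for ITs) and java.util.concurrent.TimeUnit; illustrative only, not part of this PR:

// Hypothetical rework of the compile-stage timing below, using EnvironmentEdgeManager so
// integration tests can inject a controllable clock. Assumes the imports named above.
final long compileStartMs = EnvironmentEdgeManager.currentTimeMillis();
plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE);
StatementContext context = plan.getContext();
overallQuerymetrics = context.getOverallQueryMetrics();
overallQuerymetrics.setQueryCompilerTimeNS(
        TimeUnit.MILLISECONDS.toNanos(EnvironmentEdgeManager.currentTimeMillis() - compileStartMs));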

plan = stmt.compilePlan(PhoenixStatement.this,
Sequence.ValueOp.VALIDATE_SEQUENCE);
StatementContext context = plan.getContext();
overallQuerymetrics = context.getOverallQueryMetrics();
overallQuerymetrics.setQueryCompilerTimeNS(System.nanoTime() - queryPlanCreationStartTime);
// Send mutations to hbase, so they are visible to subsequent reads.
// Use original plan for data table so that data and immutable indexes will be sent
// TODO: for joins, we need to iterate through all tables, but we need the original table,
@@ -387,9 +393,11 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
}
Iterator<TableRef> tableRefs = plan.getSourceRefs().iterator();
connection.getMutationState().sendUncommitted(tableRefs);
final long queryOptimizerStartTime = System.nanoTime();
plan =
connection.getQueryServices().getOptimizer()
.optimize(PhoenixStatement.this, plan);
overallQuerymetrics.setQueryOptimizerTimeNS(System.nanoTime() - queryOptimizerStartTime);
setLastQueryPlan(plan);

//verify metadata for the table/view/index in the query plan
@@ -407,6 +415,7 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
tableName = plan.getTableRef().getTable().getPhysicalName()
.toString();
}
final long queryResultItrSetStartTime = System.nanoTime();
// this will create its own trace internally, so we don't wrap this
// whole thing in tracing
ResultIterator resultIterator = plan.iterator();
@@ -415,7 +424,6 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
LOGGER.debug(LogUtil.addCustomAnnotations(
"Explain plan: " + explainPlan, connection));
}
StatementContext context = plan.getContext();
context.setQueryLogger(queryLogger);
if (queryLogger.isDebugEnabled()) {
queryLogger.log(QueryLogInfo.EXPLAIN_PLAN_I,
@@ -425,10 +433,11 @@ private PhoenixResultSet executeQuery(final CompilableStatement stmt,
context.getScan().toString() :
null);
}
context.getOverallQueryMetrics().startQuery();
overallQuerymetrics.startQuery();
rs =
newResultSet(resultIterator, plan.getProjector(),
plan.getContext());
overallQuerymetrics.setQueryResultItrSetTimeNs(System.nanoTime() - queryResultItrSetStartTime);
// newResultset sets lastResultset
setLastQueryPlan(plan);
setLastUpdateCount(NO_UPDATE);
@@ -141,6 +141,9 @@ public enum MetricType {
CACHE_REFRESH_SPLITS_COUNTER("cr", "Number of times cache was refreshed because of splits",LogLevel.DEBUG, PLong.INSTANCE),
WALL_CLOCK_TIME_MS("tq", "Wall clock time elapsed for the overall query execution",LogLevel.INFO, PLong.INSTANCE),
RESULT_SET_TIME_MS("tn", "Wall clock time elapsed for reading all records using resultSet.next()",LogLevel.INFO, PLong.INSTANCE),
QUERY_COMPILER_TIME_NS("qct","Time in nanoseconds elapsed in query compiler", LogLevel.INFO, PLong.INSTANCE),
QUERY_OPTIMIZER_TIME_NS("qot","Time in nanoseconds elapsed in query optimizer", LogLevel.INFO, PLong.INSTANCE),
QUERY_RESULT_ITR_SET_TIME_NS("qrt","Time in nanoseconds elapsed in query result iterator and set", LogLevel.INFO, PLong.INSTANCE),
OPEN_PHOENIX_CONNECTIONS_COUNTER("o", "Number of open phoenix connections",LogLevel.OFF, PLong.INSTANCE),
OPEN_INTERNAL_PHOENIX_CONNECTIONS_COUNTER("io", "Number of open internal phoenix connections",LogLevel.OFF, PLong.INSTANCE),
QUERY_SERVICES_COUNTER("cqs", "Number of ConnectionQueryServicesImpl instantiated",LogLevel.OFF, PLong.INSTANCE),
@@ -27,6 +27,9 @@
import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.RESULT_SET_TIME_MS;
import static org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_COMPILER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_OPTIMIZER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_RESULT_ITR_SET_TIME_NS;

import java.util.HashMap;
import java.util.Map;
@@ -43,6 +46,9 @@ public class OverAllQueryMetrics {
private final CombinableMetric numParallelScans;
private final CombinableMetric wallClockTimeMS;
private final CombinableMetric resultSetTimeMS;
private final CombinableMetric queryCompilerTimeNS;
private final CombinableMetric queryOptimizerTimeNS;
private final CombinableMetric queryResultItrSetTimeNs;
private final CombinableMetric queryTimedOut;
private final CombinableMetric queryPointLookupTimedOut;
private final CombinableMetric queryScanTimedOut;
@@ -56,6 +62,12 @@ public OverAllQueryMetrics(boolean isRequestMetricsEnabled, LogLevel connectionL
WALL_CLOCK_TIME_MS);
resultSetWatch = MetricUtil.getMetricsStopWatch(isRequestMetricsEnabled, connectionLogLevel,
RESULT_SET_TIME_MS);
queryCompilerTimeNS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,
connectionLogLevel, QUERY_COMPILER_TIME_NS);
queryOptimizerTimeNS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,
connectionLogLevel, QUERY_OPTIMIZER_TIME_NS);
queryResultItrSetTimeNs = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,
connectionLogLevel, QUERY_RESULT_ITR_SET_TIME_NS);
numParallelScans = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,
connectionLogLevel, NUM_PARALLEL_SCANS);
wallClockTimeMS = MetricUtil.getCombinableMetric(isRequestMetricsEnabled,
@@ -106,6 +118,18 @@ public void cacheRefreshedDueToSplits() {
cacheRefreshedDueToSplits.increment();
}

public void setQueryCompilerTimeNS(long timeNS) {
queryCompilerTimeNS.set(timeNS);
}

public void setQueryOptimizerTimeNS(long timeNS) {
queryOptimizerTimeNS.set(timeNS);
}

public void setQueryResultItrSetTimeNs(long timeNS) {
queryResultItrSetTimeNs.set(timeNS);
}

public void startQuery() {
if (!queryWatch.isRunning()) {
queryWatch.start();
@@ -147,6 +171,9 @@ public Map<MetricType, Long> publish() {
metricsForPublish.put(numParallelScans.getMetricType(), numParallelScans.getValue());
metricsForPublish.put(wallClockTimeMS.getMetricType(), wallClockTimeMS.getValue());
metricsForPublish.put(resultSetTimeMS.getMetricType(), resultSetTimeMS.getValue());
metricsForPublish.put(queryCompilerTimeNS.getMetricType(), queryCompilerTimeNS.getValue());
metricsForPublish.put(queryOptimizerTimeNS.getMetricType(), queryOptimizerTimeNS.getValue());
metricsForPublish.put(queryResultItrSetTimeNs.getMetricType(), queryResultItrSetTimeNs.getValue());
metricsForPublish.put(queryTimedOut.getMetricType(), queryTimedOut.getValue());
metricsForPublish.put(queryPointLookupTimedOut.getMetricType(), queryPointLookupTimedOut.getValue());
metricsForPublish.put(queryScanTimedOut.getMetricType(), queryScanTimedOut.getValue());
@@ -170,6 +197,9 @@ public void reset() {
cacheRefreshedDueToSplits.reset();
queryWatch.stop();
resultSetWatch.stop();
queryCompilerTimeNS.reset();
queryOptimizerTimeNS.reset();
queryResultItrSetTimeNs.reset();
}

public OverAllQueryMetrics combine(OverAllQueryMetrics metric) {
@@ -183,6 +213,9 @@ public OverAllQueryMetrics combine(OverAllQueryMetrics metric) {
numParallelScans.combine(metric.numParallelScans);
wallClockTimeMS.combine(metric.wallClockTimeMS);
resultSetTimeMS.combine(metric.resultSetTimeMS);
queryCompilerTimeNS.combine(metric.queryCompilerTimeNS);
queryOptimizerTimeNS.combine(metric.queryOptimizerTimeNS);
queryResultItrSetTimeNs.combine(metric.queryResultItrSetTimeNs);
return this;
}

@@ -49,6 +49,9 @@
import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
import static org.apache.phoenix.monitoring.MetricType.TASK_QUEUE_WAIT_TIME;
import static org.apache.phoenix.monitoring.MetricType.UPSERT_COMMIT_TIME;
import static org.apache.phoenix.monitoring.MetricType.QUERY_COMPILER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_OPTIMIZER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_RESULT_ITR_SET_TIME_NS;
import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
import static org.junit.Assert.assertEquals;
@@ -973,8 +976,12 @@ private void changeInternalStateForTesting(PhoenixResultSet rs) throws NoSuchFie
}

private void assertReadMetricValuesForSelectSql(ArrayList<Long> numRows, ArrayList<Long> numExpectedTasks,
PhoenixResultSet resultSetBeingTested, Set<String> expectedTableNames) throws SQLException {
PhoenixResultSet resultSetBeingTested, Set<String> expectedTableNames) throws Exception {
Map<String, Map<MetricType, Long>> metrics = PhoenixRuntime.getRequestReadMetricInfo(resultSetBeingTested);
Map<MetricType, Long> overallReadMetrics = PhoenixRuntime.getOverAllReadRequestMetricInfo(resultSetBeingTested);
assertTrue("Query compiler time should be greater than zero", overallReadMetrics.get(QUERY_COMPILER_TIME_NS) > 0);
assertTrue("Query optimizer time should be greater than zero", overallReadMetrics.get(QUERY_OPTIMIZER_TIME_NS) > 0);
assertTrue("Query Result Itr time should be greater than zero", overallReadMetrics.get(QUERY_RESULT_ITR_SET_TIME_NS) > 0);
int counter = 0;
for (Entry<String, Map<MetricType, Long>> entry : metrics.entrySet()) {
String tableName = entry.getKey();
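For context, a hedged client-side sketch (not part of this PR) of how the new stage timings could be read after a query is drained, via the PhoenixRuntime.getOverAllReadRequestMetricInfo helper that the IT above also uses; the JDBC URL and table are placeholders, and request-level metrics are assumed to be enabled on the connection:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Map;
import org.apache.phoenix.jdbc.PhoenixResultSet;
import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.util.PhoenixRuntime;

public class StageTimingExample {
    // Illustrative helper: run a query, drain it, then print the new per-stage timings.
    static void printStageTimings(String jdbcUrl, String query) throws Exception {
        try (Connection conn = DriverManager.getConnection(jdbcUrl);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(query)) {
            while (rs.next()) {
                // drain all rows so the overall read metrics are fully populated
            }
            // Phoenix statements return PhoenixResultSet, so this cast is expected to hold.
            Map<MetricType, Long> overall =
                    PhoenixRuntime.getOverAllReadRequestMetricInfo((PhoenixResultSet) rs);
            System.out.println("compile (ns):  " + overall.get(MetricType.QUERY_COMPILER_TIME_NS));
            System.out.println("optimize (ns): " + overall.get(MetricType.QUERY_OPTIMIZER_TIME_NS));
            System.out.println("iterator (ns): " + overall.get(MetricType.QUERY_RESULT_ITR_SET_TIME_NS));
        }
    }
}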
@@ -33,6 +33,9 @@
import static org.apache.phoenix.monitoring.MetricType.QUERY_TIMEOUT_COUNTER;
import static org.apache.phoenix.monitoring.MetricType.RESULT_SET_TIME_MS;
import static org.apache.phoenix.monitoring.MetricType.WALL_CLOCK_TIME_MS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_COMPILER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_OPTIMIZER_TIME_NS;
import static org.apache.phoenix.monitoring.MetricType.QUERY_RESULT_ITR_SET_TIME_NS;
import static org.junit.Assert.assertEquals;

public class OverAllQueryMetricsTest {
@@ -105,20 +108,20 @@ public void testPublish() {
overAllQueryMetrics.startQuery();
overAllQueryMetrics.startResultSetWatch();
assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts,
queryFailures, cacheRefreshesDueToSplits, 0L);
queryFailures, cacheRefreshesDueToSplits, 0L, delta, delta, delta);
overAllQueryMetrics.endQuery();
overAllQueryMetrics.stopResultSetWatch();
// expect 2 * delta since we call both endQuery() and stopResultSetWatch()
assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts,
queryFailures, cacheRefreshesDueToSplits, 2*delta);
queryFailures, cacheRefreshesDueToSplits, 2*delta, delta, delta, delta);
}

@Test
public void testReset() {
assertPublishedMetrics(overAllQueryMetrics.publish(), numParallelScans, queryTimeouts,
queryFailures, cacheRefreshesDueToSplits, 0L);
queryFailures, cacheRefreshesDueToSplits, 0L, delta, delta, delta);
overAllQueryMetrics.reset();
assertPublishedMetrics(overAllQueryMetrics.publish(), 0L, 0L, 0L, 0L, 0L);
assertPublishedMetrics(overAllQueryMetrics.publish(), 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L);
}

@Test
@@ -133,7 +136,7 @@ public void testCombine() {
OverAllQueryMetrics finalMetricObj = this.overAllQueryMetrics.combine(otherMetrics);
assertPublishedMetrics(finalMetricObj.publish(), numParallelScans + otherNumParallelScans,
queryTimeouts + otherQueryTimeouts, queryFailures + otherQueryFailures,
cacheRefreshesDueToSplits + otherCacheRefreshes, 0L);
cacheRefreshesDueToSplits + otherCacheRefreshes, 0L, 2*delta, 2*delta, 2*delta);
}

@Test
@@ -159,6 +162,9 @@ private void populateMetrics(OverAllQueryMetrics metricsObj, long numParallelSca
for (int i = 0; i < cacheRefreshesDueToSplitsSetting; i++) {
metricsObj.cacheRefreshedDueToSplits();
}
metricsObj.setQueryCompilerTimeNS(delta);
metricsObj.setQueryOptimizerTimeNS(delta);
metricsObj.setQueryResultItrSetTimeNs(delta);
}

private void assertPublishedMetrics(
@@ -167,13 +173,19 @@ private void assertPublishedMetrics(
final long expectedQueryTimeouts,
final long expectedQueryFailures,
final long expectedCacheRefreshes,
final long expectedElapsedTime) {
final long expectedElapsedTime,
final long expectedQueryCompilerTimeNs,
final long expectedQueryOptimizerTimeNs,
final long expectedQueryResultItrSetTimeNs) {
assertEquals(expectedNumParallelScans, (long)metrics.get(NUM_PARALLEL_SCANS));
assertEquals(expectedQueryTimeouts, (long)metrics.get(QUERY_TIMEOUT_COUNTER));
assertEquals(expectedQueryFailures, (long)metrics.get(QUERY_FAILED_COUNTER));
assertEquals(expectedCacheRefreshes, (long)metrics.get(CACHE_REFRESH_SPLITS_COUNTER));
assertEquals(expectedElapsedTime, (long)metrics.get(WALL_CLOCK_TIME_MS));
assertEquals(expectedElapsedTime, (long)metrics.get(RESULT_SET_TIME_MS));
assertEquals(expectedQueryCompilerTimeNs, (long)metrics.get(QUERY_COMPILER_TIME_NS));
assertEquals(expectedQueryOptimizerTimeNs, (long)metrics.get(QUERY_OPTIMIZER_TIME_NS));
assertEquals(expectedQueryResultItrSetTimeNs, (long)metrics.get(QUERY_RESULT_ITR_SET_TIME_NS));
}

}